Columns: text (string, 5 to 22M chars), id (string, 12 to 177 chars), metadata (dict), __index_level_0__ (int64, 0 to 1.37k)
# flake8: noqa
from .binarizer import MagnitudeBinarizer, ThresholdBinarizer, TopKBinarizer
from .masked_nn import MaskedLinear
AdaMix/examples/research_projects/movement-pruning/emmental/modules/__init__.py/0
{ "file_path": "AdaMix/examples/research_projects/movement-pruning/emmental/modules/__init__.py", "repo_id": "AdaMix", "token_count": 43 }
41
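The `__init__.py` above exposes the movement-pruning binarizers (`MagnitudeBinarizer`, `ThresholdBinarizer`, `TopKBinarizer`) and `MaskedLinear`. As a rough illustration of the idea behind a top-k binarizer (a hypothetical sketch, not the repo's implementation), real-valued importance scores are turned into a 0/1 mask while gradients pass straight through:

```python
import torch


class STETopKBinarizer(torch.autograd.Function):
    """Illustrative only: keep the top `keep_ratio` fraction of scores as a 1/0 mask."""

    @staticmethod
    def forward(ctx, scores: torch.Tensor, keep_ratio: float) -> torch.Tensor:
        k = max(1, int(keep_ratio * scores.numel()))
        threshold = scores.flatten().topk(k).values.min()  # smallest score that is still kept
        return (scores >= threshold).to(scores.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        # Straight-through estimator: gradients flow to the scores unchanged.
        return grad_output, None


# Example: keep 30% of the entries of a score tensor.
scores = torch.randn(4, 4, requires_grad=True)
mask = STETopKBinarizer.apply(scores, 0.3)
```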
#!/usr/bin/env python import argparse import glob import logging import os import sys import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import numpy as np import pytorch_lightning as pl import torch from torch.utils.data import DataLoader from callbacks import Seq2SeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback from transformers import MBartTokenizer, T5ForConditionalGeneration from transformers.models.bart.modeling_bart import shift_tokens_right from utils import ( ROUGE_KEYS, LegacySeq2SeqDataset, Seq2SeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, check_output_dir, flatten_list, freeze_embeds, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, ) # need the parent dir module sys.path.insert(2, str(Path(__file__).resolve().parents[1])) from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa logger = logging.getLogger(__name__) class SummarizationModule(BaseTransformer): mode = "summarization" loss_names = ["loss"] metric_names = ROUGE_KEYS default_val_metric = "rouge2" def __init__(self, hparams, **kwargs): if hparams.sortish_sampler and hparams.gpus > 1: hparams.replace_sampler_ddp = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training") if hparams.sortish_sampler: raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously") super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs) use_task_specific_params(self.model, "summarization") save_git_info(self.hparams.output_dir) self.metrics_save_path = Path(self.output_dir) / "metrics.json" self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" pickle_save(self.hparams, self.hparams_save_path) self.step_count = 0 self.metrics = defaultdict(list) self.model_type = self.config.model_type self.vocab_size = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size self.dataset_kwargs: dict = dict( data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=self.model.config.prefix or "", ) n_observations_per_split = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.target_lens = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" if self.hparams.freeze_embeds: freeze_embeds(self.model) if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder()) assert_all_frozen(self.model.get_encoder()) self.hparams.git_sha = get_git_info()["repo_sha"] self.num_workers = hparams.num_workers self.decoder_start_token_id = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer): self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang] self.model.config.decoder_start_token_id = self.decoder_start_token_id self.dataset_class = ( Seq2SeqDataset if hasattr(self.tokenizer, "prepare_seq2seq_batch") else LegacySeq2SeqDataset ) self.already_saved_batch = False 
self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams if self.hparams.eval_max_gen_length is not None: self.eval_max_length = self.hparams.eval_max_gen_length else: self.eval_max_length = self.model.config.max_length self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric def save_readable_batch(self, batch: Dict[str, torch.Tensor]) -> Dict[str, List[str]]: """A debugging utility""" readable_batch = { k: self.tokenizer.batch_decode(v.tolist()) if "mask" not in k else v.shape for k, v in batch.items() } save_json(readable_batch, Path(self.output_dir) / "text_batch.json") save_json({k: v.tolist() for k, v in batch.items()}, Path(self.output_dir) / "tok_batch.json") self.already_saved_batch = True return readable_batch def forward(self, input_ids, **kwargs): return self.model(input_ids, **kwargs) def ids_to_clean_text(self, generated_ids: List[int]): gen_text = self.tokenizer.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) return lmap(str.strip, gen_text) def _step(self, batch: dict) -> Tuple: pad_token_id = self.tokenizer.pad_token_id src_ids, src_mask = batch["input_ids"], batch["attention_mask"] tgt_ids = batch["labels"] if isinstance(self.model, T5ForConditionalGeneration): decoder_input_ids = self.model._shift_right(tgt_ids) else: decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id) if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero batch["decoder_input_ids"] = decoder_input_ids self.save_readable_batch(batch) outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False) lm_logits = outputs["logits"] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id) assert lm_logits.shape[-1] == self.vocab_size loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1)) else: lprobs = torch.nn.functional.log_softmax(lm_logits, dim=-1) loss, nll_loss = label_smoothed_nll_loss( lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id ) return (loss,) @property def pad(self) -> int: return self.tokenizer.pad_token_id def training_step(self, batch, batch_idx) -> Dict: loss_tensors = self._step(batch) logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)} # tokens per batch logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum() logs["bs"] = batch["input_ids"].shape[0] logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum() logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean() # TODO(SS): make a wandb summary metric for this return {"loss": loss_tensors[0], "log": logs} def validation_step(self, batch, batch_idx) -> Dict: return self._generative_step(batch) def validation_epoch_end(self, outputs, prefix="val") -> Dict: self.step_count += 1 losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} loss = losses["loss"] generative_metrics = { k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] } metric_val = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss) generative_metrics.update({k: v.item() for k, v in losses.items()}) 
losses.update(generative_metrics) all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} all_metrics["step_count"] = self.step_count self.metrics[prefix].append(all_metrics) # callback writes this to self.metrics_save_path preds = flatten_list([x["preds"] for x in outputs]) return { "log": all_metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metric_tensor, } def calc_generative_metrics(self, preds, target) -> Dict: return calculate_rouge(preds, target) def _generative_step(self, batch: dict) -> dict: t0 = time.time() # parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens') generated_ids = self.model.generate( batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, ) gen_time = (time.time() - t0) / batch["input_ids"].shape[0] preds: List[str] = self.ids_to_clean_text(generated_ids) target: List[str] = self.ids_to_clean_text(batch["labels"]) loss_tensors = self._step(batch) base_metrics = {name: loss for name, loss in zip(self.loss_names, loss_tensors)} rouge: Dict = self.calc_generative_metrics(preds, target) summ_len = np.mean(lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge) return base_metrics def test_step(self, batch, batch_idx): return self._generative_step(batch) def test_epoch_end(self, outputs): return self.validation_epoch_end(outputs, prefix="test") def get_dataset(self, type_path) -> Seq2SeqDataset: n_obs = self.n_obs[type_path] max_target_length = self.target_lens[type_path] dataset = self.dataset_class( self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, ) return dataset def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: dataset = self.get_dataset(type_path) if self.hparams.sortish_sampler and type_path != "test" and type_path != "val": sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1) return DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, ) elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val": batch_sampler = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 ) return DataLoader( dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, # shuffle=False, num_workers=self.num_workers, # batch_size=None, ) else: return DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, ) def train_dataloader(self) -> DataLoader: dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) return dataloader def val_dataloader(self) -> DataLoader: return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) def test_dataloader(self) -> DataLoader: return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) add_generic_args(parser, root_dir) parser.add_argument( "--max_source_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. 
Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--max_target_length", default=56, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--val_max_target_length", default=142, # these defaults are optimized for CNNDM. For xsum, see README.md. type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--test_max_target_length", default=142, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--freeze_encoder", action="store_true") parser.add_argument("--freeze_embeds", action="store_true") parser.add_argument("--sortish_sampler", action="store_true", default=False) parser.add_argument("--overwrite_output_dir", action="store_true", default=False) parser.add_argument("--max_tokens_per_batch", type=int, default=None) parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument( "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all." ) parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) parser.add_argument("--src_lang", type=str, default="", required=False) parser.add_argument("--tgt_lang", type=str, default="", required=False) parser.add_argument("--eval_beams", type=int, default=None, required=False) parser.add_argument( "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None] ) parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens") parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save") parser.add_argument( "--early_stopping_patience", type=int, default=-1, required=False, help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. 
So val_check_interval will effect it.", ) return parser class TranslationModule(SummarizationModule): mode = "translation" loss_names = ["loss"] metric_names = ["bleu"] default_val_metric = "bleu" def __init__(self, hparams, **kwargs): super().__init__(hparams, **kwargs) self.dataset_kwargs["src_lang"] = hparams.src_lang self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang def calc_generative_metrics(self, preds, target) -> dict: return calculate_bleu(preds, target) def main(args, model=None) -> SummarizationModule: Path(args.output_dir).mkdir(exist_ok=True) check_output_dir(args, expected_items=3) if model is None: if "summarization" in args.task: model: SummarizationModule = SummarizationModule(args) else: model: SummarizationModule = TranslationModule(args) dataset = Path(args.data_dir).name if ( args.logger_name == "default" or args.fast_dev_run or str(args.output_dir).startswith("/tmp") or str(args.output_dir).startswith("/var") ): logger = True # don't pollute wandb logs unnecessarily elif args.logger_name == "wandb": from pytorch_lightning.loggers import WandbLogger project = os.environ.get("WANDB_PROJECT", dataset) logger = WandbLogger(name=model.output_dir.name, project=project) elif args.logger_name == "wandb_shared": from pytorch_lightning.loggers import WandbLogger logger = WandbLogger(name=model.output_dir.name, project=f"hf_{dataset}") if args.early_stopping_patience >= 0: es_callback = get_early_stopping_callback(model.val_metric, args.early_stopping_patience) else: es_callback = False lower_is_better = args.val_metric == "loss" trainer: pl.Trainer = generic_train( model, args, logging_callback=Seq2SeqLoggingCallback(), checkpoint_callback=get_checkpoint_callback( args.output_dir, model.val_metric, args.save_top_k, lower_is_better ), early_stopping_callback=es_callback, logger=logger, ) pickle_save(model.hparams, model.output_dir / "hparams.pkl") if not args.do_predict: return model model.hparams.test_checkpoint = "" checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "*.ckpt"), recursive=True))) if checkpoints: model.hparams.test_checkpoint = checkpoints[-1] trainer.resume_from_checkpoint = checkpoints[-1] trainer.logger.log_hyperparams(model.hparams) # test() without a model tests using the best checkpoint automatically trainer.test() return model if __name__ == "__main__": parser = argparse.ArgumentParser() parser = pl.Trainer.add_argparse_args(parser) parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() main(args)
AdaMix/examples/research_projects/seq2seq-distillation/finetune.py/0
{ "file_path": "AdaMix/examples/research_projects/seq2seq-distillation/finetune.py", "repo_id": "AdaMix", "token_count": 8351 }
42
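`_step` in the file above calls a `label_smoothed_nll_loss` helper imported from the project's `utils` whenever `--label_smoothing > 0`. A hedged sketch of what such a helper typically computes (assumed here, not copied from the repo): mix the negative log-likelihood of the gold token with a uniform penalty over the vocabulary, zeroing out padding positions.

```python
import torch


def label_smoothed_nll_loss(lprobs: torch.Tensor, target: torch.Tensor, epsilon: float, ignore_index: int):
    """lprobs: (batch, seq, vocab) log-probabilities; target: (batch, seq) token ids padded with ignore_index."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)   # log-prob of the gold token
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)   # uniform penalty over the vocabulary
    pad_mask = target.eq(ignore_index)
    nll_loss = nll_loss.masked_fill(pad_mask, 0.0).sum()
    smooth_loss = smooth_loss.masked_fill(pad_mask, 0.0).sum()
    eps_i = epsilon / lprobs.size(-1)
    return (1.0 - epsilon) * nll_loss + eps_i * smooth_loss, nll_loss
```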
#!/usr/bin/env bash
python run_asr.py \
--output_dir="./wav2vec2-base-100h" \
--num_train_epochs="30" \
--per_device_train_batch_size="32" \
--per_device_eval_batch_size="32" \
--evaluation_strategy="steps" \
--save_total_limit="3" \
--save_steps="500" \
--eval_steps="100" \
--logging_steps="50" \
--learning_rate="5e-4" \
--warmup_steps="3000" \
--model_name_or_path="facebook/wav2vec2-base" \
--fp16 \
--dataset_name="librispeech_asr" \
--dataset_config_name="clean" \
--train_split_name="train.100" \
--preprocessing_num_workers="32" \
--group_by_length \
--freeze_feature_extractor
AdaMix/examples/research_projects/wav2vec2/finetune_base_100.sh/0
{ "file_path": "AdaMix/examples/research_projects/wav2vec2/finetune_base_100.sh", "repo_id": "AdaMix", "token_count": 249 }
43
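The last flag in the script, `--freeze_feature_extractor`, keeps the convolutional feature encoder of wav2vec 2.0 frozen so that only the transformer layers and the CTC head are fine-tuned. In model code this corresponds roughly to the following sketch (illustrative, not an excerpt from `run_asr.py`):

```python
from transformers import Wav2Vec2ForCTC

model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base")
# Disable gradient updates for the convolutional feature encoder.
model.freeze_feature_extractor()
print(sum(p.numel() for p in model.parameters() if p.requires_grad), "trainable parameters")
```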
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import sys import unittest from transformers.integrations import is_deepspeed_available from transformers.testing_utils import ( CaptureStd, TestCasePlus, execute_subprocess_async, get_gpu_count, mockenv_context, require_torch_gpu, require_torch_multi_gpu, slow, ) from transformers.trainer_utils import set_seed bindir = os.path.abspath(os.path.dirname(__file__)) sys.path.append(f"{bindir}/../../../tests") from test_trainer import get_regression_trainer # noqa set_seed(42) MBART_TINY = "sshleifer/tiny-mbart" def load_json(path): with open(path) as f: return json.load(f) # a candidate for testing_utils def require_deepspeed(test_case): """ Decorator marking a test that requires deepspeed """ if not is_deepspeed_available(): return unittest.skip("test requires deepspeed")(test_case) else: return test_case @require_deepspeed @require_torch_gpu class TrainerIntegrationDeepSpeed(TestCasePlus): """ This class is for testing directly via get_regression_trainer """ def setUp(self): super().setUp() self.dist_env_1_gpu = dict( MASTER_ADDR="localhost", MASTER_PORT="10999", RANK="0", LOCAL_RANK="0", WORLD_SIZE="1" ) self.ds_config_file = f"{self.test_file_dir_str}/ds_config.json" def test_fake_notebook_no_launcher(self): # this setup emulates a notebook where a launcher needs to be emulated by hand with CaptureStd() as cs: with mockenv_context(**self.dist_env_1_gpu): trainer = get_regression_trainer(local_rank=0, deepspeed=self.ds_config_file) trainer.train() assert "DeepSpeed info" in cs.out, "expected DeepSpeed logger output but got none" def test_early_get_last_lr(self): # with deepspeed's fp16 and dynamic loss scale enabled the optimizer/scheduler steps may # not run for the first few dozen steps while loss scale is too large, and thus during # that time `get_last_lr` will fail if called during that warm up stage, # # setting `logging_steps=1` forces an early `trainer._maybe_log_save_evaluate()` which calls # `self.lr_scheduler.get_last_lr()` and originally it'd fail on the very first step. with mockenv_context(**self.dist_env_1_gpu): a = b = 0.0 trainer = get_regression_trainer( a=a, b=b, local_rank=0, train_len=8, deepspeed=self.ds_config_file, per_device_train_batch_size=8, logging_steps=1, ) trainer.train() no_grad_accum_a = trainer.model.a.item() # it's enough that train didn't fail for this test, but we must check that # optimizer/scheduler didn't run (since if it did this test isn't testing the right thing) self.assertEqual(no_grad_accum_a, a) def test_gradient_accumulation(self): # this test measures that we get identical weights and similar loss with: # 1. per_device_train_batch_size=8, gradient_accumulation_steps=1 # 2. 
per_device_train_batch_size=4, gradient_accumulation_steps=2 # since the 2nd should produce the effective batch of 1st, with the same results # # I can get an identical loss for a small train_len=32, plus the power of the initial # dynamic loss scale value set to: # "fp16.initial_scale_power": 1 # plus having the same WarmupLR's warmup_min_lr == warmup_max_lr in the config file # but for some reason going to train_len=64 the weights, weights start to mismatch with this setup. # the culprit seems to be `initial_scale_power` - putting it back to its default 32 keeps the weights identical train_len = 64 a = b = 0.0 with mockenv_context(**self.dist_env_1_gpu): no_grad_accum_trainer = get_regression_trainer( a=a, b=b, local_rank=0, train_len=train_len, deepspeed=self.ds_config_file, per_device_train_batch_size=8, gradient_accumulation_steps=1, ) no_grad_accum_result = no_grad_accum_trainer.train() no_grad_accum_loss = no_grad_accum_result.training_loss no_grad_accum_a = no_grad_accum_trainer.model.a.item() no_grad_accum_b = no_grad_accum_trainer.model.b.item() # make sure the optimizer kicked in - if it hasn't changed from the original value of a then make train_len bigger self.assertNotEqual(no_grad_accum_a, a) with mockenv_context(**self.dist_env_1_gpu): yes_grad_accum_trainer = get_regression_trainer( a=a, b=b, local_rank=0, train_len=train_len, deepspeed=self.ds_config_file, per_device_train_batch_size=4, gradient_accumulation_steps=2, ) yes_grad_accum_result = yes_grad_accum_trainer.train() yes_grad_accum_loss = yes_grad_accum_result.training_loss yes_grad_accum_a = yes_grad_accum_trainer.model.a.item() yes_grad_accum_b = yes_grad_accum_trainer.model.b.item() self.assertNotEqual(yes_grad_accum_a, a) # training with half the batch size but accumulation steps as 2 should give the same weights self.assertEqual(no_grad_accum_a, yes_grad_accum_a) self.assertEqual(no_grad_accum_b, yes_grad_accum_b) # see the note above how to get identical loss on a small bs self.assertAlmostEqual(no_grad_accum_loss, yes_grad_accum_loss, places=5) @slow @require_deepspeed @require_torch_gpu class TestDeepSpeed(TestCasePlus): """ This class is for testing via an external script """ @require_torch_multi_gpu def test_basic_distributed(self): self.run_quick(distributed=True) def test_do_eval_no_train(self): # we should not fail if train is skipped output_dir = self.run_trainer( eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=False, extra_args_str="--do_eval", remove_args_str="--do_train", ) val_metrics = load_json(os.path.join(output_dir, "eval_results.json")) assert "eval_bleu" in val_metrics # XXX: need to do better validation beyond just that the run was successful def run_quick(self, distributed=True, extra_args_str=None, remove_args_str=None): output_dir = self.run_trainer( eval_steps=1, max_len=12, model_name=MBART_TINY, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, remove_args_str=remove_args_str, ) train_metrics = load_json(os.path.join(output_dir, "train_results.json")) assert "train_runtime" in train_metrics def run_trainer( self, eval_steps: int, max_len: str, model_name: str, num_train_epochs: int, distributed: bool = True, extra_args_str: str = None, remove_args_str: str = None, ): data_dir = self.examples_dir / "test_data/wmt_en_ro" output_dir = self.get_auto_remove_tmp_dir() args = f""" --model_name_or_path {model_name} --train_file {data_dir}/train.json --validation_file {data_dir}/val.json --output_dir {output_dir} --overwrite_output_dir 
--max_train_samples 8 --max_val_samples 8 --max_source_length {max_len} --max_target_length {max_len} --val_max_target_length {max_len} --do_train --num_train_epochs {str(num_train_epochs)} --per_device_train_batch_size 4 --learning_rate 3e-3 --warmup_steps 8 --predict_with_generate --logging_steps 0 --save_steps {str(eval_steps)} --group_by_length --label_smoothing_factor 0.1 --adafactor --target_lang ro_RO --source_lang en_XX """.split() if extra_args_str is not None: args.extend(extra_args_str.split()) if remove_args_str is not None: remove_args = remove_args_str.split() args = [x for x in args if x not in remove_args] ds_args = f"--deepspeed {self.test_file_dir_str}/ds_config.json".split() script = [f"{self.examples_dir_str}/seq2seq/run_translation.py"] num_gpus = get_gpu_count() if distributed else 1 launcher = f"deepspeed --num_gpus {num_gpus}".split() cmd = launcher + script + args + ds_args # keep for quick debug # print(" ".join([f"PYTHONPATH={self.src_dir_str}"] +cmd)); die execute_subprocess_async(cmd, env=self.get_env()) return output_dir
AdaMix/examples/tests/deepspeed/test_deepspeed.py/0
{ "file_path": "AdaMix/examples/tests/deepspeed/test_deepspeed.py", "repo_id": "AdaMix", "token_count": 4436 }
44
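The `test_gradient_accumulation` test above relies on the fact that, with a mean-reduced loss, one step on a batch of 8 is numerically equivalent to two accumulated micro-batches of 4 whose losses are each divided by the number of accumulation steps. A small, self-contained PyTorch check of that equivalence (independent of DeepSpeed) looks like this:

```python
import torch

torch.manual_seed(0)
model = torch.nn.Linear(4, 1)
data, target = torch.randn(8, 4), torch.randn(8, 1)
loss_fn = torch.nn.MSELoss()  # mean reduction

# (1) a single gradient computed from the full batch of 8
model.zero_grad()
loss_fn(model(data), target).backward()
full_batch_grad = model.weight.grad.clone()

# (2) two micro-batches of 4 with gradient accumulation (divide each loss by the number of steps)
model.zero_grad()
for x, y in zip(data.split(4), target.split(4)):
    (loss_fn(model(x), y) / 2).backward()
accumulated_grad = model.weight.grad.clone()

print(torch.allclose(full_batch_grad, accumulated_grad, atol=1e-6))  # True
```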
<jupyter_start><jupyter_text>How to export 🤗 Transformers Models to ONNX? [ONNX](http://onnx.ai/) is an open format for machine learning models. It lets you save your neural network's computation graph in a framework-agnostic way, which might be particularly helpful when deploying deep learning models. Indeed, businesses might have other requirements _(languages, hardware, ...)_ for which the training framework might not be the best suited in inference scenarios. In that context, having a representation of the actual computation graph that can be shared across various business units and logic across an organization might be a desirable component. Along with the serialization format, ONNX also provides a runtime library which allows efficient and hardware-specific execution of the ONNX graph. This is done through the [onnxruntime](https://microsoft.github.io/onnxruntime/) project and already includes collaborations with many hardware vendors to seamlessly deploy models on various platforms. Through this notebook we'll walk you through the process of converting a PyTorch or TensorFlow transformers model to the [ONNX](http://onnx.ai/) format and leverage [onnxruntime](https://microsoft.github.io/onnxruntime/) to run inference tasks on models from 🤗 __transformers__ Exporting 🤗 transformers model to ONNX---Exporting models _(either PyTorch or TensorFlow)_ is easily achieved through the conversion tool provided as part of the 🤗 __transformers__ repository. Under the hood the process is essentially the following: 1. Allocate the model from transformers (**PyTorch or TensorFlow**) 2. Forward dummy inputs through the model so that **ONNX** can record the set of operations executed 3. Optionally define dynamic axes on input and output tensors 4. Save the graph along with the network parameters<jupyter_code>import sys !{sys.executable} -m pip install --upgrade git+https://github.com/huggingface/transformers !{sys.executable} -m pip install --upgrade torch==1.6.0+cpu torchvision==0.7.0+cpu -f https://download.pytorch.org/whl/torch_stable.html !{sys.executable} -m pip install --upgrade onnxruntime==1.4.0 !{sys.executable} -m pip install -i https://test.pypi.org/simple/ ort-nightly !{sys.executable} -m pip install --upgrade onnxruntime-tools !rm -rf onnx/ from pathlib import Path from transformers.convert_graph_to_onnx import convert # Handles all the above steps for you convert(framework="pt", model="bert-base-cased", output=Path("onnx/bert-base-cased.onnx"), opset=11) # Tensorflow # convert(framework="tf", model="bert-base-cased", output="onnx/bert-base-cased.onnx", opset=11)<jupyter_output>loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json from cache at /home/mfuntowicz/.cache/torch/transformers/b945b69218e98b3e2c95acf911789741307dec43c698d35fad11c1ae28bda352.9da767be51e1327499df13488672789394e2ca38b877837e52618a67d7002391 Model config BertConfig { "architectures": [ "BertForMaskedLM" ], "attention_probs_dropout_prob": 0.1, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "type_vocab_size": 2, "vocab_size": 28996 }<jupyter_text>How to leverage the runtime for inference over an ONNX graph---As mentioned in the introduction, **ONNX** is a serialization format and many side projects can load the saved graph and run the 
actual computations from it. Here, we'll focus on the official [onnxruntime](https://microsoft.github.io/onnxruntime/). The runtime is implemented in C++ for performance reasons and provides API/Bindings for C++, C, C#, Java and Python. In the case of this notebook, we will use the Python API to highlight how to load a serialized **ONNX** graph and run inference workloads on various backends through **onnxruntime**. **onnxruntime** is available on PyPI: - onnxruntime: ONNX + MLAS (Microsoft Linear Algebra Subprograms) - onnxruntime-gpu: ONNX + MLAS + CUDA<jupyter_code>!pip install transformers onnxruntime-gpu onnx psutil matplotlib<jupyter_output>Requirement already satisfied: transformers in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (3.0.2) Requirement already satisfied: onnxruntime-gpu in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (1.3.0) Requirement already satisfied: onnx in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (1.7.0) Requirement already satisfied: psutil in /home/mfuntowicz/.local/lib/python3.8/site-packages/psutil-5.7.0-py3.8-linux-x86_64.egg (5.7.0) Requirement already satisfied: matplotlib in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (3.3.1) Requirement already satisfied: tqdm>=4.27 in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers) (4.46.1) Requirement already satisfied: numpy in /home/mfuntowicz/miniconda3/envs/pytorch/lib/python3.8/site-packages (from transformers) (1.18.1) Requirement already satisfied: sacremoses in /home/mfuntowicz/miniconda3/envs/pytorch/lib/pyt[...]<jupyter_text>Preparing for an Inference Session---Inference is done using a specific backend definition which turns on hardware-specific optimizations of the graph. 
Optimizations are basically of three kinds: - **Constant Folding**: Convert static variables to constants in the graph - **Deadcode Elimination**: Remove nodes never accessed in the graph - **Operator Fusing**: Merge multiple instructions into one (Linear -> ReLU can be fused to be LinearReLU) ONNX Runtime automatically applies most optimizations by setting specific `SessionOptions`. Note: some of the latest optimizations that are not yet integrated into ONNX Runtime are available in an [optimization script](https://github.com/microsoft/onnxruntime/tree/master/onnxruntime/python/tools/transformers) that tunes models for the best performance.<jupyter_code># # An optional step unless # # you want to get a model with mixed precision for perf acceleration on newer GPUs # # or you are working with Tensorflow(tf.keras) models or pytorch models other than bert # !pip install onnxruntime-tools # from onnxruntime_tools import optimizer # # Mixed precision conversion for bert-base-cased model converted from Pytorch # optimized_model = optimizer.optimize_model("bert-base-cased.onnx", model_type='bert', num_heads=12, hidden_size=768) # optimized_model.convert_model_float32_to_float16() # optimized_model.save_model_to_file("bert-base-cased.onnx") # # optimizations for bert-base-cased model converted from Tensorflow(tf.keras) # optimized_model = optimizer.optimize_model("bert-base-cased.onnx", model_type='bert_keras', num_heads=12, hidden_size=768) # optimized_model.save_model_to_file("bert-base-cased.onnx") # optimize transformer-based models with onnxruntime-tools from onnxruntime_tools import optimizer from onnxruntime_tools.transformers.onnx_model_bert import BertOptimizationOptions # disable embedding layer norm optimization for better model size reduction opt_options = BertOptimizationOptions('bert') opt_options.enable_embed_layer_norm = False opt_model = optimizer.optimize_model( 'onnx/bert-base-cased.onnx', 'bert', num_heads=12, hidden_size=768, optimization_options=opt_options) opt_model.save_model_to_file('bert.opt.onnx') from os import environ from psutil import cpu_count # Constants from the performance optimization available in onnxruntime # It needs to be done before importing onnxruntime environ["OMP_NUM_THREADS"] = str(cpu_count(logical=True)) environ["OMP_WAIT_POLICY"] = 'ACTIVE' from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions, get_all_providers from contextlib import contextmanager from dataclasses import dataclass from time import time from tqdm import trange def create_model_for_provider(model_path: str, provider: str) -> InferenceSession: assert provider in get_all_providers(), f"provider {provider} not found, {get_all_providers()}" # Few properties that might have an impact on performances (provided by MS) options = SessionOptions() options.intra_op_num_threads = 1 options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL # Load the model as a graph and prepare the CPU backend session = InferenceSession(model_path, options, providers=[provider]) session.disable_fallback() return session @contextmanager def track_infer_time(buffer: [int]): start = time() yield end = time() buffer.append(end - start) @dataclass class OnnxInferenceResult: model_inference_time: [int] optimized_model_path: str<jupyter_output><empty_output><jupyter_text>Forwarding through our optimized ONNX model running on CPU---When the model is loaded for inference over a specific provider, for instance **CPUExecutionProvider** as above, an optimized graph can be saved. 
This graph might include various optimizations, and you might be able to see some **higher-level** operations in the graph _(through [Netron](https://github.com/lutzroeder/Netron) for instance)_ such as: - **EmbedLayerNormalization** - **Attention** - **FastGeLU** These operations are an example of the kind of optimization **onnxruntime** is doing, for instance here gathering multiple operations into a bigger one _(Operator Fusing)_.<jupyter_code>from transformers import BertTokenizerFast tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased") cpu_model = create_model_for_provider("onnx/bert-base-cased.onnx", "CPUExecutionProvider") # Inputs are provided through numpy arrays model_inputs = tokenizer("My name is Bert", return_tensors="pt") inputs_onnx = {k: v.cpu().detach().numpy() for k, v in model_inputs.items()} # Run the model (None = get all the outputs) sequence, pooled = cpu_model.run(None, inputs_onnx) # Print information about outputs print(f"Sequence output: {sequence.shape}, Pooled output: {pooled.shape}")<jupyter_output>loading file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt from cache at /home/mfuntowicz/.cache/torch/transformers/5e8a2b4893d13790ed4150ca1906be5f7a03d6c4ddf62296c383f6db42814db2.e13dbb970cb325137104fb2e5f36fe865f27746c6b526f6352861b1980eb80b1<jupyter_text>Benchmarking PyTorch model_Note: PyTorch model benchmark is run on CPU_<jupyter_code>from transformers import BertModel PROVIDERS = { ("cpu", "PyTorch CPU"), # Uncomment this line to enable GPU benchmarking # ("cuda:0", "PyTorch GPU") } results = {} for device, label in PROVIDERS: # Move inputs to the correct device model_inputs_on_device = { arg_name: tensor.to(device) for arg_name, tensor in model_inputs.items() } # Add PyTorch to the providers model_pt = BertModel.from_pretrained("bert-base-cased").to(device) for _ in trange(10, desc="Warming up"): model_pt(**model_inputs_on_device) # Compute time_buffer = [] for _ in trange(100, desc=f"Tracking inference time on PyTorch"): with track_infer_time(time_buffer): model_pt(**model_inputs_on_device) # Store the result results[label] = OnnxInferenceResult( time_buffer, None )<jupyter_output>loading configuration file https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-config.json from cache at /home/mfuntowicz/.cache/torch/transformers/b945b69218e98b3e2c95acf911789741307dec43c698d35fad11c1ae28bda352.9da767be51e1327499df13488672789394e2ca38b877837e52618a67d7002391 Model config BertConfig { "architectures": [ "BertForMaskedLM" ], "attention_probs_dropout_prob": 0.1, "gradient_checkpointing": false, "hidden_act": "gelu", "hidden_dropout_prob": 0.1, "hidden_size": 768, "initializer_range": 0.02, "intermediate_size": 3072, "layer_norm_eps": 1e-12, "max_position_embeddings": 512, "model_type": "bert", "num_attention_heads": 12, "num_hidden_layers": 12, "pad_token_id": 0, "type_vocab_size": 2, "vocab_size": 28996 } loading weights file https://cdn.huggingface.co/bert-base-cased-pytorch_model.bin from cache at /home/mfuntowicz/.cache/torch/transformers/d8f11f061e407be64c4d5d7867ee61d1465263e24085cfa26abf183fdc830569.3fadbea36[...]<jupyter_text>Benchmarking PyTorch & ONNX on CPU_**Disclaimer: results may vary depending on the hardware used to run the model**_<jupyter_code>PROVIDERS = { ("CPUExecutionProvider", "ONNX CPU"), # Uncomment this line to enable GPU benchmarking # ("CUDAExecutionProvider", "ONNX GPU") } for provider, label in PROVIDERS: # Create the model with the specified provider model = 
create_model_for_provider("onnx/bert-base-cased.onnx", provider) # Keep track of the inference time time_buffer = [] # Warm up the model model.run(None, inputs_onnx) # Compute for _ in trange(100, desc=f"Tracking inference time on {provider}"): with track_infer_time(time_buffer): model.run(None, inputs_onnx) # Store the result results[label] = OnnxInferenceResult( time_buffer, model.get_session_options().optimized_model_filepath ) %matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np import os # Compute average inference time + std time_results = {k: np.mean(v.model_inference_time) * 1e3 for k, v in results.items()} time_results_std = np.std([v.model_inference_time for v in results.values()]) * 1000 plt.rcdefaults() fig, ax = plt.subplots(figsize=(16, 12)) ax.set_ylabel("Avg Inference time (ms)") ax.set_title("Average inference time (ms) for each provider") ax.bar(time_results.keys(), time_results.values(), yerr=time_results_std) plt.show()<jupyter_output><empty_output><jupyter_text>Quantization support from transformersQuantization enables the use of integers (_instead of floatting point_) arithmetic to run neural networks models faster. From a high-level point of view, quantization works as mapping the float32 ranges of values as int8 with the less loss in the performances of the model.Hugging Face provides a conversion tool as part of the transformers repository to easily export quantized models to ONNX Runtime. For more information, please refer to the following: - [Hugging Face Documentation on ONNX Runtime quantization supports](https://huggingface.co/transformers/master/serialization.htmlquantization)- [Intel's Explanation of Quantization](https://nervanasystems.github.io/distiller/quantization.html)With this method, the accuracy of the model remains at the same level than the full-precision model. If you want to see benchmarks on model performances, we recommand reading the [ONNX Runtime notebook](https://github.com/microsoft/onnxruntime/blob/master/onnxruntime/python/tools/quantization/notebooks/Bert-GLUE_OnnxRuntime_quantization.ipynb) on the subject. 
Benchmarking PyTorch quantized model<jupyter_code>import torch # Quantize model_pt_quantized = torch.quantization.quantize_dynamic( model_pt.to("cpu"), {torch.nn.Linear}, dtype=torch.qint8 ) # Warm up model_pt_quantized(**model_inputs) # Benchmark PyTorch quantized model time_buffer = [] for _ in trange(100): with track_infer_time(time_buffer): model_pt_quantized(**model_inputs) results["PyTorch CPU Quantized"] = OnnxInferenceResult( time_buffer, None )<jupyter_output>100%|██████████| 100/100 [00:01<00:00, 90.15it/s]<jupyter_text>Benchmarking ONNX quantized model<jupyter_code>from transformers.convert_graph_to_onnx import quantize # Transformers allows you to easily convert a float32 model to quantized int8 with ONNX Runtime quantized_model_path = quantize(Path("bert.opt.onnx")) # Then you just have to load it through ONNX Runtime as you would normally do quantized_model = create_model_for_provider(quantized_model_path.as_posix(), "CPUExecutionProvider") # Warm up the overall model to have a fair comparison outputs = quantized_model.run(None, inputs_onnx) # Evaluate performance time_buffer = [] for _ in trange(100, desc=f"Tracking inference time on CPUExecutionProvider with quantized model"): with track_infer_time(time_buffer): outputs = quantized_model.run(None, inputs_onnx) # Store the result results["ONNX CPU Quantized"] = OnnxInferenceResult( time_buffer, quantized_model_path )<jupyter_output>As of onnxruntime 1.4.0, models larger than 2GB will fail to quantize due to protobuf constraint. This limitation will be removed in the next release of onnxruntime. Quantized model has been written at bert.onnx: ✔<jupyter_text>Show the inference performance of each provider<jupyter_code>%matplotlib inline import matplotlib import matplotlib.pyplot as plt import numpy as np import os # Compute average inference time + std time_results = {k: np.mean(v.model_inference_time) * 1e3 for k, v in results.items()} time_results_std = np.std([v.model_inference_time for v in results.values()]) * 1000 plt.rcdefaults() fig, ax = plt.subplots(figsize=(16, 12)) ax.set_ylabel("Avg Inference time (ms)") ax.set_title("Average inference time (ms) for each provider") ax.bar(time_results.keys(), time_results.values(), yerr=time_results_std) plt.show()<jupyter_output><empty_output>
AdaMix/notebooks/04-onnx-export.ipynb/0
{ "file_path": "AdaMix/notebooks/04-onnx-export.ipynb", "repo_id": "AdaMix", "token_count": 5758 }
45
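The notebook above delegates the export to `transformers.convert_graph_to_onnx.convert`. For reference, a hand-rolled version of steps 2-4 (dummy forward pass, dynamic axes, serialization) built directly on `torch.onnx.export` might look roughly like the sketch below; the output names are assumptions for a plain `BertModel`, not something the notebook defines.

```python
import torch
from transformers import BertModel, BertTokenizerFast

tokenizer = BertTokenizerFast.from_pretrained("bert-base-cased")
model = BertModel.from_pretrained("bert-base-cased")
dummy = tokenizer("ONNX export example", return_tensors="pt")

torch.onnx.export(
    model,
    (dummy["input_ids"], dummy["attention_mask"]),         # dummy inputs recorded by the tracer
    "bert-base-cased.onnx",
    input_names=["input_ids", "attention_mask"],
    output_names=["last_hidden_state", "pooler_output"],   # assumed names for BertModel outputs
    dynamic_axes={
        "input_ids": {0: "batch", 1: "sequence"},
        "attention_mask": {0: "batch", 1: "sequence"},
        "last_hidden_state": {0: "batch", 1: "sequence"},
    },
    opset_version=11,
)
```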
#!/usr/bin/env python # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Usage: # ./gen-card-facebook-wmt19.py import os from pathlib import Path def write_model_card(model_card_dir, src_lang, tgt_lang): texts = { "en": "Machine learning is great, isn't it?", "ru": "Машинное обучение - это здорово, не так ли?", "de": "Maschinelles Lernen ist großartig, oder?", } # BLEU scores as follows: # "pair": [fairseq, transformers] scores = { "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"], "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"], "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"], "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"], } pair = f"{src_lang}-{tgt_lang}" readme = f""" --- language: - {src_lang} - {tgt_lang} thumbnail: tags: - translation - wmt19 - facebook license: apache-2.0 datasets: - wmt19 metrics: - bleu --- # FSMT ## Model description This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}. For more details, please see [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616). The abbreviation FSMT stands for FairSeqMachineTranslation. All four models are available: * [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru) * [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en) * [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de) * [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en) ## Intended uses & limitations #### How to use ```python from transformers import FSMTForConditionalGeneration, FSMTTokenizer mname = "facebook/wmt19-{src_lang}-{tgt_lang}" tokenizer = FSMTTokenizer.from_pretrained(mname) model = FSMTForConditionalGeneration.from_pretrained(mname) input = "{texts[src_lang]}" input_ids = tokenizer.encode(input, return_tensors="pt") outputs = model.generate(input_ids) decoded = tokenizer.decode(outputs[0], skip_special_tokens=True) print(decoded) # {texts[tgt_lang]} ``` #### Limitations and bias - The original (and this ported model) doesn't seem to handle inputs with repeated sub-phrases well, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981) ## Training data Pretrained weights were left identical to the original model released by fairseq. For more details, please see the [paper](https://arxiv.org/abs/1907.06616). ## Eval results pair | fairseq | transformers -------|---------|---------- {pair} | {scores[pair][0]} | {scores[pair][1]} The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn't support: - model ensemble, therefore the best performing checkpoint was ported (``model4.pt``). 
- re-ranking The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=15 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`. ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ```bibtex @inproceedings{{..., year={{2020}}, title={{Facebook FAIR's WMT19 News Translation Task Submission}}, author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}}, booktitle={{Proc. of WMT}}, }} ``` ## TODO - port model ensemble (fairseq uses 4 model checkpoints) """ os.makedirs(model_card_dir, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme) # make sure we are under the root of the project repo_dir = Path(__file__).resolve().parent.parent.parent model_cards_dir = repo_dir / "model_cards" for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]: base, src_lang, tgt_lang = model_name.split("-") model_card_dir = model_cards_dir / "facebook" / model_name write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
AdaMix/scripts/fsmt/gen-card-facebook-wmt19.py/0
{ "file_path": "AdaMix/scripts/fsmt/gen-card-facebook-wmt19.py", "repo_id": "AdaMix", "token_count": 2079 }
46
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Benchmarking the library on inference and training in PyTorch. """ import random import timeit from functools import wraps from typing import Callable, Optional from ..configuration_utils import PretrainedConfig from ..file_utils import is_py3nvml_available, is_tf_available from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING from ..utils import logging from .benchmark_utils import ( Benchmark, Memory, MemorySummary, measure_peak_memory_cpu, start_memory_tracing, stop_memory_tracing, ) if is_tf_available(): import tensorflow as tf from tensorflow.python.framework.errors_impl import ResourceExhaustedError from .benchmark_args_tf import TensorFlowBenchmarkArguments if is_py3nvml_available(): import py3nvml.py3nvml as nvml logger = logging.get_logger(__name__) def run_with_tf_optimizations(do_eager_mode: bool, use_xla: bool): def run_func(func): @wraps(func) def run_in_eager_mode(*args, **kwargs): return func(*args, **kwargs) @wraps(func) @tf.function(experimental_compile=use_xla) def run_in_graph_mode(*args, **kwargs): return func(*args, **kwargs) if do_eager_mode is True: assert ( use_xla is False ), "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." return run_in_eager_mode else: return run_in_graph_mode return run_func def random_input_ids(batch_size: int, sequence_length: int, vocab_size: int) -> ["tf.Tensor"]: rng = random.Random() values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)] return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32) class TensorFlowBenchmark(Benchmark): args: TensorFlowBenchmarkArguments configs: PretrainedConfig framework: str = "TensorFlow" @property def framework_version(self): return tf.__version__ def _inference_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: # initialize GPU on separate process strategy = self.args.strategy assert strategy is not None, "A device strategy has to be initialized before using TensorFlow." _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_speed(_inference) def _train_speed(self, model_name: str, batch_size: int, sequence_length: int) -> float: strategy = self.args.strategy assert strategy is not None, "A device strategy has to be initialized before using TensorFlow." 
_train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_speed(_train) def _inference_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: # initialize GPU on separate process if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True) strategy = self.args.strategy assert strategy is not None, "A device strategy has to be initialized before using TensorFlow." _inference = self._prepare_inference_func(model_name, batch_size, sequence_length) return self._measure_memory(_inference) def _train_memory( self, model_name: str, batch_size: int, sequence_length: int ) -> [Memory, Optional[MemorySummary]]: if self.args.is_gpu: tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx], True) strategy = self.args.strategy assert strategy is not None, "A device strategy has to be initialized before using TensorFlow." _train = self._prepare_train_func(model_name, batch_size, sequence_length) return self._measure_memory(_train) def _prepare_inference_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] if self.args.fp16: raise NotImplementedError("Mixed precision is currently not supported.") has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`." ) else: model = TF_MODEL_MAPPING[config.__class__](config) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = random_input_ids(batch_size, sequence_length, vocab_size) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_decoder_forward(): return model(input_ids, decoder_input_ids=input_ids, training=False) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_forward(): return model(input_ids, training=False) _inference = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward return _inference def _prepare_train_func(self, model_name: str, batch_size: int, sequence_length: int) -> Callable[[], None]: config = self.config_dict[model_name] assert ( self.args.eager_mode is False ), "Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." 
if self.args.fp16: raise NotImplementedError("Mixed precision is currently not supported.") has_model_class_in_config = ( hasattr(config, "architectures") and isinstance(config.architectures, list) and len(config.architectures) > 0 ) if not self.args.only_pretrain_model and has_model_class_in_config: try: model_class = "TF" + config.architectures[0] # prepend 'TF' for tensorflow model transformers_module = __import__("transformers", fromlist=[model_class]) model_cls = getattr(transformers_module, model_class) model = model_cls(config) except ImportError: raise ImportError( f"{model_class} does not exist. If you just want to test the pretrained model, you might want to set `--only_pretrain_model` or `args.only_pretrain_model=True`." ) else: model = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](config) # encoder-decoder has vocab size saved differently vocab_size = config.vocab_size if hasattr(config, "vocab_size") else config.encoder.vocab_size input_ids = random_input_ids(batch_size, sequence_length, vocab_size) @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_decoder_train(): loss = model(input_ids, decoder_input_ids=input_ids, labels=input_ids, training=True)[0] gradients = tf.gradients(loss, model.trainable_variables) return gradients @run_with_tf_optimizations(self.args.eager_mode, self.args.use_xla) def encoder_train(): loss = model(input_ids, labels=input_ids, training=True)[0] gradients = tf.gradients(loss, model.trainable_variables) return gradients _train = encoder_decoder_train if config.is_encoder_decoder else encoder_train return _train def _measure_speed(self, func) -> float: with self.args.strategy.scope(): try: if self.args.is_tpu or self.args.use_xla: # run additional 10 times to stabilize compilation for tpu logger.info("Do inference on TPU. Running model 5 times to stabilize compilation") timeit.repeat(func, repeat=1, number=5) # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average runtimes = timeit.repeat( func, repeat=self.args.repeat, number=10, ) return min(runtimes) / 10.0 except ResourceExhaustedError as e: self.print_fn("Doesn't fit on GPU. {}".format(e)) def _measure_memory(self, func: Callable[[], None]) -> [Memory, MemorySummary]: logger.info( "Note that TensorFlow allocates more memory than" "it might need to speed up computation." "The memory reported here corresponds to the memory" "reported by `nvidia-smi`, which can vary depending" "on total available memory on the GPU that is used." ) with self.args.strategy.scope(): try: if self.args.trace_memory_line_by_line: assert ( self.args.eager_mode ), "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory consumption line by line." trace = start_memory_tracing("transformers") if self.args.is_tpu: # tpu raise NotImplementedError( "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking with `args.memory=False`" ) elif self.args.is_gpu: # gpu if not is_py3nvml_available(): logger.warning( "py3nvml not installed, we won't log GPU memory usage. " "Install py3nvml (pip install py3nvml) to log information about GPU." ) memory = "N/A" else: logger.info( "Measuring total GPU usage on GPU device. Make sure to not have additional processes running on the same GPU." 
) # init nvml nvml.nvmlInit() func() handle = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx) meminfo = nvml.nvmlDeviceGetMemoryInfo(handle) max_bytes_in_use = meminfo.used memory = Memory(max_bytes_in_use) # shutdown nvml nvml.nvmlShutdown() else: # cpu if self.args.trace_memory_line_by_line: logger.info( "When enabling line by line tracing, the max peak memory for CPU is inaccurate in TensorFlow." ) memory = None else: memory_bytes = measure_peak_memory_cpu(func) memory = Memory(memory_bytes) if isinstance(memory_bytes, int) else memory_bytes if self.args.trace_memory_line_by_line: summary = stop_memory_tracing(trace) if memory is None: memory = summary.total else: summary = None return memory, summary except ResourceExhaustedError as e: self.print_fn("Doesn't fit on GPU. {}".format(e)) return "N/A", None
AdaMix/src/transformers/benchmark/benchmark_tf.py/0
{ "file_path": "AdaMix/src/transformers/benchmark/benchmark_tf.py", "repo_id": "AdaMix", "token_count": 5791 }
47
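The benchmark class above is normally driven through the public `TensorFlowBenchmark` / `TensorFlowBenchmarkArguments` entry points. A minimal usage sketch, assuming those classes are exported by this package as in upstream transformers; the model name, batch size and sequence length are illustrative only:

from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments

# Illustrative settings: one small model, one batch size, one sequence length.
args = TensorFlowBenchmarkArguments(
    models=["bert-base-uncased"],
    batch_sizes=[8],
    sequence_lengths=[128],
    inference=True,
    training=False,
    speed=True,
    memory=True,
)
benchmark = TensorFlowBenchmark(args)
results = benchmark.run()  # dispatches to the _inference_*/_train_* methods defined above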
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Utilities to convert slow tokenizers in their fast tokenizers counterparts. All the conversions are grouped here to gather SentencePiece dependencies outside of the fast tokenizers files and allow to make our dependency on SentencePiece optional. """ from typing import Dict, List, Tuple from tokenizers import Regex, Tokenizer, decoders, normalizers, pre_tokenizers, processors from tokenizers.models import BPE, Unigram, WordPiece from .file_utils import requires_protobuf, requires_sentencepiece class SentencePieceExtractor: """ Extractor implementation for SentencePiece trained models. https://github.com/google/sentencepiece """ def __init__(self, model: str): requires_sentencepiece(self) from sentencepiece import SentencePieceProcessor self.sp = SentencePieceProcessor() self.sp.Load(model) def extract(self) -> Tuple[Dict[str, int], List[Tuple]]: sp = self.sp vocab = {sp.id_to_piece(index): index for index in range(sp.GetPieceSize())} # Merges merges = [] for piece_l in vocab.keys(): for piece_r in vocab.keys(): merge = f"{piece_l}{piece_r}" piece_id = vocab.get(merge, None) if piece_id: merges += [(piece_l, piece_r, piece_id)] merges = sorted(merges, key=lambda val: val[2]) merges = [(val[0], val[1]) for val in merges] return vocab, merges def check_number_comma(piece: str) -> bool: return len(piece) < 2 or piece[-1] != "," or not piece[-2].isdigit() class Converter: def __init__(self, original_tokenizer): self.original_tokenizer = original_tokenizer def converted(self) -> Tokenizer: raise NotImplementedError() class BertConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class FunnelConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, 
unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:2 $A:0 {sep}:0", # token_type_id is 2 for Funnel transformer pair=f"{cls}:2 $A:0 {sep}:0 $B:1 {sep}:1", special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class MPNetConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.vocab tokenizer = Tokenizer(WordPiece(vocab, unk_token=str(self.original_tokenizer.unk_token))) tokenize_chinese_chars = False strip_accents = False do_lower_case = False if hasattr(self.original_tokenizer, "basic_tokenizer"): tokenize_chinese_chars = self.original_tokenizer.basic_tokenizer.tokenize_chinese_chars strip_accents = self.original_tokenizer.basic_tokenizer.strip_accents do_lower_case = self.original_tokenizer.basic_tokenizer.do_lower_case tokenizer.normalizer = normalizers.BertNormalizer( clean_text=True, handle_chinese_chars=tokenize_chinese_chars, strip_accents=strip_accents, lowercase=do_lower_case, ) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() cls = str(self.original_tokenizer.cls_token) sep = str(self.original_tokenizer.sep_token) cls_token_id = self.original_tokenizer.cls_token_id sep_token_id = self.original_tokenizer.sep_token_id tokenizer.post_processor = processors.TemplateProcessing( single=f"{cls}:0 $A:0 {sep}:0", pair=f"{cls}:0 $A:0 {sep}:0 {sep}:0 $B:1 {sep}:1", # MPNet uses two [SEP] tokens special_tokens=[ (cls, cls_token_id), (sep, sep_token_id), ], ) tokenizer.decoder = decoders.WordPiece(prefix="##") return tokenizer class OpenAIGPTConverter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) unk_token = self.original_tokenizer.unk_token tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, unk_token=str(unk_token), end_of_word_suffix="</w>", fuse_unk=False, ) ) if tokenizer.token_to_id(str(unk_token)) is not None: tokenizer.add_special_tokens([str(unk_token)]) tokenizer.normalizer = normalizers.BertNormalizer(lowercase=True) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix="</w>") return tokenizer class GPT2Converter(Converter): def converted(self) -> Tokenizer: vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=self.original_tokenizer.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() 
tokenizer.post_processor = processors.ByteLevel(trim_offsets=False) return tokenizer class HerbertConverter(Converter): def converted(self) -> Tokenizer: tokenizer_info_str = "#version:" token_suffix = "</w>" vocab = self.original_tokenizer.encoder merges = list(self.original_tokenizer.bpe_ranks.keys()) if tokenizer_info_str in merges[0][0]: merges = merges[1:] tokenizer = Tokenizer( BPE( vocab, merges, dropout=None, unk_token=self.original_tokenizer.unk_token, end_of_word_suffix=token_suffix, ) ) tokenizer.normalizer = normalizers.BertNormalizer(lowercase=False, strip_accents=False) tokenizer.pre_tokenizer = pre_tokenizers.BertPreTokenizer() tokenizer.decoder = decoders.BPEDecoder(suffix=token_suffix) tokenizer.post_processor = processors.BertProcessing( sep=(self.original_tokenizer.sep_token, self.original_tokenizer.sep_token_id), cls=(self.original_tokenizer.cls_token, self.original_tokenizer.cls_token_id), ) return tokenizer class RobertaConverter(Converter): def converted(self) -> Tokenizer: ot = self.original_tokenizer vocab = ot.encoder merges = list(ot.bpe_ranks.keys()) tokenizer = Tokenizer( BPE( vocab=vocab, merges=merges, dropout=None, continuing_subword_prefix="", end_of_word_suffix="", fuse_unk=False, ) ) tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel(add_prefix_space=ot.add_prefix_space) tokenizer.decoder = decoders.ByteLevel() tokenizer.post_processor = processors.RobertaProcessing( sep=(ot.sep_token, ot.sep_token_id), cls=(ot.cls_token, ot.cls_token_id), add_prefix_space=ot.add_prefix_space, trim_offsets=True, # True by default on Roberta (historical) ) return tokenizer class SpmConverter(Converter): def __init__(self, *args): requires_protobuf(self) super().__init__(*args) from .utils import sentencepiece_model_pb2 as model_pb2 m = model_pb2.ModelProto() m.ParseFromString(open(self.original_tokenizer.vocab_file, "rb").read()) self.proto = m def vocab(self, proto): return [(piece.piece, piece.score) for piece in proto.pieces] def unk_id(self, proto): return proto.trainer_spec.unk_id def tokenizer(self, proto): model_type = proto.trainer_spec.model_type vocab = self.vocab(proto) unk_id = self.unk_id(proto) if model_type == 1: tokenizer = Tokenizer(Unigram(vocab, unk_id)) elif model_type == 2: _, merges = SentencePieceExtractor(self.original_tokenizer.vocab_file).extract() bpe_vocab = {word: i for i, (word, score) in enumerate(vocab)} tokenizer = Tokenizer( BPE( bpe_vocab, merges, unk_token=proto.trainer_spec.unk_piece, fuse_unk=True, ) ) else: raise Exception( "You're trying to run a `Unigram` model but you're file was trained with a different algorithm" ) return tokenizer def normalizer(self, proto): precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap return normalizers.Sequence( [normalizers.Precompiled(precompiled_charsmap), normalizers.Replace(Regex(" {2,}"), " ")] ) def pre_tokenizer(self, replacement, add_prefix_space): return pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) def post_processor(self): return None def converted(self) -> Tokenizer: tokenizer = self.tokenizer(self.proto) # Tokenizer assemble tokenizer.normalizer = self.normalizer(self.proto) replacement = "▁" add_prefix_space = True tokenizer.pre_tokenizer = self.pre_tokenizer(replacement, add_prefix_space) tokenizer.decoder = decoders.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space) post_processor = self.post_processor() if post_processor: tokenizer.post_processor = post_processor return tokenizer class 
AlbertConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), normalizers.Replace(Regex(" {2,}"), " "), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="[CLS]:0 $A:0 [SEP]:0", pair="[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1", special_tokens=[ ("[CLS]", self.original_tokenizer.convert_tokens_to_ids("[CLS]")), ("[SEP]", self.original_tokenizer.convert_tokens_to_ids("[SEP]")), ], ) class BarthezConverter(SpmConverter): def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class CamembertConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>NOTUSED", 0.0), ("<pad>", 0.0), ("</s>NOTUSED", 0.0), ("<unk>", 0.0), ("<unk>NOTUSED", -100), ] # We down-grade the original SentencePiece by -100 to avoid using it and use our added token instead vocab += [(piece.piece, piece.score) for piece in proto.pieces[1:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): # See vocab unk position return 3 def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class MBartConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [ ("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), ("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="$A </s> en_XX", pair="$A $B </s> en_XX", special_tokens=[ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class MBart50Converter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] # fmt: off vocab += [("ar_AR", 0.0), ("cs_CZ", 0.0), ("de_DE", 0.0), ("en_XX", 0.0), ("es_XX", 0.0), ("et_EE", 0.0), ("fi_FI", 0.0), ("fr_XX", 0.0), ("gu_IN", 0.0), ("hi_IN", 0.0), ("it_IT", 0.0), ("ja_XX", 0.0), ("kk_KZ", 0.0), ("ko_KR", 0.0), ("lt_LT", 0.0), 
("lv_LV", 0.0), ("my_MM", 0.0), ("ne_NP", 0.0), ("nl_XX", 0.0), ("ro_RO", 0.0), ("ru_RU", 0.0), ("si_LK", 0.0), ("tr_TR", 0.0), ("vi_VN", 0.0), ("zh_CN", 0.0), ("af_ZA", 0.0), ("az_AZ", 0.0), ("bn_IN", 0.0), ("fa_IR", 0.0), ("he_IL", 0.0), ("hr_HR", 0.0), ("id_ID", 0.0), ("ka_GE", 0.0), ("km_KH", 0.0), ("mk_MK", 0.0), ("ml_IN", 0.0), ("mn_MN", 0.0), ("mr_IN", 0.0), ("pl_PL", 0.0), ("ps_AF", 0.0), ("pt_XX", 0.0), ("sv_SE", 0.0), ("sw_KE", 0.0), ("ta_IN", 0.0), ("te_IN", 0.0), ("th_TH", 0.0), ("tl_XX", 0.0), ("uk_UA", 0.0), ("ur_PK", 0.0), ("xh_ZA", 0.0), ("gl_ES", 0.0), ("sl_SI", 0.0)] # fmt: on vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): return 3 def post_processor(self): return processors.TemplateProcessing( single="en_XX $A </s>", pair="en_XX $A $B </s>", special_tokens=[ ("en_XX", self.original_tokenizer.convert_tokens_to_ids("en_XX")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class XLMRobertaConverter(SpmConverter): def vocab(self, proto): vocab = [ ("<s>", 0.0), ("<pad>", 0.0), ("</s>", 0.0), ("<unk>", 0.0), ] vocab += [(piece.piece, piece.score) for piece in proto.pieces[3:]] vocab += [("<mask>", 0.0)] return vocab def unk_id(self, proto): unk_id = 3 return unk_id def post_processor(self): return processors.TemplateProcessing( single="<s> $A </s>", pair="<s> $A </s> </s> $B </s>", special_tokens=[ ("<s>", self.original_tokenizer.convert_tokens_to_ids("<s>")), ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) class XLNetConverter(SpmConverter): def vocab(self, proto): return [ (piece.piece, piece.score) if check_number_comma(piece.piece) else (piece.piece, piece.score - 100) for piece in proto.pieces ] def normalizer(self, proto): list_normalizers = [ normalizers.Replace("``", '"'), normalizers.Replace("''", '"'), normalizers.Replace(Regex(" {2,}"), " "), ] if not self.original_tokenizer.keep_accents: list_normalizers.append(normalizers.NFKD()) list_normalizers.append(normalizers.StripAccents()) if self.original_tokenizer.do_lower_case: list_normalizers.append(normalizers.Lowercase()) precompiled_charsmap = proto.normalizer_spec.precompiled_charsmap list_normalizers.append(normalizers.Precompiled(precompiled_charsmap)) return normalizers.Sequence(list_normalizers) def post_processor(self): return processors.TemplateProcessing( single="$A:0 <sep>:0 <cls>:2", pair="$A:0 <sep>:0 $B:1 <sep>:1 <cls>:2", special_tokens=[ ("<sep>", self.original_tokenizer.convert_tokens_to_ids("<sep>")), ("<cls>", self.original_tokenizer.convert_tokens_to_ids("<cls>")), ], ) class ReformerConverter(SpmConverter): pass class BertGenerationConverter(SpmConverter): pass class PegasusConverter(SpmConverter): def vocab(self, proto): vocab = [ (self.original_tokenizer.pad_token, 0.0), (self.original_tokenizer.eos_token, 0.0), (self.original_tokenizer.mask_token_sent, 0.0), (self.original_tokenizer.mask_token, 0.0), ] vocab += [(f"<unk_{i}>", -100.0) for i in range(2, self.original_tokenizer.offset)] vocab += [(piece.piece, piece.score) for piece in proto.pieces[2:]] return vocab def unk_id(self, proto): return proto.trainer_spec.unk_id + self.original_tokenizer.offset def pre_tokenizer(self, replacement, add_prefix_space): return pre_tokenizers.Sequence( [ pre_tokenizers.WhitespaceSplit(), pre_tokenizers.Metaspace(replacement=replacement, add_prefix_space=add_prefix_space), ] ) def post_processor(self): eos = self.original_tokenizer.eos_token special_tokens = [ (eos, self.original_tokenizer.eos_token_id), ] return 
processors.TemplateProcessing(single=["$A", eos], pair=["$A", "$B", eos], special_tokens=special_tokens) class T5Converter(SpmConverter): def vocab(self, proto): num_extra_ids = self.original_tokenizer._extra_ids vocab = [(piece.piece, piece.score) for piece in proto.pieces] vocab += [("<extra_id_{}>".format(i), 0.0) for i in range(num_extra_ids - 1, -1, -1)] return vocab def post_processor(self): return processors.TemplateProcessing( single=["$A", "</s>"], pair=["$A", "</s>", "$B", "</s>"], special_tokens=[ ("</s>", self.original_tokenizer.convert_tokens_to_ids("</s>")), ], ) SLOW_TO_FAST_CONVERTERS = { "AlbertTokenizer": AlbertConverter, "BartTokenizer": RobertaConverter, "BarthezTokenizer": BarthezConverter, "BertTokenizer": BertConverter, "CamembertTokenizer": CamembertConverter, "ConvBertTokenizer": BertConverter, "DistilBertTokenizer": BertConverter, "DPRReaderTokenizer": BertConverter, "DPRQuestionEncoderTokenizer": BertConverter, "DPRContextEncoderTokenizer": BertConverter, "ElectraTokenizer": BertConverter, "FunnelTokenizer": FunnelConverter, "GPT2Tokenizer": GPT2Converter, "HerbertTokenizer": HerbertConverter, "LayoutLMTokenizer": BertConverter, "LongformerTokenizer": RobertaConverter, "LEDTokenizer": RobertaConverter, "LxmertTokenizer": BertConverter, "MBartTokenizer": MBartConverter, "MBart50Tokenizer": MBart50Converter, "MPNetTokenizer": MPNetConverter, "MobileBertTokenizer": BertConverter, "OpenAIGPTTokenizer": OpenAIGPTConverter, "PegasusTokenizer": PegasusConverter, "ReformerTokenizer": ReformerConverter, "RetriBertTokenizer": BertConverter, "RobertaTokenizer": RobertaConverter, "SqueezeBertTokenizer": BertConverter, "T5Tokenizer": T5Converter, "XLMRobertaTokenizer": XLMRobertaConverter, "XLNetTokenizer": XLNetConverter, } def convert_slow_tokenizer(transformer_tokenizer) -> Tokenizer: """ Utilities to convert a slow tokenizer instance in a fast tokenizer instance. Args: transformer_tokenizer (:class:`~transformers.tokenization_utils_base.PreTrainedTokenizer`): Instance of a slow tokenizer to convert in the backend tokenizer for :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerFast`. Return: A instance of :class:`~tokenizers.Tokenizer` to be used as the backend tokenizer of a :class:`~transformers.tokenization_utils_base.PreTrainedTokenizerFast` """ tokenizer_class_name = transformer_tokenizer.__class__.__name__ if tokenizer_class_name not in SLOW_TO_FAST_CONVERTERS: raise ValueError( f"An instance of tokenizer class {tokenizer_class_name} cannot be converted in a Fast tokenizer instance. " f"No converter was found. Currently available slow->fast convertors: {list(SLOW_TO_FAST_CONVERTERS.keys())}" ) converter_class = SLOW_TO_FAST_CONVERTERS[tokenizer_class_name] return converter_class(transformer_tokenizer).converted()
AdaMix/src/transformers/convert_slow_tokenizer.py/0
{ "file_path": "AdaMix/src/transformers/convert_slow_tokenizer.py", "repo_id": "AdaMix", "token_count": 12334 }
48
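A minimal sketch of how `convert_slow_tokenizer` above is typically used, assuming a slow BERT tokenizer is available locally or on the Hub; the checkpoint name and sample sentence are illustrative:

from transformers import BertTokenizer
from transformers.convert_slow_tokenizer import convert_slow_tokenizer

slow_tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
backend = convert_slow_tokenizer(slow_tokenizer)  # a tokenizers.Tokenizer built by BertConverter
print(backend.encode("Hello world!").tokens)      # e.g. ['[CLS]', 'hello', 'world', '!', '[SEP]']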
import time
import warnings
from abc import ABC
from typing import Optional

import torch

from .file_utils import add_start_docstrings


LOGITS_PROCESSOR_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.BertTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`):
            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
            or scores for each vocabulary token after SoftMax.
        kwargs:
            Additional stopping criteria specific kwargs.

    Return:
        :obj:`bool`. :obj:`False` indicates we should continue, :obj:`True` indicates we should stop.

"""


class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    """
    This class can be used to stop generation whenever the full generated number of tokens exceeds :obj:`max_length`.
    Keep in mind that for decoder-only transformers this count includes the initial prompt tokens.

    Args:
        max_length (:obj:`int`):
            The maximum length that the output sequence can have in number of tokens.
    """

    def __init__(self, max_length: int):
        self.max_length = max_length

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] > self.max_length


class MaxTimeCriteria(StoppingCriteria):
    """
    This class can be used to stop generation whenever the full generation exceeds some amount of time. By default,
    the time starts being counted when this object is initialized. You can override this by passing an
    :obj:`initial_timestamp`.

    Args:
        max_time (:obj:`float`):
            The maximum allowed time in seconds for the generation.
        initial_timestamp (:obj:`float`, `optional`, defaults to :obj:`time.time()`):
            The start of the generation allowed time.
    """

    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int):
    found = False
    for stopping_criterium in stopping_criteria:
        if isinstance(stopping_criterium, MaxLengthCriteria):
            found = True
            if stopping_criterium.max_length != max_length:
                warnings.warn(
                    "You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning
                )
    if not found:
        stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
AdaMix/src/transformers/generation_stopping_criteria.py/0
{ "file_path": "AdaMix/src/transformers/generation_stopping_criteria.py", "repo_id": "AdaMix", "token_count": 1476 }
49
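A short sketch combining the criteria defined above; the tensor shapes and values are arbitrary, and only torch plus the classes from this file are assumed:

import torch

from transformers.generation_stopping_criteria import (
    MaxLengthCriteria,
    MaxTimeCriteria,
    StoppingCriteriaList,
)

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20), MaxTimeCriteria(max_time=5.0)])

input_ids = torch.ones((1, 25), dtype=torch.long)   # pretend 25 tokens have been generated so far
scores = torch.zeros((1, 30522))                    # dummy next-token scores
print(criteria(input_ids, scores))                  # True: 25 > max_length=20, so generation should stop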
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ BERT model configuration """ from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json", "bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json", "bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json", "bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json", "bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json", "bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json", "bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json", "bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json", "bert-large-uncased-whole-word-masking": "https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json", "bert-large-cased-whole-word-masking": "https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json", "bert-large-uncased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json", "bert-large-cased-whole-word-masking-finetuned-squad": "https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json", "bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json", "bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json", "bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json", "cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json", "cl-tohoku/bert-base-japanese-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json", "cl-tohoku/bert-base-japanese-char": "https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json", "cl-tohoku/bert-base-japanese-char-whole-word-masking": "https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json", "TurkuNLP/bert-base-finnish-cased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json", "TurkuNLP/bert-base-finnish-uncased-v1": "https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json", "wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json", # See all BERT models at 
https://huggingface.co/models?filter=bert } class BertConfig(PretrainedConfig): r""" This is the configuration class to store the configuration of a :class:`~transformers.BertModel` or a :class:`~transformers.TFBertModel`. It is used to instantiate a BERT model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the BERT `bert-base-uncased <https://huggingface.co/bert-base-uncased>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 30522): Vocabulary size of the BERT model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.BertModel` or :class:`~transformers.TFBertModel`. hidden_size (:obj:`int`, `optional`, defaults to 768): Dimensionality of the encoder layers and the pooler layer. num_hidden_layers (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. num_attention_heads (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. intermediate_size (:obj:`int`, `optional`, defaults to 3072): Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder. hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`): The non-linear activation function (function or string) in the encoder and pooler. If string, :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported. hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention probabilities. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size (:obj:`int`, `optional`, defaults to 2): The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.BertModel` or :class:`~transformers.TFBertModel`. initializer_range (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`): If True, use gradient checkpointing to save memory at the expense of slower backward pass. position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`): Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`, :obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on :obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et al.) <https://arxiv.org/abs/1803.02155>`__. For more information on :obj:`"relative_key_query"`, please refer to `Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.) <https://arxiv.org/abs/2009.13658>`__. 
use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should return the last key/values attentions (not used by all models). Only relevant if ``config.is_decoder=True``. apply_lora (:obj:`bool`, `optional`): apply Lora. lora_alpha (:obj:`int`, `optional`): lora alpha. lora_r (:obj:`int`, `optional`): lora r. apply_adapter (:obj:`bool`, `optional`): apply adapter. adapter_type (:obj:`str`, `optional`): houlsby or pfeiffer. adapter_size (:obj:`int`, `optional`): 8 16 32 64. Examples:: >>> from transformers import BertModel, BertConfig >>> # Initializing a BERT bert-base-uncased style configuration >>> configuration = BertConfig() >>> # Initializing a model from the bert-base-uncased style configuration >>> model = BertModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "bert" def __init__( self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, gradient_checkpointing=False, position_embedding_type="absolute", use_cache=True, apply_lora=False, lora_alpha=None, lora_r=None, apply_adapter=False, adapter_type=None, adapter_size=None, apply_expert_soup=False, use_consistency_loss=0, num_experts=1, inference_level=1, sharing_down=0, sharing_up=0, weight_strategy="soft", sparsity=0.75, **kwargs ): super().__init__(pad_token_id=pad_token_id, **kwargs) self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.gradient_checkpointing = gradient_checkpointing self.position_embedding_type = position_embedding_type self.use_cache = use_cache self.apply_lora = apply_lora self.lora_alpha = lora_alpha self.lora_r = lora_r self.apply_adapter = apply_adapter self.adapter_type = adapter_type self.adapter_size = adapter_size self.apply_expert_soup = apply_expert_soup self.num_experts = num_experts self.inference_level = inference_level self.sharing_down = sharing_down self.sharing_up = sharing_up self.weight_strategy = weight_strategy self.sparsity = sparsity self.use_consistency_loss = use_consistency_loss
AdaMix/src/transformers/models/bert/configuration_bert.py/0
{ "file_path": "AdaMix/src/transformers/models/bert/configuration_bert.py", "repo_id": "AdaMix", "token_count": 4458 }
50
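The extra constructor arguments above (`apply_lora`, `apply_adapter`, `apply_expert_soup`, `num_experts`, ...) are AdaMix-specific additions on top of the upstream BERT config. A sketch of building a config with these knobs follows; the particular values, and the reading of the flags as LoRA / adapter / mixture-of-adapters settings, are assumptions based only on the argument names:

from transformers import BertConfig

config = BertConfig(
    apply_lora=True,
    lora_r=8,              # assumed low-rank dimension
    lora_alpha=16,         # assumed LoRA scaling factor
    apply_expert_soup=True,
    num_experts=4,         # assumed number of adaptation experts to mix
    inference_level=1,
    sharing_down=0,
    sharing_up=0,
)
print(config.apply_lora, config.num_experts)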
# coding=utf-8 # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model BertGeneration.""" import os from shutil import copyfile from typing import List, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "bert_for_seq_generation": "https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"bert_for_seq_generation": 512} class BertGenerationTokenizer(PreTrainedTokenizer): """ Construct a BertGeneration tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. bos_token (:obj:`str`, `optional`, defaults to :obj:`"<s>"`): The begin of sequence token. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES prefix_tokens: List[int] = [] model_input_names = ["input_ids", "attention_mask"] def __init__( self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", **kwargs ): # Add extra_ids to the special token list super().__init__( bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, **kwargs, ) self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self): vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def _tokenize(self, text, sample=False): """Take as input a string and return a list of strings (tokens) for words/sub-words""" if not sample: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) return pieces def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = self.sp_model.decode_pieces(tokens) return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
AdaMix/src/transformers/models/bert_generation/tokenization_bert_generation.py/0
{ "file_path": "AdaMix/src/transformers/models/bert_generation/tokenization_bert_generation.py", "repo_id": "AdaMix", "token_count": 2216 }
51
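A minimal usage sketch for the SentencePiece tokenizer defined above, using the checkpoint name from its pretrained-vocab map; the sample sentence is illustrative:

from transformers import BertGenerationTokenizer

tokenizer = BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder")
ids = tokenizer("This is a test.").input_ids
print(tokenizer.convert_ids_to_tokens(ids))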
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert ConvBERT checkpoint."""

import argparse

from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert
from transformers.utils import logging


logging.set_verbosity_info()


def convert_orig_tf1_checkpoint_to_pytorch(tf_checkpoint_path, convbert_config_file, pytorch_dump_path):
    conf = ConvBertConfig.from_json_file(convbert_config_file)
    model = ConvBertModel(conf)

    model = load_tf_weights_in_convbert(model, conf, tf_checkpoint_path)
    model.save_pretrained(pytorch_dump_path)

    tf_model = TFConvBertModel.from_pretrained(pytorch_dump_path, from_pt=True)
    tf_model.save_pretrained(pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--convbert_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained ConvBERT model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_orig_tf1_checkpoint_to_pytorch(args.tf_checkpoint_path, args.convbert_config_file, args.pytorch_dump_path)
AdaMix/src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py/0
{ "file_path": "AdaMix/src/transformers/models/convbert/convert_convbert_original_tf1_checkpoint_to_pytorch_and_tf2.py", "repo_id": "AdaMix", "token_count": 723 }
52
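The script above is a thin argparse wrapper; the same conversion can be sketched directly in Python. The paths below are placeholders, and only the imports the script itself uses are assumed:

from transformers import ConvBertConfig, ConvBertModel, TFConvBertModel, load_tf_weights_in_convbert

config = ConvBertConfig.from_json_file("./convbert_tf1/config.json")      # placeholder path
model = load_tf_weights_in_convbert(ConvBertModel(config), config, "./convbert_tf1/model.ckpt")
model.save_pretrained("./convbert-pytorch")

# Round-trip into TensorFlow 2 weights, as the script does.
TFConvBertModel.from_pretrained("./convbert-pytorch", from_pt=True).save_pretrained("./convbert-pytorch")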
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os

import torch

from transformers.file_utils import WEIGHTS_NAME


DIALOGPT_MODELS = ["small", "medium", "large"]

OLD_KEY = "lm_head.decoder.weight"
NEW_KEY = "lm_head.weight"


def convert_dialogpt_checkpoint(checkpoint_path: str, pytorch_dump_folder_path: str):
    d = torch.load(checkpoint_path)
    d[NEW_KEY] = d.pop(OLD_KEY)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    torch.save(d, os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--dialogpt_path", default=".", type=str)
    args = parser.parse_args()
    for MODEL in DIALOGPT_MODELS:
        checkpoint_path = os.path.join(args.dialogpt_path, f"{MODEL}_ft.pkl")
        pytorch_dump_folder_path = f"./DialoGPT-{MODEL}"
        convert_dialogpt_checkpoint(
            checkpoint_path,
            pytorch_dump_folder_path,
        )
AdaMix/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/dialogpt/convert_dialogpt_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "AdaMix", "token_count": 562 }
53
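The conversion above amounts to renaming one key in the pickled state dict. A worked sketch of the same operation for a single checkpoint; the local file names are placeholders:

import os

import torch

from transformers.file_utils import WEIGHTS_NAME

state_dict = torch.load("./small_ft.pkl")                                # placeholder DialoGPT checkpoint
state_dict["lm_head.weight"] = state_dict.pop("lm_head.decoder.weight")  # same rename as OLD_KEY -> NEW_KEY

os.makedirs("./DialoGPT-small", exist_ok=True)
torch.save(state_dict, os.path.join("./DialoGPT-small", WEIGHTS_NAME))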
# coding=utf-8 # Copyright 2019 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch ELECTRA model. """ import math import os from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint from torch.nn import CrossEntropyLoss, MSELoss from ...activations import ACT2FN, get_activation from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutputWithCrossAttentions, BaseModelOutputWithPastAndCrossAttentions, MaskedLMOutput, MultipleChoiceModelOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import ( PreTrainedModel, SequenceSummary, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_electra import ElectraConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/electra-small-discriminator" _CONFIG_FOR_DOC = "ElectraConfig" _TOKENIZER_FOR_DOC = "ElectraTokenizer" ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST = [ "google/electra-small-generator", "google/electra-base-generator", "google/electra-large-generator", "google/electra-small-discriminator", "google/electra-base-discriminator", "google/electra-large-discriminator", # See all ELECTRA models at https://huggingface.co/models?filter=electra ] def load_tf_weights_in_electra(model, config, tf_checkpoint_path, discriminator_or_generator="discriminator"): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): original_name: str = name try: if isinstance(model, ElectraForMaskedLM): name = name.replace("electra/embeddings/", "generator/embeddings/") if discriminator_or_generator == "generator": name = name.replace("electra/", "discriminator/") name = name.replace("generator/", "electra/") name = name.replace("dense_1", "dense_prediction") name = name.replace("generator_predictions/output_bias", "generator_lm_head/bias") name = name.split("/") # print(original_name, name) # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any(n in ["global_step", "temperature"] for n in name): logger.info("Skipping {}".format(original_name)) continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: pointer = getattr(pointer, scope_names[0]) if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name.endswith("_embeddings"): pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise print("Initialize PyTorch weight {}".format(name), original_name) pointer.data = torch.from_numpy(array) except AttributeError as e: print("Skipping {}".format(original_name), name, e) continue return model class ElectraEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") # Copied from transformers.models.bert.modeling_bert.BertEmbeddings.forward def forward( self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0 ): if input_ids is not None: 
input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + token_type_embeddings if self.position_embedding_type == "absolute": position_embeddings = self.position_embeddings(position_ids) embeddings += position_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings # Copied from transformers.models.bert.modeling_bert.BertSelfAttention with Bert->Electra class ElectraSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.hidden_size, self.all_head_size) self.key = nn.Linear(config.hidden_size, self.all_head_size) self.value = nn.Linear(config.hidden_size, self.all_head_size) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": self.max_position_embeddings = config.max_position_embeddings self.distance_embedding = nn.Embedding(2 * config.max_position_embeddings - 1, self.attention_head_size) self.is_decoder = config.is_decoder def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): mixed_query_layer = self.query(hidden_states) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. 
is_cross_attention = encoder_hidden_states is not None if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_layer = past_key_value[0] value_layer = past_key_value[1] attention_mask = encoder_attention_mask elif is_cross_attention: key_layer = self.transpose_for_scores(self.key(encoder_hidden_states)) value_layer = self.transpose_for_scores(self.value(encoder_hidden_states)) attention_mask = encoder_attention_mask elif past_key_value is not None: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) key_layer = torch.cat([past_key_value[0], key_layer], dim=2) value_layer = torch.cat([past_key_value[1], value_layer], dim=2) else: key_layer = self.transpose_for_scores(self.key(hidden_states)) value_layer = self.transpose_for_scores(self.value(hidden_states)) query_layer = self.transpose_for_scores(mixed_query_layer) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_layer, value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) if self.position_embedding_type == "relative_key" or self.position_embedding_type == "relative_key_query": seq_length = hidden_states.size()[1] position_ids_l = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(-1, 1) position_ids_r = torch.arange(seq_length, dtype=torch.long, device=hidden_states.device).view(1, -1) distance = position_ids_l - position_ids_r positional_embedding = self.distance_embedding(distance + self.max_position_embeddings - 1) positional_embedding = positional_embedding.to(dtype=query_layer.dtype) # fp16 compatibility if self.position_embedding_type == "relative_key": relative_position_scores = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores elif self.position_embedding_type == "relative_key_query": relative_position_scores_query = torch.einsum("bhld,lrd->bhlr", query_layer, positional_embedding) relative_position_scores_key = torch.einsum("bhrd,lrd->bhlr", key_layer, positional_embedding) attention_scores = attention_scores + relative_position_scores_query + relative_position_scores_key attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in ElectraModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class ElectraSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Electra class ElectraAttention(nn.Module): def __init__(self, config): super().__init__() self.self = ElectraSelfAttention(config) self.output = ElectraSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class ElectraIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class ElectraOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = 
self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Electra class ElectraLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = ElectraAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = ElectraAttention(config) self.intermediate = ElectraIntermediate(config) self.output = ElectraOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output # Copied from transformers.models.bert.modeling_bert.BertEncoder with Bert->Electra class ElectraEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([ElectraLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, 
output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_self_attentions = () if output_attentions else None all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None next_decoder_cache = () if use_cache else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None past_key_value = past_key_values[i] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache=True` is incompatible with `config.gradient_checkpointing=True`. Setting " "`use_cache=False`..." ) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_value, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[-1],) if output_attentions: all_self_attentions = all_self_attentions + (layer_outputs[1],) if self.config.add_cross_attention: all_cross_attentions = all_cross_attentions + (layer_outputs[2],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [ hidden_states, next_decoder_cache, all_hidden_states, all_self_attentions, all_cross_attentions, ] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_decoder_cache, hidden_states=all_hidden_states, attentions=all_self_attentions, cross_attentions=all_cross_attentions, ) class ElectraDiscriminatorPredictions(nn.Module): """Prediction module for the discriminator, made up of two dense layers.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dense_prediction = nn.Linear(config.hidden_size, 1) self.config = config def forward(self, discriminator_hidden_states): hidden_states = self.dense(discriminator_hidden_states) hidden_states = get_activation(self.config.hidden_act)(hidden_states) logits = self.dense_prediction(hidden_states).squeeze(-1) return logits class ElectraGeneratorPredictions(nn.Module): """Prediction module for the generator, made up of two dense layers.""" def __init__(self, config): super().__init__() self.LayerNorm = nn.LayerNorm(config.embedding_size) self.dense = nn.Linear(config.hidden_size, config.embedding_size) def forward(self, generator_hidden_states): hidden_states = self.dense(generator_hidden_states) hidden_states = get_activation("gelu")(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class ElectraPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = ElectraConfig load_tf_weights = load_tf_weights_in_electra base_model_prefix = "electra" _keys_to_ignore_on_load_missing = [r"position_ids"] _keys_to_ignore_on_load_unexpected = [r"electra\.embeddings_project\.weight", r"electra\.embeddings_project\.bias"] # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class ElectraForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.ElectraForPreTraining`. Args: loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): Total loss of the ELECTRA objective. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Prediction scores of the head (scores for each token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None ELECTRA_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.ElectraConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ ELECTRA_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.ElectraTokenizer`. 
See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare Electra Model transformer outputting raw hidden-states without any specific head on top. Identical to " "the BERT model except that it uses an additional linear layer between the embedding layer and the encoder if the " "hidden size and embedding size are different." 
"" "Both the generator and discriminator checkpoints may be loaded into this model.", ELECTRA_START_DOCSTRING, ) class ElectraModel(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = ElectraEmbeddings(config) if config.embedding_size != config.hidden_size: self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size) self.encoder = ElectraEncoder(config) self.config = config self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, device) head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) hidden_states = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) if hasattr(self, "embeddings_project"): hidden_states = self.embeddings_project(hidden_states) hidden_states = self.encoder( hidden_states, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) return hidden_states class ElectraClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, features, **kwargs): x = features[:, 0, :] # take <s> token (equiv. 
to [CLS]) x = self.dropout(x) x = self.dense(x) x = get_activation("gelu")(x) # although BERT uses tanh here, it seems Electra authors used gelu here x = self.dropout(x) x = self.out_proj(x) return x @add_start_docstrings( """ ELECTRA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, ELECTRA_START_DOCSTRING, ) class ElectraForSequenceClassification(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.electra = ElectraModel(config) self.classifier = ElectraClassificationHead(config) self.init_weights() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra( input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict, ) sequence_output = discriminator_hidden_states[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @add_start_docstrings( """ Electra model with a binary classification head on top as used during pretraining for identifying generated tokens. It is recommended to load the discriminator checkpoint into that model. """, ELECTRA_START_DOCSTRING, ) class ElectraForPreTraining(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.electra = ElectraModel(config) self.discriminator_predictions = ElectraDiscriminatorPredictions(config) self.init_weights() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=ElectraForPreTrainingOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`): Labels for computing the ELECTRA loss. 
Input should be a sequence of tokens (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``: - 0 indicates the token is an original token, - 1 indicates the token was replaced. Returns: Examples:: >>> from transformers import ElectraTokenizer, ElectraForPreTraining >>> import torch >>> tokenizer = ElectraTokenizer.from_pretrained('google/electra-small-discriminator') >>> model = ElectraForPreTraining.from_pretrained('google/electra-small-discriminator') >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 >>> logits = model(input_ids).logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra( input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict, ) discriminator_sequence_output = discriminator_hidden_states[0] logits = self.discriminator_predictions(discriminator_sequence_output) loss = None if labels is not None: loss_fct = nn.BCEWithLogitsLoss() if attention_mask is not None: active_loss = attention_mask.view(-1, discriminator_sequence_output.shape[1]) == 1 active_logits = logits.view(-1, discriminator_sequence_output.shape[1])[active_loss] active_labels = labels[active_loss] loss = loss_fct(active_logits, active_labels.float()) else: loss = loss_fct(logits.view(-1, discriminator_sequence_output.shape[1]), labels.float()) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return ElectraForPreTrainingOutput( loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @add_start_docstrings( """ Electra model with a language modeling head on top. Even though both the discriminator and generator may be loaded into this model, the generator is the only model of the two to have been trained for the masked language modeling task. """, ELECTRA_START_DOCSTRING, ) class ElectraForMaskedLM(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.electra = ElectraModel(config) self.generator_predictions = ElectraGeneratorPredictions(config) self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size) self.init_weights() def get_output_embeddings(self): return self.generator_lm_head def set_output_embeddings(self, word_embeddings): self.generator_lm_head = word_embeddings @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. 
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict generator_hidden_states = self.electra( input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict, ) generator_sequence_output = generator_hidden_states[0] prediction_scores = self.generator_predictions(generator_sequence_output) prediction_scores = self.generator_lm_head(prediction_scores) loss = None # Masked language modeling softmax layer if labels is not None: loss_fct = nn.CrossEntropyLoss() # -100 index = padding token loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + generator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return MaskedLMOutput( loss=loss, logits=prediction_scores, hidden_states=generator_hidden_states.hidden_states, attentions=generator_hidden_states.attentions, ) @add_start_docstrings( """ Electra model with a token classification head on top. Both the discriminator and generator may be loaded into this model. """, ELECTRA_START_DOCSTRING, ) class ElectraForTokenClassification(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.electra = ElectraModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra( input_ids, attention_mask, token_type_ids, position_ids, head_mask, inputs_embeds, output_attentions, output_hidden_states, return_dict, ) discriminator_sequence_output = discriminator_hidden_states[0] discriminator_sequence_output = self.dropout(discriminator_sequence_output) logits = self.classifier(discriminator_sequence_output) loss = None if labels is not None: loss_fct = nn.CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.config.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.config.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @add_start_docstrings( """ ELECTRA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, ELECTRA_START_DOCSTRING, ) class ElectraForQuestionAnswering(ElectraPreTrainedModel): config_class = ElectraConfig base_model_prefix = "electra" def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.electra = ElectraModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict discriminator_hidden_states = self.electra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) sequence_output = discriminator_hidden_states[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = ( start_logits, end_logits, ) + discriminator_hidden_states[1:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, ) @add_start_docstrings( """ ELECTRA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, ELECTRA_START_DOCSTRING, ) class ElectraForMultipleChoice(ElectraPreTrainedModel): def __init__(self, config): super().__init__(config) self.electra = ElectraModel(config) self.sequence_summary = SequenceSummary(config) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_model_forward(ELECTRA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
(See :obj:`input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) discriminator_hidden_states = self.electra( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = discriminator_hidden_states[0] pooled_output = self.sequence_summary(sequence_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + discriminator_hidden_states[1:] return ((loss,) + output) if loss is not None else output return MultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=discriminator_hidden_states.hidden_states, attentions=discriminator_hidden_states.attentions, )
AdaMix/src/transformers/models/electra/modeling_electra.py/0
{ "file_path": "AdaMix/src/transformers/models/electra/modeling_electra.py", "repo_id": "AdaMix", "token_count": 25721 }
54
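For quick reference, a minimal usage sketch for the sequence-classification head defined in the file above. This is an editorial illustration, not part of the original source: it assumes the public google/electra-small-discriminator checkpoint (the same one used in the ElectraForPreTraining docstring example), and the classification head is freshly initialized, so the logits shown are untrained.

from transformers import ElectraTokenizer, ElectraForSequenceClassification
import torch

tokenizer = ElectraTokenizer.from_pretrained("google/electra-small-discriminator")
model = ElectraForSequenceClassification.from_pretrained("google/electra-small-discriminator", num_labels=2)

inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")  # batch size 1
labels = torch.tensor([1])  # dummy label for the two-class head

outputs = model(**inputs, labels=labels)
print(outputs.loss)          # scalar cross-entropy loss
print(outputs.logits.shape)  # torch.Size([1, 2])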
# coding=utf-8 # Copyright 2019-present CNRS, Facebook Inc. and the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Flaubert configuration, based on XLM. """ from ...utils import logging from ..xlm.configuration_xlm import XLMConfig logger = logging.get_logger(__name__) FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = { "flaubert/flaubert_small_cased": "https://huggingface.co/flaubert/flaubert_small_cased/resolve/main/config.json", "flaubert/flaubert_base_uncased": "https://huggingface.co/flaubert/flaubert_base_uncased/resolve/main/config.json", "flaubert/flaubert_base_cased": "https://huggingface.co/flaubert/flaubert_base_cased/resolve/main/config.json", "flaubert/flaubert_large_cased": "https://huggingface.co/flaubert/flaubert_large_cased/resolve/main/config.json", } class FlaubertConfig(XLMConfig): """ This is the configuration class to store the configuration of a :class:`~transformers.FlaubertModel` or a :class:`~transformers.TFFlaubertModel`. It is used to instantiate a FlauBERT model according to the specified arguments, defining the model architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: pre_norm (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to apply the layer normalization before or after the feed forward layer following the attention in each layer (Vaswani et al., Tensor2Tensor for Neural Machine Translation. 2018) layerdrop (:obj:`float`, `optional`, defaults to 0.0): Probability to drop layers during training (Fan et al., Reducing Transformer Depth on Demand with Structured Dropout. ICLR 2020) vocab_size (:obj:`int`, `optional`, defaults to 30145): Vocabulary size of the FlauBERT model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.FlaubertModel` or :class:`~transformers.TFFlaubertModel`. emb_dim (:obj:`int`, `optional`, defaults to 2048): Dimensionality of the encoder layers and the pooler layer. n_layer (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (:obj:`int`, `optional`, defaults to 16): Number of attention heads for each attention layer in the Transformer encoder. dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. attention_dropout (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for the attention mechanism gelu_activation (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to use a `gelu` activation instead of `relu`. sinusoidal_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings. causal (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the model should behave in a causal manner. 
Causal models use a triangular attention mask in order to only attend to the left-side context instead of a bidirectional context.
        asm (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction
            layer.
        n_langs (:obj:`int`, `optional`, defaults to 1):
            The number of languages the model handles. Set to 1 for monolingual models.
        use_lang_emb (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether to use language embeddings. Some models use additional language embeddings, see `the multilingual
            models page <http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings>`__ for
            information on how to use them.
        max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        embed_init_std (:obj:`float`, `optional`, defaults to 2048^-0.5):
            The standard deviation of the truncated_normal_initializer for initializing the embedding matrices.
        init_std (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the
            embedding matrices.
        layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        bos_index (:obj:`int`, `optional`, defaults to 0):
            The index of the beginning of sentence token in the vocabulary.
        eos_index (:obj:`int`, `optional`, defaults to 1):
            The index of the end of sentence token in the vocabulary.
        pad_index (:obj:`int`, `optional`, defaults to 2):
            The index of the padding token in the vocabulary.
        unk_index (:obj:`int`, `optional`, defaults to 3):
            The index of the unknown token in the vocabulary.
        mask_index (:obj:`int`, `optional`, defaults to 5):
            The index of the masking token in the vocabulary.
        is_encoder (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al.
        summary_type (:obj:`string`, `optional`, defaults to "first"):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Has to be one of the following options:

                - :obj:`"last"`: Take the last token hidden state (like XLNet).
                - :obj:`"first"`: Take the first token hidden state (like BERT).
                - :obj:`"mean"`: Take the mean of all tokens hidden states.
                - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
                - :obj:`"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Whether or not to add a projection after the vector extraction.
        summary_activation (:obj:`str`, `optional`):
            Argument used when doing sequence summary. Used in the sequence classification and multiple choice models.
            Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Used in the sequence classification and multiple choice models. Whether the projection outputs should have
            :obj:`config.num_labels` or :obj:`config.hidden_size` classes.
        summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1):
            Used in the sequence classification and multiple choice models.
The dropout ratio to be used after the projection and activation. start_n_top (:obj:`int`, `optional`, defaults to 5): Used in the SQuAD evaluation script. end_n_top (:obj:`int`, `optional`, defaults to 5): Used in the SQuAD evaluation script. mask_token_id (:obj:`int`, `optional`, defaults to 0): Model agnostic parameter to identify masked tokens when generating text in an MLM context. lang_id (:obj:`int`, `optional`, defaults to 1): The ID of the language used by the model. This parameter is used when generating text in a given language. """ model_type = "flaubert" def __init__(self, layerdrop=0.0, pre_norm=False, pad_token_id=2, bos_token_id=0, **kwargs): """Constructs FlaubertConfig.""" super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs) self.layerdrop = layerdrop self.pre_norm = pre_norm
AdaMix/src/transformers/models/flaubert/configuration_flaubert.py/0
{ "file_path": "AdaMix/src/transformers/models/flaubert/configuration_flaubert.py", "repo_id": "AdaMix", "token_count": 3218 }
55
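A short sketch of how the two fields this subclass adds (pre_norm and layerdrop) sit on top of the inherited XLM configuration. The values below are arbitrary illustrative choices, not the settings of any released FlauBERT checkpoint.

from transformers import FlaubertConfig, FlaubertModel

# Enable pre-layer-norm and a 10% layer-drop probability; everything else
# keeps the inherited XLMConfig defaults (emb_dim=2048, 12 layers, ...).
config = FlaubertConfig(pre_norm=True, layerdrop=0.1)
print(config.pre_norm, config.layerdrop, config.emb_dim)  # True 0.1 2048

# Builds a randomly initialized model from this config (no pretrained weights).
model = FlaubertModel(config)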
# coding=utf-8
# Copyright 2021 The I-BERT Authors (Sehoon Kim, Amir Gholami, Zhewei Yao,
# Michael Mahoney, Kurt Keutzer - UC Berkeley) and The HuggingFace Inc. team.
# Copyright (c) 2021, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" I-BERT configuration """

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
    "kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
    "kssteven/ibert-roberta-large-mnli": "https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json",
}


class IBertConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a :class:`~transformers.IBertModel`. It is used to
    instantiate an I-BERT model according to the specified arguments, defining the model architecture.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
    outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.

    Args:
        vocab_size (:obj:`int`, `optional`, defaults to 30522):
            Vocabulary size of the I-BERT model. Defines the number of different tokens that can be represented by the
            :obj:`inputs_ids` passed when calling :class:`~transformers.IBertModel`
        hidden_size (:obj:`int`, `optional`, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (:obj:`int`, `optional`, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (:obj:`int`, `optional`, defaults to 3072):
            Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer encoder.
        hidden_act (:obj:`str` or :obj:`Callable`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"silu"` and :obj:`"gelu_new"` are supported.
        hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (:obj:`int`, `optional`, defaults to 2):
            The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.IBertModel`
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        position_embedding_type (:obj:`str`, `optional`, defaults to :obj:`"absolute"`):
            Type of position embedding. Choose one of :obj:`"absolute"`, :obj:`"relative_key"`,
            :obj:`"relative_key_query"`. For positional embeddings use :obj:`"absolute"`. For more information on
            :obj:`"relative_key"`, please refer to `Self-Attention with Relative Position Representations (Shaw et
            al.) <https://arxiv.org/abs/1803.02155>`__. For more information on :obj:`"relative_key_query"`, please
            refer to `Method 4` in `Improve Transformer Models with Better Relative Position Embeddings (Huang et al.)
            <https://arxiv.org/abs/2009.13658>`__.
        quant_mode (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to quantize the model or not.
        force_dequant (:obj:`str`, `optional`, defaults to :obj:`"none"`):
            Force dequantize specific nonlinear layers. Dequantized layers are then executed with full precision.
            :obj:`"none"`, :obj:`"gelu"`, :obj:`"softmax"`, :obj:`"layernorm"` and :obj:`"nonlinear"` are supported. By
            default, it is set as :obj:`"none"`, which does not dequantize any layers. Please specify :obj:`"gelu"`,
            :obj:`"softmax"`, or :obj:`"layernorm"` to dequantize GELU, Softmax, or LayerNorm, respectively.
            :obj:`"nonlinear"` will dequantize all nonlinear layers, i.e., GELU, Softmax, and LayerNorm.
    """

    model_type = "ibert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        quant_mode=False,
        force_dequant="none",
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.quant_mode = quant_mode
        self.force_dequant = force_dequant
AdaMix/src/transformers/models/ibert/configuration_ibert.py/0
{ "file_path": "AdaMix/src/transformers/models/ibert/configuration_ibert.py", "repo_id": "AdaMix", "token_count": 2727 }
56
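A minimal sketch of the quantization-related options documented above. The specific force_dequant choice is only an example; the kssteven/ibert-roberta-base checkpoint mentioned in the comment comes from the file's own archive map.

from transformers import IBertConfig, IBertModel

# Integer-only mode, but keep LayerNorm in full precision via `force_dequant`.
config = IBertConfig(quant_mode=True, force_dequant="layernorm")
print(config.quant_mode, config.force_dequant)  # True layernorm

# Randomly initialized model from this config; pretrained weights could instead be
# loaded with IBertModel.from_pretrained("kssteven/ibert-roberta-base").
model = IBertModel(config)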
# coding=utf-8 # Copyright 2020 The Allen Institute for AI team and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch Longformer model. """ import math from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F from ...activations import ACT2FN, gelu from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_longformer import LongformerConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "allenai/longformer-base-4096" _CONFIG_FOR_DOC = "LongformerConfig" _TOKENIZER_FOR_DOC = "LongformerTokenizer" LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [ "allenai/longformer-base-4096", "allenai/longformer-large-4096", "allenai/longformer-large-4096-finetuned-triviaqa", "allenai/longformer-base-4096-extra.pos.embd.only", "allenai/longformer-large-4096-extra.pos.embd.only", # See all Longformer models at https://huggingface.co/models?filter=longformer ] @dataclass class LongformerBaseModelOutput(ModelOutput): """ Base class for Longformer's outputs, with potential hidden states, local and global attentions. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first ``x`` values) and to every token in the attention window (remaining ``attention_window + 1`` values). 
Note that the first ``x`` values refer to tokens with fixed positions in the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the attention weight of a token to itself is located at index ``x + attention_window / 2`` and the ``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window / 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x`` attention weights. If a token has global attention, the attention weights to all other tokens in :obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`. global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x)`, where ``x`` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ last_hidden_state: torch.FloatTensor hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None global_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LongformerBaseModelOutputWithPooling(ModelOutput): """ Base class for Longformer's outputs that also contains a pooling of the last hidden states. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pretraining. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first ``x`` values) and to every token in the attention window (remaining ``attention_window + 1`` values). 
Note that the first ``x`` values refer to tokens with fixed positions in the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the attention weight of a token to itself is located at index ``x + attention_window / 2`` and the ``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window / 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x`` attention weights. If a token has global attention, the attention weights to all other tokens in :obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`. global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x)`, where ``x`` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ last_hidden_state: torch.FloatTensor pooler_output: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None global_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LongformerMaskedLMOutput(ModelOutput): """ Base class for masked language models outputs. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Masked language modeling (MLM) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first ``x`` values) and to every token in the attention window (remaining ``attention_window + 1`` values). 
Note that the first ``x`` values refer to tokens with fixed positions in the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the attention weight of a token to itself is located at index ``x + attention_window / 2`` and the ``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window / 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x`` attention weights. If a token has global attention, the attention weights to all other tokens in :obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`. global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x)`, where ``x`` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None global_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LongformerQuestionAnsweringModelOutput(ModelOutput): """ Base class for outputs of question answering Longformer models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-start scores (before SoftMax). end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Span-end scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first ``x`` values) and to every token in the attention window (remaining ``attention_window + 1`` values). 
Note that the first ``x`` values refer to tokens with fixed positions in the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the attention weight of a token to itself is located at index ``x + attention_window / 2`` and the ``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window / 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x`` attention weights. If a token has global attention, the attention weights to all other tokens in :obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`. global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x)`, where ``x`` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None global_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LongformerSequenceClassifierOutput(ModelOutput): """ Base class for outputs of sentence classification models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first ``x`` values) and to every token in the attention window (remaining ``attention_window + 1`` values). 
Note that the first ``x`` values refer to tokens with fixed positions in the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the attention weight of a token to itself is located at index ``x + attention_window / 2`` and the ``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window / 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x`` attention weights. If a token has global attention, the attention weights to all other tokens in :obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`. global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x)`, where ``x`` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None global_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LongformerMultipleChoiceModelOutput(ModelOutput): """ Base class for outputs of multiple choice Longformer models. Args: loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first ``x`` values) and to every token in the attention window (remaining ``attention_window + 1`` values). 
Note that the first ``x`` values refer to tokens with fixed positions in the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the attention weight of a token to itself is located at index ``x + attention_window / 2`` and the ``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window / 2`` preceding (succeeding) tokens. If the attention window contains a token with global attention, the attention weight at the corresponding index is set to 0; the value should be accessed from the first ``x`` attention weights. If a token has global attention, the attention weights to all other tokens in :obj:`attentions` is set to 0, the values should be accessed from :obj:`global_attentions`. global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x)`, where ``x`` is the number of tokens with global attention mask. Global attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token with global attention to every token in the sequence. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None global_attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class LongformerTokenClassifierOutput(ModelOutput): """ Base class for outputs of token classification models. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, x + attention_window + 1)`, where ``x`` is the number of tokens with global attention mask. Local attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. Those are the attention weights from every token in the sequence to every token with global attention (first ``x`` values) and to every token in the attention window (remaining ``attention_window + 1`` values). Note that the first ``x`` values refer to tokens with fixed positions in the text, but the remaining ``attention_window + 1`` values refer to tokens with relative positions: the attention weight of a token to itself is located at index ``x + attention_window / 2`` and the ``attention_window / 2`` preceding (succeeding) values are the attention weights to the ``attention_window / 2`` preceding (succeeding) tokens. 
            If the attention window contains a token with global attention, the attention weight at the corresponding
            index is set to 0; the value should be accessed from the first ``x`` attention weights. If a token has
            global attention, the attention weights to all other tokens in :obj:`attentions` is set to 0, the values
            should be accessed from :obj:`global_attentions`.
        global_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads,
            sequence_length, x)`, where ``x`` is the number of tokens with global attention mask.

            Global attentions weights after the attention softmax, used to compute the weighted average in the
            self-attention heads. Those are the attention weights from every token with global attention to every
            token in the sequence.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    global_attentions: Optional[Tuple[torch.FloatTensor]] = None


def _get_question_end_index(input_ids, sep_token_id):
    """
    Computes the index of the first occurrence of `sep_token_id`.
    """
    sep_token_indices = (input_ids == sep_token_id).nonzero()
    batch_size = input_ids.shape[0]

    assert sep_token_indices.shape[1] == 2, "`input_ids` should have two dimensions"
    assert (
        sep_token_indices.shape[0] == 3 * batch_size
    ), f"There should be exactly three separator tokens: {sep_token_id} in every sample for question answering. You might also consider setting `global_attention_mask` manually in the forward function to avoid this error."
    return sep_token_indices.view(batch_size, 3, 2)[:, 0, 1]


def _compute_global_attention_mask(input_ids, sep_token_id, before_sep_token=True):
    """
    Computes global attention mask by putting attention on all tokens before `sep_token_id` if `before_sep_token is
    True` else after `sep_token_id`.
    """
    question_end_index = _get_question_end_index(input_ids, sep_token_id)
    question_end_index = question_end_index.unsqueeze(dim=1)  # size: batch_size x 1
    # bool attention mask with True in locations of global attention
    attention_mask = torch.arange(input_ids.shape[1], device=input_ids.device)
    if before_sep_token is True:
        attention_mask = (attention_mask.expand_as(input_ids) < question_end_index).to(torch.uint8)
    else:
        # the last token is a separator token and should not be counted; in the middle there are two separator tokens
        attention_mask = (attention_mask.expand_as(input_ids) > (question_end_index + 1)).to(torch.uint8) * (
            attention_mask.expand_as(input_ids) < input_ids.shape[-1]
        ).to(torch.uint8)

    return attention_mask


def create_position_ids_from_input_ids(input_ids, padding_idx):
    """
    Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols
    are ignored. This is modified from fairseq's `utils.make_positions`.

    Args:
        input_ids: torch.Tensor
        padding_idx: int

    Returns: torch.Tensor
    """
    # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA.
    mask = input_ids.ne(padding_idx).int()
    incremental_indices = torch.cumsum(mask, dim=1).type_as(mask) * mask
    return incremental_indices.long() + padding_idx


class LongformerEmbeddings(nn.Module):
    """
    Same as BertEmbeddings with a tiny tweak for positional embeddings indexing.
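    Position ids are built with :func:`create_position_ids_from_input_ids` above, so padding tokens keep
    ``padding_idx`` as their position id and real tokens are numbered starting at ``padding_idx + 1``. A small
    illustrative example (assuming ``padding_idx = 1``)::

        input_ids    = [[5, 7, 9, 1, 1]]
        position_ids = [[2, 3, 4, 1, 1]]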
""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) self.position_embedding_type = getattr(config, "position_embedding_type", "absolute") self.padding_idx = config.pad_token_id self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size, padding_idx=self.padding_idx ) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if position_ids is None: if input_ids is not None: # Create the position ids from the input token ids. Any padded tokens remain padded. position_ids = create_position_ids_from_input_ids(input_ids, self.padding_idx).to(input_ids.device) else: position_ids = self.create_position_ids_from_inputs_embeds(inputs_embeds) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings def create_position_ids_from_inputs_embeds(self, inputs_embeds): """ We are provided embeddings directly. We cannot infer which are padded so just generate sequential position ids. 
        Args:
            inputs_embeds: torch.Tensor

        Returns: torch.Tensor
        """
        input_shape = inputs_embeds.size()[:-1]
        sequence_length = input_shape[1]

        position_ids = torch.arange(
            self.padding_idx + 1, sequence_length + self.padding_idx + 1, dtype=torch.long, device=inputs_embeds.device
        )
        return position_ids.unsqueeze(0).expand(input_shape)


class LongformerSelfAttention(nn.Module):
    def __init__(self, config, layer_id):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads)
            )
        self.num_heads = config.num_attention_heads
        self.head_dim = int(config.hidden_size / config.num_attention_heads)
        self.embed_dim = config.hidden_size

        self.query = nn.Linear(config.hidden_size, self.embed_dim)
        self.key = nn.Linear(config.hidden_size, self.embed_dim)
        self.value = nn.Linear(config.hidden_size, self.embed_dim)

        # separate projection layers for tokens with global attention
        self.query_global = nn.Linear(config.hidden_size, self.embed_dim)
        self.key_global = nn.Linear(config.hidden_size, self.embed_dim)
        self.value_global = nn.Linear(config.hidden_size, self.embed_dim)

        self.dropout = config.attention_probs_dropout_prob

        self.layer_id = layer_id
        attention_window = config.attention_window[self.layer_id]
        assert (
            attention_window % 2 == 0
        ), f"`attention_window` for layer {self.layer_id} has to be an even value. Given {attention_window}"
        assert (
            attention_window > 0
        ), f"`attention_window` for layer {self.layer_id} has to be positive. Given {attention_window}"

        self.one_sided_attn_window_size = attention_window // 2

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        layer_head_mask=None,
        is_index_masked=None,
        is_index_global_attn=None,
        is_global_attn=None,
        output_attentions=False,
    ):
        """
        :class:`LongformerSelfAttention` expects `len(hidden_states)` to be a multiple of `attention_window`. Padding
        to `attention_window` happens in :meth:`LongformerModel.forward` to avoid redoing the padding on each layer.
The `attention_mask` is changed in :meth:`LongformerModel.forward` from 0, 1, 2 to: * -10000: no attention * 0: local attention * +10000: global attention """ hidden_states = hidden_states.transpose(0, 1) # project hidden states query_vectors = self.query(hidden_states) key_vectors = self.key(hidden_states) value_vectors = self.value(hidden_states) seq_len, batch_size, embed_dim = hidden_states.size() assert ( embed_dim == self.embed_dim ), f"hidden_states should have embed_dim = {self.embed_dim}, but has {embed_dim}" # normalize query query_vectors /= math.sqrt(self.head_dim) query_vectors = query_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) key_vectors = key_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) attn_scores = self._sliding_chunks_query_key_matmul( query_vectors, key_vectors, self.one_sided_attn_window_size ) # values to pad for attention probs remove_from_windowed_attention_mask = (attention_mask != 0)[:, :, None, None] # cast to fp32/fp16 then replace 1's with -inf float_mask = remove_from_windowed_attention_mask.type_as(query_vectors).masked_fill( remove_from_windowed_attention_mask, -10000.0 ) # diagonal mask with zeros everywhere and -inf inplace of padding diagonal_mask = self._sliding_chunks_query_key_matmul( float_mask.new_ones(size=float_mask.size()), float_mask, self.one_sided_attn_window_size ) # pad local attention probs attn_scores += diagonal_mask assert list(attn_scores.size()) == [ batch_size, seq_len, self.num_heads, self.one_sided_attn_window_size * 2 + 1, ], f"local_attn_probs should be of size ({batch_size}, {seq_len}, {self.num_heads}, {self.one_sided_attn_window_size * 2 + 1}), but is of size {attn_scores.size()}" # compute local attention probs from global attention keys and contact over window dim if is_global_attn: # compute global attn indices required through out forward fn ( max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) = self._get_global_attn_indices(is_index_global_attn) # calculate global attn probs from global key global_key_attn_scores = self._concat_with_global_key_attn_probs( query_vectors=query_vectors, key_vectors=key_vectors, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, ) # concat to local_attn_probs # (batch_size, seq_len, num_heads, extra attention count + 2*window+1) attn_scores = torch.cat((global_key_attn_scores, attn_scores), dim=-1) # free memory del global_key_attn_scores attn_probs = F.softmax(attn_scores, dim=-1, dtype=torch.float32) # use fp32 for numerical stability if layer_head_mask is not None: assert layer_head_mask.size() == ( self.num_heads, ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" attn_probs = layer_head_mask.view(1, 1, -1, 1) * attn_probs # softmax sometimes inserts NaN if all positions are masked, replace them with 0 attn_probs = torch.masked_fill(attn_probs, is_index_masked[:, :, None, None], 0.0) attn_probs = attn_probs.type_as(attn_scores) # free memory del attn_scores # apply dropout attn_probs = F.dropout(attn_probs, p=self.dropout, training=self.training) value_vectors = value_vectors.view(seq_len, batch_size, self.num_heads, self.head_dim).transpose(0, 1) # compute local 
attention output with global attention value and add if is_global_attn: # compute sum of global and local attn attn_output = self._compute_attn_output_with_global_indices( value_vectors=value_vectors, attn_probs=attn_probs, max_num_global_attn_indices=max_num_global_attn_indices, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, ) else: # compute local attn only attn_output = self._sliding_chunks_matmul_attn_probs_value( attn_probs, value_vectors, self.one_sided_attn_window_size ) assert attn_output.size() == (batch_size, seq_len, self.num_heads, self.head_dim), "Unexpected size" attn_output = attn_output.transpose(0, 1).reshape(seq_len, batch_size, embed_dim).contiguous() # compute value for global attention and overwrite to attention output # TODO: remove the redundant computation if is_global_attn: global_attn_output, global_attn_probs = self._compute_global_attn_output_from_hidden( hidden_states=hidden_states, max_num_global_attn_indices=max_num_global_attn_indices, layer_head_mask=layer_head_mask, is_local_index_global_attn_nonzero=is_local_index_global_attn_nonzero, is_index_global_attn_nonzero=is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero=is_local_index_no_global_attn_nonzero, is_index_masked=is_index_masked, ) # get only non zero global attn output nonzero_global_attn_output = global_attn_output[ is_local_index_global_attn_nonzero[0], :, is_local_index_global_attn_nonzero[1] ] # overwrite values with global attention attn_output[is_index_global_attn_nonzero[::-1]] = nonzero_global_attn_output.view( len(is_local_index_global_attn_nonzero[0]), -1 ) # The attention weights for tokens with global attention are # just filler values, they were never used to compute the output. # Fill with 0 now, the correct values are in 'global_attn_probs'. attn_probs[is_index_global_attn_nonzero] = 0 outputs = (attn_output.transpose(0, 1),) if output_attentions: outputs += (attn_probs,) return outputs + (global_attn_probs,) if (is_global_attn and output_attentions) else outputs @staticmethod def _pad_and_transpose_last_two_dims(hidden_states_padded, padding): """pads rows and then flips rows and columns""" hidden_states_padded = F.pad( hidden_states_padded, padding ) # padding value is not important because it will be overwritten hidden_states_padded = hidden_states_padded.view( *hidden_states_padded.size()[:-2], hidden_states_padded.size(-1), hidden_states_padded.size(-2) ) return hidden_states_padded @staticmethod def _pad_and_diagonalize(chunked_hidden_states): """ shift every row 1 step right, converting columns into diagonals. Example:: chunked_hidden_states: [ 0.4983, 2.6918, -0.0071, 1.0492, -1.8348, 0.7672, 0.2986, 0.0285, -0.7584, 0.4206, -0.0405, 0.1599, 2.0514, -1.1600, 0.5372, 0.2629 ] window_overlap = num_rows = 4 (pad & diagonalize) => [ 0.4983, 2.6918, -0.0071, 1.0492, 0.0000, 0.0000, 0.0000 0.0000, -1.8348, 0.7672, 0.2986, 0.0285, 0.0000, 0.0000 0.0000, 0.0000, -0.7584, 0.4206, -0.0405, 0.1599, 0.0000 0.0000, 0.0000, 0.0000, 2.0514, -1.1600, 0.5372, 0.2629 ] """ total_num_heads, num_chunks, window_overlap, hidden_dim = chunked_hidden_states.size() chunked_hidden_states = F.pad( chunked_hidden_states, (0, window_overlap + 1) ) # total_num_heads x num_chunks x window_overlap x (hidden_dim+window_overlap+1). 
Padding value is not important because it'll be overwritten chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, -1 ) # total_num_heads x num_chunks x window_overlap*window_overlap+window_overlap chunked_hidden_states = chunked_hidden_states[ :, :, :-window_overlap ] # total_num_heads x num_chunks x window_overlap*window_overlap chunked_hidden_states = chunked_hidden_states.view( total_num_heads, num_chunks, window_overlap, window_overlap + hidden_dim ) chunked_hidden_states = chunked_hidden_states[:, :, :, :-1] return chunked_hidden_states @staticmethod def _chunk(hidden_states, window_overlap): """convert into overlapping chunks. Chunk size = 2w, overlap size = w""" # non-overlapping chunks of size = 2w hidden_states = hidden_states.view( hidden_states.size(0), hidden_states.size(1) // (window_overlap * 2), window_overlap * 2, hidden_states.size(2), ) # use `as_strided` to make the chunks overlap with an overlap size = window_overlap chunk_size = list(hidden_states.size()) chunk_size[1] = chunk_size[1] * 2 - 1 chunk_stride = list(hidden_states.stride()) chunk_stride[1] = chunk_stride[1] // 2 return hidden_states.as_strided(size=chunk_size, stride=chunk_stride) @staticmethod def _mask_invalid_locations(input_tensor, affected_seq_len) -> torch.Tensor: beginning_mask_2d = input_tensor.new_ones(affected_seq_len, affected_seq_len + 1).tril().flip(dims=[0]) beginning_mask = beginning_mask_2d[None, :, None, :] ending_mask = beginning_mask.flip(dims=(1, 3)) beginning_input = input_tensor[:, :affected_seq_len, :, : affected_seq_len + 1] beginning_mask = beginning_mask.expand(beginning_input.size()) beginning_input.masked_fill_(beginning_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 ending_input = input_tensor[:, -affected_seq_len:, :, -(affected_seq_len + 1) :] ending_mask = ending_mask.expand(ending_input.size()) ending_input.masked_fill_(ending_mask == 1, -float("inf")) # `== 1` converts to bool or uint8 def _sliding_chunks_query_key_matmul(self, query: torch.Tensor, key: torch.Tensor, window_overlap: int): """ Matrix multiplication of query and key tensors using with a sliding window attention pattern. This implementation splits the input into overlapping chunks of size 2w (e.g. 512 for pretrained Longformer) with an overlap of size window_overlap """ batch_size, seq_len, num_heads, head_dim = query.size() assert ( seq_len % (window_overlap * 2) == 0 ), f"Sequence length should be multiple of {window_overlap * 2}. Given {seq_len}" assert query.size() == key.size() chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size window_overlap * 2 query = query.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) key = key.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) query = self._chunk(query, window_overlap) key = self._chunk(key, window_overlap) # matrix multiplication # bcxd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcyd: batch_size * num_heads x chunks x 2window_overlap x head_dim # bcxy: batch_size * num_heads x chunks x 2window_overlap x window_overlap diagonal_chunked_attention_scores = torch.einsum("bcxd,bcyd->bcxy", (query, key)) # multiply # convert diagonals into columns diagonal_chunked_attention_scores = self._pad_and_transpose_last_two_dims( diagonal_chunked_attention_scores, padding=(0, 0, 0, 1) ) # allocate space for the overall attention matrix where the chunks are combined. 
The last dimension # has (window_overlap * 2 + 1) columns. The first (window_overlap) columns are the window_overlap lower triangles (attention from a word to # window_overlap previous words). The following column is attention score from each word to itself, then # followed by window_overlap columns for the upper triangle. diagonal_attention_scores = diagonal_chunked_attention_scores.new_empty( (batch_size * num_heads, chunks_count + 1, window_overlap, window_overlap * 2 + 1) ) # copy parts from diagonal_chunked_attention_scores into the combined matrix of attentions # - copying the main diagonal and the upper triangle diagonal_attention_scores[:, :-1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, :, :window_overlap, : window_overlap + 1 ] diagonal_attention_scores[:, -1, :, window_overlap:] = diagonal_chunked_attention_scores[ :, -1, window_overlap:, : window_overlap + 1 ] # - copying the lower triangle diagonal_attention_scores[:, 1:, :, :window_overlap] = diagonal_chunked_attention_scores[ :, :, -(window_overlap + 1) : -1, window_overlap + 1 : ] diagonal_attention_scores[:, 0, 1:window_overlap, 1:window_overlap] = diagonal_chunked_attention_scores[ :, 0, : window_overlap - 1, 1 - window_overlap : ] # separate batch_size and num_heads dimensions again diagonal_attention_scores = diagonal_attention_scores.view( batch_size, num_heads, seq_len, 2 * window_overlap + 1 ).transpose(2, 1) self._mask_invalid_locations(diagonal_attention_scores, window_overlap) return diagonal_attention_scores def _sliding_chunks_matmul_attn_probs_value( self, attn_probs: torch.Tensor, value: torch.Tensor, window_overlap: int ): """ Same as _sliding_chunks_query_key_matmul but for attn_probs and value tensors. Returned tensor will be of the same shape as `attn_probs` """ batch_size, seq_len, num_heads, head_dim = value.size() assert seq_len % (window_overlap * 2) == 0 assert attn_probs.size()[:3] == value.size()[:3] assert attn_probs.size(3) == 2 * window_overlap + 1 chunks_count = seq_len // window_overlap - 1 # group batch_size and num_heads dimensions into one, then chunk seq_len into chunks of size 2 window overlap chunked_attn_probs = attn_probs.transpose(1, 2).reshape( batch_size * num_heads, seq_len // window_overlap, window_overlap, 2 * window_overlap + 1 ) # group batch_size and num_heads dimensions into one value = value.transpose(1, 2).reshape(batch_size * num_heads, seq_len, head_dim) # pad seq_len with w at the beginning of the sequence and another window overlap at the end padded_value = F.pad(value, (0, 0, window_overlap, window_overlap), value=-1) # chunk padded_value into chunks of size 3 window overlap and an overlap of size window overlap chunked_value_size = (batch_size * num_heads, chunks_count + 1, 3 * window_overlap, head_dim) chunked_value_stride = padded_value.stride() chunked_value_stride = ( chunked_value_stride[0], window_overlap * chunked_value_stride[1], chunked_value_stride[1], chunked_value_stride[2], ) chunked_value = padded_value.as_strided(size=chunked_value_size, stride=chunked_value_stride) chunked_attn_probs = self._pad_and_diagonalize(chunked_attn_probs) context = torch.einsum("bcwd,bcdh->bcwh", (chunked_attn_probs, chunked_value)) return context.view(batch_size, num_heads, seq_len, head_dim).transpose(1, 2) @staticmethod def _get_global_attn_indices(is_index_global_attn): """ compute global attn indices required throughout forward pass """ # helper variable num_global_attn_indices = is_index_global_attn.long().sum(dim=1) # max number of global attn indices in 
batch max_num_global_attn_indices = num_global_attn_indices.max() # indices of global attn is_index_global_attn_nonzero = is_index_global_attn.nonzero(as_tuple=True) # helper variable is_local_index_global_attn = torch.arange( max_num_global_attn_indices, device=is_index_global_attn.device ) < num_global_attn_indices.unsqueeze(dim=-1) # location of the non-padding values within global attention indices is_local_index_global_attn_nonzero = is_local_index_global_attn.nonzero(as_tuple=True) # location of the padding values within global attention indices is_local_index_no_global_attn_nonzero = (is_local_index_global_attn == 0).nonzero(as_tuple=True) return ( max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ) def _concat_with_global_key_attn_probs( self, key_vectors, query_vectors, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, ): batch_size = key_vectors.shape[0] # create only global key vectors key_vectors_only_global = key_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) key_vectors_only_global[is_local_index_global_attn_nonzero] = key_vectors[is_index_global_attn_nonzero] # (batch_size, seq_len, num_heads, max_num_global_attn_indices) attn_probs_from_global_key = torch.einsum("blhd,bshd->blhs", (query_vectors, key_vectors_only_global)) attn_probs_from_global_key[ is_local_index_no_global_attn_nonzero[0], :, :, is_local_index_no_global_attn_nonzero[1] ] = -10000.0 return attn_probs_from_global_key def _compute_attn_output_with_global_indices( self, value_vectors, attn_probs, max_num_global_attn_indices, is_index_global_attn_nonzero, is_local_index_global_attn_nonzero, ): batch_size = attn_probs.shape[0] # cut local attn probs to global only attn_probs_only_global = attn_probs.narrow(-1, 0, max_num_global_attn_indices) # get value vectors for global only value_vectors_only_global = value_vectors.new_zeros( batch_size, max_num_global_attn_indices, self.num_heads, self.head_dim ) value_vectors_only_global[is_local_index_global_attn_nonzero] = value_vectors[is_index_global_attn_nonzero] # use `matmul` because `einsum` crashes sometimes with fp16 # attn = torch.einsum('blhs,bshd->blhd', (selected_attn_probs, selected_v)) # compute attn output only global attn_output_only_global = torch.matmul( attn_probs_only_global.transpose(1, 2), value_vectors_only_global.transpose(1, 2) ).transpose(1, 2) # reshape attn probs attn_probs_without_global = attn_probs.narrow( -1, max_num_global_attn_indices, attn_probs.size(-1) - max_num_global_attn_indices ).contiguous() # compute attn output with global attn_output_without_global = self._sliding_chunks_matmul_attn_probs_value( attn_probs_without_global, value_vectors, self.one_sided_attn_window_size ) return attn_output_only_global + attn_output_without_global def _compute_global_attn_output_from_hidden( self, hidden_states, max_num_global_attn_indices, layer_head_mask, is_local_index_global_attn_nonzero, is_index_global_attn_nonzero, is_local_index_no_global_attn_nonzero, is_index_masked, ): seq_len, batch_size = hidden_states.shape[:2] # prepare global hidden states global_attn_hidden_states = hidden_states.new_zeros(max_num_global_attn_indices, batch_size, self.embed_dim) global_attn_hidden_states[is_local_index_global_attn_nonzero[::-1]] = hidden_states[ is_index_global_attn_nonzero[::-1] ] # global key, query, value 
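        # only the positions selected by the global attention mask were gathered into `global_attn_hidden_states`
        # above; the global keys and values below are still computed over the full (padded) sequence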
global_query_vectors_only_global = self.query_global(global_attn_hidden_states) global_key_vectors = self.key_global(hidden_states) global_value_vectors = self.value_global(hidden_states) # normalize global_query_vectors_only_global /= math.sqrt(self.head_dim) # reshape global_query_vectors_only_global = ( global_query_vectors_only_global.contiguous() .view(max_num_global_attn_indices, batch_size * self.num_heads, self.head_dim) .transpose(0, 1) ) # (batch_size * self.num_heads, max_num_global_attn_indices, head_dim) global_key_vectors = ( global_key_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) global_value_vectors = ( global_value_vectors.contiguous().view(-1, batch_size * self.num_heads, self.head_dim).transpose(0, 1) ) # batch_size * self.num_heads, seq_len, head_dim) # compute attn scores global_attn_scores = torch.bmm(global_query_vectors_only_global, global_key_vectors.transpose(1, 2)) assert list(global_attn_scores.size()) == [ batch_size * self.num_heads, max_num_global_attn_indices, seq_len, ], f"global_attn_scores have the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, seq_len)}, but is {global_attn_scores.size()}." global_attn_scores = global_attn_scores.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_scores[ is_local_index_no_global_attn_nonzero[0], :, is_local_index_no_global_attn_nonzero[1], : ] = -10000.0 global_attn_scores = global_attn_scores.masked_fill( is_index_masked[:, None, None, :], -10000.0, ) global_attn_scores = global_attn_scores.view(batch_size * self.num_heads, max_num_global_attn_indices, seq_len) # compute global attn probs global_attn_probs_float = F.softmax( global_attn_scores, dim=-1, dtype=torch.float32 ) # use fp32 for numerical stability # apply layer head masking if layer_head_mask is not None: assert layer_head_mask.size() == ( self.num_heads, ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" global_attn_probs_float = layer_head_mask.view(1, -1, 1, 1) * global_attn_probs_float.view( batch_size, self.num_heads, max_num_global_attn_indices, seq_len ) global_attn_probs_float = global_attn_probs_float.view( batch_size * self.num_heads, max_num_global_attn_indices, seq_len ) global_attn_probs = F.dropout( global_attn_probs_float.type_as(global_attn_scores), p=self.dropout, training=self.training ) # global attn output global_attn_output = torch.bmm(global_attn_probs, global_value_vectors) assert list(global_attn_output.size()) == [ batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim, ], f"global_attn_output tensor has the wrong size. Size should be {(batch_size * self.num_heads, max_num_global_attn_indices, self.head_dim)}, but is {global_attn_output.size()}." 
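        # reshape back so each attention head is indexed separately:
        # global_attn_probs  -> (batch_size, num_heads, max_num_global_attn_indices, seq_len)
        # global_attn_output -> (batch_size, num_heads, max_num_global_attn_indices, head_dim)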
global_attn_probs = global_attn_probs.view(batch_size, self.num_heads, max_num_global_attn_indices, seq_len) global_attn_output = global_attn_output.view( batch_size, self.num_heads, max_num_global_attn_indices, self.head_dim ) return global_attn_output, global_attn_probs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class LongformerSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LongformerAttention(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.self = LongformerSelfAttention(config, layer_id) self.output = LongformerSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions, ) attn_output = self.output(self_outputs[0], hidden_states) outputs = (attn_output,) + self_outputs[1:] return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class LongformerIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class LongformerOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class LongformerLayer(nn.Module): def __init__(self, config, layer_id=0): super().__init__() self.attention = LongformerAttention(config, 
layer_id) self.intermediate = LongformerIntermediate(config) self.output = LongformerOutput(config) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward( self, hidden_states, attention_mask=None, layer_head_mask=None, is_index_masked=None, is_index_global_attn=None, is_global_attn=None, output_attentions=False, ): self_attn_outputs = self.attention( hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions, ) attn_output = self_attn_outputs[0] outputs = self_attn_outputs[1:] layer_output = apply_chunking_to_forward( self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attn_output ) outputs = (layer_output,) + outputs return outputs def ff_chunk(self, attn_output): intermediate_output = self.intermediate(attn_output) layer_output = self.output(intermediate_output, attn_output) return layer_output class LongformerEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([LongformerLayer(config, layer_id=i) for i in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): is_index_masked = attention_mask < 0 is_index_global_attn = attention_mask > 0 is_global_attn = is_index_global_attn.flatten().any().item() all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # All local attentions. all_global_attentions = () if (output_attentions and is_global_attn) else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layer) ), f"The head_mask should be specified for {len(self.layer)} layers, but it is for {head_mask.size()[0]}." 
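        # run every layer; when `config.gradient_checkpointing` is enabled during training, activations are
        # recomputed in the backward pass to trade compute for memory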
for idx, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if getattr(self.config, "gradient_checkpointing", False) and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, is_global_attn, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, head_mask[idx] if head_mask is not None else None, is_index_masked, is_index_global_attn, ) else: layer_outputs = layer_module( hidden_states, attention_mask=attention_mask, layer_head_mask=head_mask[idx] if head_mask is not None else None, is_index_masked=is_index_masked, is_index_global_attn=is_index_global_attn, is_global_attn=is_global_attn, output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: # bzs x seq_len x num_attn_heads x (num_global_attn + attention_window_len + 1) => bzs x num_attn_heads x seq_len x (num_global_attn + attention_window_len + 1) all_attentions = all_attentions + (layer_outputs[1].transpose(1, 2),) if is_global_attn: # bzs x num_attn_heads x num_global_attn x seq_len => bzs x num_attn_heads x seq_len x num_global_attn all_global_attentions = all_global_attentions + (layer_outputs[2].transpose(2, 3),) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, all_hidden_states, all_attentions, all_global_attentions] if v is not None ) return LongformerBaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions, global_attentions=all_global_attentions, ) # Copied from transformers.models.bert.modeling_bert.BertPooler class LongformerPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output # Copied from transformers.models.roberta.modeling_roberta.RobertaLMHead with Roberta->Longformer class LongformerLMHead(nn.Module): """Longformer Head for masked language modeling.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.layer_norm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, features, **kwargs): x = self.dense(features) x = gelu(x) x = self.layer_norm(x) # project back to size of vocabulary with bias x = self.decoder(x) return x class LongformerPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = LongformerConfig base_model_prefix = "longformer" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) LONGFORMER_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.LongformerConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ LONGFORMER_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.LongformerTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ global_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to decide the attention given on each token, local attention or global attention. Tokens with global attention attends to all other tokens, and all other tokens attend to them. This is important for task-specific finetuning because it makes the model more flexible at representing the task. For example, for classification, the <s> token should be given global attention. For QA, all question tokens should also have global attention. Please refer to the `Longformer paper <https://arxiv.org/abs/2004.05150>`__ for more details. Mask values selected in ``[0, 1]``: - 0 for local attention (a sliding window attention), - 1 for global attention (tokens that attend to all other tokens, and all other tokens attend to them). head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. 
        token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
            1]``:

            - 0 corresponds to a `sentence A` token,
            - 1 corresponds to a `sentence B` token.

            `What are token type IDs? <../glossary.html#token-type-ids>`_
        position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Indices of positions of each input sequence token in the position embeddings. Selected in the range ``[0,
            config.max_position_embeddings - 1]``.

            `What are position IDs? <../glossary.html#position-ids>`_
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
            representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into
            associated vectors than the model's internal embedding lookup matrix.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare Longformer Model outputting raw hidden-states without any specific head on top.",
    LONGFORMER_START_DOCSTRING,
)
class LongformerModel(LongformerPreTrainedModel):
    """
    This class copies code from :class:`~transformers.RobertaModel` and overwrites standard self-attention with
    longformer self-attention to provide the ability to process long sequences following the self-attention approach
    described in `Longformer: the Long-Document Transformer <https://arxiv.org/abs/2004.05150>`__ by Iz Beltagy,
    Matthew E. Peters, and Arman Cohan.

    Longformer self-attention combines a local (sliding window) and global attention to extend to long documents
    without the O(n^2) increase in memory and compute.

    The self-attention module :obj:`LongformerSelfAttention` implemented here supports the combination of local and
    global attention but it lacks support for autoregressive attention and dilated attention. Autoregressive and
    dilated attention are more relevant for autoregressive language modeling than finetuning on downstream tasks. A
    future release will add support for autoregressive attention, but the support for dilated attention requires a
    custom CUDA kernel to be memory and compute efficient.
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config

        if isinstance(config.attention_window, int):
            assert config.attention_window % 2 == 0, "`config.attention_window` has to be an even value"
            assert config.attention_window > 0, "`config.attention_window` has to be positive"
            config.attention_window = [config.attention_window] * config.num_hidden_layers  # one value per layer
        else:
            assert len(config.attention_window) == config.num_hidden_layers, (
                "`len(config.attention_window)` should equal `config.num_hidden_layers`. 
" f"Expected {config.num_hidden_layers}, given {len(config.attention_window)}" ) self.embeddings = LongformerEmbeddings(config) self.encoder = LongformerEncoder(config) self.pooler = LongformerPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) def _pad_to_window_size( self, input_ids: torch.Tensor, attention_mask: torch.Tensor, token_type_ids: torch.Tensor, position_ids: torch.Tensor, inputs_embeds: torch.Tensor, pad_token_id: int, ): """A helper function to pad tokens and mask to work with implementation of Longformer self-attention.""" # padding attention_window = ( self.config.attention_window if isinstance(self.config.attention_window, int) else max(self.config.attention_window) ) assert attention_window % 2 == 0, f"`attention_window` should be an even value. Given {attention_window}" input_shape = input_ids.shape if input_ids is not None else inputs_embeds.shape batch_size, seq_len = input_shape[:2] padding_len = (attention_window - seq_len % attention_window) % attention_window if padding_len > 0: logger.info( "Input ids are automatically padded from {} to {} to be a multiple of `config.attention_window`: {}".format( seq_len, seq_len + padding_len, attention_window ) ) if input_ids is not None: input_ids = F.pad(input_ids, (0, padding_len), value=pad_token_id) if position_ids is not None: # pad with position_id = pad_token_id as in modeling_roberta.RobertaEmbeddings position_ids = F.pad(position_ids, (0, padding_len), value=pad_token_id) if inputs_embeds is not None: input_ids_padding = inputs_embeds.new_full( (batch_size, padding_len), self.config.pad_token_id, dtype=torch.long, ) inputs_embeds_padding = self.embeddings(input_ids_padding) inputs_embeds = torch.cat([inputs_embeds, inputs_embeds_padding], dim=-2) attention_mask = F.pad(attention_mask, (0, padding_len), value=False) # no attention on the padding tokens token_type_ids = F.pad(token_type_ids, (0, padding_len), value=0) # pad with token_type_id = 0 return padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds def _merge_to_attention_mask(self, attention_mask: torch.Tensor, global_attention_mask: torch.Tensor): # longformer self attention expects attention mask to have 0 (no attn), 1 (local attn), 2 (global attn) # (global_attention_mask + 1) => 1 for local attention, 2 for global attention # => final attention_mask => 0 for no attention, 1 for local attention 2 for global attention if attention_mask is not None: attention_mask = attention_mask * (global_attention_mask + 1) else: # simply use `global_attention_mask` as `attention_mask` # if no `attention_mask` is given attention_mask = global_attention_mask + 1 return attention_mask @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LongformerBaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, 
return_dict=None, ): r""" Returns: Examples:: >>> import torch >>> from transformers import LongformerModel, LongformerTokenizer >>> model = LongformerModel.from_pretrained('allenai/longformer-base-4096') >>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') >>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 >>> # Attention mask values -- 0: no attention, 1: local attention, 2: global attention >>> attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize to local attention >>> global_attention_mask = torch.zeros(input_ids.shape, dtype=torch.long, device=input_ids.device) # initialize to global attention to be deactivated for all tokens >>> global_attention_mask[:, [1, 4, 21,]] = 1 # Set global attention to random tokens for the sake of this example ... # Usually, set global attention based on the task. For example, ... # classification: the <s> token ... # QA: question tokens ... # LM: potentially on the beginning of sentences and paragraphs >>> outputs = model(input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask) >>> sequence_output = outputs.last_hidden_state >>> pooled_output = outputs.pooler_output """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # merge `global_attention_mask` and `attention_mask` if global_attention_mask is not None: attention_mask = self._merge_to_attention_mask(attention_mask, global_attention_mask) padding_len, input_ids, attention_mask, token_type_ids, position_ids, inputs_embeds = self._pad_to_window_size( input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, pad_token_id=self.config.pad_token_id, ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)[ :, 0, 0, : ] embedding_output = self.embeddings( input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds ) encoder_outputs = self.encoder( embedding_output, attention_mask=extended_attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = encoder_outputs[0] pooled_output = self.pooler(sequence_output) if self.pooler is not None else None # undo padding if padding_len > 0: # unpad `sequence_output` because the calling function is expecting a length == input_ids.size(1) sequence_output = sequence_output[:, :-padding_len] if not return_dict: return (sequence_output, pooled_output) + encoder_outputs[1:] return LongformerBaseModelOutputWithPooling( last_hidden_state=sequence_output, pooler_output=pooled_output, hidden_states=encoder_outputs.hidden_states, attentions=encoder_outputs.attentions, global_attentions=encoder_outputs.global_attentions, ) @add_start_docstrings("""Longformer Model with a `language modeling` head on top. """, LONGFORMER_START_DOCSTRING) class LongformerForMaskedLM(LongformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.longformer = LongformerModel(config, add_pooling_layer=False) self.lm_head = LongformerLMHead(config) self.init_weights() def get_output_embeddings(self): return self.lm_head.decoder def set_output_embeddings(self, new_embeddings): self.lm_head.decoder = new_embeddings @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LongformerMaskedLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` kwargs (:obj:`Dict[str, any]`, optional, defaults to `{}`): Used to hide legacy arguments that have been deprecated. Returns: Examples:: >>> import torch >>> from transformers import LongformerForMaskedLM, LongformerTokenizer >>> model = LongformerForMaskedLM.from_pretrained('allenai/longformer-base-4096') >>> tokenizer = LongformerTokenizer.from_pretrained('allenai/longformer-base-4096') >>> SAMPLE_TEXT = ' '.join(['Hello world! '] * 1000) # long input document >>> input_ids = torch.tensor(tokenizer.encode(SAMPLE_TEXT)).unsqueeze(0) # batch of size 1 >>> attention_mask = None # default is local attention everywhere, which is a good choice for MaskedLM ... 
# check ``LongformerModel.forward`` for more details how to set `attention_mask` >>> outputs = model(input_ids, attention_mask=attention_mask, labels=input_ids) >>> loss = outputs.loss >>> prediction_logits = output.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return LongformerMaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Longformer Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, LONGFORMER_START_DOCSTRING, ) class LongformerForSequenceClassification(LongformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config, add_pooling_layer=False) self.classifier = LongformerClassificationHead(config) self.init_weights() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LongformerSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict if global_attention_mask is None: logger.info("Initializing global attention on CLS token...") global_attention_mask = torch.zeros_like(input_ids) # global attention on cls token global_attention_mask[:, 0] = 1 outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return LongformerSequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class LongformerClassificationHead(nn.Module): """Head for sentence-level classification tasks.""" def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.out_proj = nn.Linear(config.hidden_size, config.num_labels) def forward(self, hidden_states, **kwargs): hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS]) hidden_states = self.dropout(hidden_states) hidden_states = self.dense(hidden_states) hidden_states = torch.tanh(hidden_states) hidden_states = self.dropout(hidden_states) output = self.out_proj(hidden_states) return output @add_start_docstrings( """ Longformer Model with a span classification head on top for extractive question-answering tasks like SQuAD / TriviaQA (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, LONGFORMER_START_DOCSTRING, ) class LongformerForQuestionAnswering(LongformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=LongformerQuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. Returns: Examples:: >>> from transformers import LongformerTokenizer, LongformerForQuestionAnswering >>> import torch >>> tokenizer = LongformerTokenizer.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> model = LongformerForQuestionAnswering.from_pretrained("allenai/longformer-large-4096-finetuned-triviaqa") >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> encoding = tokenizer(question, text, return_tensors="pt") >>> input_ids = encoding["input_ids"] >>> # default is local attention everywhere >>> # the forward method will automatically set global attention on question tokens >>> attention_mask = encoding["attention_mask"] >>> outputs = model(input_ids, attention_mask=attention_mask) >>> start_logits = outputs.start_logits >>> end_logits = outputs.end_logits >>> all_tokens = tokenizer.convert_ids_to_tokens(input_ids[0].tolist()) >>> answer_tokens = all_tokens[torch.argmax(start_logits) :torch.argmax(end_logits)+1] >>> answer = tokenizer.decode(tokenizer.convert_tokens_to_ids(answer_tokens)) # remove space prepending space token """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if global_attention_mask is None: if input_ids is None: logger.warning( "It is not possible to automatically generate the `global_attention_mask` because input_ids is None. Please make sure that it is correctly set." ) else: # set global attention on question tokens automatically global_attention_mask = _compute_global_attention_mask(input_ids, self.config.sep_token_id) outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return LongformerQuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions, ) @add_start_docstrings( """ Longformer Model with a token 
classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, LONGFORMER_START_DOCSTRING, ) class LongformerForTokenClassification(LongformerPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.longformer = LongformerModel(config, add_pooling_layer=False) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(LONGFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LongformerTokenClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, token_type_ids=None, position_ids=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.longformer( input_ids, attention_mask=attention_mask, global_attention_mask=global_attention_mask, head_mask=head_mask, token_type_ids=token_type_ids, position_ids=position_ids, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return LongformerTokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Longformer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", LONGFORMER_START_DOCSTRING, ) class LongformerForMultipleChoice(LongformerPreTrainedModel): def __init__(self, config): super().__init__(config) self.longformer = LongformerModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_model_forward( LONGFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=LongformerMultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, token_type_ids=None, attention_mask=None, global_attention_mask=None, head_mask=None, labels=None, position_ids=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] return_dict = return_dict if return_dict is not None else self.config.use_return_dict # set global attention on question tokens if global_attention_mask is None and input_ids is not None: logger.info("Initializing global attention on multiple choice...") # put global attention on all tokens after `config.sep_token_id` global_attention_mask = torch.stack( [ _compute_global_attention_mask(input_ids[:, i], self.config.sep_token_id, before_sep_token=False) for i in range(num_choices) ], dim=1, ) flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_global_attention_mask = ( global_attention_mask.view(-1, global_attention_mask.size(-1)) if global_attention_mask is not None else None ) flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) outputs = self.longformer( flat_input_ids, position_ids=flat_position_ids, token_type_ids=flat_token_type_ids, attention_mask=flat_attention_mask, global_attention_mask=flat_global_attention_mask, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) if not return_dict: output = (reshaped_logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return LongformerMultipleChoiceModelOutput( loss=loss, logits=reshaped_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, global_attentions=outputs.global_attentions, )
AdaMix/src/transformers/models/longformer/modeling_longformer.py/0
{ "file_path": "AdaMix/src/transformers/models/longformer/modeling_longformer.py", "repo_id": "AdaMix", "token_count": 45243 }
57
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import os import socket import time import warnings from pathlib import Path from typing import Dict, List, Union from zipfile import ZipFile import numpy as np import torch from tqdm import tqdm from transformers import MarianConfig, MarianMTModel, MarianTokenizer from transformers.hf_api import HfApi def remove_suffix(text: str, suffix: str): if text.endswith(suffix): return text[: -len(suffix)] return text # or whatever def remove_prefix(text: str, prefix: str): if text.startswith(prefix): return text[len(prefix) :] return text # or whatever def convert_encoder_layer(opus_dict, layer_prefix: str, converter: dict): sd = {} for k in opus_dict: if not k.startswith(layer_prefix): continue stripped = remove_prefix(k, layer_prefix) v = opus_dict[k].T # besides embeddings, everything must be transposed. sd[converter[stripped]] = torch.tensor(v).squeeze() return sd def load_layers_(layer_lst: torch.nn.ModuleList, opus_state: dict, converter, is_decoder=False): for i, layer in enumerate(layer_lst): layer_tag = f"decoder_l{i + 1}_" if is_decoder else f"encoder_l{i + 1}_" sd = convert_encoder_layer(opus_state, layer_tag, converter) layer.load_state_dict(sd, strict=True) def find_pretrained_model(src_lang: str, tgt_lang: str) -> List[str]: """Find models that can accept src_lang as input and return tgt_lang as output.""" prefix = "Helsinki-NLP/opus-mt-" api = HfApi() model_list = api.model_list() model_ids = [x.modelId for x in model_list if x.modelId.startswith("Helsinki-NLP")] src_and_targ = [ remove_prefix(m, prefix).lower().split("-") for m in model_ids if "+" not in m ] # + cant be loaded. 
matching = [f"{prefix}{a}-{b}" for (a, b) in src_and_targ if src_lang in a and tgt_lang in b] return matching def add_emb_entries(wemb, final_bias, n_special_tokens=1): vsize, d_model = wemb.shape embs_to_add = np.zeros((n_special_tokens, d_model)) new_embs = np.concatenate([wemb, embs_to_add]) bias_to_add = np.zeros((n_special_tokens, 1)) new_bias = np.concatenate((final_bias, bias_to_add), axis=1) return new_embs, new_bias def _cast_yaml_str(v): bool_dct = {"true": True, "false": False} if not isinstance(v, str): return v elif v in bool_dct: return bool_dct[v] try: return int(v) except (TypeError, ValueError): return v def cast_marian_config(raw_cfg: Dict[str, str]) -> Dict: return {k: _cast_yaml_str(v) for k, v in raw_cfg.items()} CONFIG_KEY = "special:model.yml" def load_config_from_state_dict(opus_dict): import yaml cfg_str = "".join([chr(x) for x in opus_dict[CONFIG_KEY]]) yaml_cfg = yaml.load(cfg_str[:-1], Loader=yaml.BaseLoader) return cast_marian_config(yaml_cfg) def find_model_file(dest_dir): # this one better model_files = list(Path(dest_dir).glob("*.npz")) assert len(model_files) == 1, model_files model_file = model_files[0] return model_file # Group Names Logic: change long opus model names to something shorter, like opus-mt-en-ROMANCE ROM_GROUP = ( "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO+es_EC+es_ES+es_GT" "+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR+pt_PT+gl+lad+an+mwl+it+it_IT+co" "+nap+scn+vec+sc+ro+la" ) GROUPS = [ ("cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "ZH"), (ROM_GROUP, "ROMANCE"), ("de+nl+fy+af+da+fo+is+no+nb+nn+sv", "NORTH_EU"), ("da+fo+is+no+nb+nn+sv", "SCANDINAVIA"), ("se+sma+smj+smn+sms", "SAMI"), ("nb_NO+nb+nn_NO+nn+nog+no_nb+no", "NORWAY"), ("ga+cy+br+gd+kw+gv", "CELTIC"), # https://en.wikipedia.org/wiki/Insular_Celtic_languages ] GROUP_TO_OPUS_NAME = { "opus-mt-ZH-de": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-de", "opus-mt-ZH-fi": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-fi", "opus-mt-ZH-sv": "cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh-sv", "opus-mt-SCANDINAVIA-SCANDINAVIA": "da+fo+is+no+nb+nn+sv-da+fo+is+no+nb+nn+sv", "opus-mt-NORTH_EU-NORTH_EU": "de+nl+fy+af+da+fo+is+no+nb+nn+sv-de+nl+fy+af+da+fo+is+no+nb+nn+sv", "opus-mt-de-ZH": "de-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "opus-mt-en_el_es_fi-en_el_es_fi": "en+el+es+fi-en+el+es+fi", "opus-mt-en-ROMANCE": "en-fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO" "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR" "+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la", "opus-mt-en-CELTIC": "en-ga+cy+br+gd+kw+gv", "opus-mt-es-NORWAY": "es-nb_NO+nb+nn_NO+nn+nog+no_nb+no", "opus-mt-fi_nb_no_nn_ru_sv_en-SAMI": "fi+nb+no+nn+ru+sv+en-se+sma+smj+smn+sms", "opus-mt-fi-ZH": "fi-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "opus-mt-fi-NORWAY": "fi-nb_NO+nb+nn_NO+nn+nog+no_nb+no", "opus-mt-ROMANCE-en": "fr+fr_BE+fr_CA+fr_FR+wa+frp+oc+ca+rm+lld+fur+lij+lmo+es+es_AR+es_CL+es_CO+es_CR+es_DO" "+es_EC+es_ES+es_GT+es_HN+es_MX+es_NI+es_PA+es_PE+es_PR+es_SV+es_UY+es_VE+pt+pt_br+pt_BR" "+pt_PT+gl+lad+an+mwl+it+it_IT+co+nap+scn+vec+sc+ro+la-en", "opus-mt-CELTIC-en": "ga+cy+br+gd+kw+gv-en", "opus-mt-sv-ZH": "sv-cmn+cn+yue+ze_zh+zh_cn+zh_CN+zh_HK+zh_tw+zh_TW+zh_yue+zhs+zht+zh", "opus-mt-sv-NORWAY": "sv-nb_NO+nb+nn_NO+nn+nog+no_nb+no", } 
OPUS_GITHUB_URL = "https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/" ORG_NAME = "Helsinki-NLP/" def convert_opus_name_to_hf_name(x): """For OPUS-MT-Train/ DEPRECATED""" for substr, grp_name in GROUPS: x = x.replace(substr, grp_name) return x.replace("+", "_") def convert_hf_name_to_opus_name(hf_model_name): """ Relies on the assumption that there are no language codes like pt_br in models that are not in GROUP_TO_OPUS_NAME. """ hf_model_name = remove_prefix(hf_model_name, ORG_NAME) if hf_model_name in GROUP_TO_OPUS_NAME: opus_w_prefix = GROUP_TO_OPUS_NAME[hf_model_name] else: opus_w_prefix = hf_model_name.replace("_", "+") return remove_prefix(opus_w_prefix, "opus-mt-") def get_system_metadata(repo_root): import git return dict( helsinki_git_sha=git.Repo(path=repo_root, search_parent_directories=True).head.object.hexsha, transformers_git_sha=git.Repo(path=".", search_parent_directories=True).head.object.hexsha, port_machine=socket.gethostname(), port_time=time.strftime("%Y-%m-%d-%H:%M"), ) # docstyle-ignore FRONT_MATTER_TEMPLATE = """--- language: {} tags: - translation license: apache-2.0 --- """ DEFAULT_REPO = "Tatoeba-Challenge" DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models") def write_model_card( hf_model_name: str, repo_root=DEFAULT_REPO, save_dir=Path("marian_converted"), dry_run=False, extra_metadata={}, ) -> str: """ Copy the most recent model's readme section from opus, and add metadata. upload command: aws s3 sync model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun """ import pandas as pd hf_model_name = remove_prefix(hf_model_name, ORG_NAME) opus_name: str = convert_hf_name_to_opus_name(hf_model_name) assert repo_root in ("OPUS-MT-train", "Tatoeba-Challenge") opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md") assert opus_readme_path.exists(), f"Readme file {opus_readme_path} not found" opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")] readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md" s, t = ",".join(opus_src), ",".join(opus_tgt) metadata = { "hf_name": hf_model_name, "source_languages": s, "target_languages": t, "opus_readme_url": readme_url, "original_repo": repo_root, "tags": ["translation"], } metadata.update(extra_metadata) metadata.update(get_system_metadata(repo_root)) # combine with opus markdown extra_markdown = ( f"### {hf_model_name}\n\n* source group: {metadata['src_name']} \n* target group: " f"{metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n" ) content = opus_readme_path.open().read() content = content.split("\n# ")[-1] # Get the lowest level 1 header in the README -- the most recent model. 
splat = content.split("*")[2:] print(splat[3]) content = "*".join(splat) content = ( FRONT_MATTER_TEMPLATE.format(metadata["src_alpha2"]) + extra_markdown + "\n* " + content.replace("download", "download original weights") ) items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()]) sec3 = "\n### System Info: \n" + items content += sec3 if dry_run: return content, metadata sub_dir = save_dir / f"opus-mt-{hf_model_name}" sub_dir.mkdir(exist_ok=True) dest = sub_dir / "README.md" dest.open("w").write(content) pd.Series(metadata).to_json(sub_dir / "metadata.json") # if dry_run: return content, metadata def make_registry(repo_path="Opus-MT-train/models"): if not (Path(repo_path) / "fr-en" / "README.md").exists(): raise ValueError( f"repo_path:{repo_path} does not exist: " "You must run: git clone [email protected]:Helsinki-NLP/Opus-MT-train.git before calling." ) results = {} for p in Path(repo_path).iterdir(): n_dash = p.name.count("-") if n_dash == 0: continue else: lns = list(open(p / "README.md").readlines()) results[p.name] = _parse_readme(lns) return [(k, v["pre-processing"], v["download"], v["download"][:-4] + ".test.txt") for k, v in results.items()] def convert_all_sentencepiece_models(model_list=None, repo_path=None, dest_dir=Path("marian_converted")): """Requires 300GB""" save_dir = Path("marian_ckpt") dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) save_paths = [] if model_list is None: model_list: list = make_registry(repo_path=repo_path) for k, prepro, download, test_set_url in tqdm(model_list): if "SentencePiece" not in prepro: # dont convert BPE models. continue if not os.path.exists(save_dir / k): download_and_unzip(download, save_dir / k) pair_name = convert_opus_name_to_hf_name(k) convert(save_dir / k, dest_dir / f"opus-mt-{pair_name}") save_paths.append(dest_dir / f"opus-mt-{pair_name}") return save_paths def lmap(f, x) -> List: return list(map(f, x)) def fetch_test_set(test_set_url): import wget fname = wget.download(test_set_url, "opus_test.txt") lns = Path(fname).open().readlines() src = lmap(str.strip, lns[::4]) gold = lmap(str.strip, lns[1::4]) mar_model = lmap(str.strip, lns[2::4]) assert ( len(gold) == len(mar_model) == len(src) ), f"Gold, marian and source lengths {len(gold)}, {len(mar_model)}, {len(src)} mismatched" os.remove(fname) return src, mar_model, gold def convert_whole_dir(path=Path("marian_ckpt/")): for subdir in tqdm(list(path.ls())): dest_dir = f"marian_converted/{subdir.name}" if (dest_dir / "pytorch_model.bin").exists(): continue convert(source_dir, dest_dir) def _parse_readme(lns): """Get link and metadata from opus model card equivalent.""" subres = {} for ln in [x.strip() for x in lns]: if not ln.startswith("*"): continue ln = ln[1:].strip() for k in ["download", "dataset", "models", "model", "pre-processing"]: if ln.startswith(k): break else: continue if k in ["dataset", "model", "pre-processing"]: splat = ln.split(":") _, v = splat subres[k] = v elif k == "download": v = ln.split("(")[-1][:-1] subres[k] = v return subres def save_tokenizer_config(dest_dir: Path): dname = dest_dir.name.split("-") dct = dict(target_lang=dname[-1], source_lang="-".join(dname[:-1])) save_json(dct, dest_dir / "tokenizer_config.json") def add_to_vocab_(vocab: Dict[str, int], special_tokens: List[str]): start = max(vocab.values()) + 1 added = 0 for tok in special_tokens: if tok in vocab: continue vocab[tok] = start + added added += 1 return added def find_vocab_file(model_dir): return list(model_dir.glob("*vocab.yml"))[0] def 
add_special_tokens_to_vocab(model_dir: Path) -> None: vocab = load_yaml(find_vocab_file(model_dir)) vocab = {k: int(v) for k, v in vocab.items()} num_added = add_to_vocab_(vocab, ["<pad>"]) print(f"added {num_added} tokens to vocab") save_json(vocab, model_dir / "vocab.json") save_tokenizer_config(model_dir) def check_equal(marian_cfg, k1, k2): v1, v2 = marian_cfg[k1], marian_cfg[k2] assert v1 == v2, f"hparams {k1},{k2} differ: {v1} != {v2}" def check_marian_cfg_assumptions(marian_cfg): assumed_settings = { "tied-embeddings-all": True, "layer-normalization": False, "right-left": False, "transformer-ffn-depth": 2, "transformer-aan-depth": 2, "transformer-no-projection": False, "transformer-postprocess-emb": "d", "transformer-postprocess": "dan", # Dropout, add, normalize "transformer-preprocess": "", "type": "transformer", "ulr-dim-emb": 0, "dec-cell-base-depth": 2, "dec-cell-high-depth": 1, "transformer-aan-nogate": False, } for k, v in assumed_settings.items(): actual = marian_cfg[k] assert actual == v, f"Unexpected config value for {k} expected {v} got {actual}" check_equal(marian_cfg, "transformer-ffn-activation", "transformer-aan-activation") check_equal(marian_cfg, "transformer-ffn-depth", "transformer-aan-depth") check_equal(marian_cfg, "transformer-dim-ffn", "transformer-dim-aan") BIAS_KEY = "decoder_ff_logit_out_b" BART_CONVERTER = { # for each encoder and decoder layer "self_Wq": "self_attn.q_proj.weight", "self_Wk": "self_attn.k_proj.weight", "self_Wv": "self_attn.v_proj.weight", "self_Wo": "self_attn.out_proj.weight", "self_bq": "self_attn.q_proj.bias", "self_bk": "self_attn.k_proj.bias", "self_bv": "self_attn.v_proj.bias", "self_bo": "self_attn.out_proj.bias", "self_Wo_ln_scale": "self_attn_layer_norm.weight", "self_Wo_ln_bias": "self_attn_layer_norm.bias", "ffn_W1": "fc1.weight", "ffn_b1": "fc1.bias", "ffn_W2": "fc2.weight", "ffn_b2": "fc2.bias", "ffn_ffn_ln_scale": "final_layer_norm.weight", "ffn_ffn_ln_bias": "final_layer_norm.bias", # Decoder Cross Attention "context_Wk": "encoder_attn.k_proj.weight", "context_Wo": "encoder_attn.out_proj.weight", "context_Wq": "encoder_attn.q_proj.weight", "context_Wv": "encoder_attn.v_proj.weight", "context_bk": "encoder_attn.k_proj.bias", "context_bo": "encoder_attn.out_proj.bias", "context_bq": "encoder_attn.q_proj.bias", "context_bv": "encoder_attn.v_proj.bias", "context_Wo_ln_scale": "encoder_attn_layer_norm.weight", "context_Wo_ln_bias": "encoder_attn_layer_norm.bias", } class OpusState: def __init__(self, source_dir): npz_path = find_model_file(source_dir) self.state_dict = np.load(npz_path) cfg = load_config_from_state_dict(self.state_dict) assert cfg["dim-vocabs"][0] == cfg["dim-vocabs"][1] assert "Wpos" not in self.state_dict, "Wpos key in state dictionary" self.state_dict = dict(self.state_dict) self.wemb, self.final_bias = add_emb_entries(self.state_dict["Wemb"], self.state_dict[BIAS_KEY], 1) self.pad_token_id = self.wemb.shape[0] - 1 cfg["vocab_size"] = self.pad_token_id + 1 # self.state_dict['Wemb'].sha self.state_keys = list(self.state_dict.keys()) assert "Wtype" not in self.state_dict, "Wtype key in state dictionary" self._check_layer_entries() self.source_dir = source_dir self.cfg = cfg hidden_size, intermediate_shape = self.state_dict["encoder_l1_ffn_W1"].shape assert ( hidden_size == cfg["dim-emb"] == 512 ), f"Hidden size {hidden_size} and configured size {cfg['dim_emb']} mismatched or not 512" # Process decoder.yml decoder_yml = cast_marian_config(load_yaml(source_dir / "decoder.yml")) check_marian_cfg_assumptions(cfg) 
self.hf_config = MarianConfig( vocab_size=cfg["vocab_size"], decoder_layers=cfg["dec-depth"], encoder_layers=cfg["enc-depth"], decoder_attention_heads=cfg["transformer-heads"], encoder_attention_heads=cfg["transformer-heads"], decoder_ffn_dim=cfg["transformer-dim-ffn"], encoder_ffn_dim=cfg["transformer-dim-ffn"], d_model=cfg["dim-emb"], activation_function=cfg["transformer-aan-activation"], pad_token_id=self.pad_token_id, eos_token_id=0, bos_token_id=0, max_position_embeddings=cfg["dim-emb"], scale_embedding=True, normalize_embedding="n" in cfg["transformer-preprocess"], static_position_embeddings=not cfg["transformer-train-position-embeddings"], dropout=0.1, # see opus-mt-train repo/transformer-dropout param. # default: add_final_layer_norm=False, num_beams=decoder_yml["beam-size"], decoder_start_token_id=self.pad_token_id, bad_words_ids=[[self.pad_token_id]], max_length=512, ) def _check_layer_entries(self): self.encoder_l1 = self.sub_keys("encoder_l1") self.decoder_l1 = self.sub_keys("decoder_l1") self.decoder_l2 = self.sub_keys("decoder_l2") if len(self.encoder_l1) != 16: warnings.warn(f"Expected 16 keys for each encoder layer, got {len(self.encoder_l1)}") if len(self.decoder_l1) != 26: warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}") if len(self.decoder_l2) != 26: warnings.warn(f"Expected 26 keys for each decoder layer, got {len(self.decoder_l1)}") @property def extra_keys(self): extra = [] for k in self.state_keys: if ( k.startswith("encoder_l") or k.startswith("decoder_l") or k in [CONFIG_KEY, "Wemb", "Wpos", "decoder_ff_logit_out_b"] ): continue else: extra.append(k) return extra def sub_keys(self, layer_prefix): return [remove_prefix(k, layer_prefix) for k in self.state_dict if k.startswith(layer_prefix)] def load_marian_model(self) -> MarianMTModel: state_dict, cfg = self.state_dict, self.hf_config assert cfg.static_position_embeddings, "config.static_position_embeddings should be True" model = MarianMTModel(cfg) assert "hidden_size" not in cfg.to_dict() load_layers_( model.model.encoder.layers, state_dict, BART_CONVERTER, ) load_layers_(model.model.decoder.layers, state_dict, BART_CONVERTER, is_decoder=True) # handle tensors not associated with layers wemb_tensor = torch.nn.Parameter(torch.FloatTensor(self.wemb)) bias_tensor = torch.nn.Parameter(torch.FloatTensor(self.final_bias)) model.model.shared.weight = wemb_tensor model.model.encoder.embed_tokens = model.model.decoder.embed_tokens = model.model.shared model.final_logits_bias = bias_tensor if "Wpos" in state_dict: print("Unexpected: got Wpos") wpos_tensor = torch.tensor(state_dict["Wpos"]) model.model.encoder.embed_positions.weight = wpos_tensor model.model.decoder.embed_positions.weight = wpos_tensor if cfg.normalize_embedding: assert "encoder_emb_ln_scale_pre" in state_dict raise NotImplementedError("Need to convert layernorm_embedding") assert not self.extra_keys, f"Failed to convert {self.extra_keys}" assert ( model.model.shared.padding_idx == self.pad_token_id ), f"Padding tokens {model.model.shared.padding_idx} and {self.pad_token_id} mismatched" return model def download_and_unzip(url, dest_dir): try: import wget except ImportError: raise ImportError("you must pip install wget") filename = wget.download(url) unzip(filename, dest_dir) os.remove(filename) def convert(source_dir: Path, dest_dir): dest_dir = Path(dest_dir) dest_dir.mkdir(exist_ok=True) add_special_tokens_to_vocab(source_dir) tokenizer = MarianTokenizer.from_pretrained(str(source_dir)) 
tokenizer.save_pretrained(dest_dir) opus_state = OpusState(source_dir) assert opus_state.cfg["vocab_size"] == len( tokenizer.encoder ), f"Original vocab size {opus_state.cfg['vocab_size']} and new vocab size {len(tokenizer.encoder)} mismatched" # save_json(opus_state.cfg, dest_dir / "marian_original_config.json") # ^^ Uncomment to save human readable marian config for debugging model = opus_state.load_marian_model() model = model.half() model.save_pretrained(dest_dir) model.from_pretrained(dest_dir) # sanity check def load_yaml(path): import yaml with open(path) as f: return yaml.load(f, Loader=yaml.BaseLoader) def save_json(content: Union[Dict, List], path: str) -> None: with open(path, "w") as f: json.dump(content, f) def unzip(zip_path: str, dest_dir: str) -> None: with ZipFile(zip_path, "r") as zipObj: zipObj.extractall(dest_dir) if __name__ == "__main__": """ Tatoeba conversion instructions in scripts/tatoeba/README.md """ parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--src", type=str, help="path to marian model sub dir", default="en-de") parser.add_argument("--dest", type=str, default=None, help="Path to the output PyTorch model.") args = parser.parse_args() source_dir = Path(args.src) assert source_dir.exists(), f"Source directory {source_dir} not found" dest_dir = f"converted-{source_dir.name}" if args.dest is None else args.dest convert(source_dir, dest_dir)
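# Hedged usage sketch, added for illustration only; it is not part of the
# original script. Assuming the conversion above has been run with the default
# destination (e.g. `python convert_marian_to_pytorch.py --src en-de`, which
# writes to `converted-en-de`), the converted checkpoint can be loaded with the
# regular `from_pretrained` API. The directory name is therefore an assumption
# about where you saved the conversion.
def _example_translate_with_converted_model(converted_dir="converted-en-de"):
    from transformers import MarianMTModel, MarianTokenizer

    tokenizer = MarianTokenizer.from_pretrained(converted_dir)
    # the converter saves fp16 weights (model.half() above); cast back for CPU inference
    model = MarianMTModel.from_pretrained(converted_dir).float()

    batch = tokenizer(["Hello, how are you?"], return_tensors="pt", padding=True)
    generated = model.generate(**batch)
    return tokenizer.batch_decode(generated, skip_special_tokens=True)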
AdaMix/src/transformers/models/marian/convert_marian_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/marian/convert_marian_to_pytorch.py", "repo_id": "AdaMix", "token_count": 10947 }
58
# coding=utf-8 # Copyright 2020 The Facebook AI Research Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from contextlib import contextmanager from typing import List, Optional from ...tokenization_utils import BatchEncoding from ...utils import logging from ..xlm_roberta.tokenization_xlm_roberta import XLMRobertaTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "facebook/mbart-large-en-ro": "https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model", "facebook/mbart-large-cc25": "https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model", } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "facebook/mbart-large-en-ro": 1024, "facebook/mbart-large-cc25": 1024, } FAIRSEQ_LANGUAGE_CODES = [ "ar_AR", "cs_CZ", "de_DE", "en_XX", "es_XX", "et_EE", "fi_FI", "fr_XX", "gu_IN", "hi_IN", "it_IT", "ja_XX", "kk_KZ", "ko_KR", "lt_LT", "lv_LV", "my_MM", "ne_NP", "nl_XX", "ro_RO", "ru_RU", "si_LK", "tr_TR", "vi_VN", "zh_CN", ] class MBartTokenizer(XLMRobertaTokenizer): """ Construct an MBART tokenizer. :class:`~transformers.MBartTokenizer` is a subclass of :class:`~transformers.XLMRobertaTokenizer`. Refer to superclass :class:`~transformers.XLMRobertaTokenizer` for usage examples and documentation concerning the initialization parameters and other methods. The tokenization method is ``<tokens> <eos> <language code>`` for source language documents, and ``<language code> <tokens> <eos>``` for target language documents. Examples:: >>> from transformers import MBartTokenizer >>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-en-ro', src_lang="en_XX", tgt_lang="ro_RO") >>> example_english_phrase = " UN Chief Says There Is No Military Solution in Syria" >>> expected_translation_romanian = "Şeful ONU declară că nu există o soluţie militară în Siria" >>> inputs = tokenizer(example_english_phrase, return_tensors="pt) >>> with tokenizer.as_target_tokenizer(): ... 
labels = tokenizer(expected_translation_romanian, return_tensors="pt") >>> inputs["labels"] = labels["input_ids"] """ vocab_files_names = VOCAB_FILES_NAMES max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP prefix_tokens: List[int] = [] suffix_tokens: List[int] = [] def __init__(self, *args, tokenizer_file=None, src_lang=None, tgt_lang=None, **kwargs): super().__init__(*args, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, **kwargs) self.sp_model_size = len(self.sp_model) self.lang_code_to_id = { code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES) } self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()} self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset self.fairseq_tokens_to_ids.update(self.lang_code_to_id) self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()} self._additional_special_tokens = list(self.lang_code_to_id.keys()) self._src_lang = src_lang if src_lang is not None else "en_XX" self.cur_lang_code_id = self.lang_code_to_id[self._src_lang] self.tgt_lang = tgt_lang self.set_src_lang_special_tokens(self._src_lang) @property def vocab_size(self): return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1 # Plus 1 for the mask token @property def src_lang(self) -> str: return self._src_lang @src_lang.setter def src_lang(self, new_src_lang: str) -> None: self._src_lang = new_src_lang self.set_src_lang_special_tokens(self._src_lang) def get_special_tokens_mask( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False ) -> List[int]: """ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding special tokens using the tokenizer ``prepare_for_model`` method. Args: token_ids_0 (:obj:`List[int]`): List of IDs. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the token list is already formatted with special tokens for the model. Returns: :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token. """ if already_has_special_tokens: if token_ids_1 is not None: raise ValueError( "You should not supply a second sequence if the provided sequence of " "ids is already formatted with special tokens for the model." ) return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0)) prefix_ones = [1] * len(self.prefix_tokens) suffix_ones = [1] * len(self.suffix_tokens) if token_ids_1 is None: return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones def build_inputs_with_special_tokens( self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None ) -> List[int]: """ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and adding special tokens. An MBART sequence has the following format, where ``X`` represents the sequence: - ``input_ids`` (for encoder) ``X [eos, src_lang_code]`` - ``decoder_input_ids``: (for decoder) ``X [eos, tgt_lang_code]`` BOS is never used. Pairs of sequences are not the expected use case, but they will be handled without a separator. 
Args: token_ids_0 (:obj:`List[int]`): List of IDs to which the special tokens will be added. token_ids_1 (:obj:`List[int]`, `optional`): Optional second list of IDs for sequence pairs. Returns: :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens. """ if token_ids_1 is None: return self.prefix_tokens + token_ids_0 + self.suffix_tokens # We don't expect to process pairs, but leave the pair logic for API consistency return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens def prepare_seq2seq_batch( self, src_texts: List[str], src_lang: str = "en_XX", tgt_texts: Optional[List[str]] = None, tgt_lang: str = "ro_RO", **kwargs, ) -> BatchEncoding: self.src_lang = src_lang self.tgt_lang = tgt_lang return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs) @contextmanager def as_target_tokenizer(self): """ Temporarily sets the tokenizer for encoding the targets. Useful for tokenizer associated to sequence-to-sequence models that need a slightly different processing for the labels. """ self.set_tgt_lang_special_tokens(self.tgt_lang) yield self.set_src_lang_special_tokens(self.src_lang) def set_src_lang_special_tokens(self, src_lang) -> None: """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code].""" self.cur_lang_code = self.lang_code_to_id[src_lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code] def set_tgt_lang_special_tokens(self, lang: str) -> None: """Reset the special tokens to the target language setting. No prefix and suffix=[eos, tgt_lang_code].""" self.cur_lang_code = self.lang_code_to_id[lang] self.prefix_tokens = [] self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
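# Hedged usage sketch, added for illustration only; it is not part of the
# original module. It mirrors the class docstring above and shows the
# special-token layout produced by the language-specific helpers: source
# batches end with `</s> <src_lang_code>`, and target batches built inside
# `as_target_tokenizer()` end with `</s> <tgt_lang_code>`.
def _example_mbart_translation_batch():
    from transformers import MBartTokenizer

    tokenizer = MBartTokenizer.from_pretrained(
        "facebook/mbart-large-en-ro", src_lang="en_XX", tgt_lang="ro_RO"
    )
    src_text = "UN Chief Says There Is No Military Solution in Syria"
    tgt_text = "Şeful ONU declară că nu există o soluţie militară în Siria"

    inputs = tokenizer(src_text, return_tensors="pt")
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(tgt_text, return_tensors="pt")
    inputs["labels"] = labels["input_ids"]
    # input_ids end with [</s>, en_XX]; labels end with [</s>, ro_RO]
    return inputs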
AdaMix/src/transformers/models/mbart/tokenization_mbart.py/0
{ "file_path": "AdaMix/src/transformers/models/mbart/tokenization_mbart.py", "repo_id": "AdaMix", "token_count": 3852 }
59
# MIT License # # Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. import math import os import warnings from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, MultipleChoiceModelOutput, NextSentencePredictorOutput, QuestionAnsweringModelOutput, SequenceClassifierOutput, TokenClassifierOutput, ) from ...modeling_utils import PreTrainedModel, find_pruneable_heads_and_indices, prune_linear_layer from ...utils import logging from .configuration_mobilebert import MobileBertConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "google/mobilebert-uncased" _CONFIG_FOR_DOC = "MobileBertConfig" _TOKENIZER_FOR_DOC = "MobileBertTokenizer" MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = ["google/mobilebert-uncased"] def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path): """Load tf checkpoints in a pytorch model.""" try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.replace("ffn_layer", "ffn") name = name.replace("FakeLayerNorm", "LayerNorm") name = name.replace("extra_output_weights", "dense/kernel") name = name.replace("bert", "mobilebert") name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if any( n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"] for n in name ): logger.info("Skipping {}".format("/".join(name))) continue pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == "gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "output_bias" or scope_names[0] == "beta": pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "weight") elif scope_names[0] == "squad": pointer = getattr(pointer, "classifier") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) return model def mish(x): return x * torch.tanh(nn.functional.softplus(x)) class NoNorm(nn.Module): def __init__(self, feat_size, eps=None): super().__init__() self.bias = nn.Parameter(torch.zeros(feat_size)) self.weight = nn.Parameter(torch.ones(feat_size)) def forward(self, input_tensor): return input_tensor * self.weight + self.bias NORM2FN = {"layer_norm": torch.nn.LayerNorm, "no_norm": NoNorm} class MobileBertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.trigram_input = config.trigram_input self.embedding_size = config.embedding_size self.hidden_size = config.hidden_size self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) embed_dim_multiplier = 3 if self.trigram_input else 1 embedded_input_size = self.embedding_size * embed_dim_multiplier self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", 
torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, :seq_length] if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if self.trigram_input: # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited # Devices (https://arxiv.org/abs/2004.02984) # # The embedding table in BERT models accounts for a substantial proportion of model size. To compress # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT. # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512 # dimensional output. inputs_embeds = torch.cat( [ F.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0), inputs_embeds, F.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0), ], dim=2, ) if self.trigram_input or self.embedding_size != self.hidden_size: inputs_embeds = self.embedding_transformation(inputs_embeds) # Add positional embeddings and token type embeddings, then layer # normalize and perform dropout. position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class MobileBertSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.num_attention_heads = config.num_attention_heads self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = nn.Linear(config.true_hidden_size, self.all_head_size) self.key = nn.Linear(config.true_hidden_size, self.all_head_size) self.value = nn.Linear( config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size ) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, query_tensor, key_tensor, value_tensor, attention_mask=None, head_mask=None, output_attentions=None, ): mixed_query_layer = self.query(query_tensor) mixed_key_layer = self.key(key_tensor) mixed_value_layer = self.value(value_tensor) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. 
attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) return outputs class MobileBertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps) if not self.use_bottleneck: self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, residual_tensor): layer_outputs = self.dense(hidden_states) if not self.use_bottleneck: layer_outputs = self.dropout(layer_outputs) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class MobileBertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = MobileBertSelfAttention(config) self.output = MobileBertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, query_tensor, key_tensor, value_tensor, layer_input, attention_mask=None, head_mask=None, output_attentions=None, ): self_outputs = self.self( query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, ) # Run a linear projection of `hidden_size` then add a residual # with `layer_input`. 
attention_output = self.output(self_outputs[0], layer_input) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class MobileBertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class OutputBottleneck(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.true_hidden_size, config.hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, residual_tensor): layer_outputs = self.dense(hidden_states) layer_outputs = self.dropout(layer_outputs) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class MobileBertOutput(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size) if not self.use_bottleneck: self.dropout = nn.Dropout(config.hidden_dropout_prob) else: self.bottleneck = OutputBottleneck(config) def forward(self, intermediate_states, residual_tensor_1, residual_tensor_2): layer_output = self.dense(intermediate_states) if not self.use_bottleneck: layer_output = self.dropout(layer_output) layer_output = self.LayerNorm(layer_output + residual_tensor_1) else: layer_output = self.LayerNorm(layer_output + residual_tensor_1) layer_output = self.bottleneck(layer_output, residual_tensor_2) return layer_output class BottleneckLayer(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size) self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps) def forward(self, hidden_states): layer_input = self.dense(hidden_states) layer_input = self.LayerNorm(layer_input) return layer_input class Bottleneck(nn.Module): def __init__(self, config): super().__init__() self.key_query_shared_bottleneck = config.key_query_shared_bottleneck self.use_bottleneck_attention = config.use_bottleneck_attention self.input = BottleneckLayer(config) if self.key_query_shared_bottleneck: self.attention = BottleneckLayer(config) def forward(self, hidden_states): # This method can return three different tuples of values. These different values make use of bottlenecks, # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory # usage. These linear layer have weights that are learned during training. # # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the # key, query, value, and "layer input" to be used by the attention layer. # This bottleneck is used to project the hidden. This last layer input will be used as a residual tensor # in the attention self output, after the attention scores have been computed. 
# # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return # four values, three of which have been passed through a bottleneck: the query and key, passed through the same # bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck. # # Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck, # and the residual layer will be this value passed through a bottleneck. bottlenecked_hidden_states = self.input(hidden_states) if self.use_bottleneck_attention: return (bottlenecked_hidden_states,) * 4 elif self.key_query_shared_bottleneck: shared_attention_input = self.attention(hidden_states) return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states) else: return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states) class FFNOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size) self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states, residual_tensor): layer_outputs = self.dense(hidden_states) layer_outputs = self.LayerNorm(layer_outputs + residual_tensor) return layer_outputs class FFNLayer(nn.Module): def __init__(self, config): super().__init__() self.intermediate = MobileBertIntermediate(config) self.output = FFNOutput(config) def forward(self, hidden_states): intermediate_output = self.intermediate(hidden_states) layer_outputs = self.output(intermediate_output, hidden_states) return layer_outputs class MobileBertLayer(nn.Module): def __init__(self, config): super().__init__() self.use_bottleneck = config.use_bottleneck self.num_feedforward_networks = config.num_feedforward_networks self.attention = MobileBertAttention(config) self.intermediate = MobileBertIntermediate(config) self.output = MobileBertOutput(config) if self.use_bottleneck: self.bottleneck = Bottleneck(config) if config.num_feedforward_networks > 1: self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, output_attentions=None, ): if self.use_bottleneck: query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states) else: query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4 self_attention_outputs = self.attention( query_tensor, key_tensor, value_tensor, layer_input, attention_mask, head_mask, output_attentions=output_attentions, ) attention_output = self_attention_outputs[0] s = (attention_output,) outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.num_feedforward_networks != 1: for i, ffn_module in enumerate(self.ffn): attention_output = ffn_module(attention_output) s += (attention_output,) intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output, hidden_states) outputs = ( (layer_output,) + outputs + ( torch.tensor(1000), query_tensor, key_tensor, value_tensor, layer_input, attention_output, intermediate_output, ) + s ) return outputs class MobileBertEncoder(nn.Module): def __init__(self, config): super().__init__() self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, 
output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) class MobileBertPooler(nn.Module): def __init__(self, config): super().__init__() self.do_activate = config.classifier_activation if self.do_activate: self.dense = nn.Linear(config.hidden_size, config.hidden_size) def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] if not self.do_activate: return first_token_tensor else: pooled_output = self.dense(first_token_tensor) pooled_output = torch.tanh(pooled_output) return pooled_output class MobileBertPredictionHeadTransform(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) if isinstance(config.hidden_act, str): self.transform_act_fn = ACT2FN[config.hidden_act] else: self.transform_act_fn = config.hidden_act self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class MobileBertLMPredictionHead(nn.Module): def __init__(self, config): super().__init__() self.transform = MobileBertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False) self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False) self.bias = nn.Parameter(torch.zeros(config.vocab_size)) # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings` self.decoder.bias = self.bias def forward(self, hidden_states): hidden_states = self.transform(hidden_states) hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0)) hidden_states += self.decoder.bias return hidden_states class MobileBertOnlyMLMHead(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class MobileBertPreTrainingHeads(nn.Module): def __init__(self, config): super().__init__() self.predictions = MobileBertLMPredictionHead(config) self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, sequence_output, pooled_output): prediction_scores = self.predictions(sequence_output) seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class MobileBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = MobileBertConfig pretrained_model_archive_map = MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST load_tf_weights = load_tf_weights_in_mobilebert base_model_prefix = "mobilebert" _keys_to_ignore_on_load_missing = [r"position_ids"] def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, (nn.LayerNorm, NoNorm)): module.bias.data.zero_() module.weight.data.fill_(1.0) @dataclass class MobileBertForPreTrainingOutput(ModelOutput): """ Output type of :class:`~transformers.MobileBertForPreTraining`. Args: loss (`optional`, returned when ``labels`` is provided, ``torch.FloatTensor`` of shape :obj:`(1,)`): Total loss as the sum of the masked language modeling loss and the next sequence prediction (classification) loss. prediction_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). seq_relationship_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2)`): Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None prediction_logits: torch.FloatTensor = None seq_relationship_logits: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None MOBILEBERT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ MOBILEBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.", MOBILEBERT_START_DOCSTRING, ) class MobileBertModel(MobileBertPreTrainedModel): """ https://arxiv.org/pdf/2004.02984.pdf """ def __init__(self, config, add_pooling_layer=True): super().__init__(config) self.config = config self.embeddings = MobileBertEmbeddings(config) self.encoder = MobileBertEncoder(config) self.pooler = MobileBertPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_hidden_states=None, output_attentions=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
            attention_mask, input_shape, self.device
        )

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """
    MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
    `next sentence prediction (classification)` head.
    """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForPreTraining(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertPreTrainingHeads(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
        # resize dense output embeddings at first
        self.cls.predictions.dense = self._get_resized_lm_head(
            self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
        )

        return super().resize_token_embeddings(new_num_tokens=new_num_tokens)

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        next_sentence_label=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (``torch.LongTensor`` of shape ``(batch_size, sequence_length)``, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
            (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
        next_sentence_label (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see :obj:`input_ids` docstring) Indices should be in ``[0, 1]``:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.
        Returns:

        Examples::

            >>> from transformers import MobileBertTokenizer, MobileBertForPreTraining
            >>> import torch

            >>> tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
            >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")

            >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)  # Batch size 1
            >>> outputs = model(input_ids)

            >>> prediction_logits = outputs.prediction_logits
            >>> seq_relationship_logits = outputs.seq_relationship_logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output, pooled_output = outputs[:2]
        prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)

        total_loss = None
        if labels is not None and next_sentence_label is not None:
            loss_fct = CrossEntropyLoss()
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
            total_loss = masked_lm_loss + next_sentence_loss

        if not return_dict:
            output = (prediction_scores, seq_relationship_score) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return MobileBertForPreTrainingOutput(
            loss=total_loss,
            prediction_logits=prediction_scores,
            seq_relationship_logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings("""MobileBert Model with a `language modeling` head on top. """, MOBILEBERT_START_DOCSTRING)
class MobileBertForMaskedLM(MobileBertPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        self.cls = MobileBertOnlyMLMHead(config)
        self.config = config

        self.init_weights()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> torch.nn.Embedding:
        # resize dense output embeddings at first
        self.cls.predictions.dense = self._get_resized_lm_head(
            self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
        )

        return super().resize_token_embeddings(new_num_tokens=new_num_tokens)

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss.
Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]`` """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.cls(sequence_output) masked_lm_loss = None if labels is not None: loss_fct = CrossEntropyLoss() # -100 index = padding token masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[2:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return MaskedLMOutput( loss=masked_lm_loss, logits=prediction_scores, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class MobileBertOnlyNSPHead(nn.Module): def __init__(self, config): super().__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score @add_start_docstrings( """MobileBert Model with a `next sentence prediction (classification)` head on top. """, MOBILEBERT_START_DOCSTRING, ) class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.cls = MobileBertOnlyNSPHead(config) self.init_weights() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair (see ``input_ids`` docstring) Indices should be in ``[0, 1]``. - 0 indicates sequence B is a continuation of sequence A, - 1 indicates sequence B is a random sequence. Returns: Examples:: >>> from transformers import MobileBertTokenizer, MobileBertForNextSentencePrediction >>> import torch >>> tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased') >>> model = MobileBertForNextSentencePrediction.from_pretrained('google/mobilebert-uncased') >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light." 
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='pt') >>> outputs = model(**encoding, labels=torch.LongTensor([1])) >>> loss = outputs.loss >>> logits = outputs.logits """ if "next_sentence_label" in kwargs: warnings.warn( "The `next_sentence_label` argument is deprecated and will be removed in a future version, use `labels` instead.", FutureWarning, ) labels = kwargs.pop("next_sentence_label") return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] seq_relationship_score = self.cls(pooled_output) next_sentence_loss = None if labels is not None: loss_fct = CrossEntropyLoss() next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1)) if not return_dict: output = (seq_relationship_score,) + outputs[2:] return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output return NextSentencePredictorOutput( loss=next_sentence_loss, logits=seq_relationship_score, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. """, MOBILEBERT_START_DOCSTRING, ) # Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing class MobileBertForSequenceClassification(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, MOBILEBERT_START_DOCSTRING, ) # Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing class MobileBertForQuestionAnswering(MobileBertPreTrainedModel): _keys_to_ignore_on_load_unexpected = [r"pooler"] def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.mobilebert = MobileBertModel(config, add_pooling_layer=False) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=QuestionAnsweringModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[2:] return ((total_loss,) + output) if total_loss is not None else output return QuestionAnsweringModelOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, MOBILEBERT_START_DOCSTRING, ) class MobileBertForMultipleChoice(MobileBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.mobilebert = MobileBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_model_forward( MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length") ) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=MultipleChoiceModelOutput, config_class=_CONFIG_FOR_DOC, ) # Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice.forward with Bert->MobileBert all-casing def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. 
            (See :obj:`input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.init_weights()

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``.
""" return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.mobilebert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return TokenClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
AdaMix/src/transformers/models/mobilebert/modeling_mobilebert.py/0
{ "file_path": "AdaMix/src/transformers/models/mobilebert/modeling_mobilebert.py", "repo_id": "AdaMix", "token_count": 27705 }
60
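Editor's note: the MobileBERT chunk above defines several task heads but only the pretraining head carries a usage example. The sketch below, assuming the standard google/mobilebert-uncased checkpoint named in _CHECKPOINT_FOR_DOC and an illustrative input sentence, shows how the MobileBertForMaskedLM head from that file is typically called.

# Hedged sketch exercising MobileBertForMaskedLM from the chunk above.
# The sample sentence is an assumption; the checkpoint matches _CHECKPOINT_FOR_DOC.
import torch
from transformers import MobileBertTokenizer, MobileBertForMaskedLM

tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
model = MobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")

inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (batch, seq_len, vocab_size)

# Recover the highest-scoring token at the [MASK] position.
mask_positions = (inputs["input_ids"] == tokenizer.mask_token_id)[0].nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))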
# coding=utf-8 # Copyright 2018 The Open AI Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes for OpenAI GPT.""" import json import os import re from typing import Optional, Tuple from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging from ..bert.tokenization_bert import BasicTokenizer logger = logging.get_logger(__name__) VOCAB_FILES_NAMES = { "vocab_file": "vocab.json", "merges_file": "merges.txt", } PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/vocab.json"}, "merges_file": {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/merges.txt"}, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "openai-gpt": 512, } def get_pairs(word): """ Return set of symbol pairs in a word. word is represented as tuple of symbols (symbols being variable-length strings) """ pairs = set() prev_char = word[0] for char in word[1:]: pairs.add((prev_char, char)) prev_char = char return pairs def text_standardize(text): """ fixes some issues the spacy tokenizer had on books corpus also does some whitespace standardization """ text = text.replace("—", "-") text = text.replace("–", "-") text = text.replace("―", "-") text = text.replace("…", "...") text = text.replace("´", "'") text = re.sub(r"""(-+|~+|!+|"+|;+|\?+|\++|,+|\)+|\(+|\\+|\/+|\*+|\[+|\]+|}+|{+|\|+|_+)""", r" \1 ", text) text = re.sub(r"\s*\n\s*", " \n ", text) text = re.sub(r"[^\S\n]+", " ", text) return text.strip() class OpenAIGPTTokenizer(PreTrainedTokenizer): """ Construct a GPT Tokenizer. Based on Byte-Pair-Encoding with the following peculiarities: - lowercases all inputs, - uses :obj:`SpaCy` tokenizer and :obj:`ftfy` for pre-BPE tokenization if they are installed, fallback to BERT's :obj:`BasicTokenizer` if not. This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): Path to the vocabulary file. merges_file (:obj:`str`): Path to the merges file. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs): super().__init__(unk_token=unk_token, **kwargs) try: import ftfy from spacy.lang.en import English _nlp = English() self.nlp = _nlp.Defaults.create_tokenizer(_nlp) self.fix_text = ftfy.fix_text except ImportError: logger.warning("ftfy or spacy is not installed using BERT BasicTokenizer instead of SpaCy & ftfy.") self.nlp = BasicTokenizer(do_lower_case=True) self.fix_text = None with open(vocab_file, encoding="utf-8") as vocab_handle: self.encoder = json.load(vocab_handle) self.decoder = {v: k for k, v in self.encoder.items()} with open(merges_file, encoding="utf-8") as merges_handle: merges = merges_handle.read().split("\n")[1:-1] merges = [tuple(merge.split()) for merge in merges] self.bpe_ranks = dict(zip(merges, range(len(merges)))) self.cache = {} @property def do_lower_case(self): return True @property def vocab_size(self): return len(self.encoder) def get_vocab(self): return dict(self.encoder, **self.added_tokens_encoder) def bpe(self, token): word = tuple(token[:-1]) + (token[-1] + "</w>",) if token in self.cache: return self.cache[token] pairs = get_pairs(word) if not pairs: return token + "</w>" while True: bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) if bigram not in self.bpe_ranks: break first, second = bigram new_word = [] i = 0 while i < len(word): try: j = word.index(first, i) except ValueError: new_word.extend(word[i:]) break else: new_word.extend(word[i:j]) i = j if word[i] == first and i < len(word) - 1 and word[i + 1] == second: new_word.append(first + second) i += 2 else: new_word.append(word[i]) i += 1 new_word = tuple(new_word) word = new_word if len(word) == 1: break else: pairs = get_pairs(word) word = " ".join(word) if word == "\n </w>": word = "\n</w>" self.cache[token] = word return word def _tokenize(self, text): """ Tokenize a string. """ split_tokens = [] if self.fix_text is None: # Using BERT's BasicTokenizer text = self.nlp.tokenize(text) for token in text: split_tokens.extend([t for t in self.bpe(token).split(" ")]) else: # Using SpaCy & ftfy (original tokenization process of OpenAI GPT) text = self.nlp(text_standardize(self.fix_text(text))) for token in text: split_tokens.extend([t for t in self.bpe(token.text.lower()).split(" ")]) return split_tokens def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.encoder.get(token, self.encoder.get(self.unk_token)) def _convert_id_to_token(self, index): """Converts an id in a token (BPE) using the vocab.""" return self.decoder.get(index, self.unk_token) def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. 
""" out_string = "".join(tokens).replace("</w>", " ").strip() return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) merge_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] ) with open(vocab_file, "w", encoding="utf-8") as f: f.write(json.dumps(self.encoder, ensure_ascii=False)) index = 0 with open(merge_file, "w", encoding="utf-8") as writer: writer.write("#version: 0.2\n") for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]): if index != token_index: logger.warning( "Saving vocabulary to {}: BPE merge indices are not consecutive." " Please check that the tokenizer is not corrupted!".format(merge_file) ) index = token_index writer.write(" ".join(bpe_tokens) + "\n") index += 1 return vocab_file, merge_file
AdaMix/src/transformers/models/openai/tokenization_openai.py/0
{ "file_path": "AdaMix/src/transformers/models/openai/tokenization_openai.py", "repo_id": "AdaMix", "token_count": 3902 }
61
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch Speech2Text model. """ import math import random from typing import Optional, Tuple import torch import torch.nn.functional as F from torch import nn from torch.nn import CrossEntropyLoss from ...activations import ACT2FN from ...file_utils import ( add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_outputs import ( BaseModelOutput, BaseModelOutputWithPastAndCrossAttentions, Seq2SeqLMOutput, Seq2SeqModelOutput, ) from ...modeling_utils import PreTrainedModel from ...utils import logging from .configuration_speech_to_text import Speech2TextConfig logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "Speech2TextConfig" _TOKENIZER_FOR_DOC = "Speech2TextTokenizer" SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST = [ "facebook/s2t-small-librispeech-asr", # See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text ] # Copied from transformers.models.bart.modeling_bart.shift_tokens_right def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int): """ Shift input ids one token to the right. """ shifted_input_ids = input_ids.new_zeros(input_ids.shape) shifted_input_ids[:, 1:] = input_ids[:, :-1].clone() shifted_input_ids[:, 0] = decoder_start_token_id assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id) return shifted_input_ids # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask(input_ids_shape: torch.Size, dtype: torch.dtype, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), float("-inf")) mask_cond = torch.arange(mask.size(-1)) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.bool(), torch.finfo(dtype).min) class Conv1dSubsampler(nn.Module): """ Convolutional subsampler: a stack of 1D convolution (along temporal dimension) followed by non-linear activation via gated linear units (https://arxiv.org/abs/1911.08460) """ def __init__(self, config): super(Conv1dSubsampler, self).__init__() self.config = config self.num_layers = config.num_conv_layers self.in_channels = config.input_feat_per_channel * config.input_channels self.mid_channels = config.conv_channels self.out_channels = config.d_model self.kernel_sizes = config.conv_kernel_sizes self.conv_layers = nn.ModuleList( nn.Conv1d( self.in_channels if i == 0 else self.mid_channels // 2, self.mid_channels if i < self.num_layers - 1 else self.out_channels * 2, kernel_size=k, stride=2, padding=k // 2, ) for i, k in enumerate(self.kernel_sizes) ) def forward(self, input_features): hidden_states = input_features.transpose(1, 2).contiguous() # -> B x (C x D) x T for conv in self.conv_layers: hidden_states = conv(hidden_states) hidden_states = nn.functional.glu(hidden_states, dim=1) hidden_states = hidden_states.transpose(1, 2).contiguous() # -> T x B x (C x D) return hidden_states class Speech2TextSinusoidalPositionalEmbedding(nn.Module): """This module produces sinusoidal positional embeddings of any length.""" def __init__(self, num_positions: int, embedding_dim: int, padding_idx: Optional[int] = None): super().__init__() self.offset = 2 self.embedding_dim = embedding_dim self.padding_idx = padding_idx self.make_weights(num_positions + self.offset, embedding_dim, padding_idx) def make_weights(self, num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): emb_weights = self.get_embedding(num_embeddings, embedding_dim, padding_idx) if hasattr(self, "weights"): # in forward, put the weights on correct device emb_weights = emb_weights.to(self.weights.device) self.weights = nn.Parameter(emb_weights) self.weights.requires_grad = False self.weights.detach_() @staticmethod def get_embedding(num_embeddings: int, embedding_dim: int, padding_idx: Optional[int] = None): """ Build sinusoidal embeddings. This matches the implementation in tensor2tensor, but differs slightly from the description in Section 3.5 of "Attention Is All You Need". """ half_dim = embedding_dim // 2 emb = math.log(10000) / (half_dim - 1) emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb) emb = torch.arange(num_embeddings, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0) emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1) if embedding_dim % 2 == 1: # zero pad emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1) if padding_idx is not None: emb[padding_idx, :] = 0 return emb @torch.no_grad() def forward(self, input_ids: torch.Tensor, past_key_values_length: int = 0): bsz, seq_len = input_ids.size() # Create the position ids from the input token ids. Any padded tokens remain padded. 
position_ids = self.create_position_ids_from_input_ids(input_ids, self.padding_idx, past_key_values_length).to( input_ids.device ) # expand embeddings if needed max_pos = self.padding_idx + 1 + seq_len if max_pos > self.weights.size(0): self.make_weights(max_pos + self.offset, self.embedding_dim, self.padding_idx) return self.weights.index_select(0, position_ids.view(-1)).view(bsz, seq_len, -1).detach() def create_position_ids_from_input_ids( self, input_ids: torch.Tensor, padding_idx: int, past_key_values_length: Optional[int] = 0 ): """ Replace non-padding symbols with their position numbers. Position numbers begin at padding_idx+1. Padding symbols are ignored. This is modified from fairseq's `utils.make_positions`. Args: x: torch.Tensor x: Returns: torch.Tensor """ # The series of casts and type-conversions here are carefully balanced to both work with ONNX export and XLA. mask = input_ids.ne(padding_idx).int() incremental_indices = (torch.cumsum(mask, dim=1).type_as(mask) + past_key_values_length) * mask return incremental_indices.long() + padding_idx # Copied from transformers.models.bart.modeling_bart.BartAttention with Bart->Speech2Text class Speech2TextAttention(nn.Module): """Multi-headed attention from 'Attention Is All You Need' paper""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, ): super().__init__() self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = dropout self.head_dim = embed_dim // num_heads assert ( self.head_dim * num_heads == self.embed_dim ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})." self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias) self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias) def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int): return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous() def forward( self, hidden_states: torch.Tensor, key_value_states: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False, ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = hidden_states.size() # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = torch.cat([past_key_value[0], key_states], dim=2) value_states = torch.cat([past_key_value[1], value_states], dim=2) else: # self_attention key_states = 
self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(torch.Tensor, torch.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(torch.Tensor, torch.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape) key_states = key_states.view(*proj_shape) value_states = value_states.view(*proj_shape) src_len = key_states.size(1) attn_weights = torch.bmm(query_states, key_states.transpose(1, 2)) assert attn_weights.size() == ( bsz * self.num_heads, tgt_len, src_len, ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}" if attention_mask is not None: assert attention_mask.size() == ( bsz, 1, tgt_len, src_len, ), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}" attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) attn_weights = F.softmax(attn_weights, dim=-1) if layer_head_mask is not None: assert layer_head_mask.size() == ( self.num_heads, ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}" attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len) if output_attentions: # this operation is a bit akward, but it's required to # make sure that attn_weights keeps its gradient. 
# In order to do so, attn_weights have to reshaped # twice and have to be reused in the following attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len) else: attn_weights_reshaped = None attn_probs = F.dropout(attn_weights, p=self.dropout, training=self.training) attn_output = torch.bmm(attn_probs, value_states) assert attn_output.size() == ( bsz * self.num_heads, tgt_len, self.head_dim, ), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}" attn_output = ( attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim) .transpose(1, 2) .reshape(bsz, tgt_len, embed_dim) ) attn_output = self.out_proj(attn_output) return attn_output, attn_weights_reshaped, past_key_value class Speech2TextEncoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Speech2TextAttention( embed_dim=self.embed_dim, num_heads=config.encoder_attention_heads, dropout=config.attention_dropout, ) self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.fc1 = nn.Linear(self.embed_dim, config.encoder_ffn_dim) self.fc2 = nn.Linear(config.encoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: torch.Tensor, layer_head_mask: torch.Tensor, output_attentions: bool = False, ): """ Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape :obj:`(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size :obj:`(config.encoder_attention_heads,)`. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. 
""" residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask, output_attentions=output_attentions, ) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training) hidden_states = self.fc2(hidden_states) hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) hidden_states = residual + hidden_states if hidden_states.dtype == torch.float16 and ( torch.isinf(hidden_states).any() or torch.isnan(hidden_states).any() ): clamp_value = torch.finfo(hidden_states.dtype).max - 1000 hidden_states = torch.clamp(hidden_states, min=-clamp_value, max=clamp_value) outputs = (hidden_states,) if output_attentions: outputs += (attn_weights,) return outputs class Speech2TextDecoderLayer(nn.Module): def __init__(self, config: Speech2TextConfig): super().__init__() self.embed_dim = config.d_model self.self_attn = Speech2TextAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.dropout = config.dropout self.activation_fn = ACT2FN[config.activation_function] self.activation_dropout = config.activation_dropout self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.encoder_attn = Speech2TextAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, is_decoder=True, ) self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim) self.fc1 = nn.Linear(self.embed_dim, config.decoder_ffn_dim) self.fc2 = nn.Linear(config.decoder_ffn_dim, self.embed_dim) self.final_layer_norm = nn.LayerNorm(self.embed_dim) def forward( self, hidden_states: torch.Tensor, attention_mask: Optional[torch.Tensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.Tensor] = None, layer_head_mask: Optional[torch.Tensor] = None, encoder_layer_head_mask: Optional[torch.Tensor] = None, past_key_value: Optional[Tuple[torch.Tensor]] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = True, ): """ Args: hidden_states (:obj:`torch.FloatTensor`): input to the layer of shape :obj:`(seq_len, batch, embed_dim)` attention_mask (:obj:`torch.FloatTensor`): attention mask of size :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (:obj:`torch.FloatTensor`): cross attention input to the layer of shape :obj:`(seq_len, batch, embed_dim)` encoder_attention_mask (:obj:`torch.FloatTensor`): encoder attention mask of size :obj:`(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`torch.FloatTensor`): mask for attention heads in a given layer of size :obj:`(config.encoder_attention_heads,)`. encoder_layer_head_mask (:obj:`torch.FloatTensor`): mask for encoder attention heads in a given layer of size :obj:`(config.encoder_attention_heads,)`. past_key_value (:obj:`Tuple(torch.FloatTensor)`): cached past key and value projection states output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. 
                See ``attentions`` under returned tensors for more detail.
        """
        residual = hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states=hidden_states,
            past_key_value=self_attn_past_key_value,
            attention_mask=attention_mask,
            layer_head_mask=layer_head_mask,
            output_attentions=output_attentions,
        )
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)

            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None
            hidden_states, cross_attn_weights, cross_attn_present_key_value = self.encoder_attn(
                hidden_states=hidden_states,
                key_value_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                layer_head_mask=layer_head_mask,
                past_key_value=cross_attn_past_key_value,
                output_attentions=output_attentions,
            )
            hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
            hidden_states = residual + hidden_states

            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        hidden_states = F.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)

        if use_cache:
            outputs += (present_key_value,)

        return outputs


class Speech2TextPreTrainedModel(PreTrainedModel):
    config_class = Speech2TextConfig
    base_model_prefix = "model"

    def _init_weights(self, module):
        std = self.config.init_std
        if isinstance(module, (nn.Linear, nn.Conv1d)):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=std)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()

    def _get_subsampled_output_lengths(self, input_lengths: torch.LongTensor):
        """
        Computes the output length of the convolutional layers
        """
        for i in range(self.config.num_conv_layers):
            input_lengths = (input_lengths - 1) // 2 + 1

        return input_lengths

    def _get_subsampled_encoder_attn_mask(self, attention_mask):
        # generate creates 3D attention mask, because of the shape of input_features
        # convert it to 2D if that's the case
        if len(attention_mask.shape) > 2:
            attention_mask = attention_mask[:, :, -1]

        subsampled_lengths = self._get_subsampled_output_lengths(attention_mask.sum(-1))
        max_len = subsampled_lengths.max().item()
        bsz = attention_mask.size()[0]
        attention_mask = torch.zeros((bsz, max_len), dtype=attention_mask.dtype, device=attention_mask.device)

        # these two operations make
sure that all values # before the output lengths indices are attended to attention_mask[(torch.arange(bsz, device=attention_mask.device), subsampled_lengths - 1)] = 1 attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).long() return attention_mask SPEECH_TO_TEXT_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.Speech2TextConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ SPEECH_TO_TEXT_INPUTS_DOCSTRING = r""" Args: input_features (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length, feature_size)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a ``.flac`` or ``.wav`` audio file into an array of type :obj:`List[float]` or a :obj:`numpy.ndarray`, *e.g.* via the soundfile library (``pip install soundfile``). To prepare the array into :obj:`input_features`, the :class:`~transformers.Speech2TextTokenizer` should be used for extracting the fbank features, padding and conversion into a tensor of type :obj:`torch.FloatTensor`. See :meth:`~transformers.Speech2TextTokenizer.__call__` attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Provide for translation and summarization training. By default, the model will create this tensor by shifting the :obj:`input_ids` to the right, following the paper. decoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`): Default behavior: generate a tensor that ignores pad tokens in :obj:`decoder_input_ids`. Causal mask will also be used by default. If you want to change padding behavior, you should read :func:`modeling_speech_to_text._prepare_decoder_inputs` and modify to your needs. See diagram 1 in `the paper <https://arxiv.org/abs/1910.13461>`__ for more information on the default strategy. head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. decoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. 
encoder_outputs (:obj:`tuple(tuple(torch.FloatTensor)`, `optional`): Tuple consists of (:obj:`last_hidden_state`, `optional`: :obj:`hidden_states`, `optional`: :obj:`attentions`) :obj:`last_hidden_state` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`) is a sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. decoder_inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, target_sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`decoder_input_ids` you can choose to directly pass an embedded representation. If :obj:`past_key_values` is used, optionally only the last :obj:`decoder_inputs_embeds` have to be input (see :obj:`past_key_values`). This is useful if you want more control over how to convert :obj:`decoder_input_ids` indices into associated vectors than the model's internal embedding lookup matrix. If :obj:`decoder_input_ids` and :obj:`decoder_inputs_embeds` are both unset, :obj:`decoder_inputs_embeds` takes the value of :obj:`inputs_embeds`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ class Speech2TextEncoder(Speech2TextPreTrainedModel): """ Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a :class:`Speech2TextEncoderLayer`. 
Args: config: Speech2TextConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: Speech2TextConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.encoder_layerdrop embed_dim = config.d_model self.padding_idx = config.pad_token_id self.max_source_positions = config.max_source_positions self.embed_scale = math.sqrt(embed_dim) if config.scale_embedding else 1.0 self.conv = Conv1dSubsampler(config) self.embed_positions = Speech2TextSinusoidalPositionalEmbedding( self.max_source_positions, embed_dim, self.padding_idx, ) self.layers = nn.ModuleList([Speech2TextEncoderLayer(config) for _ in range(config.encoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.init_weights() def forward( self, input_features, attention_mask=None, head_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_features (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length, feature_size)`): Float values of fbank features extracted from the raw speech waveform. Raw speech waveform can be obtained by loading a ``.flac`` or ``.wav`` audio file into an array of type :obj:`List[float]` or a :obj:`numpy.ndarray`, *e.g.* via the soundfile library (``pip install soundfile``). To prepare the array into :obj:`input_features`, the :class:`~transformers.Speech2TextTokenizer` should be used for extracting the fbank features, padding and conversion into a tensor of type :obj:`torch.FloatTensor`. See :meth:`~transformers.Speech2TextTokenizer.__call__` attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing convolution and attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if attention_mask is not None: attention_mask = self._get_subsampled_encoder_attn_mask(attention_mask) inputs_embeds = self.conv(input_features) inputs_embeds = self.embed_scale * inputs_embeds if attention_mask is None: padding_mask = torch.zeros_like(inputs_embeds, dtype=torch.long) else: padding_mask = attention_mask.ne(1).long() embed_pos = self.embed_positions(padding_mask) hidden_states = inputs_embeds + embed_pos hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) # expand attention_mask if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(attention_mask, inputs_embeds.dtype) encoder_states = () if output_hidden_states else None all_attentions = () if output_attentions else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, encoder_layer in enumerate(self.layers): if output_hidden_states: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): # skip the layer layer_outputs = (None, None) else: if getattr(self.config, "gradient_checkpointing", False) and self.training: def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(encoder_layer), hidden_states, attention_mask, (head_mask[idx] if head_mask is not None else None), ) else: layer_outputs = encoder_layer( hidden_states, attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), output_attentions=output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) hidden_states = self.layer_norm(hidden_states) if output_hidden_states: encoder_states = encoder_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) class Speech2TextDecoder(Speech2TextPreTrainedModel): """ Transformer decoder consisting of *config.decoder_layers* layers. 
Each layer is a :class:`Speech2TextDecoderLayer` Args: config: Speech2TextConfig embed_tokens (torch.nn.Embedding): output embedding """ def __init__(self, config: Speech2TextConfig): super().__init__(config) self.dropout = config.dropout self.layerdrop = config.decoder_layerdrop self.padding_idx = config.pad_token_id self.max_target_positions = config.max_target_positions self.embed_scale = math.sqrt(config.d_model) if config.scale_embedding else 1.0 self.embed_tokens = nn.Embedding(config.vocab_size, config.d_model, self.padding_idx) self.embed_positions = Speech2TextSinusoidalPositionalEmbedding( self.max_target_positions, config.d_model, self.padding_idx, ) self.layers = nn.ModuleList([Speech2TextDecoderLayer(config) for _ in range(config.decoder_layers)]) self.layer_norm = nn.LayerNorm(config.d_model) self.init_weights() def get_input_embeddings(self): return self.embed_tokens def set_input_embeddings(self, value): self.embed_tokens = value def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): # create causal mask # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, past_key_values_length=past_key_values_length ).to(self.device) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask # Copied from transformers.models.mbart.modeling_mbart.MBartDecoder.forward with MBart->Speech2Text def forward( self, input_ids=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.Speech2TextTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention of the decoder. encoder_attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`): Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? 
<../glossary.html#attention-mask>`__ head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. encoder_head_mask (:obj:`torch.Tensor` of shape :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention on hidden heads. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the heas is **masked**. past_key_values (:obj:`Tuple[Tuple[torch.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2 tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids`` of shape :obj:`(batch_size, sequence_length)`. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. 
""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict # retrieve input_ids and inputs_embeds if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if inputs_embeds is None: inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_key_values_length ) # expand encoder attention mask if encoder_hidden_states is not None and encoder_attention_mask is not None: encoder_attention_mask = self._get_subsampled_encoder_attn_mask(encoder_attention_mask) # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] encoder_attention_mask = _expand_mask(encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]) # embed positions positions = self.embed_positions(input_ids, past_key_values_length=past_key_values_length) hidden_states = inputs_embeds + positions hidden_states = F.dropout(hidden_states, p=self.dropout, training=self.training) # decoder layers all_hidden_states = () if output_hidden_states else None all_self_attns = () if output_attentions else None all_cross_attentions = () if (output_attentions and encoder_hidden_states is not None) else None next_decoder_cache = () if use_cache else None # check if head_mask has a correct number of layers specified if desired if head_mask is not None: assert head_mask.size()[0] == ( len(self.layers) ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}." for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if output_hidden_states: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if self.training and (dropout_probability < self.layerdrop): continue past_key_value = past_key_values[idx] if past_key_values is not None else None if getattr(self.config, "gradient_checkpointing", False) and self.training: if use_cache: logger.warn( "`use_cache = True` is incompatible with `config.gradient_checkpointing = True`. Setting `use_cache = False`..." 
) use_cache = False def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, output_attentions, use_cache) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(decoder_layer), hidden_states, attention_mask, encoder_hidden_states, encoder_attention_mask, head_mask[idx] if head_mask is not None else None, encoder_head_mask[idx] if encoder_head_mask is not None else None, None, ) else: layer_outputs = decoder_layer( hidden_states, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, layer_head_mask=(head_mask[idx] if head_mask is not None else None), encoder_layer_head_mask=(encoder_head_mask[idx] if encoder_head_mask is not None else None), past_key_value=past_key_value, output_attentions=output_attentions, use_cache=use_cache, ) hidden_states = layer_outputs[0] if use_cache: next_decoder_cache += (layer_outputs[3 if output_attentions else 1],) if output_attentions: all_self_attns += (layer_outputs[1],) if encoder_hidden_states is not None: all_cross_attentions += (layer_outputs[2],) hidden_states = self.layer_norm(hidden_states) # add hidden states from the last decoder layer if output_hidden_states: all_hidden_states += (hidden_states,) next_cache = next_decoder_cache if use_cache else None if not return_dict: return tuple( v for v in [hidden_states, next_cache, all_hidden_states, all_self_attns, all_cross_attentions] if v is not None ) return BaseModelOutputWithPastAndCrossAttentions( last_hidden_state=hidden_states, past_key_values=next_cache, hidden_states=all_hidden_states, attentions=all_self_attns, cross_attentions=all_cross_attentions, ) @add_start_docstrings( "The bare Speech2Text Model outputting raw hidden-states without any specific head on top.", SPEECH_TO_TEXT_START_DOCSTRING, ) class Speech2TextModel(Speech2TextPreTrainedModel): def __init__(self, config: Speech2TextConfig): super().__init__(config) self.encoder = Speech2TextEncoder(config) self.decoder = Speech2TextDecoder(config) self.init_weights() def get_input_embeddings(self): return self.decoder.embed_tokens def set_input_embeddings(self, value): self.decoder.embed_tokens = value def get_encoder(self): return self.encoder def get_decoder(self): return self.decoder @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint="s2t_transformer_s", output_type=Seq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_features=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = return_dict if return_dict is not None else self.config.use_return_dict if encoder_outputs is None: encoder_outputs = self.encoder( input_features, attention_mask=attention_mask, head_mask=head_mask, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) # If the user passed a tuple for 
encoder_outputs, we wrap it in a BaseModelOutput when return_dict=True elif return_dict and not isinstance(encoder_outputs, BaseModelOutput): encoder_outputs = BaseModelOutput( last_hidden_state=encoder_outputs[0], hidden_states=encoder_outputs[1] if len(encoder_outputs) > 1 else None, attentions=encoder_outputs[2] if len(encoder_outputs) > 2 else None, ) # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn) decoder_outputs = self.decoder( input_ids=decoder_input_ids, attention_mask=decoder_attention_mask, encoder_hidden_states=encoder_outputs[0], encoder_attention_mask=attention_mask, head_mask=decoder_head_mask, encoder_head_mask=head_mask, past_key_values=past_key_values, inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) if not return_dict: return decoder_outputs + encoder_outputs return Seq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, decoder_attentions=decoder_outputs.attentions, cross_attentions=decoder_outputs.cross_attentions, encoder_last_hidden_state=encoder_outputs.last_hidden_state, encoder_hidden_states=encoder_outputs.hidden_states, encoder_attentions=encoder_outputs.attentions, ) @add_start_docstrings( "The Speech2Text Model with a language modeling head. Can be used for summarization.", SPEECH_TO_TEXT_START_DOCSTRING, ) class Speech2TextForConditionalGeneration(Speech2TextPreTrainedModel): base_model_prefix = "model" _keys_to_ignore_on_load_missing = [ r"encoder\.version", r"decoder\.version", r"model.encoder.embed_positions.weights", r"model.decoder.embed_positions.weights", ] _keys_to_ignore_on_save = [ r"model.encoder.embed_positions.weights", r"model.decoder.embed_positions.weights", ] def __init__(self, config: Speech2TextConfig): super().__init__(config) self.model = Speech2TextModel(config) self.lm_head = nn.Linear(config.d_model, self.config.vocab_size, bias=False) self.init_weights() def get_encoder(self): return self.model.get_encoder() def get_decoder(self): return self.model.get_decoder() def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding: new_embeddings = super().resize_token_embeddings(new_num_tokens) return new_embeddings def get_output_embeddings(self): return self.lm_head def set_output_embeddings(self, new_embeddings): self.lm_head = new_embeddings @add_start_docstrings_to_model_forward(SPEECH_TO_TEXT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=Seq2SeqLMOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_features=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs=None, past_key_values=None, decoder_inputs_embeds=None, labels=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the language modeling loss. Indices should either be in ``[0, ..., config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``. 
        Returns:

        Example::

            >>> import torch
            >>> from transformers import Speech2TextProcessor, Speech2TextForConditionalGeneration
            >>> from datasets import load_dataset
            >>> import soundfile as sf

            >>> model = Speech2TextForConditionalGeneration.from_pretrained("facebook/s2t-small-librispeech-asr")
            >>> processor = Speech2TextProcessor.from_pretrained("facebook/s2t-small-librispeech-asr")

            >>> def map_to_array(batch):
            >>>     speech, _ = sf.read(batch["file"])
            >>>     batch["speech"] = speech
            >>>     return batch

            >>> ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation")
            >>> ds = ds.map(map_to_array)

            >>> input_features = processor(ds["speech"][0], sampling_rate=16_000, return_tensors="pt").input_features  # Batch size 1
            >>> generated_ids = model.generate(input_ids=input_features)

            >>> transcription = processor.batch_decode(generated_ids)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if labels is not None:
            if decoder_input_ids is None:
                decoder_input_ids = shift_tokens_right(
                    labels, self.config.pad_token_id, self.config.decoder_start_token_id
                )

        outputs = self.model(
            input_features,
            attention_mask=attention_mask,
            decoder_input_ids=decoder_input_ids,
            encoder_outputs=encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            head_mask=head_mask,
            decoder_head_mask=decoder_head_mask,
            past_key_values=past_key_values,
            decoder_inputs_embeds=decoder_inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        lm_logits = self.lm_head(outputs[0])

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(lm_logits.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return Seq2SeqLMOutput(
            loss=loss,
            logits=lm_logits,
            past_key_values=outputs.past_key_values,
            decoder_hidden_states=outputs.decoder_hidden_states,
            decoder_attentions=outputs.decoder_attentions,
            cross_attentions=outputs.cross_attentions,
            encoder_last_hidden_state=outputs.encoder_last_hidden_state,
            encoder_hidden_states=outputs.encoder_hidden_states,
            encoder_attentions=outputs.encoder_attentions,
        )

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past=None,
        attention_mask=None,
        head_mask=None,
        use_cache=None,
        encoder_outputs=None,
        **kwargs
    ):
        # cut decoder_input_ids if past is used
        if past is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "encoder_outputs": encoder_outputs,
            "past_key_values": past,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "head_mask": head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    @staticmethod
    def _reorder_cache(past, beam_idx):
        reordered_past = ()
        for layer_past in past:
            reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),)
        return reordered_past
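

# A small worked example of `shift_tokens_right` defined above, using made-up token ids: during
# training, `labels` are shifted one position to the right to build `decoder_input_ids`, the
# first position is filled with `decoder_start_token_id`, and any `-100` ignore-index values
# that land in the shifted tensor are replaced by `pad_token_id`.
if __name__ == "__main__":
    labels = torch.tensor([[5, 6, -100, -100]])
    decoder_input_ids = shift_tokens_right(labels, pad_token_id=1, decoder_start_token_id=2)
    # tensor([[2, 5, 6, 1]])
    print(decoder_input_ids)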
AdaMix/src/transformers/models/speech_to_text/modeling_speech_to_text.py/0
{ "file_path": "AdaMix/src/transformers/models/speech_to_text/modeling_speech_to_text.py", "repo_id": "AdaMix", "token_count": 27410 }
62
# coding=utf-8 # Copyright 2020 Google Research and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch TAPAS model. """ import enum import math import os from dataclasses import dataclass from typing import Optional, Tuple import torch import torch.nn as nn import torch.utils.checkpoint from torch.nn import CrossEntropyLoss, MSELoss from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_start_docstrings, add_start_docstrings_to_model_forward, is_scatter_available, replace_return_docstrings, requires_scatter, ) from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, MaskedLMOutput, SequenceClassifierOutput from ...modeling_utils import ( PreTrainedModel, apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer, ) from ...utils import logging from .configuration_tapas import TapasConfig # soft dependency if is_scatter_available(): from torch_scatter import scatter logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "TapasConfig" _TOKENIZER_FOR_DOC = "TapasTokenizer" TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST = [ # large models "google/tapas-large", "google/tapas-large-finetuned-sqa", "google/tapas-large-finetuned-wtq", "google/tapas-large-finetuned-wikisql-supervised", "google/tapas-large-finetuned-tabfact", # base models "google/tapas-base", "google/tapas-base-finetuned-sqa", "google/tapas-base-finetuned-wtq", "google/tapas-base-finetuned-wikisql-supervised", "google/tapas-base-finetuned-tabfact", # small models "google/tapas-small", "google/tapas-small-finetuned-sqa", "google/tapas-small-finetuned-wtq", "google/tapas-small-finetuned-wikisql-supervised", "google/tapas-small-finetuned-tabfact", # mini models "google/tapas-mini", "google/tapas-mini-finetuned-sqa", "google/tapas-mini-finetuned-wtq", "google/tapas-mini-finetuned-wikisql-supervised", "google/tapas-mini-finetuned-tabfact", # tiny models "google/tapas-tiny", "google/tapas-tiny-finetuned-sqa", "google/tapas-tiny-finetuned-wtq", "google/tapas-tiny-finetuned-wikisql-supervised", "google/tapas-tiny-finetuned-tabfact", # See all TAPAS models at https://huggingface.co/models?filter=tapas ] EPSILON_ZERO_DIVISION = 1e-10 CLOSE_ENOUGH_TO_LOG_ZERO = -10000.0 @dataclass class TableQuestionAnsweringOutput(ModelOutput): """ Output type of :class:`~transformers.TapasForQuestionAnswering`. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` (and possibly :obj:`answer`, :obj:`aggregation_labels`, :obj:`numeric_values` and :obj:`numeric_values_scale` are provided)): Total loss as the sum of the hierarchical cell selection log-likelihood loss and (optionally) the semi-supervised regression loss and (optionally) supervised loss for aggregations. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Prediction scores of the cell selection head, for every token. 
logits_aggregation (:obj:`torch.FloatTensor`, `optional`, of shape :obj:`(batch_size, num_aggregation_labels)`): Prediction scores of the aggregation head, for every aggregation operator. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None logits_aggregation: torch.FloatTensor = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None def load_tf_weights_in_tapas(model, config, tf_checkpoint_path): """ Load tf checkpoints in a PyTorch model. This is an adaptation from load_tf_weights_in_bert - add cell selection and aggregation heads - take into account additional token type embedding layers """ try: import re import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." ) raise tf_path = os.path.abspath(tf_checkpoint_path) logger.info("Converting TensorFlow checkpoint from {}".format(tf_path)) # Load weights from TF model init_vars = tf.train.list_variables(tf_path) names = [] arrays = [] for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) names.append(name) arrays.append(array) for name, array in zip(names, arrays): name = name.split("/") # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v # which are not required for using pretrained model if any( n in [ "adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step", "seq_relationship", ] for n in name ): logger.info("Skipping {}".format("/".join(name))) continue # in case the model is TapasForSequenceClassification, we skip output_bias and output_weights # since these are not used for classification if isinstance(model, TapasForSequenceClassification): if any(n in ["output_bias", "output_weights"] for n in name): logger.info("Skipping {}".format("/".join(name))) continue # in case the model is TapasModel, we skip output_bias, output_weights, output_bias_cls and output_weights_cls # since this model does not have MLM and NSP heads if isinstance(model, TapasModel): if any(n in ["output_bias", "output_weights", "output_bias_cls", "output_weights_cls"] for n in name): logger.info("Skipping {}".format("/".join(name))) continue # if first scope name starts with "bert", change it to "tapas" if name[0] == "bert": name[0] = "tapas" pointer = model for m_name in name: if re.fullmatch(r"[A-Za-z]+_\d+", m_name): scope_names = re.split(r"_(\d+)", m_name) else: scope_names = [m_name] if scope_names[0] == "kernel" or scope_names[0] == 
"gamma": pointer = getattr(pointer, "weight") elif scope_names[0] == "beta": pointer = getattr(pointer, "bias") # cell selection heads elif scope_names[0] == "output_bias": pointer = getattr(pointer, "output_bias") elif scope_names[0] == "output_weights": pointer = getattr(pointer, "output_weights") elif scope_names[0] == "column_output_bias": pointer = getattr(pointer, "column_output_bias") elif scope_names[0] == "column_output_weights": pointer = getattr(pointer, "column_output_weights") # aggregation head elif scope_names[0] == "output_bias_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_agg": pointer = getattr(pointer, "aggregation_classifier") pointer = getattr(pointer, "weight") # classification head elif scope_names[0] == "output_bias_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "bias") elif scope_names[0] == "output_weights_cls": pointer = getattr(pointer, "classifier") pointer = getattr(pointer, "weight") else: try: pointer = getattr(pointer, scope_names[0]) except AttributeError: logger.info("Skipping {}".format("/".join(name))) continue if len(scope_names) >= 2: num = int(scope_names[1]) pointer = pointer[num] if m_name[-11:] == "_embeddings": pointer = getattr(pointer, "weight") elif m_name[-13:] in [f"_embeddings_{i}" for i in range(7)]: pointer = getattr(pointer, "weight") elif m_name == "kernel": array = np.transpose(array) try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) # Added a check to see whether the array is a scalar (because bias terms in Tapas checkpoints can be # scalar => should first be converted to numpy arrays) if np.isscalar(array): array = np.array(array) pointer.data = torch.from_numpy(array) return model class TapasEmbeddings(nn.Module): """ Construct the embeddings from word, position and token_type embeddings. Same as BertEmbeddings but with a number of additional token type embeddings to encode tabular structure. 
""" def __init__(self, config): super().__init__() # we do not include config.disabled_features and config.disable_position_embeddings from the original implementation # word embeddings self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) # position embeddings self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # token type embeddings for i, type_vocab_sizes in enumerate(config.type_vocab_sizes): name = f"token_type_embeddings_{i}" setattr(self, name, nn.Embedding(type_vocab_sizes, config.hidden_size)) self.number_of_token_type_embeddings = len(config.type_vocab_sizes) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.config = config def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: # create absolute position embeddings position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) # when self.config.reset_position_index_per_cell is set to True, create relative position embeddings if self.config.reset_position_index_per_cell: # shape (batch_size, seq_len) col_index = IndexMap(token_type_ids[:, :, 1], self.config.type_vocab_sizes[1], batch_dims=1) # shape (batch_size, seq_len) row_index = IndexMap(token_type_ids[:, :, 2], self.config.type_vocab_sizes[2], batch_dims=1) # shape (batch_size, seq_len) full_index = ProductIndexMap(col_index, row_index) # shape (max_rows * max_columns,). First absolute position for every cell first_position_per_segment = reduce_min(position_ids, full_index)[0] # ? shape (batch_size, seq_len). 
                # First absolute position of the cell for every token
                first_position = gather(first_position_per_segment, full_index)
                # shape (1, seq_len)
                position = torch.arange(seq_length, dtype=torch.long, device=device).unsqueeze(0)
                position_ids = torch.min(
                    torch.as_tensor(self.config.max_position_embeddings - 1, device=device), position - first_position
                )

        if token_type_ids is None:
            token_type_ids = torch.zeros(
                (*input_shape, self.number_of_token_type_embeddings), dtype=torch.long, device=device
            )

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)

        embeddings = inputs_embeds + position_embeddings

        for i in range(self.number_of_token_type_embeddings):
            name = f"token_type_embeddings_{i}"
            embeddings += getattr(self, name)(token_type_ids[:, :, i])

        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)

        return embeddings


class TapasSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
                f"heads {config.num_attention_heads}"
            )

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

        self.is_decoder = config.is_decoder

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_value=None,
        output_attentions=False,
    ):
        mixed_query_layer = self.query(hidden_states)

        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        is_cross_attention = encoder_hidden_states is not None

        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions
            key_layer = past_key_value[0]
            value_layer = past_key_value[1]
            attention_mask = encoder_attention_mask
        elif is_cross_attention:
            key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
            value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
            attention_mask = encoder_attention_mask
        elif past_key_value is not None:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))
            key_layer = torch.cat([past_key_value[0], key_layer], dim=2)
            value_layer = torch.cat([past_key_value[1], value_layer], dim=2)
        else:
            key_layer = self.transpose_for_scores(self.key(hidden_states))
            value_layer = self.transpose_for_scores(self.value(hidden_states))

        query_layer = self.transpose_for_scores(mixed_query_layer)

        if self.is_decoder:
            past_key_value = (key_layer, value_layer)

        # Take the dot product between "query" and "key" to get the raw attention scores.
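        # Illustrative shapes for the step below (hypothetical sizes, e.g. a base-sized config with
        # hidden_size=768 and num_attention_heads=12, giving attention_head_size=64):
        #   query_layer / key_layer: (batch_size, 12, seq_length, 64)
        #   attention_scores = query_layer @ key_layer^T: (batch_size, 12, seq_length, seq_length),
        #   subsequently scaled by 1 / sqrt(attention_head_size).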
attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in TapasModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if output_attentions else (context_layer,) if self.is_decoder: outputs = outputs + (past_key_value,) return outputs # Copied from transformers.models.bert.modeling_bert.BertSelfOutput class TapasSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertAttention with Bert->Tapas class TapasAttention(nn.Module): def __init__(self, config): super().__init__() self.self = TapasSelfAttention(config) self.output = TapasSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return heads, index = find_pruneable_heads_and_indices( heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads ) # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, past_key_value, output_attentions, ) attention_output = self.output(self_outputs[0], hidden_states) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs # Copied from transformers.models.bert.modeling_bert.BertIntermediate class TapasIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def 
forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertOutput class TapasOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states # Copied from transformers.models.bert.modeling_bert.BertLayer with Bert->Tapas class TapasLayer(nn.Module): def __init__(self, config): super().__init__() self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 self.attention = TapasAttention(config) self.is_decoder = config.is_decoder self.add_cross_attention = config.add_cross_attention if self.add_cross_attention: assert self.is_decoder, f"{self} should be used as a decoder model if cross attention is added" self.crossattention = TapasAttention(config) self.intermediate = TapasIntermediate(config) self.output = TapasOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_value=None, output_attentions=False, ): # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None self_attention_outputs = self.attention( hidden_states, attention_mask, head_mask, output_attentions=output_attentions, past_key_value=self_attn_past_key_value, ) attention_output = self_attention_outputs[0] # if decoder, the last output is tuple of self-attn cache if self.is_decoder: outputs = self_attention_outputs[1:-1] present_key_value = self_attention_outputs[-1] else: outputs = self_attention_outputs[1:] # add self attentions if we output attention weights cross_attn_present_key_value = None if self.is_decoder and encoder_hidden_states is not None: assert hasattr( self, "crossattention" ), f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`" # cross_attn cached key/values tuple is at positions 3,4 of past_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, cross_attn_past_key_value, output_attentions, ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:-1] # add cross attentions if we output attention weights # add cross-attn cache to positions 3,4 of present_key_value tuple cross_attn_present_key_value = cross_attention_outputs[-1] present_key_value = present_key_value + cross_attn_present_key_value layer_output = apply_chunking_to_forward( self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output ) outputs = (layer_output,) + outputs # if decoder, return the attn key/values as the last output if self.is_decoder: outputs = outputs + (present_key_value,) return outputs def feed_forward_chunk(self, attention_output): intermediate_output = self.intermediate(attention_output) layer_output = 
self.output(intermediate_output, attention_output) return layer_output class TapasEncoder(nn.Module): def __init__(self, config): super().__init__() self.config = config self.layer = nn.ModuleList([TapasLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, past_key_values=None, use_cache=None, output_attentions=False, output_hidden_states=False, return_dict=True, ): all_hidden_states = () if output_hidden_states else None all_attentions = () if output_attentions else None for i, layer_module in enumerate(self.layer): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_head_mask = head_mask[i] if head_mask is not None else None if getattr(self.config, "gradient_checkpointing", False): def create_custom_forward(module): def custom_forward(*inputs): return module(*inputs, past_key_values, output_attentions) return custom_forward layer_outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(layer_module), hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, ) else: layer_outputs = layer_module( hidden_states, attention_mask, layer_head_mask, encoder_hidden_states, encoder_attention_mask, past_key_values, output_attentions, ) hidden_states = layer_outputs[0] if output_attentions: all_attentions = all_attentions + (layer_outputs[1],) if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None) return BaseModelOutput( last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions ) # Copied from transformers.models.bert.modeling_bert.BertPooler class TapasPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class TapasPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = TapasConfig base_model_prefix = "tapas" # Copied from transformers.models.bert.modeling_bert.BertPreTrainedModel._init_weights def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) TAPAS_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) 
This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.TapasConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ TAPAS_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.TapasTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0}, 7)`, `optional`): Token indices that encode tabular structure. Indices can be obtained using :class:`~transformers.TapasTokenizer`. See this class for more info. `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. If ``reset_position_index_per_cell`` of :class:`~transformers.TapasConfig` is set to ``True``, relative position embeddings will be used. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare Tapas Model transformer outputting raw hidden-states without any specific head on top.", TAPAS_START_DOCSTRING, ) class TapasModel(TapasPreTrainedModel): """ This class is a small change compared to :class:`~transformers.BertModel`, taking into account the additional token type ids. 
The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of cross-attention is added between the self-attention layers, following the architecture described in `Attention is all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin. """ def __init__(self, config, add_pooling_layer=True): requires_scatter(self) super().__init__(config) self.config = config self.embeddings = TapasEmbeddings(config) self.encoder = TapasEncoder(config) self.pooler = TapasPooler(config) if add_pooling_layer else None self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" Returns: Examples:: >>> from transformers import TapasTokenizer, TapasModel >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained('google/tapas-base') >>> model = TapasModel.from_pretrained('google/tapas-base') >>> data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... 'Age': ["56", "45", "59"], ... 'Number of movies': ["87", "53", "69"] ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = outputs.last_hidden_state """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
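        # get_extended_attention_mask (inherited from PreTrainedModel) turns the (batch_size, seq_length)
        # mask of 1s and 0s into an additive mask of shape (batch_size, 1, 1, seq_length): 0.0 for positions
        # to attend to and a large negative value for masked positions, so it can be added directly to the
        # raw attention scores before the softmax. Illustrative example (hypothetical values):
        #   attention_mask = [[1, 1, 0]]  ->  extended mask ~ [[[[0.0, 0.0, -10000.0]]]]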
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape, device)

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings("""Tapas Model with a `language modeling` head on top. """, TAPAS_START_DOCSTRING)
class TapasForMaskedLM(TapasPreTrainedModel):
    config_class = TapasConfig
    base_model_prefix = "tapas"

    def __init__(self, config):
        super().__init__(config)

        self.tapas = TapasModel(config, add_pooling_layer=False)
        self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)

        self.init_weights()

    def get_output_embeddings(self):
        return self.lm_head

    def set_output_embeddings(self, word_embeddings):
        self.lm_head = word_embeddings

    @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=MaskedLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        **kwargs
    ):
        r"""
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss.
            Indices should be in ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with
            indices set to ``-100`` are ignored (masked), the loss is only computed for the tokens with labels in
            ``[0, ..., config.vocab_size]``

        Returns:

        Examples::

            >>> from transformers import TapasTokenizer, TapasForMaskedLM
            >>> import pandas as pd

            >>> tokenizer = TapasTokenizer.from_pretrained('google/tapas-base')
            >>> model = TapasForMaskedLM.from_pretrained('google/tapas-base')

            >>> data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"],
            ...         'Age': ["56", "45", "59"],
            ...         'Number of movies': ["87", "53", "69"]
            ... }
            >>> table = pd.DataFrame.from_dict(data)

            >>> inputs = tokenizer(table=table, queries="How many [MASK] has George [MASK] played in?", return_tensors="pt")
            >>> labels = tokenizer(table=table, queries="How many movies has George Clooney played in?", return_tensors="pt")["input_ids"]

            >>> outputs = model(**inputs, labels=labels)
            >>> loss = outputs.loss
            >>> logits = outputs.logits
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.tapas(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        prediction_scores = self.lm_head(sequence_output)

        masked_lm_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()  # -100 index = padding token
            masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + outputs[2:]
            return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output

        return MaskedLMOutput(
            loss=masked_lm_loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    Tapas Model with a cell selection head and optional aggregation head on top for question-answering tasks on
    tables (linear layers on top of the hidden-states output to compute `logits` and optional
    `logits_aggregation`), e.g. for SQA, WTQ or WikiSQL-supervised tasks.
    """,
    TAPAS_START_DOCSTRING,
)
class TapasForQuestionAnswering(TapasPreTrainedModel):
    def __init__(self, config: TapasConfig):
        super().__init__(config)

        # base model
        self.tapas = TapasModel(config)

        # dropout (only used when training)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

        # cell selection heads
        if config.init_cell_selection_weights_to_zero:
            # init_cell_selection_weights_to_zero: Whether the initial weights should be
            # set to 0. This ensures that all tokens have the same prior probability.
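            # With zero weights and a zero bias, every token logit is w.h + b = 0, i.e. a Bernoulli
            # probability of sigmoid(0) = 0.5 for each token, so no cell is favoured before fine-tuning.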
self.output_weights = nn.Parameter(torch.zeros(config.hidden_size)) self.column_output_weights = nn.Parameter(torch.zeros(config.hidden_size)) else: self.output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.column_output_weights = nn.Parameter(torch.empty(config.hidden_size)) nn.init.normal_( self.column_output_weights, std=config.initializer_range ) # here, a truncated normal is used in the original implementation self.output_bias = nn.Parameter(torch.zeros([])) self.column_output_bias = nn.Parameter(torch.zeros([])) # aggregation head if config.num_aggregation_labels > 0: self.aggregation_classifier = nn.Linear(config.hidden_size, config.num_aggregation_labels) self.init_weights() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TableQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, table_mask=None, labels=None, aggregation_labels=None, float_answer=None, numeric_values=None, numeric_values_scale=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" table_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, seq_length)`, `optional`): Mask for the table. Indicates which tokens belong to the table (1). Question tokens, table headers and padding are 0. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, seq_length)`, `optional`): Labels per token for computing the hierarchical cell selection loss. This encodes the positions of the answer appearing in the table. Can be obtained using :class:`~transformers.TapasTokenizer`. - 1 for tokens that are **part of the answer**, - 0 for tokens that are **not part of the answer**. aggregation_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, )`, `optional`): Aggregation function index for every example in the batch for computing the aggregation loss. Indices should be in :obj:`[0, ..., config.num_aggregation_labels - 1]`. Only required in case of strong supervision for aggregation (WikiSQL-supervised). float_answer (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, )`, `optional`): Float answer for every example in the batch. Set to `float('nan')` for cell selection questions. Only required in case of weak supervision (WTQ) to calculate the aggregate mask and regression loss. numeric_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`, `optional`): Numeric values of every token, NaN for tokens which are not numeric values. Can be obtained using :class:`~transformers.TapasTokenizer`. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. numeric_values_scale (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`, `optional`): Scale of the numeric values of every token. Can be obtained using :class:`~transformers.TapasTokenizer`. Only required in case of weak supervision for aggregation (WTQ) to calculate the regression loss. 
Returns: Examples:: >>> from transformers import TapasTokenizer, TapasForQuestionAnswering >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained('google/tapas-base-finetuned-wtq') >>> model = TapasForQuestionAnswering.from_pretrained('google/tapas-base-finetuned-wtq') >>> data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... 'Age': ["56", "45", "59"], ... 'Number of movies': ["87", "53", "69"] ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["How many movies has George Clooney played in?", "How old is Brad Pitt?"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> outputs = model(**inputs) >>> logits = outputs.logits >>> logits_aggregation = outputs.logits_aggregation """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] pooled_output = outputs[1] sequence_output = self.dropout(sequence_output) if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device # Construct indices for the table. if token_type_ids is None: token_type_ids = torch.zeros( (*input_shape, len(self.config.type_vocab_sizes)), dtype=torch.long, device=device ) token_types = [ "segment_ids", "column_ids", "row_ids", "prev_labels", "column_ranks", "inv_column_ranks", "numeric_relations", ] row_ids = token_type_ids[:, :, token_types.index("row_ids")] column_ids = token_type_ids[:, :, token_types.index("column_ids")] row_index = IndexMap( indices=torch.min(row_ids, torch.as_tensor(self.config.max_num_rows - 1, device=row_ids.device)), num_segments=self.config.max_num_rows, batch_dims=1, ) col_index = IndexMap( indices=torch.min(column_ids, torch.as_tensor(self.config.max_num_columns - 1, device=column_ids.device)), num_segments=self.config.max_num_columns, batch_dims=1, ) cell_index = ProductIndexMap(row_index, col_index) # Masks. input_shape = input_ids.size() if input_ids is not None else inputs_embeds.size()[:-1] device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) # Table cells only, without question tokens and table headers. if table_mask is None: table_mask = torch.where(row_ids > 0, torch.ones_like(row_ids), torch.zeros_like(row_ids)) # torch.FloatTensor[batch_size, seq_length] input_mask_float = attention_mask.float().to(device) table_mask_float = table_mask.float().to(device) # Mask for cells that exist in the table (i.e. that are not padding). cell_mask, _ = reduce_mean(input_mask_float, cell_index) # Compute logits per token. These are used to select individual cells. logits = compute_token_logits(sequence_output, self.config.temperature, self.output_weights, self.output_bias) # Compute logits per column. These are used to select a column. 
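        # column_logits, when computed, has shape (batch_size, max_num_columns); column id 0 is the special
        # "select no column" id used for question/padding tokens (see compute_column_logits and
        # config.allow_empty_column_selection).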
column_logits = None if self.config.select_one_column: column_logits = compute_column_logits( sequence_output, self.column_output_weights, self.column_output_bias, cell_index, cell_mask, self.config.allow_empty_column_selection, ) # Aggregation logits logits_aggregation = None if self.config.num_aggregation_labels > 0: logits_aggregation = self.aggregation_classifier(pooled_output) # Total loss calculation total_loss = 0.0 calculate_loss = False if labels is not None: calculate_loss = True is_supervised = not self.config.num_aggregation_labels > 0 or not self.config.use_answer_as_supervision # Semi-supervised cell selection in case of no aggregation: # If the answer (the denotation) appears directly in the table we might # select the answer without applying any aggregation function. There are # some ambiguous cases, see utils._calculate_aggregate_mask for more info. # `aggregate_mask` is 1 for examples where we chose to aggregate and 0 # for examples where we chose to select the answer directly. # `labels` encodes the positions of the answer appearing in the table. if is_supervised: aggregate_mask = None else: if float_answer is not None: assert ( labels.shape[0] == float_answer.shape[0] ), "Make sure the answers are a FloatTensor of shape (batch_size,)" # <float32>[batch_size] aggregate_mask = _calculate_aggregate_mask( float_answer, pooled_output, self.config.cell_selection_preference, labels, self.aggregation_classifier, ) else: raise ValueError("You have to specify float answers in order to calculate the aggregate mask") # Cell selection log-likelihood if self.config.average_logits_per_cell: logits_per_cell, _ = reduce_mean(logits, cell_index) logits = gather(logits_per_cell, cell_index) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Compute cell selection loss per example. selection_loss_per_example = None if not self.config.select_one_column: weight = torch.where( labels == 0, torch.ones_like(labels, dtype=torch.float32), self.config.positive_label_weight * torch.ones_like(labels, dtype=torch.float32), ) selection_loss_per_token = -dist_per_token.log_prob(labels) * weight selection_loss_per_example = torch.sum(selection_loss_per_token * input_mask_float, dim=1) / ( torch.sum(input_mask_float, dim=1) + EPSILON_ZERO_DIVISION ) else: selection_loss_per_example, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) dist_per_token = torch.distributions.Bernoulli(logits=logits) # Supervised cell selection if self.config.disable_per_token_loss: pass elif is_supervised: total_loss += torch.mean(selection_loss_per_example) else: # For the not supervised case, do not assign loss for cell selection total_loss += torch.mean(selection_loss_per_example * (1.0 - aggregate_mask)) # Semi-supervised regression loss and supervised loss for aggregations if self.config.num_aggregation_labels > 0: if is_supervised: # Note that `aggregate_mask` is None if the setting is supervised. 
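                    # (strong supervision for aggregation: every example contributes to the aggregation
                    # loss, so no aggregate mask is needed at this point)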
if aggregation_labels is not None: assert ( labels.shape[0] == aggregation_labels.shape[0] ), "Make sure the aggregation labels are a LongTensor of shape (batch_size,)" per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) else: raise ValueError( "You have to specify aggregation labels in order to calculate the aggregation loss" ) else: # Set aggregation labels to zeros aggregation_labels = torch.zeros(labels.shape[0], dtype=torch.long, device=labels.device) per_example_additional_loss = _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, self.config.use_answer_as_supervision, self.config.num_aggregation_labels, self.config.aggregation_loss_weight, ) if self.config.use_answer_as_supervision: if numeric_values is not None and numeric_values_scale is not None: assert numeric_values.shape == numeric_values_scale.shape # Add regression loss for numeric answers which require aggregation. answer_loss, large_answer_loss_mask = _calculate_regression_loss( float_answer, aggregate_mask, dist_per_token, numeric_values, numeric_values_scale, table_mask_float, logits_aggregation, self.config, ) per_example_additional_loss += answer_loss # Zero loss for examples with answer_loss > cutoff. per_example_additional_loss *= large_answer_loss_mask else: raise ValueError( "You have to specify numeric values and numeric values scale in order to calculate the regression loss" ) total_loss += torch.mean(per_example_additional_loss) else: # if no label ids are provided, set them to zeros in order to properly compute logits labels = torch.zeros_like(logits) _, logits = _single_column_cell_selection_loss( logits, column_logits, labels, cell_index, col_index, cell_mask ) if not return_dict: output = (logits, logits_aggregation) + outputs[2:] return ((total_loss,) + output) if calculate_loss else output return TableQuestionAnsweringOutput( loss=total_loss if calculate_loss else None, logits=logits, logits_aggregation=logits_aggregation, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ Tapas Model with a sequence classification head on top (a linear layer on top of the pooled output), e.g. for table entailment tasks, such as TabFact (Chen et al., 2020). """, TAPAS_START_DOCSTRING, ) class TapasForSequenceClassification(TapasPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.tapas = TapasModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(TAPAS_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=SequenceClassifierOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. 
If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). Note: this is called "classification_class_index" in the original implementation. Returns: Examples:: >>> from transformers import TapasTokenizer, TapasForSequenceClassification >>> import torch >>> import pandas as pd >>> tokenizer = TapasTokenizer.from_pretrained('google/tapas-base-finetuned-tabfact') >>> model = TapasForSequenceClassification.from_pretrained('google/tapas-base-finetuned-tabfact') >>> data = {'Actors': ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], ... 'Age': ["56", "45", "59"], ... 'Number of movies': ["87", "53", "69"] ... } >>> table = pd.DataFrame.from_dict(data) >>> queries = ["There is only one actor who is 45 years old", "There are 3 actors which played in more than 60 movies"] >>> inputs = tokenizer(table=table, queries=queries, padding="max_length", return_tensors="pt") >>> labels = torch.tensor([1, 0]) # 1 means entailed, 0 means refuted >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.tapas( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) """ TAPAS utilities.""" class AverageApproximationFunction(str, enum.Enum): RATIO = "ratio" FIRST_ORDER = "first_order" SECOND_ORDER = "second_order" # Beginning of everything related to segmented tensors class IndexMap(object): """Index grouping entries within a tensor.""" def __init__(self, indices, num_segments, batch_dims=0): """ Creates an index Args: indices (:obj:`torch.LongTensor`, same shape as a `values` Tensor to which the indices refer): Tensor containing the indices. num_segments (:obj:`torch.LongTensor`): Scalar tensor, the number of segments. All elements in a batched segmented tensor must have the same number of segments (although many segments can be empty). batch_dims (:obj:`int`, `optional`, defaults to 0): The number of batch dimensions. The first `batch_dims` dimensions of a SegmentedTensor are treated as batch dimensions. Segments in different batch elements are always distinct even if they have the same index. """ self.indices = torch.as_tensor(indices) self.num_segments = torch.as_tensor(num_segments, device=indices.device) self.batch_dims = batch_dims def batch_shape(self): return self.indices.size()[: self.batch_dims] # returns a torch.Size object class ProductIndexMap(IndexMap): """The product of two indices.""" def __init__(self, outer_index, inner_index): """ Combines indices i and j into pairs (i, j). 
The result is an index where each segment (i, j) is the intersection of segments i and j. For example if the inputs represent table cells indexed by respectively rows and columns the output will be a table indexed by (row, column) pairs, i.e. by cell. The implementation combines indices {0, .., n - 1} and {0, .., m - 1} into {0, .., nm - 1}. The output has `num_segments` equal to `outer_index.num_segments` * `inner_index.num_segments` Args: outer_index (:obj:`IndexMap`): IndexMap. inner_index (:obj:`IndexMap`): IndexMap, must have the same shape as `outer_index`. """ if outer_index.batch_dims != inner_index.batch_dims: raise ValueError("outer_index.batch_dims and inner_index.batch_dims must be the same.") super().__init__( indices=(inner_index.indices + outer_index.indices * inner_index.num_segments), num_segments=inner_index.num_segments * outer_index.num_segments, batch_dims=inner_index.batch_dims, ) self.outer_index = outer_index self.inner_index = inner_index def project_outer(self, index): """Projects an index with the same index set onto the outer components.""" return IndexMap( indices=(index.indices // self.inner_index.num_segments).type(torch.float).floor().type(torch.long), num_segments=self.outer_index.num_segments, batch_dims=index.batch_dims, ) def project_inner(self, index): """Projects an index with the same index set onto the inner components.""" return IndexMap( indices=torch.fmod(index.indices, self.inner_index.num_segments) .type(torch.float) .floor() .type(torch.long), num_segments=self.inner_index.num_segments, batch_dims=index.batch_dims, ) def gather(values, index, name="segmented_gather"): """ Gathers from `values` using the index map. For each element in the domain of the index map this operation looks up a value for that index in `values`. Two elements from the same segment always get assigned the same value. Args: values (:obj:`torch.Tensor` of shape (B1, ..., Bn, num_segments, V1, ...)): Tensor with segment values. index (:obj:`IndexMap` of shape (B1, ..., Bn, I1, ..., Ik)): IndexMap. name (:obj:`str`, `optional`, defaults to 'segmented_gather'): Name for the operation. Currently not used Returns: :obj:`tuple(torch.Tensor)`: Tensor of shape (B1, ..., Bn, I1, ..., Ik, V1, ...) with the gathered values. """ indices = index.indices # first, check whether the indices of the index represent scalar values (i.e. not vectorized) if len(values.shape[index.batch_dims :]) < 2: return torch.gather( values, index.batch_dims, indices.view( values.size()[0], -1 ), # torch.gather expects index to have the same number of dimensions as values ).view(indices.size()) else: # this means we have a vectorized version # we have to adjust the index indices = indices.unsqueeze(-1).expand(values.shape) return torch.gather(values, index.batch_dims, indices) def flatten(index, name="segmented_flatten"): """ Flattens a batched index map (which is typically of shape batch_size, seq_length) to a 1d index map. This operation relabels the segments to keep batch elements distinct. The k-th batch element will have indices shifted by `num_segments` * (k - 1). The result is a tensor with `num_segments` multiplied by the number of elements in the batch. Args: index (:obj:`IndexMap`): IndexMap to flatten. name (:obj:`str`, `optional`, defaults to 'segmented_flatten'): Name for the operation. Currently not used Returns: (:obj:`IndexMap`): The flattened IndexMap. 
""" # first, get batch_size as scalar tensor batch_size = torch.prod(torch.tensor(list(index.batch_shape()))) # next, create offset as 1-D tensor of length batch_size, # and multiply element-wise by num segments (to offset different elements in the batch) e.g. if batch size is 2: [0, 64] offset = torch.arange(start=0, end=batch_size, device=index.num_segments.device) * index.num_segments offset = offset.view(index.batch_shape()) for _ in range(index.batch_dims, len(index.indices.size())): # typically range(1,2) offset = offset.unsqueeze(-1) indices = offset + index.indices return IndexMap(indices=indices.view(-1), num_segments=index.num_segments * batch_size, batch_dims=0) def range_index_map(batch_shape, num_segments, name="range_index_map"): """ Constructs an index map equal to range(num_segments). Args: batch_shape (:obj:`torch.Size`): Batch shape num_segments (:obj:`int`): Number of segments name (:obj:`str`, `optional`, defaults to 'range_index_map'): Name for the operation. Currently not used Returns: (:obj:`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ batch_shape = torch.as_tensor( batch_shape, dtype=torch.long ) # create a rank 1 tensor vector containing batch_shape (e.g. [2]) assert len(batch_shape.size()) == 1 num_segments = torch.as_tensor(num_segments) # create a rank 0 tensor (scalar) containing num_segments (e.g. 64) assert len(num_segments.size()) == 0 indices = torch.arange( start=0, end=num_segments, device=num_segments.device ) # create a rank 1 vector with num_segments elements new_tensor = torch.cat( [torch.ones_like(batch_shape, dtype=torch.long, device=num_segments.device), num_segments.unsqueeze(dim=0)], dim=0, ) # new_tensor is just a vector of [1 64] for example (assuming only 1 batch dimension) new_shape = [int(x) for x in new_tensor.tolist()] indices = indices.view(new_shape) multiples = torch.cat([batch_shape, torch.as_tensor([1])], dim=0) indices = indices.repeat(multiples.tolist()) # equivalent (in Numpy:) # indices = torch.as_tensor(np.tile(indices.numpy(), multiples.tolist())) return IndexMap(indices=indices, num_segments=num_segments, batch_dims=list(batch_shape.size())[0]) def _segment_reduce(values, index, segment_reduce_fn, name): """ Applies a segment reduction segment-wise. Args: values (:obj:`torch.Tensor`): Tensor with segment values. index (:obj:`IndexMap`): IndexMap. segment_reduce_fn (:obj:`str`): Name for the reduce operation. One of "sum", "mean", "max" or "min". name (:obj:`str`): Name for the operation. Currently not used Returns: (:obj:`IndexMap`): IndexMap of shape batch_shape with elements equal to range(num_segments). """ # Flatten the batch dimensions, as segments ops (scatter) do not support batching. # However if `values` has extra dimensions to the right keep them # unflattened. Segmented ops support vector-valued operations. flat_index = flatten(index) vector_shape = values.size()[len(index.indices.size()) :] # torch.Size object flattened_shape = torch.cat( [torch.as_tensor([-1], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long)], dim=0 ) # changed "view" by "reshape" in the following line flat_values = values.reshape(flattened_shape.tolist()) segment_means = scatter( src=flat_values, index=flat_index.indices.type(torch.long), dim=0, dim_size=flat_index.num_segments, reduce=segment_reduce_fn, ) # Unflatten the values. 
new_shape = torch.cat( [ torch.as_tensor(index.batch_shape(), dtype=torch.long), torch.as_tensor([index.num_segments], dtype=torch.long), torch.as_tensor(vector_shape, dtype=torch.long), ], dim=0, ) output_values = segment_means.view(new_shape.tolist()) output_index = range_index_map(index.batch_shape(), index.num_segments) return output_values, output_index def reduce_sum(values, index, name="segmented_reduce_sum"): """ Sums a tensor over its segments. Outputs 0 for empty segments. This operations computes the sum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a sum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (:obj:`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the sum must be taken segment-wise. index (:obj:`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (:obj:`str`, `optional`, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (:obj:`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (:obj:`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. . """ return _segment_reduce(values, index, "sum", name) def reduce_mean(values, index, name="segmented_reduce_mean"): """ Averages a tensor over its segments. Outputs 0 for empty segments. This operations computes the mean over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be a mean of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (:obj:`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the mean must be taken segment-wise. index (:obj:`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (:obj:`str`, `optional`, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (:obj:`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (:obj:`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "mean", name) def reduce_max(values, index, name="segmented_reduce_max"): """ Computes the maximum over segments. This operation computes the maximum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise maximum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (:obj:`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the max must be taken segment-wise. index (:obj:`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (:obj:`str`, `optional`, defaults to 'segmented_reduce_sum'): Name for the operation. 
Currently not used Returns: output_values (:obj:`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (:obj:`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "max", name) def reduce_min(values, index, name="segmented_reduce_min"): """ Computes the minimum over segments. This operations computes the minimum over segments, with support for: - Batching using the first dimensions [B1, B2, ..., Bn]. Each element in a batch can have different indices. - Vectorization using the last dimension [V1, V2, ...]. If they are present, the output will be an element-wise minimum of vectors rather than scalars. Only the middle dimensions [I1, ..., Ik] are reduced by the operation. Args: values (:obj:`torch.Tensor` of shape [B1, B2, ..., Bn, I1, .., Ik, V1, V2, ..]): Tensor containing the values of which the min must be taken segment-wise. index (:obj:`IndexMap`, indices are of shape [B1, B2, ..., Bn, I1, .., Ik].): Index defining the segments. name (:obj:`str`, `optional`, defaults to 'segmented_reduce_sum'): Name for the operation. Currently not used Returns: output_values (:obj:`torch.Tensor`of shape [B1, B2, ..., Bn, num_segments, V1, V2, ..]): Tensor containing the output values. output_index (:obj:`IndexMap`): IndexMap with shape [B1, B2, ..., Bn, num_segments]. """ return _segment_reduce(values, index, "min", name) # End of everything related to segmented tensors def compute_column_logits( sequence_output, column_output_weights, column_output_bias, cell_index, cell_mask, allow_empty_column_selection ): """ Computes the column logits. Args: sequence_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. column_output_weights (:obj:`torch.FloatTensor` of shape :obj:`(hidden_size)`): Weights of the linear layer for column selection. column_output_bias (:obj:`torch.FloatTensor` of shape :obj:`()`): Bias of the linear layer for column selection. cell_index (:obj:`ProductIndexMap`): Index that groups tokens into cells. cell_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). allow_empty_column_selection (:obj:`bool`): Whether to allow not to select any column Returns: column_logits (:obj:`torch.FloatTensor`of shape :obj:`(batch_size, max_num_cols)`): Tensor containing the column logits for every example in the batch. """ # First, compute the token logits (batch_size, seq_len) - without temperature token_logits = torch.einsum("bsj,j->bs", sequence_output, column_output_weights) + column_output_bias # Next, average the logits per cell (batch_size, max_num_cols*max_num_rows) cell_logits, cell_logits_index = reduce_mean(token_logits, cell_index) # Finally, average the logits per column (batch_size, max_num_cols) column_index = cell_index.project_inner(cell_logits_index) column_logits, out_index = reduce_sum(cell_logits * cell_mask, column_index) cell_count, _ = reduce_sum(cell_mask, column_index) column_logits /= cell_count + EPSILON_ZERO_DIVISION # Mask columns that do not appear in the example. 
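    # A column counts as padding when it contains no real cells (cell_count < 0.5) and it is not
    # the special index 0, which stands for "select no column".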
is_padding = torch.logical_and(cell_count < 0.5, ~torch.eq(out_index.indices, 0)) column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( is_padding, dtype=torch.float32, device=is_padding.device ) if not allow_empty_column_selection: column_logits += CLOSE_ENOUGH_TO_LOG_ZERO * torch.as_tensor( torch.eq(out_index.indices, 0), dtype=torch.float32, device=out_index.indices.device ) return column_logits def _single_column_cell_selection_loss(token_logits, column_logits, labels, cell_index, col_index, cell_mask): """ Computes the loss for cell selection constrained to a single column. The loss is a hierarchical log-likelihood. The model first predicts a column and then selects cells within that column (conditioned on the column). Cells outside the selected column are never selected. Args: token_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Tensor containing the logits per token. column_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, max_num_cols)`): Tensor containing the logits per column. labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Labels per token. cell_index (:obj:`ProductIndexMap`): Index that groups tokens into cells. col_index (:obj:`IndexMap`): Index that groups tokens into columns. cell_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, max_num_rows * max_num_cols)`): Mask for cells that exist in the table (i.e. that are not padding). Returns: selection_loss_per_example (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): Loss for each example. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): New logits which are only allowed to select cells in a single column. Logits outside of the most likely column according to `column_logits` will be set to a very low value (such that the probabilities are 0). """ # Part 1: column loss # First find the column we should select. We use the column with maximum number of selected cells. labels_per_column, _ = reduce_sum(torch.as_tensor(labels, dtype=torch.float32, device=labels.device), col_index) # shape of labels_per_column is (batch_size, max_num_cols). It contains the number of label ids for every column, for every example column_label = torch.argmax(labels_per_column, dim=-1) # shape (batch_size,) # Check if there are no selected cells in the column. In that case the model # should predict the special column id 0, which means "select nothing". no_cell_selected = torch.eq( torch.max(labels_per_column, dim=-1)[0], 0 ) # no_cell_selected is of shape (batch_size,) and equals True # if an example of the batch has no cells selected (i.e. if there are no labels set to 1 for that example) column_label = torch.where( no_cell_selected.view(column_label.size()), torch.zeros_like(column_label), column_label ) column_dist = torch.distributions.Categorical(logits=column_logits) # shape (batch_size, max_num_cols) column_loss_per_example = -column_dist.log_prob(column_label) # Part 2: cell loss # Reduce the labels and logits to per-cell from per-token. # logits_per_cell: shape (batch_size, max_num_rows*max_num_cols) i.e. (batch_size, 64*32) logits_per_cell, _ = reduce_mean(token_logits, cell_index) # labels_per_cell: shape (batch_size, 64*32), indicating whether each cell should be selected (1) or not (0) labels_per_cell, labels_index = reduce_max( torch.as_tensor(labels, dtype=torch.long, device=labels.device), cell_index ) # Mask for the selected column. 
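    # The cell loss below is only accumulated for cells inside the supervised column
    # (column_label); cells in all other columns are masked out via column_mask.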
# column_id_for_cells: shape (batch_size, 64*32), indicating to which column each cell belongs column_id_for_cells = cell_index.project_inner(labels_index).indices # column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column to be selected column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(column_label, dim=-1)), dtype=torch.float32, device=cell_mask.device, ) # Compute the log-likelihood for cells, but only for the selected column. cell_dist = torch.distributions.Bernoulli(logits=logits_per_cell) # shape (batch_size, 64*32) cell_log_prob = cell_dist.log_prob(labels_per_cell.type(torch.float32)) # shape(batch_size, 64*32) cell_loss = -torch.sum(cell_log_prob * column_mask * cell_mask, dim=1) # We need to normalize the loss by the number of cells in the column. cell_loss /= torch.sum(column_mask * cell_mask, dim=1) + EPSILON_ZERO_DIVISION selection_loss_per_example = column_loss_per_example selection_loss_per_example += torch.where( no_cell_selected.view(selection_loss_per_example.size()), torch.zeros_like(selection_loss_per_example), cell_loss, ) # Set the probs outside the selected column (selected by the *model*) # to 0. This ensures backwards compatibility with models that select # cells from multiple columns. selected_column_id = torch.as_tensor( torch.argmax(column_logits, dim=-1), dtype=torch.long, device=column_logits.device ) # shape (batch_size,) # selected_column_mask: shape (batch_size, 64*32), equal to 1 if cell belongs to column selected by the model selected_column_mask = torch.as_tensor( torch.eq(column_id_for_cells, torch.unsqueeze(selected_column_id, dim=-1)), dtype=torch.float32, device=selected_column_id.device, ) # Never select cells with the special column id 0. selected_column_mask = torch.where( torch.eq(column_id_for_cells, 0).view(selected_column_mask.size()), torch.zeros_like(selected_column_mask), selected_column_mask, ) new_logits_per_cell = logits_per_cell + CLOSE_ENOUGH_TO_LOG_ZERO * (1.0 - cell_mask * selected_column_mask) logits = gather(new_logits_per_cell, cell_index) return selection_loss_per_example, logits def compute_token_logits(sequence_output, temperature, output_weights, output_bias): """ Computes logits per token Args: sequence_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Also known as last_hidden_state. Sequence of hidden-states at the output of the last layer of the model. temperature (:obj:`float`): Temperature for the Bernoulli distribution. output_weights (:obj:`torch.FloatTensor` of shape :obj:`(hidden_size,)`): Weights of the linear layer for cell selection. output_bias (:obj:`torch.FloatTensor` of shape :obj:`()`): Bias of the linear layer for cell selection Returns: logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`): Logits per token. """ logits = (torch.einsum("bsj,j->bs", sequence_output, output_weights) + output_bias) / temperature return logits def _calculate_aggregate_mask(answer, pooled_output, cell_selection_preference, labels, aggregation_classifier): """ Finds examples where the model should select cells with no aggregation. Returns a mask that determines for which examples should the model select answers directly from the table, without any aggregation function. If the answer is a piece of text the case is unambiguous as aggregation functions only apply to numbers. If the answer is a number but does not appear in the table then we must use some aggregation case. 
    The ambiguous case is when the answer is a number that also appears in the table. In this case we use the
    aggregation function probabilities predicted by the model to decide whether to select or aggregate. The threshold
    for this is the hyperparameter `cell_selection_preference`.

    Args:
        answer (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, )`):
            Answer for every example in the batch. Nan if there is no scalar answer.
        pooled_output (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, hidden_size)`):
            Output of the pooler (BertPooler) on top of the encoder layer.
        cell_selection_preference (:obj:`float`):
            Preference for cell selection in ambiguous cases.
        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Labels per token.
        aggregation_classifier (:obj:`torch.nn.Linear`):
            Aggregation head.

    Returns:
        aggregate_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): A mask set to 1 for examples that
        should use aggregation functions.
    """
    # torch.FloatTensor(batch_size,)
    aggregate_mask_init = torch.logical_not(torch.isnan(answer)).type(torch.FloatTensor).to(answer.device)
    logits_aggregation = aggregation_classifier(pooled_output)
    dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation)
    # Index 0 corresponds to "no aggregation".
    aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1)
    # Cell selection examples according to current model.
    is_pred_cell_selection = aggregation_ops_total_mass <= cell_selection_preference
    # Examples with non-empty cell selection supervision.
    is_cell_supervision_available = torch.sum(labels, dim=1) > 0
    # torch.where is not equivalent to tf.where (in tensorflow 1),
    # hence the added .view on the condition to match the shape of the first tensor
    aggregate_mask = torch.where(
        torch.logical_and(is_pred_cell_selection, is_cell_supervision_available).view(aggregate_mask_init.size()),
        torch.zeros_like(aggregate_mask_init, dtype=torch.float32),
        aggregate_mask_init,
    )
    aggregate_mask = aggregate_mask.detach()

    return aggregate_mask


def _calculate_aggregation_loss_known(
    logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels
):
    """
    Calculates aggregation loss when its type is known during training.

    In the weakly supervised setting, the only known information is that for cell selection examples, "no
    aggregation" should be predicted. For other examples (those that require aggregation), no loss is accumulated.
    In the setting where the aggregation type is always known, a standard cross-entropy loss is accumulated for all
    examples.

    Args:
        logits_aggregation (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        aggregate_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, )`):
            A mask set to 1 for examples that should use aggregation functions.
        aggregation_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, )`):
            Aggregation function id for every example in the batch.
        use_answer_as_supervision (:obj:`bool`, `optional`):
            Whether to use the answer as the only supervision for aggregation examples.
        num_aggregation_labels (:obj:`int`, `optional`, defaults to 0):
            The number of aggregation operators to predict.

    Returns:
        aggregation_loss_known (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): Aggregation loss (when its
        type is known during training) per example.
    """
    if use_answer_as_supervision:
        # Prepare "no aggregation" targets for cell selection examples.
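        # Index 0 of the aggregation head stands for "no aggregation", hence the all-zero targets.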
target_aggregation = torch.zeros_like(aggregate_mask, dtype=torch.long) else: # Use aggregation supervision as the target. target_aggregation = aggregation_labels one_hot_labels = torch.nn.functional.one_hot(target_aggregation, num_classes=num_aggregation_labels).type( torch.float32 ) log_probs = torch.nn.functional.log_softmax(logits_aggregation, dim=-1) # torch.FloatTensor[batch_size] per_example_aggregation_intermediate = -torch.sum(one_hot_labels * log_probs, dim=-1) if use_answer_as_supervision: # Accumulate loss only for examples requiring cell selection # (no aggregation). return per_example_aggregation_intermediate * (1 - aggregate_mask) else: return per_example_aggregation_intermediate def _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask): """ Calculates aggregation loss in the case of answer supervision. Args: logits_aggregation (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, )`): A mask set to 1 for examples that should use aggregation functions Returns: aggregation_loss_unknown (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): Aggregation loss (in case of answer supervision) per example. """ dist_aggregation = torch.distributions.categorical.Categorical(logits=logits_aggregation) # Index 0 correponds to "no aggregation". aggregation_ops_total_mass = torch.sum(dist_aggregation.probs[:, 1:], dim=1) # Predict some aggregation in case of an answer that needs aggregation. # This increases the probability of all aggregation functions, in a way # similar to MML, but without considering whether the function gives the # correct answer. return -torch.log(aggregation_ops_total_mass) * aggregate_mask def _calculate_aggregation_loss( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels, aggregation_loss_weight, ): """ Calculates the aggregation loss per example. Args: logits_aggregation (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_aggregation_labels)`): Logits per aggregation operation. aggregate_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, )`): A mask set to 1 for examples that should use aggregation functions. aggregation_labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, )`): Aggregation function id for every example in the batch. use_answer_as_supervision (:obj:`bool`, `optional`): Whether to use the answer as the only supervision for aggregation examples. num_aggregation_labels (:obj:`int`, `optional`, defaults to 0): The number of aggregation operators to predict. aggregation_loss_weight (:obj:`float`, `optional`, defaults to 1.0): Importance weight for the aggregation loss. Returns: aggregation_loss (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): Aggregation loss per example. """ per_example_aggregation_loss = _calculate_aggregation_loss_known( logits_aggregation, aggregate_mask, aggregation_labels, use_answer_as_supervision, num_aggregation_labels ) if use_answer_as_supervision: # Add aggregation loss for numeric answers that need aggregation. per_example_aggregation_loss += _calculate_aggregation_loss_unknown(logits_aggregation, aggregate_mask) return aggregation_loss_weight * per_example_aggregation_loss def _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ): """ Calculates the expected result given cell and aggregation probabilities. 
    Args:
        dist_per_cell (:obj:`torch.distributions.Bernoulli`):
            Cell selection distribution for each cell.
        numeric_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`):
            Numeric values of every token. Nan for tokens which are not numeric values.
        numeric_values_scale (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`):
            Scale of the numeric values of every token.
        input_mask_float (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`):
            Mask for the table, without question tokens and table headers.
        logits_aggregation (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_aggregation_labels)`):
            Logits per aggregation operation.
        config (:class:`~transformers.TapasConfig`):
            Model configuration class with all the hyperparameters of the model.

    Returns:
        expected_result (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): The expected result per example.
    """
    if config.use_gumbel_for_cells:
        gumbel_dist = torch.distributions.RelaxedBernoulli(
            # The token logits were already divided by the temperature and used for
            # computing cell selection errors, so we need to multiply by it again here
            temperature=config.temperature,
            logits=dist_per_cell.logits * config.temperature,
        )
        scaled_probability_per_cell = gumbel_dist.sample()
    else:
        scaled_probability_per_cell = dist_per_cell.probs

    # <float32>[batch_size, seq_length]
    scaled_probability_per_cell = (scaled_probability_per_cell / numeric_values_scale) * input_mask_float
    count_result = torch.sum(scaled_probability_per_cell, dim=1)
    numeric_values_masked = torch.where(
        torch.isnan(numeric_values), torch.zeros_like(numeric_values), numeric_values
    )  # Mask non-numeric table values to zero.
    sum_result = torch.sum(scaled_probability_per_cell * numeric_values_masked, dim=1)
    avg_approximation = config.average_approximation_function
    if avg_approximation == AverageApproximationFunction.RATIO:
        average_result = sum_result / (count_result + EPSILON_ZERO_DIVISION)
    elif avg_approximation == AverageApproximationFunction.FIRST_ORDER:
        # The sum of all probabilities except those that correspond to other cells.
        # Ex here stands for expectation: more explicitly, the expectation of the sum of N-1 Bernoulli random variables plus
        # the constant 1, which is computed by adding all N expected values and subtracting the extra one. It corresponds to X_c
        # in Appendix D of the original TAPAS paper, which is trying to approximate the average of a random set.
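        # Concretely, for cell j: ex_j = 1 + sum_i p_i - p_j, and the average is approximated
        # as sum_j (v_j * p_j / ex_j).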
ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell / ex, dim=1) elif avg_approximation == AverageApproximationFunction.SECOND_ORDER: # The sum of all probabilities except that correspond to other cells ex = torch.sum(scaled_probability_per_cell, dim=1, keepdim=True) - scaled_probability_per_cell + 1 pointwise_var = scaled_probability_per_cell * (1 - scaled_probability_per_cell) var = torch.sum(pointwise_var, dim=1, keepdim=True) - pointwise_var multiplier = (var / torch.square(ex) + 1) / ex average_result = torch.sum(numeric_values_masked * scaled_probability_per_cell * multiplier, dim=1) else: raise ValueError(f"Invalid average_approximation_function: {config.average_approximation_function}") if config.use_gumbel_for_aggregation: gumbel_dist = torch.distributions.RelaxedOneHotCategorical( config.aggregation_temperature, logits=logits_aggregation[:, 1:] ) # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = gumbel_dist.sample() else: # <float32>[batch_size, num_aggregation_labels - 1] aggregation_op_only_probs = torch.nn.functional.softmax( logits_aggregation[:, 1:] / config.aggregation_temperature, dim=-1 ) all_results = torch.cat( [ torch.unsqueeze(sum_result, dim=1), torch.unsqueeze(average_result, dim=1), torch.unsqueeze(count_result, dim=1), ], dim=1, ) expected_result = torch.sum(all_results * aggregation_op_only_probs, dim=1) return expected_result # PyTorch does not currently support Huber loss with custom delta so we define it ourself def huber_loss(input, target, delta: float = 1.0): errors = torch.abs(input - target) # shape (batch_size,) return torch.where(errors < delta, 0.5 * errors ** 2, errors * delta - (0.5 * delta ** 2)) def _calculate_regression_loss( answer, aggregate_mask, dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config, ): """ Calculates the regression loss per example. Args: answer (:obj: `torch.FloatTensor` of shape :obj:`(batch_size,)`): Answer for every example in the batch. Nan if there is no scalar answer. aggregate_mask (:obj: `torch.FloatTensor` of shape :obj:`(batch_size,)`): A mask set to 1 for examples that should use aggregation functions. dist_per_cell (:obj:`torch.distributions.Bernoulli`): Cell selection distribution for each cell. numeric_values (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`): Numeric values of every token. Nan for tokens which are not numeric values. numeric_values_scale (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`): Scale of the numeric values of every token. input_mask_float (:obj: `torch.FloatTensor` of shape :obj:`(batch_size, seq_length)`): Mask for the table, without question tokens and table headers. logits_aggregation (:obj: `torch.FloatTensor` of shape :obj:`(batch_size, num_aggregation_labels)`): Logits per aggregation operation. config (:class:`~transformers.TapasConfig`): Model configuration class with all the parameters of the model Returns: per_example_answer_loss_scaled (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): Scales answer loss for each example in the batch. large_answer_loss_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size,)`): A mask which is 1 for examples for which their answer loss is larger than the answer_loss_cutoff. 
""" # float32 (batch_size,) expected_result = _calculate_expected_result( dist_per_cell, numeric_values, numeric_values_scale, input_mask_float, logits_aggregation, config ) # float32 (batch_size,) answer_masked = torch.where(torch.isnan(answer), torch.zeros_like(answer), answer) if config.use_normalized_answer_loss: normalizer = (torch.max(torch.abs(expected_result), torch.abs(answer_masked)) + EPSILON_ZERO_DIVISION).detach() normalized_answer_masked = answer_masked / normalizer normalized_expected_result = expected_result / normalizer per_example_answer_loss = huber_loss( normalized_expected_result * aggregate_mask, normalized_answer_masked * aggregate_mask ) else: per_example_answer_loss = huber_loss( expected_result * aggregate_mask, answer_masked * aggregate_mask, delta=config.huber_loss_delta ) if config.answer_loss_cutoff is None: large_answer_loss_mask = torch.ones_like(per_example_answer_loss, dtype=torch.float32) else: large_answer_loss_mask = torch.where( per_example_answer_loss > config.answer_loss_cutoff, torch.zeros_like(per_example_answer_loss, dtype=torch.float32), torch.ones_like(per_example_answer_loss, dtype=torch.float32), ) per_example_answer_loss_scaled = config.answer_loss_importance * (per_example_answer_loss * aggregate_mask) return per_example_answer_loss_scaled, large_answer_loss_mask
AdaMix/src/transformers/models/tapas/modeling_tapas.py/0
{ "file_path": "AdaMix/src/transformers/models/tapas/modeling_tapas.py", "repo_id": "AdaMix", "token_count": 44590 }
63
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert OpenAI GPT checkpoint.""" import argparse import json import numpy import torch from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES from transformers.utils import logging logging.set_verbosity_info() def convert_xlm_checkpoint_to_pytorch(xlm_checkpoint_path, pytorch_dump_folder_path): # Load checkpoint chkpt = torch.load(xlm_checkpoint_path, map_location="cpu") state_dict = chkpt["model"] # We have the base model one level deeper than the original XLM repository two_levels_state_dict = {} for k, v in state_dict.items(): if "pred_layer" in k: two_levels_state_dict[k] = v else: two_levels_state_dict["transformer." + k] = v config = chkpt["params"] config = dict((n, v) for n, v in config.items() if not isinstance(v, (torch.FloatTensor, numpy.ndarray))) vocab = chkpt["dico_word2id"] vocab = dict((s + "</w>" if s.find("@@") == -1 and i > 13 else s.replace("@@", ""), i) for s, i in vocab.items()) # Save pytorch-model pytorch_weights_dump_path = pytorch_dump_folder_path + "/" + WEIGHTS_NAME pytorch_config_dump_path = pytorch_dump_folder_path + "/" + CONFIG_NAME pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["vocab_file"] print("Save PyTorch model to {}".format(pytorch_weights_dump_path)) torch.save(two_levels_state_dict, pytorch_weights_dump_path) print("Save configuration file to {}".format(pytorch_config_dump_path)) with open(pytorch_config_dump_path, "w", encoding="utf-8") as f: f.write(json.dumps(config, indent=2) + "\n") print("Save vocab file to {}".format(pytorch_config_dump_path)) with open(pytorch_vocab_dump_path, "w", encoding="utf-8") as f: f.write(json.dumps(vocab, indent=2) + "\n") if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--xlm_checkpoint_path", default=None, type=str, required=True, help="Path the official PyTorch dump." ) parser.add_argument( "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) args = parser.parse_args() convert_xlm_checkpoint_to_pytorch(args.xlm_checkpoint_path, args.pytorch_dump_folder_path)
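# Example invocation (a sketch; the paths below are placeholders):
#
#     python convert_xlm_original_pytorch_checkpoint_to_pytorch.py \
#         --xlm_checkpoint_path /path/to/xlm_checkpoint.pth \
#         --pytorch_dump_folder_path /path/to/output_dir
#
# The script writes the converted weights (pytorch_model.bin), the configuration (config.json)
# and the vocabulary file into the output folder, as implemented above.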
AdaMix/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/xlm/convert_xlm_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "AdaMix", "token_count": 1116 }
64
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch XLNet model. """ import warnings from dataclasses import dataclass from typing import List, Optional, Tuple import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from torch.nn import functional as F from ...activations import ACT2FN from ...file_utils import ( ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_utils import ( PoolerAnswerClass, PoolerEndLogits, PoolerStartLogits, PreTrainedModel, SequenceSummary, apply_chunking_to_forward, ) from ...utils import logging from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "xlnet-base-cased" _CONFIG_FOR_DOC = "XLNetConfig" _TOKENIZER_FOR_DOC = "XLNetTokenizer" XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "xlnet-base-cased", "xlnet-large-cased", # See all XLNet models at https://huggingface.co/models?filter=xlnet ] def build_tf_xlnet_to_pytorch_map(model, config, tf_weights=None): """ A map of modules from TF to PyTorch. I use a map to keep the PyTorch model as identical to the original PyTorch model as possible. 
""" tf_to_pt_map = {} if hasattr(model, "transformer"): if hasattr(model, "lm_loss"): # We will load also the output bias tf_to_pt_map["model/lm_loss/bias"] = model.lm_loss.bias if hasattr(model, "sequence_summary") and "model/sequnece_summary/summary/kernel" in tf_weights: # We will load also the sequence summary tf_to_pt_map["model/sequnece_summary/summary/kernel"] = model.sequence_summary.summary.weight tf_to_pt_map["model/sequnece_summary/summary/bias"] = model.sequence_summary.summary.bias if ( hasattr(model, "logits_proj") and config.finetuning_task is not None and "model/regression_{}/logit/kernel".format(config.finetuning_task) in tf_weights ): tf_to_pt_map["model/regression_{}/logit/kernel".format(config.finetuning_task)] = model.logits_proj.weight tf_to_pt_map["model/regression_{}/logit/bias".format(config.finetuning_task)] = model.logits_proj.bias # Now load the rest of the transformer model = model.transformer # Embeddings and output tf_to_pt_map.update( { "model/transformer/word_embedding/lookup_table": model.word_embedding.weight, "model/transformer/mask_emb/mask_emb": model.mask_emb, } ) # Transformer blocks for i, b in enumerate(model.layer): layer_str = "model/transformer/layer_%d/" % i tf_to_pt_map.update( { layer_str + "rel_attn/LayerNorm/gamma": b.rel_attn.layer_norm.weight, layer_str + "rel_attn/LayerNorm/beta": b.rel_attn.layer_norm.bias, layer_str + "rel_attn/o/kernel": b.rel_attn.o, layer_str + "rel_attn/q/kernel": b.rel_attn.q, layer_str + "rel_attn/k/kernel": b.rel_attn.k, layer_str + "rel_attn/r/kernel": b.rel_attn.r, layer_str + "rel_attn/v/kernel": b.rel_attn.v, layer_str + "ff/LayerNorm/gamma": b.ff.layer_norm.weight, layer_str + "ff/LayerNorm/beta": b.ff.layer_norm.bias, layer_str + "ff/layer_1/kernel": b.ff.layer_1.weight, layer_str + "ff/layer_1/bias": b.ff.layer_1.bias, layer_str + "ff/layer_2/kernel": b.ff.layer_2.weight, layer_str + "ff/layer_2/bias": b.ff.layer_2.bias, } ) # Relative positioning biases if config.untie_r: r_r_list = [] r_w_list = [] r_s_list = [] seg_embed_list = [] for b in model.layer: r_r_list.append(b.rel_attn.r_r_bias) r_w_list.append(b.rel_attn.r_w_bias) r_s_list.append(b.rel_attn.r_s_bias) seg_embed_list.append(b.rel_attn.seg_embed) else: r_r_list = [model.r_r_bias] r_w_list = [model.r_w_bias] r_s_list = [model.r_s_bias] seg_embed_list = [model.seg_embed] tf_to_pt_map.update( { "model/transformer/r_r_bias": r_r_list, "model/transformer/r_w_bias": r_w_list, "model/transformer/r_s_bias": r_s_list, "model/transformer/seg_embed": seg_embed_list, } ) return tf_to_pt_map def load_tf_weights_in_xlnet(model, config, tf_path): """Load tf checkpoints in a pytorch model""" try: import numpy as np import tensorflow as tf except ImportError: logger.error( "Loading a TensorFlow models in PyTorch, requires TensorFlow to be installed. Please see " "https://www.tensorflow.org/install/ for installation instructions." 
) raise # Load weights from TF model init_vars = tf.train.list_variables(tf_path) tf_weights = {} for name, shape in init_vars: logger.info("Loading TF weight {} with shape {}".format(name, shape)) array = tf.train.load_variable(tf_path, name) tf_weights[name] = array # Build TF to PyTorch weights loading map tf_to_pt_map = build_tf_xlnet_to_pytorch_map(model, config, tf_weights) for name, pointer in tf_to_pt_map.items(): logger.info("Importing {}".format(name)) if name not in tf_weights: logger.info("{} not in tf pre-trained weights, skipping".format(name)) continue array = tf_weights[name] # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v # which are not required for using pretrained model if "kernel" in name and ("ff" in name or "summary" in name or "logit" in name): logger.info("Transposing") array = np.transpose(array) if isinstance(pointer, list): # Here we will split the TF weights assert ( len(pointer) == array.shape[0] ), f"Pointer length {len(pointer)} and array length {array.shape[0]} mismatched" for i, p_i in enumerate(pointer): arr_i = array[i, ...] try: assert ( p_i.shape == arr_i.shape ), f"Pointer shape {p_i.shape} and array shape {arr_i.shape} mismatched" except AssertionError as e: e.args += (p_i.shape, arr_i.shape) raise logger.info("Initialize PyTorch weight {} for layer {}".format(name, i)) p_i.data = torch.from_numpy(arr_i) else: try: assert ( pointer.shape == array.shape ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (pointer.shape, array.shape) raise logger.info("Initialize PyTorch weight {}".format(name)) pointer.data = torch.from_numpy(array) tf_weights.pop(name, None) tf_weights.pop(name + "/Adam", None) tf_weights.pop(name + "/Adam_1", None) logger.info("Weights not copied to PyTorch model: {}".format(", ".join(tf_weights.keys()))) return model class XLNetRelativeAttention(nn.Module): def __init__(self, config): super().__init__() if config.d_model % config.n_head != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.d_model, config.n_head) ) self.n_head = config.n_head self.d_head = config.d_head self.d_model = config.d_model self.scale = 1 / (config.d_head ** 0.5) self.q = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.k = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.v = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.o = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r = nn.Parameter(torch.FloatTensor(config.d_model, self.n_head, self.d_head)) self.r_r_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_s_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.r_w_bias = nn.Parameter(torch.FloatTensor(self.n_head, self.d_head)) self.seg_embed = nn.Parameter(torch.FloatTensor(2, self.n_head, self.d_head)) self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.dropout) def prune_heads(self, heads): raise NotImplementedError @staticmethod def rel_shift(x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = x.shape x = x.reshape(x_size[1], x_size[0], x_size[2], x_size[3]) x = x[1:, ...] 
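        # This reshape / drop-first-row / reshape sequence is the Transformer-XL "relative shift"
        # trick: it realigns the scores so that each query position indexes its keys by relative
        # distance rather than by absolute position.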
x = x.reshape(x_size[0], x_size[1] - 1, x_size[2], x_size[3]) # x = x[:, 0:klen, :, :] x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long)) return x @staticmethod def rel_shift_bnij(x, klen=-1): x_size = x.shape x = x.reshape(x_size[0], x_size[1], x_size[3], x_size[2]) x = x[:, :, 1:, :] x = x.reshape(x_size[0], x_size[1], x_size[2], x_size[3] - 1) # Note: the tensor-slice form was faster in my testing than torch.index_select # However, tracing doesn't like the nature of the slice, and if klen changes # during the run then it'll fail, whereas index_select will be fine. x = torch.index_select(x, 3, torch.arange(klen, device=x.device, dtype=torch.long)) # x = x[:, :, :, :klen] return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat=None, attn_mask=None, head_mask=None, output_attentions=False, ): """Core relative positional attention operations.""" # content based attention score ac = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_w_bias, k_head_h) # position based attention score bd = torch.einsum("ibnd,jbnd->bnij", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift_bnij(bd, klen=ac.shape[3]) # segment based attention score if seg_mat is None: ef = 0 else: ef = torch.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = torch.einsum("ijbs,ibns->bnij", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == torch.float16: attn_score = attn_score - 65500 * torch.einsum("ijbn->bnij", attn_mask) else: attn_score = attn_score - 1e30 * torch.einsum("ijbn->bnij", attn_mask) # attention probability attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropout(attn_prob) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * torch.einsum("ijbn->bnij", head_mask) # attention output attn_vec = torch.einsum("bnij,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, torch.einsum("bnij->ijbn", attn_prob) return attn_vec def post_attention(self, h, attn_vec, residual=True): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = torch.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def forward( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): if g is not None: # Two-stream attention with relative positional encoding. 
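            # h is the content stream (a position may attend to its own content); g is the query
            # stream used for prediction targets, which sees the target position but not its content.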
# content based attention score if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content-based key head k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = torch.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h) # g-stream # query-stream query head q_head_g = torch.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = torch.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = torch.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_g, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and mems.dim() > 1: cat = torch.cat([mems, h], dim=0) else: cat = h # content heads q_head_h = torch.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = torch.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads # type casting for fp16 support k_head_r = torch.einsum("ibh,hnd->ibnd", r.type(self.r.dtype), self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat=seg_mat, attn_mask=attn_mask_h, head_mask=head_mask, output_attentions=output_attentions, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class XLNetFeedForward(nn.Module): def __init__(self, config): super().__init__() self.layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps) self.layer_1 = nn.Linear(config.d_model, config.d_inner) self.layer_2 = nn.Linear(config.d_inner, config.d_model) self.dropout = nn.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = ACT2FN[config.ff_activation] else: self.activation_function = config.ff_activation def forward(self, inp): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output) output = self.layer_2(output) output = self.dropout(output) output = self.layer_norm(output + inp) return output class XLNetLayer(nn.Module): def __init__(self, config): super().__init__() self.rel_attn = XLNetRelativeAttention(config) self.ff = XLNetFeedForward(config) self.dropout = nn.Dropout(config.dropout) self.chunk_size_feed_forward = config.chunk_size_feed_forward self.seq_len_dim = 1 def forward( self, output_h, output_g, 
attn_mask_h, attn_mask_g, r, seg_mat, mems=None, target_mapping=None, head_mask=None, output_attentions=False, ): outputs = self.rel_attn( output_h, output_g, attn_mask_h, attn_mask_g, r, seg_mat, mems=mems, target_mapping=target_mapping, head_mask=head_mask, output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_g is not None: output_g = apply_chunking_to_forward( self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_g ) output_h = apply_chunking_to_forward(self.ff_chunk, self.chunk_size_feed_forward, self.seq_len_dim, output_h) outputs = (output_h, output_g) + outputs[2:] # Add again attentions if there are there return outputs def ff_chunk(self, output_x): output_x = self.ff(output_x) return output_x class XLNetPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLNetConfig load_tf_weights = load_tf_weights_in_xlnet base_model_prefix = "transformer" def _init_weights(self, module): """Initialize the weights.""" if isinstance(module, nn.Linear): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.bias is not None: module.bias.data.zero_() elif isinstance(module, nn.Embedding): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if module.padding_idx is not None: module.weight.data[module.padding_idx].zero_() elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) elif isinstance(module, XLNetRelativeAttention): for param in [ module.q, module.k, module.v, module.o, module.r, module.r_r_bias, module.r_s_bias, module.r_w_bias, module.seg_embed, ]: param.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, XLNetModel): module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range) @dataclass class XLNetModelOutput(ModelOutput): """ Output type of :class:`~transformers.XLNetModel`. Args: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. 
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: torch.FloatTensor mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class XLNetLMHeadModelOutput(ModelOutput): """ Output type of :class:`~transformers.XLNetLMHeadModel`. Args: loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided) Language modeling loss (for next-token prediction). logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class XLNetForSequenceClassificationOutput(ModelOutput): """ Output type of :class:`~transformers.XLNetForSequenceClassification`. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. 
attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class XLNetForTokenClassificationOutput(ModelOutput): """ Output type of :class:`~transformers.XLNetForTokenClassificationOutput`. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class XLNetForMultipleChoiceOutput(ModelOutput): """ Output type of :class:`~transformers.XLNetForMultipleChoice`. Args: loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. 
Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class XLNetForQuestionAnsweringSimpleOutput(ModelOutput): """ Output type of :class:`~transformers.XLNetForQuestionAnsweringSimple`. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-end scores (before SoftMax). mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_logits: torch.FloatTensor = None end_logits: torch.FloatTensor = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None @dataclass class XLNetForQuestionAnsweringOutput(ModelOutput): """ Output type of :class:`~transformers.XLNetForQuestionAnswering`. Args: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned if both :obj:`start_positions` and :obj:`end_positions` are provided): Classification loss as the sum of start token, end token (and is_impossible if provided) classification losses. start_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the top config.start_n_top start token possibilities (beam-search). 
start_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Indices for the top config.start_n_top start token possibilities (beam-search). end_top_log_probs (``torch.FloatTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). end_top_index (``torch.LongTensor`` of shape ``(batch_size, config.start_n_top * config.end_n_top)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Indices for the top ``config.start_n_top * config.end_n_top`` end token possibilities (beam-search). cls_logits (``torch.FloatTensor`` of shape ``(batch_size,)``, `optional`, returned if ``start_positions`` or ``end_positions`` is not provided): Log probabilities for the ``is_impossible`` label of the answers. mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[torch.FloatTensor] = None start_top_log_probs: Optional[torch.FloatTensor] = None start_top_index: Optional[torch.LongTensor] = None end_top_log_probs: Optional[torch.FloatTensor] = None end_top_index: Optional[torch.LongTensor] = None cls_logits: Optional[torch.FloatTensor] = None mems: Optional[List[torch.FloatTensor]] = None hidden_states: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[torch.FloatTensor]] = None XLNET_START_DOCSTRING = r""" This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__ subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. 
""" XLNET_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`{0}`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.XLNetTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ mems (:obj:`List[torch.FloatTensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states (see :obj:`mems` output below) . Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. :obj:`use_mems` has to be set to :obj:`True` to make use of :obj:`mems`. perm_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`): Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``: - if ``perm_mask[k, i, j] = 0``, i attend to j in batch k; - if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`): Mask to indicate the output tokens to use. If ``target_mapping[k, i, j] = 1``, the i-th predict in batch k is on the j-th token. Only used during pretraining for partial prediction or for sequential decoding (generation). token_type_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ input_mask (:obj:`torch.FloatTensor` of shape :obj:`{0}`, `optional`): Mask to avoid performing attention on padding token indices. Negative of :obj:`attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in ``[0, 1]``: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only uses one of :obj:`input_mask` and :obj:`attention_mask`. head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. 
See ``attentions`` under returned tensors for more detail. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. """ @add_start_docstrings( "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.", XLNET_START_DOCSTRING, ) class XLNetModel(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.word_embedding = nn.Embedding(config.vocab_size, config.d_model) self.mask_emb = nn.Parameter(torch.FloatTensor(1, 1, config.d_model)) self.layer = nn.ModuleList([XLNetLayer(config) for _ in range(config.n_layer)]) self.dropout = nn.Dropout(config.dropout) self.init_weights() def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, new_embeddings): self.word_embedding = new_embeddings def _prune_heads(self, heads_to_prune): raise NotImplementedError def create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. Args: qlen: Sequence length mlen: Mask length :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ attn_mask = torch.ones([qlen, qlen]) mask_up = torch.triu(attn_mask, diagonal=1) attn_mask_pad = torch.zeros([qlen, mlen]) ret = torch.cat([attn_mask_pad, mask_up], dim=1) if self.same_length: mask_lo = torch.tril(attn_mask, diagonal=-1) ret = torch.cat([ret[:, :qlen] + mask_lo, ret[:, qlen:]], dim=1) ret = ret.to(self.device) return ret def cache_mem(self, curr_out, prev_mem): # cache hidden states into memory. if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if self.mem_len is None or self.mem_len == 0: # If :obj:`use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time # and returns all of the past and current hidden states. cutoff = 0 else: # If :obj:`use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden # states. This is the preferred setting for training and long-form generation. cutoff = -self.mem_len if prev_mem is None: # if :obj:`use_mems` is active and `mem_len` is defined, the model new_mem = curr_out[cutoff:] else: new_mem = torch.cat([prev_mem, curr_out], dim=0)[cutoff:] return new_mem.detach() @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = torch.einsum("i,d->id", pos_seq, inv_freq) pos_emb = torch.cat([torch.sin(sinusoid_inp), torch.cos(sinusoid_inp)], dim=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = pos_emb.expand(-1, bsz, -1) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): # create relative positional encoding. 
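        # Descriptive note on the code below: the relative position ids run from `klen` down to
        # `-qlen`, i.e. from the oldest cached memory slot back past the current segment. Each id is
        # mapped to a sinusoidal embedding with inverse frequencies 1 / 10000^(2i / d_model)
        # (sin and cos halves concatenated). When `bi_data` is set, a forward and a backward position
        # sequence are built and concatenated along the batch dimension, so forward and backward
        # encodings are both available (one per half of the batch when a batch size is given).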
freq_seq = torch.arange(0, self.d_model, 2.0, dtype=torch.float) inv_freq = 1 / torch.pow(10000, (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError("Unknown `attn_type` {}.".format(self.attn_type)) if self.bi_data: fwd_pos_seq = torch.arange(beg, end, -1.0, dtype=torch.float) bwd_pos_seq = torch.arange(-beg, -end, 1.0, dtype=torch.float) if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) bwd_pos_seq = bwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) if bsz is not None: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = torch.cat([fwd_pos_emb, bwd_pos_emb], dim=1) else: fwd_pos_seq = torch.arange(beg, end, -1.0) if self.clamp_len > 0: fwd_pos_seq = fwd_pos_seq.clamp(-self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) pos_emb = pos_emb.to(self.device) return pos_emb @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetModelOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, # delete after depreciation warning is removed ): output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if "use_cache" in kwargs: warnings.warn( "The `use_cache` argument is deprecated and will be removed in a future version, use `use_mems` instead.", FutureWarning, ) use_mems = kwargs["use_cache"] if self.training: use_mems = use_mems if use_mems is not None else self.config.use_mems_train else: use_mems = use_mems if use_mems is not None else self.config.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_ids = input_ids.transpose(0, 1).contiguous() qlen, bsz = input_ids.shape[0], input_ids.shape[1] elif inputs_embeds is not None: inputs_embeds = inputs_embeds.transpose(0, 1).contiguous() qlen, bsz = inputs_embeds.shape[0], inputs_embeds.shape[1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") token_type_ids = token_type_ids.transpose(0, 1).contiguous() if token_type_ids is not None else None input_mask = input_mask.transpose(0, 1).contiguous() if input_mask is not None else None attention_mask = attention_mask.transpose(0, 1).contiguous() if attention_mask is not None else None 
perm_mask = perm_mask.permute(1, 2, 0).contiguous() if perm_mask is not None else None target_mapping = target_mapping.permute(1, 2, 0).contiguous() if target_mapping is not None else None mlen = mems[0].shape[0] if mems is not None and mems[0] is not None else 0 klen = mlen + qlen dtype_float = self.dtype device = self.device # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError("Unsupported attention type: {}".format(self.attn_type)) # data mask: input mask & perm mask assert input_mask is None or attention_mask is None, "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." if input_mask is None and attention_mask is not None: input_mask = 1.0 - attention_mask if input_mask is not None and perm_mask is not None: data_mask = input_mask[None] + perm_mask elif input_mask is not None and perm_mask is None: data_mask = input_mask[None] elif input_mask is None and perm_mask is not None: data_mask = perm_mask else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = torch.zeros([data_mask.shape[0], mlen, bsz]).to(data_mask) data_mask = torch.cat([mems_mask, data_mask], dim=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = (attn_mask > 0).to(dtype_float) if attn_mask is not None: non_tgt_mask = -torch.eye(qlen).to(attn_mask) if mlen > 0: non_tgt_mask = torch.cat([torch.zeros([qlen, mlen]).to(attn_mask), non_tgt_mask], dim=-1) non_tgt_mask = ((attn_mask + non_tgt_mask[:, :, None, None]) > 0).to(attn_mask) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs_embeds is not None: word_emb_k = inputs_embeds else: word_emb_k = self.word_embedding(input_ids) output_h = self.dropout(word_emb_k) if target_mapping is not None: word_emb_q = self.mask_emb.expand(target_mapping.shape[0], bsz, -1) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q) else: output_g = None # Segment embedding if token_type_ids is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = torch.zeros([mlen, bsz], dtype=torch.long, device=device) cat_ids = torch.cat([mem_pad, token_type_ids], dim=0) else: cat_ids = token_type_ids # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = (token_type_ids[:, None] != cat_ids[None, :]).long() seg_mat = F.one_hot(seg_mat, num_classes=2).to(dtype_float) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = self.dropout(pos_emb) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if head_mask is not None: if head_mask.dim() == 1: head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(0).unsqueeze(0) head_mask = head_mask.expand(self.n_layer, -1, -1, -1, -1) elif head_mask.dim() == 2: head_mask = head_mask.unsqueeze(1).unsqueeze(1).unsqueeze(1) 
head_mask = head_mask.to( dtype=next(self.parameters()).dtype ) # switch to float if need + fp16 compatibility else: head_mask = [None] * self.n_layer new_mems = () if mems is None: mems = [None] * len(self.layer) attentions = [] if output_attentions else None hidden_states = [] if output_hidden_states else None for i, layer_module in enumerate(self.layer): if use_mems: # cache new mems new_mems = new_mems + (self.cache_mem(output_h, mems[i]),) if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, attn_mask_h=non_tgt_mask, attn_mask_g=attn_mask, r=pos_emb, seg_mat=seg_mat, mems=mems[i], target_mapping=target_mapping, head_mask=head_mask[i], output_attentions=output_attentions, ) output_h, output_g = outputs[:2] if output_attentions: attentions.append(outputs[2]) # Add last hidden state if output_hidden_states: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = output.permute(1, 0, 2).contiguous() if not use_mems: new_mems = None if output_hidden_states: if output_g is not None: hidden_states = tuple(h.permute(1, 0, 2).contiguous() for hs in hidden_states for h in hs) else: hidden_states = tuple(hs.permute(1, 0, 2).contiguous() for hs in hidden_states) if output_attentions: if target_mapping is not None: # when target_mapping is provided, there are 2-tuple of attentions attentions = tuple( tuple(att_stream.permute(2, 3, 0, 1).contiguous() for att_stream in t) for t in attentions ) else: attentions = tuple(t.permute(2, 3, 0, 1).contiguous() for t in attentions) if not return_dict: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return XLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) @add_start_docstrings( """ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). """, XLNET_START_DOCSTRING, ) class XLNetLMHeadModel(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.attn_type = config.attn_type self.same_length = config.same_length self.transformer = XLNetModel(config) self.lm_loss = nn.Linear(config.d_model, config.vocab_size, bias=True) self.init_weights() def get_output_embeddings(self): return self.lm_loss def set_output_embeddings(self, new_embeddings): self.lm_loss = new_embeddings def prepare_inputs_for_generation(self, input_ids, past=None, use_mems=None, **kwargs): # Add dummy token at the end (no attention on this one) effective_batch_size = input_ids.shape[0] dummy_token = torch.zeros((effective_batch_size, 1), dtype=torch.long, device=input_ids.device) # At every pass, the attention values for the new token and the two last generated tokens # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have # offset = 1; offset = 2 seems to have slightly better computation. 
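        # Layout sketch for one generation step (descriptive note): the model input is the last
        # `offset` generated tokens plus one dummy token appended at the end; `perm_mask` below hides
        # that dummy token from every query position and `target_mapping` points the single prediction
        # at it, so the returned logits correspond to the next token to generate.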
offset = 2 if past: input_ids = torch.cat([input_ids[:, -offset:], dummy_token], dim=1) else: input_ids = torch.cat([input_ids, dummy_token], dim=1) # Build permutation mask so that previous tokens don't see last token sequence_length = input_ids.shape[1] perm_mask = torch.zeros( (effective_batch_size, sequence_length, sequence_length), dtype=torch.float, device=input_ids.device ) perm_mask[:, :, -1] = 1.0 # We'll only predict the last token target_mapping = torch.zeros( (effective_batch_size, 1, sequence_length), dtype=torch.float, device=input_ids.device ) target_mapping[:, 0, -1] = 1.0 inputs = { "input_ids": input_ids, "perm_mask": perm_mask, "target_mapping": target_mapping, "use_mems": use_mems, } # if past is defined in model kwargs then use it for faster decoding if past: inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past) return inputs @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, # delete when `use_cache` is removed in XLNetModel ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, num_predict)`, `optional`): Labels for masked language modeling. :obj:`num_predict` corresponds to :obj:`target_mapping.shape[1]`. If :obj:`target_mapping` is :obj`None`, then :obj:`num_predict` corresponds to :obj:`sequence_length`. The labels should correspond to the masked input words that should be predicted and depends on :obj:`target_mapping`. Note in order to perform standard auto-regressive language modeling a `<mask>` token has to be added to the :obj:`input_ids` (see the :obj:`prepare_inputs_for_generation` function and examples below) Indices are selected in ``[-100, 0, ..., config.vocab_size]`` All labels set to ``-100`` are ignored, the loss is only computed for labels in ``[0, ..., config.vocab_size]`` Return: Examples:: >>> from transformers import XLNetTokenizer, XLNetLMHeadModel >>> import torch >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') >>> model = XLNetLMHeadModel.from_pretrained('xlnet-large-cased') >>> # We show how to setup inputs to predict a next token using a bi-directional context. >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token >>> target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping) >>> next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] >>> # The same way can the XLNetLMHeadModel be used to be trained by standard auto-regressive language modeling. 
>>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=False)).unsqueeze(0) # We will predict the masked token >>> labels = torch.tensor(tokenizer.encode("cute", add_special_tokens=False)).unsqueeze(0) >>> assert labels.shape[0] == 1, 'only one word will be predicted' >>> perm_mask = torch.zeros((1, input_ids.shape[1], input_ids.shape[1]), dtype=torch.float) >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token as is done in standard auto-regressive lm training >>> target_mapping = torch.zeros((1, 1, input_ids.shape[1]), dtype=torch.float) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=perm_mask, target_mapping=target_mapping, labels=labels) >>> loss = outputs.loss >>> next_token_logits = outputs.logits # Logits have shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) logits = self.lm_loss(transformer_outputs[0]) loss = None if labels is not None: # Flatten the tokens loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, logits.size(-1)), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetLMHeadModelOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @staticmethod def _reorder_cache(mems: List[torch.Tensor], beam_idx: torch.Tensor) -> List[torch.Tensor]: """ This function is used to re-order the :obj:`mems` cache if :meth:`~transformers.PretrainedModel.beam_search` or :meth:`~transformers.PretrainedModel.beam_sample` is called. This is required to match :obj:`mems` with the correct beam_idx at every generation step. """ return [layer_past.index_select(1, beam_idx.to(layer_past.device)) for layer_past in mems] @add_start_docstrings( """ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
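
    A minimal usage sketch (the checkpoint name, text and label below are placeholders)::

        from transformers import XLNetTokenizer, XLNetForSequenceClassification
        import torch

        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        model = XLNetForSequenceClassification.from_pretrained("xlnet-base-cased", num_labels=2)

        inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
        outputs = model(**inputs, labels=torch.tensor([1]))
        loss, logits = outputs.loss, outputs.logits  # logits: (batch_size, num_labels)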
""", XLNET_START_DOCSTRING, ) class XLNetForSequenceClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, # delete when `use_cache` is removed in XLNetModel ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) loss = None if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForSequenceClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", XLNET_START_DOCSTRING, ) class XLNetForTokenClassification(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForTokenClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, # delete when `use_cache` is removed in XLNetModel ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] logits = self.classifier(sequence_output) loss = None if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) if not return_dict: output = (logits,) + outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForTokenClassificationOutput( loss=loss, logits=logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RACE/SWAG tasks. 
""", XLNET_START_DOCSTRING, ) class XLNetForMultipleChoice(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.transformer = XLNetModel(config) self.sequence_summary = SequenceSummary(config) self.logits_proj = nn.Linear(config.d_model, 1) self.init_weights() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, token_type_ids=None, input_mask=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, head_mask=None, inputs_embeds=None, labels=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, # delete when `use_cache` is removed in XLNetModel ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices-1]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1] flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None flat_input_mask = input_mask.view(-1, input_mask.size(-1)) if input_mask is not None else None flat_inputs_embeds = ( inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1)) if inputs_embeds is not None else None ) transformer_outputs = self.transformer( flat_input_ids, token_type_ids=flat_token_type_ids, input_mask=flat_input_mask, attention_mask=flat_attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, head_mask=head_mask, inputs_embeds=flat_inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) reshaped_logits = logits.view(-1, num_choices) loss = None if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels.view(-1)) if not return_dict: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return XLNetForMultipleChoiceOutput( loss=loss, logits=reshaped_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", XLNET_START_DOCSTRING, ) class XLNetForQuestionAnsweringSimple(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.transformer = XLNetModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=XLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, ) def forward( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, # delete when `use_cache` is removed in XLNetModel ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) total_loss = None if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if not return_dict: output = (start_logits, end_logits) + outputs[1:] return ((total_loss,) + output) if total_loss is not None else output return XLNetForQuestionAnsweringSimpleOutput( loss=total_loss, start_logits=start_logits, end_logits=end_logits, mems=outputs.mems, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span 
start logits` and `span end logits`). """, XLNET_START_DOCSTRING, ) class XLNetForQuestionAnswering(XLNetPreTrainedModel): def __init__(self, config): super().__init__(config) self.start_n_top = config.start_n_top self.end_n_top = config.end_n_top self.transformer = XLNetModel(config) self.start_logits = PoolerStartLogits(config) self.end_logits = PoolerEndLogits(config) self.answer_class = PoolerAnswerClass(config) self.init_weights() @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=XLNetForQuestionAnsweringOutput, config_class=_CONFIG_FOR_DOC) def forward( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, is_impossible=None, cls_index=None, p_mask=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, # delete when `use_cache` is removed in XLNetModel ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. is_impossible (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): Labels whether a question has an answer or no answer (SQuAD 2.0) cls_index (``torch.LongTensor`` of shape ``(batch_size,)``, `optional`): Labels for position (index) of the classification token to use as input for computing plausibility of the answer. p_mask (``torch.FloatTensor`` of shape ``(batch_size, sequence_length)``, `optional`): Optional mask of tokens which can't be in answers (e.g. [CLS], [PAD], ...). 1.0 means token should be masked. 0.0 mean token is not masked. 
Returns: Example:: >>> from transformers import XLNetTokenizer, XLNetForQuestionAnswering >>> import torch >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased') >>> model = XLNetForQuestionAnswering.from_pretrained('xlnet-base-cased') >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0) # Batch size 1 >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(input_ids, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict transformer_outputs = self.transformer( input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) hidden_states = transformer_outputs[0] start_logits = self.start_logits(hidden_states, p_mask=p_mask) outputs = transformer_outputs[1:] # Keep mems, hidden states, attentions if there are in it if start_positions is not None and end_positions is not None: # If we are on multi-GPU, let's remove the dimension added by batch splitting for x in (start_positions, end_positions, cls_index, is_impossible): if x is not None and x.dim() > 1: x.squeeze_(-1) # during training, compute the end logits based on the ground truth of the start position end_logits = self.end_logits(hidden_states, start_positions=start_positions, p_mask=p_mask) loss_fct = CrossEntropyLoss() start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 if cls_index is not None and is_impossible is not None: # Predict answerability from the representation of CLS and START cls_logits = self.answer_class(hidden_states, start_positions=start_positions, cls_index=cls_index) loss_fct_cls = nn.BCEWithLogitsLoss() cls_loss = loss_fct_cls(cls_logits, is_impossible) # note(zhiliny): by default multiply the loss by 0.5 so that the scale is comparable to start_loss and end_loss total_loss += cls_loss * 0.5 if not return_dict: return (total_loss,) + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( loss=total_loss, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) else: # during inference, compute the end logits based on beam search bsz, slen, hsz = hidden_states.size() start_log_probs = F.softmax(start_logits, dim=-1) # shape (bsz, slen) start_top_log_probs, start_top_index = torch.topk( start_log_probs, self.start_n_top, dim=-1 ) # shape (bsz, start_n_top) start_top_index_exp = start_top_index.unsqueeze(-1).expand(-1, -1, hsz) # shape (bsz, start_n_top, hsz) start_states = torch.gather(hidden_states, -2, start_top_index_exp) # shape (bsz, start_n_top, hsz) start_states = start_states.unsqueeze(1).expand(-1, slen, -1, -1) # shape (bsz, slen, start_n_top, hsz) hidden_states_expanded = hidden_states.unsqueeze(2).expand_as( start_states ) # shape (bsz, slen, start_n_top, hsz) p_mask = p_mask.unsqueeze(-1) if p_mask is not None else None end_logits = self.end_logits(hidden_states_expanded, start_states=start_states, p_mask=p_mask) end_log_probs = F.softmax(end_logits, dim=1) # shape (bsz, slen, start_n_top) end_top_log_probs, end_top_index = 
torch.topk( end_log_probs, self.end_n_top, dim=1 ) # shape (bsz, end_n_top, start_n_top) end_top_log_probs = end_top_log_probs.view(-1, self.start_n_top * self.end_n_top) end_top_index = end_top_index.view(-1, self.start_n_top * self.end_n_top) start_states = torch.einsum( "blh,bl->bh", hidden_states, start_log_probs ) # get the representation of START as weighted sum of hidden states cls_logits = self.answer_class( hidden_states, start_states=start_states, cls_index=cls_index ) # Shape (batch size,): one single `cls_logits` for each sample if not return_dict: outputs = (start_top_log_probs, start_top_index, end_top_log_probs, end_top_index, cls_logits) return outputs + transformer_outputs[1:] else: return XLNetForQuestionAnsweringOutput( start_top_log_probs=start_top_log_probs, start_top_index=start_top_index, end_top_log_probs=end_top_log_probs, end_top_index=end_top_index, cls_logits=cls_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, )
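
# Illustrative sketch of the `mems` interface exposed by the models above; the checkpoint name and the
# segment texts are placeholders. Hidden states cached in `mems` are fed back in for the next segment so
# the attention window effectively spans beyond a single forward pass.
#
#     from transformers import XLNetTokenizer, XLNetModel
#
#     tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
#     model = XLNetModel.from_pretrained("xlnet-base-cased", mem_len=1024)
#
#     mems = None
#     for text in ["first segment ...", "second segment ..."]:
#         inputs = tokenizer(text, return_tensors="pt")
#         outputs = model(**inputs, mems=mems, use_mems=True)
#         mems = outputs.mems  # one cached hidden-state tensor per layer, reused on the next call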
AdaMix/src/transformers/models/xlnet/modeling_xlnet.py/0
{ "file_path": "AdaMix/src/transformers/models/xlnet/modeling_xlnet.py", "repo_id": "AdaMix", "token_count": 41251 }
65
from typing import List, Union import numpy as np from ..file_utils import add_end_docstrings from ..tokenization_utils import TruncationStrategy from ..utils import logging from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Pipeline logger = logging.get_logger(__name__) class ZeroShotClassificationArgumentHandler(ArgumentHandler): """ Handles arguments for zero-shot for text classification by turning each possible label into an NLI premise/hypothesis pair. """ def _parse_labels(self, labels): if isinstance(labels, str): labels = [label.strip() for label in labels.split(",")] return labels def __call__(self, sequences, labels, hypothesis_template): if len(labels) == 0 or len(sequences) == 0: raise ValueError("You must include at least one label and at least one sequence.") if hypothesis_template.format(labels[0]) == hypothesis_template: raise ValueError( ( 'The provided hypothesis_template "{}" was not able to be formatted with the target labels. ' "Make sure the passed template includes formatting syntax such as {{}} where the label should go." ).format(hypothesis_template) ) if isinstance(sequences, str): sequences = [sequences] labels = self._parse_labels(labels) sequence_pairs = [] for sequence in sequences: sequence_pairs.extend([[sequence, hypothesis_template.format(label)] for label in labels]) return sequence_pairs @add_end_docstrings(PIPELINE_INIT_ARGS) class ZeroShotClassificationPipeline(Pipeline): """ NLI-based zero-shot classification pipeline using a :obj:`ModelForSequenceClassification` trained on NLI (natural language inference) tasks. Any combination of sequences and labels can be passed and each combination will be posed as a premise/hypothesis pair and passed to the pretrained model. Then, the logit for `entailment` is taken as the logit for the candidate label being valid. Any NLI model can be used, but the id of the `entailment` label must be included in the model config's :attr:`~transformers.PretrainedConfig.label2id`. This NLI pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task identifier: :obj:`"zero-shot-classification"`. The models that this pipeline can use are models that have been fine-tuned on an NLI task. See the up-to-date list of available models on `huggingface.co/models <https://huggingface.co/models?search=nli>`__. """ def __init__(self, args_parser=ZeroShotClassificationArgumentHandler(), *args, **kwargs): super().__init__(*args, **kwargs) self._args_parser = args_parser if self.entailment_id == -1: logger.warning( "Failed to determine 'entailment' label id from the label2id mapping in the model config. Setting to " "-1. Define a descriptive label2id mapping in the model config to ensure correct outputs." 
) @property def entailment_id(self): for label, ind in self.model.config.label2id.items(): if label.lower().startswith("entail"): return ind return -1 def _parse_and_tokenize( self, sequences, candidate_labels, hypothesis_template, padding=True, add_special_tokens=True, truncation=TruncationStrategy.ONLY_FIRST, **kwargs ): """ Parse arguments and tokenize only_first so that hypothesis (label) is not truncated """ sequence_pairs = self._args_parser(sequences, candidate_labels, hypothesis_template) inputs = self.tokenizer( sequence_pairs, add_special_tokens=add_special_tokens, return_tensors=self.framework, padding=padding, truncation=truncation, ) return inputs def __call__( self, sequences: Union[str, List[str]], candidate_labels, hypothesis_template="This example is {}.", multi_label=False, **kwargs, ): """ Classify the sequence(s) given as inputs. See the :obj:`~transformers.ZeroShotClassificationPipeline` documentation for more information. Args: sequences (:obj:`str` or :obj:`List[str]`): The sequence(s) to classify, will be truncated if the model input is too large. candidate_labels (:obj:`str` or :obj:`List[str]`): The set of possible class labels to classify each sequence into. Can be a single label, a string of comma-separated labels, or a list of labels. hypothesis_template (:obj:`str`, `optional`, defaults to :obj:`"This example is {}."`): The template used to turn each label into an NLI-style hypothesis. This template must include a {} or similar syntax for the candidate label to be inserted into the template. For example, the default template is :obj:`"This example is {}."` With the candidate label :obj:`"sports"`, this would be fed into the model like :obj:`"<cls> sequence to classify <sep> This example is sports . <sep>"`. The default template works well in many cases, but it may be worthwhile to experiment with different templates depending on the task setting. multi_label (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not multiple candidate labels can be true. If :obj:`False`, the scores are normalized such that the sum of the label likelihoods for each sequence is 1. If :obj:`True`, the labels are considered independent and probabilities are normalized for each candidate by doing a softmax of the entailment score vs. the contradiction score. Return: A :obj:`dict` or a list of :obj:`dict`: Each result comes as a dictionary with the following keys: - **sequence** (:obj:`str`) -- The sequence for which this is the output. - **labels** (:obj:`List[str]`) -- The labels sorted by order of likelihood. - **scores** (:obj:`List[float]`) -- The probabilities for each of the labels. """ if "multi_class" in kwargs and kwargs["multi_class"] is not None: multi_label = kwargs.pop("multi_class") logger.warn( "The `multi_class` argument has been deprecated and renamed to `multi_label`. " "`multi_class` will be removed in a future version of Transformers." 
) if sequences and isinstance(sequences, str): sequences = [sequences] outputs = super().__call__(sequences, candidate_labels, hypothesis_template) num_sequences = len(sequences) candidate_labels = self._args_parser._parse_labels(candidate_labels) reshaped_outputs = outputs.reshape((num_sequences, len(candidate_labels), -1)) if len(candidate_labels) == 1: multi_label = True if not multi_label: # softmax the "entailment" logits over all candidate labels entail_logits = reshaped_outputs[..., self.entailment_id] scores = np.exp(entail_logits) / np.exp(entail_logits).sum(-1, keepdims=True) else: # softmax over the entailment vs. contradiction dim for each label independently entailment_id = self.entailment_id contradiction_id = -1 if entailment_id == 0 else 0 entail_contr_logits = reshaped_outputs[..., [contradiction_id, entailment_id]] scores = np.exp(entail_contr_logits) / np.exp(entail_contr_logits).sum(-1, keepdims=True) scores = scores[..., 1] result = [] for iseq in range(num_sequences): top_inds = list(reversed(scores[iseq].argsort())) result.append( { "sequence": sequences if isinstance(sequences, str) else sequences[iseq], "labels": [candidate_labels[i] for i in top_inds], "scores": scores[iseq][top_inds].tolist(), } ) if len(result) == 1: return result[0] return result
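
# Illustrative usage sketch of this pipeline; the sequence and candidate labels are placeholders and the
# underlying NLI checkpoint is resolved by the `pipeline` factory.
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-classification")
#     classifier(
#         "one day I will see the world",
#         candidate_labels=["travel", "cooking", "dancing"],
#         hypothesis_template="This example is about {}.",
#     )
#     # -> {"sequence": ..., "labels": [...], "scores": [...]}, labels sorted by descending score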
AdaMix/src/transformers/pipelines/zero_shot_classification.py/0
{ "file_path": "AdaMix/src/transformers/pipelines/zero_shot_classification.py", "repo_id": "AdaMix", "token_count": 3416 }
66
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import warnings from dataclasses import dataclass, field from typing import Tuple from .file_utils import cached_property, is_tf_available, tf_required from .training_args import TrainingArguments from .utils import logging logger = logging.get_logger(__name__) if is_tf_available(): import tensorflow as tf @dataclass class TFTrainingArguments(TrainingArguments): """ TrainingArguments is the subset of the arguments we use in our example scripts **which relate to the training loop itself**. Using :class:`~transformers.HfArgumentParser` we can turn this class into `argparse <https://docs.python.org/3/library/argparse.html#module-argparse>`__ arguments that can be specified on the command line. Parameters: output_dir (:obj:`str`): The output directory where the model predictions and checkpoints will be written. overwrite_output_dir (:obj:`bool`, `optional`, defaults to :obj:`False`): If :obj:`True`, overwrite the content of the output directory. Use this to continue training if :obj:`output_dir` points to a checkpoint directory. do_train (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to run training or not. This argument is not directly used by :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details. do_eval (:obj:`bool`, `optional`): Whether to run evaluation on the validation set or not. Will be set to :obj:`True` if :obj:`evaluation_strategy` is different from :obj:`"no"`. This argument is not directly used by :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details. do_predict (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to run predictions on the test set or not. This argument is not directly used by :class:`~transformers.Trainer`, it's intended to be used by your training/evaluation scripts instead. See the `example scripts <https://github.com/huggingface/transformers/tree/master/examples>`__ for more details. evaluation_strategy (:obj:`str` or :class:`~transformers.trainer_utils.IntervalStrategy`, `optional`, defaults to :obj:`"no"`): The evaluation strategy to adopt during training. Possible values are: * :obj:`"no"`: No evaluation is done during training. * :obj:`"steps"`: Evaluation is done (and logged) every :obj:`eval_steps`. * :obj:`"epoch"`: Evaluation is done at the end of each epoch. per_device_train_batch_size (:obj:`int`, `optional`, defaults to 8): The batch size per GPU/TPU core/CPU for training. per_device_eval_batch_size (:obj:`int`, `optional`, defaults to 8): The batch size per GPU/TPU core/CPU for evaluation. 
        gradient_accumulation_steps (:obj:`int`, `optional`, defaults to 1):
            Number of update steps to accumulate the gradients for, before performing a backward/update pass.

            .. warning::

                When using gradient accumulation, one step is counted as one step with backward pass. Therefore,
                logging, evaluation and saving will be conducted every ``gradient_accumulation_steps * xxx_step``
                training examples.
        learning_rate (:obj:`float`, `optional`, defaults to 5e-5):
            The initial learning rate for Adam.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            The weight decay to apply (if not zero).
        adam_beta1 (:obj:`float`, `optional`, defaults to 0.9):
            The beta1 hyperparameter for the Adam optimizer.
        adam_beta2 (:obj:`float`, `optional`, defaults to 0.999):
            The beta2 hyperparameter for the Adam optimizer.
        adam_epsilon (:obj:`float`, `optional`, defaults to 1e-8):
            The epsilon hyperparameter for the Adam optimizer.
        max_grad_norm (:obj:`float`, `optional`, defaults to 1.0):
            Maximum gradient norm (for gradient clipping).
        num_train_epochs (:obj:`float`, `optional`, defaults to 3.0):
            Total number of training epochs to perform.
        max_steps (:obj:`int`, `optional`, defaults to -1):
            If set to a positive number, the total number of training steps to perform. Overrides
            :obj:`num_train_epochs`.
        warmup_ratio (:obj:`float`, `optional`, defaults to 0.0):
            Ratio of total training steps used for a linear warmup from 0 to :obj:`learning_rate`.
        warmup_steps (:obj:`int`, `optional`, defaults to 0):
            Number of steps used for a linear warmup from 0 to :obj:`learning_rate`. Overrides any effect of
            :obj:`warmup_ratio`.
        logging_dir (:obj:`str`, `optional`):
            `TensorBoard <https://www.tensorflow.org/tensorboard>`__ log directory. Will default to
            `runs/**CURRENT_DATETIME_HOSTNAME**`.
        logging_strategy (:obj:`str` or :class:`~transformers.trainer_utils.IntervalStrategy`, `optional`, defaults to :obj:`"steps"`):
            The logging strategy to adopt during training. Possible values are:

                * :obj:`"no"`: No logging is done during training.
                * :obj:`"epoch"`: Logging is done at the end of each epoch.
                * :obj:`"steps"`: Logging is done every :obj:`logging_steps`.

        logging_first_step (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to log and evaluate the first :obj:`global_step` or not.
        logging_steps (:obj:`int`, `optional`, defaults to 500):
            Number of update steps between two logs if :obj:`logging_strategy="steps"`.
        save_strategy (:obj:`str` or :class:`~transformers.trainer_utils.IntervalStrategy`, `optional`, defaults to :obj:`"steps"`):
            The checkpoint save strategy to adopt during training. Possible values are:

                * :obj:`"no"`: No save is done during training.
                * :obj:`"epoch"`: Save is done at the end of each epoch.
                * :obj:`"steps"`: Save is done every :obj:`save_steps`.

        save_steps (:obj:`int`, `optional`, defaults to 500):
            Number of update steps between two checkpoint saves if :obj:`save_strategy="steps"`.
        save_total_limit (:obj:`int`, `optional`):
            If a value is passed, will limit the total number of checkpoints. Deletes the older checkpoints in
            :obj:`output_dir`.
        no_cuda (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to avoid using CUDA even when it is available.
        seed (:obj:`int`, `optional`, defaults to 42):
            Random seed that will be set at the beginning of training.
        fp16 (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to use 16-bit (mixed) precision training (through NVIDIA Apex) instead of 32-bit training.
        fp16_opt_level (:obj:`str`, `optional`, defaults to 'O1'):
            For :obj:`fp16` training, Apex AMP optimization level selected in ['O0', 'O1', 'O2', 'O3']. See details
            on the `Apex documentation <https://nvidia.github.io/apex/amp.html>`__.
        local_rank (:obj:`int`, `optional`, defaults to -1):
            During distributed training, the rank of the process.
        tpu_num_cores (:obj:`int`, `optional`):
            When training on TPU, the number of TPU cores (automatically passed by launcher script).
        debug (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to activate the trace to record computation graphs and profiling information or not.
        dataloader_drop_last (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether to drop the last incomplete batch (if the length of the dataset is not divisible by the batch
            size) or not.
        eval_steps (:obj:`int`, `optional`, defaults to 1000):
            Number of update steps between two evaluations.
        past_index (:obj:`int`, `optional`, defaults to -1):
            Some models like :doc:`TransformerXL <../model_doc/transformerxl>` or :doc:`XLNet <../model_doc/xlnet>`
            can make use of the past hidden states for their predictions. If this argument is set to a positive int,
            the ``Trainer`` will use the corresponding output (usually index 2) as the past state and feed it to the
            model at the next training step under the keyword argument ``mems``.
        tpu_name (:obj:`str`, `optional`):
            The name of the TPU the process is running on.
        tpu_zone (:obj:`str`, `optional`):
            The zone of the TPU the process is running on. If not specified, we will attempt to automatically detect
            from metadata.
        gcp_project (:obj:`str`, `optional`):
            Google Cloud Project name for the Cloud TPU-enabled project. If not specified, we will attempt to
            automatically detect from metadata.
        run_name (:obj:`str`, `optional`):
            A descriptor for the run. Notably used for wandb logging.
        xla (:obj:`bool`, `optional`):
            Whether to activate the XLA compilation or not.
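
    A minimal construction sketch (the output directory and hyperparameter values below are placeholders)::

        from transformers import TFTrainingArguments

        training_args = TFTrainingArguments(
            output_dir="./output",
            per_device_train_batch_size=8,
            num_train_epochs=3,
            logging_steps=100,
        )

        # The distribution strategy (CPU, single GPU, mirrored GPUs or TPU) is resolved lazily:
        with training_args.strategy.scope():
            ...  # build the Keras model under the selected strategy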
""" tpu_name: str = field( default=None, metadata={"help": "Name of TPU"}, ) tpu_zone: str = field( default=None, metadata={"help": "Zone of TPU"}, ) gcp_project: str = field( default=None, metadata={"help": "Name of Cloud TPU-enabled project"}, ) poly_power: float = field( default=1.0, metadata={"help": "Power for the Polynomial decay LR scheduler."}, ) xla: bool = field(default=False, metadata={"help": "Whether to activate the XLA compilation or not"}) @cached_property @tf_required def _setup_strategy(self) -> Tuple["tf.distribute.Strategy", int]: logger.info("Tensorflow: setting up strategy") if self.xla: tf.config.optimizer.set_jit(True) gpus = tf.config.list_physical_devices("GPU") # Set to float16 at first if self.fp16: policy = tf.keras.mixed_precision.experimental.Policy("mixed_float16") tf.keras.mixed_precision.experimental.set_policy(policy) if self.no_cuda: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") else: try: if self.tpu_name: tpu = tf.distribute.cluster_resolver.TPUClusterResolver( self.tpu_name, zone=self.tpu_zone, project=self.gcp_project ) else: tpu = tf.distribute.cluster_resolver.TPUClusterResolver() except ValueError: tpu = None if tpu: # Set to bfloat16 in case of TPU if self.fp16: policy = tf.keras.mixed_precision.experimental.Policy("mixed_bfloat16") tf.keras.mixed_precision.experimental.set_policy(policy) tf.config.experimental_connect_to_cluster(tpu) tf.tpu.experimental.initialize_tpu_system(tpu) strategy = tf.distribute.TPUStrategy(tpu) elif len(gpus) == 0: strategy = tf.distribute.OneDeviceStrategy(device="/cpu:0") elif len(gpus) == 1: strategy = tf.distribute.OneDeviceStrategy(device="/gpu:0") elif len(gpus) > 1: # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0` strategy = tf.distribute.MirroredStrategy() else: raise ValueError("Cannot find the proper strategy please check your environment properties.") return strategy @property @tf_required def strategy(self) -> "tf.distribute.Strategy": """ The strategy used for distributed training. """ return self._setup_strategy @property @tf_required def n_replicas(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. """ return self._setup_strategy.num_replicas_in_sync @property def train_batch_size(self) -> int: """ The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training). """ if self.per_gpu_train_batch_size: logger.warning( "Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future " "version. Using `--per_device_train_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size return per_device_batch_size * self.n_replicas @property def eval_batch_size(self) -> int: """ The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training). """ if self.per_gpu_eval_batch_size: logger.warning( "Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future " "version. Using `--per_device_eval_batch_size` is preferred." ) per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size return per_device_batch_size * self.n_replicas @property @tf_required def n_gpu(self) -> int: """ The number of replicas (CPUs, GPUs or TPU cores) used in this training. 
""" warnings.warn( "The n_gpu argument is deprecated and will be removed in a future version, use n_replicas instead.", FutureWarning, ) return self._setup_strategy.num_replicas_in_sync
AdaMix/src/transformers/training_args_tf.py/0
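A minimal usage sketch (not part of the file above) of how these TensorFlow training arguments are typically consumed; the output directory and batch size are arbitrary placeholders, and TensorFlow must be installed for the strategy properties to resolve:

from transformers import TFTrainingArguments

args = TFTrainingArguments(
    output_dir="./tf_output",          # placeholder path
    per_device_train_batch_size=8,
    xla=True,                          # turns on tf.config.optimizer.set_jit(True) in _setup_strategy
)

# `strategy` lazily builds the tf.distribute strategy (TPU, multi-GPU, single GPU or CPU);
# the effective batch sizes scale with the number of replicas.
print(args.n_replicas)
print(args.train_batch_size)
with args.strategy.scope():
    pass  # build the Keras model here so its variables are placed according to the strategy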
{ "file_path": "AdaMix/src/transformers/training_args_tf.py", "repo_id": "AdaMix", "token_count": 5960 }
67
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import ceil


def assert_device_map(device_map, num_blocks):
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    assert len(duplicate_blocks) == 0, (
        "Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device. These "
        "attention blocks were specified more than once: " + str(duplicate_blocks)
    )
    assert len(missing_blocks) == 0, (
        "There are attention blocks for this model that are not specified in the device_map. Add these attention "
        "blocks to a device on the device_map: " + str(missing_blocks)
    )
    assert (
        len(extra_blocks) == 0
    ), "The device_map contains more attention blocks than this model has. Remove these from the device_map: " + str(
        extra_blocks
    )


def get_device_map(n_layers, devices):
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = list(layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks))

    return dict(zip(devices, layers_list))
AdaMix/src/transformers/utils/model_parallel_utils.py/0
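A small illustration (assumed, not part of the repository) of the two helpers above: splitting 12 attention blocks across two devices and validating the resulting map.

from transformers.utils.model_parallel_utils import assert_device_map, get_device_map

device_map = get_device_map(n_layers=12, devices=[0, 1])
# -> {0: [0, 1, 2, 3, 4, 5], 1: [6, 7, 8, 9, 10, 11]}
assert_device_map(device_map, num_blocks=12)  # passes silently; raises AssertionError on missing/duplicate blocks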
{ "file_path": "AdaMix/src/transformers/utils/model_parallel_utils.py", "repo_id": "AdaMix", "token_count": 715 }
68
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch {{cookiecutter.modelname}} model. """ {% if cookiecutter.is_encoder_decoder_model == "False" -%} import unittest from tests.test_modeling_common import floats_tensor from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from .test_configuration_common import ConfigTester from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForMaskedLM, {{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForTokenClassification, {{cookiecutter.camelcase_modelname}}Model, ) from transformers.models.{{cookiecutter.lowercase_modelname}}.modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST, ) class {{cookiecutter.camelcase_modelname}}ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels 
= ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def prepare_config_and_inputs_for_decoder(self): ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = self.prepare_config_and_inputs() config.is_decoder = True encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size]) encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) return ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = {{cookiecutter.camelcase_modelname}}Model(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_model_as_decoder( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.add_cross_attention = True model = {{cookiecutter.camelcase_modelname}}Model(config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, ) result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, encoder_hidden_states=encoder_hidden_states, ) result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_for_causal_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): model = {{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = {{cookiecutter.camelcase_modelname}}ForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_decoder_model_past_large_inputs( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ): config.is_decoder = True config.add_cross_attention = True model = {{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) model.to(torch_device) model.eval() # first forward pass outputs = model( input_ids, attention_mask=input_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=True, ) past_key_values = outputs.past_key_values # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_mask = ids_tensor((self.batch_size, 3), vocab_size=2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([input_mask, next_mask], dim=-1) output_from_no_past = model( next_input_ids, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, output_hidden_states=True, )["hidden_states"][0] output_from_past = model( next_tokens, attention_mask=next_attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, output_hidden_states=True, )["hidden_states"][0] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)) def create_and_check_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = {{cookiecutter.camelcase_modelname}}ForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = {{cookiecutter.camelcase_modelname}}ForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = {{cookiecutter.camelcase_modelname}}ForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, 
self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = {{cookiecutter.camelcase_modelname}}ForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class {{cookiecutter.camelcase_modelname}}ModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}ForMaskedLM, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForMultipleChoice, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForTokenClassification, ) if is_torch_available() else () ) all_generative_model_classes = ({{cookiecutter.camelcase_modelname}}ForCausalLM,) if is_torch_available() else () def setUp(self): self.model_tester = {{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_model_various_embeddings(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() for type in ["absolute", "relative_key", "relative_key_query"]: config_and_inputs[0].position_embedding_type = type self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) def test_model_as_decoder(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_decoder() self.model_tester.create_and_check_model_as_decoder(*config_and_inputs) def test_model_as_decoder_with_default_input_mask(self): # This regression test was failing with PyTorch < 1.3 ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) = self.model_tester.prepare_config_and_inputs_for_decoder() input_mask = None self.model_tester.create_and_check_model_as_decoder( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, encoder_hidden_states, encoder_attention_mask, ) @slow def test_model_from_pretrained(self): for model_name in {{cookiecutter.uppercase_modelname}}_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = {{cookiecutter.camelcase_modelname}}Model.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class {{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = {{cookiecutter.camelcase_modelname}}ForMaskedLM.from_pretrained("{{cookiecutter.checkpoint_identifier}}") input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 32000 expected_shape = torch.Size((1, 6, vocab_size)) self.assertEqual(output.shape, expected_shape) # TODO Replace values below with what was printed above. expected_slice = torch.tensor( [[[-0.0483, 0.1188, -0.0313], [-0.0606, 0.1435, 0.0199], [-0.0235, 0.1519, 0.0175]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4)) {% else -%} import copy import tempfile import unittest from transformers import is_torch_available from transformers.file_utils import cached_property from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from .test_configuration_common import ConfigTester from .test_generation_utils import GenerationTesterMixin from .test_modeling_common import ModelTesterMixin, ids_tensor if is_torch_available(): import torch from transformers import ( {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering, {{cookiecutter.camelcase_modelname}}ForCausalLM, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}Tokenizer, ) from transformers.models.{{cookiecutter.lowercase_modelname}}.modeling_{{cookiecutter.lowercase_modelname}} import ( {{cookiecutter.camelcase_modelname}}Decoder, {{cookiecutter.camelcase_modelname}}Encoder, ) def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = input_ids.ne(config.pad_token_id) if decoder_attention_mask is None: decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": attention_mask, } @require_torch class {{cookiecutter.camelcase_modelname}}ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, 
hidden_size=16, num_hidden_layers=2, num_attention_heads=4, intermediate_size=4, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size).clamp( 3, ) input_ids[:, -1] = self.eos_token_id # Eos Token decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, ) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def prepare_config_and_inputs_for_common(self): config, inputs_dict = self.prepare_config_and_inputs() return config, inputs_dict def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict): model = {{cookiecutter.camelcase_modelname}}Model(config=config).get_decoder().to(torch_device).eval() input_ids = inputs_dict["input_ids"] attention_mask = inputs_dict["attention_mask"] # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() # create hypothetical multiple next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = ids_tensor((self.batch_size, 3), 2) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach() output_from_past_slice = output_from_past[:, :, random_slice_idx].detach() self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1]) # test that outputs are equal for slice self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2)) def 
check_encoder_decoder_model_standalone(self, config, inputs_dict): model = {{cookiecutter.camelcase_modelname}}Model(config=config).to(torch_device).eval() outputs = model(**inputs_dict) encoder_last_hidden_state = outputs.encoder_last_hidden_state last_hidden_state = outputs.last_hidden_state with tempfile.TemporaryDirectory() as tmpdirname: encoder = model.get_encoder() encoder.save_pretrained(tmpdirname) encoder = {{cookiecutter.camelcase_modelname}}Encoder.from_pretrained(tmpdirname).to(torch_device) encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"])[ 0 ] self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3) with tempfile.TemporaryDirectory() as tmpdirname: decoder = model.get_decoder() decoder.save_pretrained(tmpdirname) decoder = {{cookiecutter.camelcase_modelname}}Decoder.from_pretrained(tmpdirname).to(torch_device) last_hidden_state_2 = decoder( input_ids=inputs_dict["decoder_input_ids"], attention_mask=inputs_dict["decoder_attention_mask"], encoder_hidden_states=encoder_last_hidden_state, encoder_attention_mask=inputs_dict["attention_mask"], )[0] self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3) @require_torch class {{cookiecutter.camelcase_modelname}}ModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( ({{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForSequenceClassification, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering) if is_torch_available() else () ) all_generative_model_classes = ({{cookiecutter.camelcase_modelname}}ForConditionalGeneration,) if is_torch_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_missing_keys = False def setUp(self): self.model_tester = {{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config) def test_config(self): self.config_tester.run_common_tests() def test_save_load_strict(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs() for model_class in self.all_model_classes: model = model_class(config) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True) self.assertEqual(info["missing_keys"], []) def test_decoder_model_past_with_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs) def test_encoder_decoder_model_standalone(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs) # {{cookiecutter.camelcase_modelname}}ForSequenceClassification does not support inputs_embeds def test_inputs_embeds(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in ({{cookiecutter.camelcase_modelname}}Model, {{cookiecutter.camelcase_modelname}}ForConditionalGeneration, {{cookiecutter.camelcase_modelname}}ForQuestionAnswering): model = model_class(config) model.to(torch_device) model.eval() inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class)) if not self.is_encoder_decoder: input_ids = inputs["input_ids"] del 
inputs["input_ids"] else: encoder_input_ids = inputs["input_ids"] decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids) del inputs["input_ids"] inputs.pop("decoder_input_ids", None) wte = model.get_input_embeddings() if not self.is_encoder_decoder: inputs["inputs_embeds"] = wte(input_ids) else: inputs["inputs_embeds"] = wte(encoder_input_ids) inputs["decoder_inputs_embeds"] = wte(decoder_input_ids) with torch.no_grad(): model(**inputs)[0] def test_generate_fp16(self): config, input_dict = self.model_tester.prepare_config_and_inputs() input_ids = input_dict["input_ids"] attention_mask = input_ids.ne(1).to(torch_device) model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration(config).eval().to(torch_device) if torch_device == "cuda": model.half() model.generate(input_ids, attention_mask=attention_mask) model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3) def assert_tensors_close(a, b, atol=1e-12, prefix=""): """If tensors have different shapes, different values or a and b are not both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if torch.allclose(a, b, atol=atol): return True raise except Exception: pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item() if a.numel() > 100: msg = f"tensor values are {pct_different:.1%} percent different." else: msg = f"{a} != {b}" if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return torch.tensor(tok_lst, dtype=torch.long, device=torch_device) TOLERANCE = 1e-4 @require_torch @require_sentencepiece @require_tokenizers @slow class {{cookiecutter.camelcase_modelname}}ModelIntegrationTests(unittest.TestCase): @cached_property def default_tokenizer(self): return {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') def test_inference_no_head(self): model = {{cookiecutter.camelcase_modelname}}Model.from_pretrained('{{cookiecutter.checkpoint_identifier}}').to(torch_device) input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[2, 0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, 1024)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_inference_head(self): model = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}').to(torch_device) # change to intended input input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) with torch.no_grad(): output = model(**inputs_dict)[0] expected_shape = torch.Size((1, 11, model.config.vocab_size)) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = torch.tensor( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], 
device=torch_device ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE)) def test_seq_to_seq_generation(self): hf = {{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}').to(torch_device) tok = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') batch_input = [ # string 1, # string 2, # string 3, # string 4, ] # The below article tests that we don't add any hypotheses outside of the top n_beams dct = tok.batch_encode_plus( batch_input, max_length=512, padding="max_length", truncation_strategy="only_first", truncation=True, return_tensors="pt", ) hypotheses_batch = hf.generate( input_ids=dct["input_ids"].to(torch_device), attention_mask=dct["attention_mask"].to(torch_device), num_beams=2, ) EXPECTED = [ # here expected 1, # here expected 2, # here expected 3, # here expected 4, ] generated = tok.batch_decode( hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == EXPECTED class {{cookiecutter.camelcase_modelname}}StandaloneDecoderModelTester: def __init__( self, parent, vocab_size=99, batch_size=13, d_model=16, decoder_seq_length=7, is_training=True, is_decoder=True, use_attention_mask=True, use_cache=False, use_labels=True, decoder_start_token_id=2, decoder_ffn_dim=32, decoder_layers=4, encoder_attention_heads=4, decoder_attention_heads=4, max_position_embeddings=30, is_encoder_decoder=False, pad_token_id=0, bos_token_id=1, eos_token_id=2, scope=None, ): self.parent = parent self.batch_size = batch_size self.decoder_seq_length = decoder_seq_length # For common tests self.seq_length = self.decoder_seq_length self.is_training = is_training self.use_attention_mask = use_attention_mask self.use_labels = use_labels self.vocab_size = vocab_size self.d_model = d_model self.hidden_size = d_model self.num_hidden_layers = decoder_layers self.decoder_layers = decoder_layers self.decoder_ffn_dim = decoder_ffn_dim self.encoder_attention_heads = encoder_attention_heads self.decoder_attention_heads = decoder_attention_heads self.num_attention_heads = decoder_attention_heads self.eos_token_id = eos_token_id self.bos_token_id = bos_token_id self.pad_token_id = pad_token_id self.decoder_start_token_id = decoder_start_token_id self.use_cache = use_cache self.max_position_embeddings = max_position_embeddings self.is_encoder_decoder = is_encoder_decoder self.scope = None self.decoder_key_length = decoder_seq_length self.base_model_out_len = 2 self.decoder_attention_idx = 1 def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) attention_mask = None if self.use_attention_mask: attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, d_model=self.d_model, decoder_layers=self.decoder_layers, decoder_ffn_dim=self.decoder_ffn_dim, encoder_attention_heads=self.encoder_attention_heads, decoder_attention_heads=self.decoder_attention_heads, eos_token_id=self.eos_token_id, bos_token_id=self.bos_token_id, use_cache=self.use_cache, pad_token_id=self.pad_token_id, decoder_start_token_id=self.decoder_start_token_id, max_position_embeddings=self.max_position_embeddings, is_encoder_decoder=self.is_encoder_decoder, ) return ( config, 
input_ids, attention_mask, lm_labels, ) def create_and_check_decoder_model_past( self, config, input_ids, attention_mask, lm_labels, ): config.use_cache = True model = {{cookiecutter.camelcase_modelname}}Decoder(config=config).to(torch_device).eval() # first forward pass outputs = model(input_ids, use_cache=True) outputs_use_cache_conf = model(input_ids) outputs_no_past = model(input_ids, use_cache=False) self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf)) self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1) past_key_values = outputs["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # append to next input_ids and next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3) def create_and_check_decoder_model_attention_mask_past( self, config, input_ids, attention_mask, lm_labels, ): model = {{cookiecutter.camelcase_modelname}}Decoder(config=config).to(torch_device).eval() # create attention mask attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device) half_seq_length = input_ids.shape[-1] // 2 attn_mask[:, half_seq_length:] = 0 # first forward pass past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size) # change a random masked slice from input_ids random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1 random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1) input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens # append to next input_ids and attn_mask next_input_ids = torch.cat([input_ids, next_tokens], dim=-1) attn_mask = torch.cat( [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)], dim=1, ) # get two different outputs output_from_no_past = model(next_input_ids)["last_hidden_state"] output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"] # select random slice random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item() output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach() output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach() # test that outputs are equal for slice assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, attention_mask, lm_labels, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "attention_mask": attention_mask, } return config, inputs_dict @require_torch class {{cookiecutter.camelcase_modelname}}StandaloneDecoderModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = 
({{cookiecutter.camelcase_modelname}}Decoder, {{cookiecutter.camelcase_modelname}}ForCausalLM) if is_torch_available() else () all_generative_model_classes = ({{cookiecutter.camelcase_modelname}}ForCausalLM,) if is_torch_available() else () test_pruning = False is_encoder_decoder = False def setUp( self, ): self.model_tester = {{cookiecutter.camelcase_modelname}}StandaloneDecoderModelTester(self, is_training=False) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_past(*config_and_inputs) def test_decoder_model_attn_mask_past(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_decoder_model_attention_mask_past(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # decoder cannot keep gradients return {% endif -%}
AdaMix/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_{{cookiecutter.lowercase_modelname}}.py/0
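The `create_and_check_decoder_model_past*` checks in the template above verify that decoding with cached `past_key_values` matches recomputing from the full sequence. A standalone sketch of that property, using GPT-2 purely as a stand-in for the templated model (downloads the checkpoint on first run):

import torch
from transformers import GPT2LMHeadModel

model = GPT2LMHeadModel.from_pretrained("gpt2").eval()
input_ids = torch.tensor([[464, 3290, 318]])  # arbitrary token ids

with torch.no_grad():
    full_logits = model(input_ids).logits[:, -1]                 # no cache, full sequence
    prefix = model(input_ids[:, :-1], use_cache=True)            # cache the prefix
    cached_logits = model(
        input_ids[:, -1:], past_key_values=prefix.past_key_values
    ).logits[:, -1]                                              # feed only the new token plus the cache

assert torch.allclose(full_logits, cached_logits, atol=1e-3)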
{ "file_path": "AdaMix/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_{{cookiecutter.lowercase_modelname}}.py", "repo_id": "AdaMix", "token_count": 19917 }
69
{ "model_type": "roberta" }
AdaMix/tests/fixtures/dummy-config.json/0
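A fixture this small is enough for config auto-resolution, since `model_type` is the lookup key. A hypothetical check (the path is assumed to be relative to the repository root):

from transformers import AutoConfig

config = AutoConfig.from_pretrained("tests/fixtures/dummy-config.json")
assert config.model_type == "roberta"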
{ "file_path": "AdaMix/tests/fixtures/dummy-config.json", "repo_id": "AdaMix", "token_count": 15 }
70
# coding=utf-8 # Copyright 2021 HuggingFace Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json import os import tempfile class FeatureExtractionSavingTestMixin: def test_feat_extract_to_json_string(self): feat_extract = self.feature_extraction_class(**self.feat_extract_dict) obj = json.loads(feat_extract.to_json_string()) for key, value in self.feat_extract_dict.items(): self.assertEqual(obj[key], value) def test_feat_extract_to_json_file(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: json_file_path = os.path.join(tmpdirname, "feat_extract.json") feat_extract_first.to_json_file(json_file_path) feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_feat_extract_from_and_save_pretrained(self): feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict) with tempfile.TemporaryDirectory() as tmpdirname: feat_extract_first.save_pretrained(tmpdirname) feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname) self.assertEqual(feat_extract_second.to_dict(), feat_extract_first.to_dict()) def test_init_without_params(self): feat_extract = self.feature_extraction_class() self.assertIsNotNone(feat_extract)
AdaMix/tests/test_feature_extraction_common.py/0
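A sketch of the save/load round trip the mixin above exercises, shown with Wav2Vec2's feature extractor purely as a concrete example; the mixin itself stays class-agnostic through `self.feature_extraction_class`:

import tempfile

from transformers import Wav2Vec2FeatureExtractor

feat_extract = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000)
with tempfile.TemporaryDirectory() as tmpdirname:
    feat_extract.save_pretrained(tmpdirname)
    reloaded = Wav2Vec2FeatureExtractor.from_pretrained(tmpdirname)

# the reloaded extractor must serialize to the same dictionary as the original
assert feat_extract.to_dict() == reloaded.to_dict()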
{ "file_path": "AdaMix/tests/test_feature_extraction_common.py", "repo_id": "AdaMix", "token_count": 760 }
71
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from .test_configuration_common import ConfigTester from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( FlaubertConfig, FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST class FlaubertModelTester(object): def __init__( self, parent, ): self.parent = parent self.batch_size = 13 self.seq_length = 7 self.is_training = True self.use_input_lengths = True self.use_token_type_ids = True self.use_labels = True self.gelu_activation = True self.sinusoidal_embeddings = False self.causal = False self.asm = False self.n_langs = 2 self.vocab_size = 99 self.n_special = 0 self.hidden_size = 32 self.num_hidden_layers = 5 self.num_attention_heads = 4 self.hidden_dropout_prob = 0.1 self.attention_probs_dropout_prob = 0.1 self.max_position_embeddings = 512 self.type_vocab_size = 12 self.type_sequence_label_size = 2 self.initializer_range = 0.02 self.num_labels = 3 self.num_choices = 4 self.summary_type = "last" self.use_proj = None self.scope = None def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = random_attention_mask([self.batch_size, self.seq_length]) input_lengths = None if self.use_input_lengths: input_lengths = ( ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2 ) # small variation of seq_length token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs) sequence_labels = None token_labels = None is_impossible_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) is_impossible_labels = ids_tensor([self.batch_size], 2).float() choice_labels = ids_tensor([self.batch_size], self.num_choices) config = FlaubertConfig( vocab_size=self.vocab_size, n_special=self.n_special, emb_dim=self.hidden_size, n_layers=self.num_hidden_layers, n_heads=self.num_attention_heads, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, gelu_activation=self.gelu_activation, sinusoidal_embeddings=self.sinusoidal_embeddings, asm=self.asm, causal=self.causal, n_langs=self.n_langs, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, summary_type=self.summary_type, use_proj=self.use_proj, ) return ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, 
is_impossible_labels, choice_labels, input_mask, ) def create_and_check_flaubert_model( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, lengths=input_lengths, langs=token_type_ids) result = model(input_ids, langs=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_flaubert_lm_head( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertWithLMHeadModel(config) model.to(torch_device) model.eval() result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_flaubert_simple_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnsweringSimple(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_flaubert_qa( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForQuestionAnswering(config) model.to(torch_device) model.eval() result = model(input_ids) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, ) result_with_labels = model( input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, ) (total_loss,) = result_with_labels.to_tuple() result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels) (total_loss,) = result_with_labels.to_tuple() self.parent.assertEqual(result_with_labels.loss.shape, ()) self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top)) self.parent.assertEqual( result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual( result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top) ) self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,)) def create_and_check_flaubert_sequence_classif( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): model = FlaubertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids) result = model(input_ids, labels=sequence_labels) self.parent.assertEqual(result.loss.shape, ()) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size)) def create_and_check_flaubert_token_classif( self, config, 
input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_labels = self.num_labels model = FlaubertForTokenClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_flaubert_multiple_choice( self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ): config.num_choices = self.num_choices model = FlaubertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "lengths": input_lengths} return config, inputs_dict @require_torch class FlaubertModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( FlaubertModel, FlaubertWithLMHeadModel, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertForMultipleChoice, ) if is_torch_available() else () ) # Flaubert has 2 QA models -> need to manually set the correct labels for one of them here def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class.__name__ == "FlaubertForQuestionAnswering": inputs_dict["start_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) inputs_dict["end_positions"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = FlaubertModelTester(self) self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37) def test_config(self): self.config_tester.run_common_tests() def test_flaubert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_model(*config_and_inputs) def test_flaubert_lm_head(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs) def test_flaubert_simple_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs) def test_flaubert_qa(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_qa(*config_and_inputs) def test_flaubert_sequence_classif(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs) def test_flaubert_token_classif(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs) def test_flaubert_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = FlaubertModel.from_pretrained(model_name) self.assertIsNotNone(model) @require_torch class FlaubertModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head_absolute_embedding(self): model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] ) self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
AdaMix/tests/test_modeling_flaubert.py/0
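Both this tester and the templated one earlier rely on the same input expansion for multiple-choice heads: a `[batch, seq]` tensor is repeated once per choice before being passed to the model. A tiny standalone illustration of that reshaping:

import torch

input_ids = torch.arange(6).reshape(2, 3)                        # batch=2, seq_len=3
num_choices = 4
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(expanded.shape)  # torch.Size([2, 4, 3]) -> one copy of each sequence per choice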
{ "file_path": "AdaMix/tests/test_modeling_flaubert.py", "repo_id": "AdaMix", "token_count": 6950 }
72
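The Flaubert integration test above runs the "flaubert/flaubert_base_cased" checkpoint on a fixed input and checks a slice of the hidden states. As a standalone illustration only (not part of the test file), the same call pattern looks like this, assuming torch and transformers are installed and the checkpoint can be downloaded:

import torch
from transformers import FlaubertModel

model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
model.eval()
# Same token ids as the integration test above.
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    last_hidden_state = model(input_ids)[0]
print(last_hidden_state.shape)  # torch.Size([1, 11, 768]) per the test's expected shape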
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team, Microsoft Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, slow, torch_device from .test_configuration_common import ConfigTester from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MPNetConfig, MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) class MPNetModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=64, num_hidden_layers=5, num_attention_heads=4, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def get_large_model_config(self): return MPNetConfig.from_pretrained("microsoft/mpnet-base") def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = MPNetConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, initializer_range=self.initializer_range, ) return config, input_ids, input_mask, sequence_labels, 
token_labels, choice_labels def create_and_check_mpnet_model( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MPNetModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, input_mask) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_mpnet_for_question_answering( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MPNetForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_mpnet_for_sequence_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MPNetForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_mpnet_for_multiple_choice( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = MPNetForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_mpnet_for_token_classification( self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MPNetForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids, input_mask, sequence_labels, token_labels, choice_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class MPNetModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetModel, ) if is_torch_available() else () ) test_pruning = False test_torchscript = True test_resize_embeddings = True def setUp(self): self.model_tester = MPNetModelTester(self) self.config_tester = ConfigTester(self, config_class=MPNetConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_mpnet_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_model(*config_and_inputs) def test_for_sequence_classification(self): 
config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_sequence_classification(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_multiple_choice(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_token_classification(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mpnet_for_question_answering(*config_and_inputs) @require_torch class MPNetModelIntegrationTest(unittest.TestCase): @slow def test_inference_no_head(self): model = MPNetModel.from_pretrained("microsoft/mpnet-base") input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]]) output = model(input_ids)[0] expected_shape = torch.Size((1, 11, 768)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [[[-0.0550, 0.1943, -0.0740], [-0.0562, 0.2211, -0.0579], [-0.0437, 0.3337, -0.0641]]] ) # compare the actual values for a slice. self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
AdaMix/tests/test_modeling_mpnet.py/0
{ "file_path": "AdaMix/tests/test_modeling_mpnet.py", "repo_id": "AdaMix", "token_count": 4330 }
73
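The MPNet integration test above follows the same pattern with the "microsoft/mpnet-base" checkpoint. A minimal illustrative sketch of that usage (assumes torch, transformers, and network access to the checkpoint):

import torch
from transformers import MPNetModel

model = MPNetModel.from_pretrained("microsoft/mpnet-base")
model.eval()
input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
with torch.no_grad():
    output = model(input_ids)[0]
print(output.shape)  # torch.Size([1, 11, 768]) per the test's expected shape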
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import BlenderbotConfig, BlenderbotTokenizer, is_tf_available from transformers.file_utils import cached_property from transformers.testing_utils import require_tf, require_tokenizers, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFBlenderbotForConditionalGeneration, TFBlenderbotModel @require_tf class TFBlenderbotModelTester: config_cls = BlenderbotConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFBlenderbotModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = inputs_dict["head_mask"] self.batch_size = 1 # 
first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() past_key_values = past_key_values[1] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_blenderbot_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class TFBlenderbotModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFBlenderbotForConditionalGeneration, TFBlenderbotModel) if is_tf_available() else () all_generative_model_classes = (TFBlenderbotForConditionalGeneration,) if is_tf_available() else () is_encoder_decoder = True test_pruning = False test_onnx = False def setUp(self): self.model_tester = TFBlenderbotModelTester(self) self.config_tester = ConfigTester(self, config_class=BlenderbotConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in self.all_generative_model_classes: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_saved_model_creation(self): # This test is too long 
(>30sec) and makes fail the CI pass def test_resize_token_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model(model.dummy_inputs) if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: # build the embeddings model = model_class(config=config) old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) old_final_logits_bias = model.get_bias() # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) new_final_logits_bias = model.get_bias() # check that the resized embeddings size matches the desired size. assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_final_logits_bias is not None and new_final_logits_bias is not None: old_final_logits_bias = old_final_logits_bias["final_logits_bias"] new_final_logits_bias = new_final_logits_bias["final_logits_bias"] self.assertEqual(new_final_logits_bias.shape[0], 1) self.assertEqual(new_final_logits_bias.shape[1], assert_size) models_equal = True for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()): for p1, p2 in zip(old, new): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if tf.debugging.assert_near(a, b, atol=atol): return True raise except Exception: msg = "{} != {}".format(a, b) if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) @require_tokenizers @require_tf class TFBlenderbot400MIntegrationTests(unittest.TestCase): src_text = ["My friends are cool but they eat too many carbs."] model_name = "facebook/blenderbot-400M-distill" @cached_property def tokenizer(self): return BlenderbotTokenizer.from_pretrained(self.model_name) @cached_property def model(self): model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) return model @slow def test_generation_from_long_input(self): model_inputs = self.tokenizer(self.src_text, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, ) generated_words 
= self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0] assert ( generated_words == " That's unfortunate. Are they trying to lose weight or are they just trying to be healthier?" )
AdaMix/tests/test_modeling_tf_blenderbot.py/0
{ "file_path": "AdaMix/tests/test_modeling_tf_blenderbot.py", "repo_id": "AdaMix", "token_count": 6114 }
74
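The TFBlenderbot400MIntegrationTests class above generates a reply with "facebook/blenderbot-400M-distill". A minimal standalone sketch of that usage (illustrative only; assumes tensorflow, transformers, the tokenizer dependencies, and a downloadable checkpoint):

from transformers import BlenderbotTokenizer, TFAutoModelForSeq2SeqLM

model_name = "facebook/blenderbot-400M-distill"
tokenizer = BlenderbotTokenizer.from_pretrained(model_name)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)

# Same source sentence as the integration test above.
inputs = tokenizer(["My friends are cool but they eat too many carbs."], return_tensors="tf")
generated_ids = model.generate(inputs.input_ids)
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)[0])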
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tempfile import unittest import warnings from transformers import AutoTokenizer, MarianConfig, MarianTokenizer, TranslationPipeline, is_tf_available from transformers.file_utils import cached_property from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import TFAutoModelForSeq2SeqLM, TFMarianModel, TFMarianMTModel @require_tf class TFMarianModelTester: config_cls = MarianConfig config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_marian_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TFMarianModel(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] head_mask = 
inputs_dict["head_mask"] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True) output, past_key_values = outputs.to_tuple() past_key_values = past_key_values[1] # create hypothetical next token and extent to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_marian_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat( [ tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8), ], axis=-1, ) if head_mask is None: head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads)) if decoder_head_mask is None: decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads)) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "head_mask": head_mask, "decoder_head_mask": decoder_head_mask, } @require_tf class TFMarianModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFMarianMTModel, TFMarianModel) if is_tf_available() else () all_generative_model_classes = (TFMarianMTModel,) if is_tf_available() else () is_encoder_decoder = True test_pruning = False test_onnx = False def setUp(self): self.model_tester = TFMarianModelTester(self) self.config_tester = ConfigTester(self, config_class=MarianConfig) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_compile_tf_model(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy") model_class = self.all_generative_model_classes[0] input_ids = { "decoder_input_ids": tf.keras.Input(batch_shape=(2, 2000), name="decoder_input_ids", dtype="int32"), "input_ids": tf.keras.Input(batch_shape=(2, 2000), name="input_ids", dtype="int32"), } # Prepare our model model = model_class(config) model(self._prepare_for_class(inputs_dict, 
model_class)) # Model must be called before saving. # Let's load it from the disk to be sure we can use pre-trained weights with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) outputs_dict = model(input_ids) hidden_states = outputs_dict[0] # Add a dense layer on top to test integration with other keras modules outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states) # Compile extended model extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs]) extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric]) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in self.all_generative_model_classes: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_saved_model_creation(self): # This test is too long (>30sec) and makes fail the CI pass def test_resize_token_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: # Here we build the word embeddings weights if not exists. # And then we retry to get the attribute once built. model(model.dummy_inputs) if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: # build the embeddings model = model_class(config=config) old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) old_final_logits_bias = model.get_bias() # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) new_final_logits_bias = model.get_bias() # check that the resized embeddings size matches the desired size. 
assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_final_logits_bias is not None and new_final_logits_bias is not None: old_final_logits_bias = old_final_logits_bias["final_logits_bias"] new_final_logits_bias = new_final_logits_bias["final_logits_bias"] self.assertEqual(new_final_logits_bias.shape[0], 1) self.assertEqual(new_final_logits_bias.shape[1], assert_size) models_equal = True for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()): for p1, p2 in zip(old, new): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors not close, or a and b arent both tensors, raise a nice Assertion error.""" if a is None and b is None: return True try: if tf.debugging.assert_near(a, b, atol=atol): return True raise except Exception: msg = "{} != {}".format(a, b) if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) @require_tf class AbstractMarianIntegrationTest(unittest.TestCase): maxDiff = 1000 # show more chars for failing integration tests @classmethod def setUpClass(cls) -> None: cls.model_name = f"Helsinki-NLP/opus-mt-{cls.src}-{cls.tgt}" return cls @cached_property def tokenizer(self) -> MarianTokenizer: return AutoTokenizer.from_pretrained(self.model_name) @property def eos_token_id(self) -> int: return self.tokenizer.eos_token_id @cached_property def model(self): warnings.simplefilter("error") model: TFMarianMTModel = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name) assert isinstance(model, TFMarianMTModel) c = model.config self.assertListEqual(c.bad_words_ids, [[c.pad_token_id]]) self.assertEqual(c.max_length, 512) self.assertEqual(c.decoder_start_token_id, c.pad_token_id) return model def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs): generated_words = self.translate_src_text(**tokenizer_kwargs) self.assertListEqual(self.expected_text, generated_words) def translate_src_text(self, **tokenizer_kwargs): model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf") generated_ids = self.model.generate( model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, max_length=128 ) generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True) return generated_words @require_sentencepiece @require_tokenizers @require_tf class TestMarian_MT_EN(AbstractMarianIntegrationTest): """Cover low resource/high perplexity setting. 
This breaks if pad_token_id logits not set to LARGE_NEGATIVE.""" src = "mt" tgt = "en" src_text = ["Billi messu b'mod ġentili, Ġesù fejjaq raġel li kien milqut bil - marda kerha tal - ġdiem."] expected_text = ["Touching gently, Jesus healed a man who was affected by the sad disease of leprosy."] @slow def test_batch_generation_mt_en(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers @require_tf class TestMarian_en_zh(AbstractMarianIntegrationTest): src = "en" tgt = "zh" src_text = ["My name is Wolfgang and I live in Berlin"] expected_text = ["我叫沃尔夫冈 我住在柏林"] @slow def test_batch_generation_en_zh(self): self._assert_generated_batch_equal_expected() @require_sentencepiece @require_tokenizers @require_tf class TestMarian_en_ROMANCE(AbstractMarianIntegrationTest): """Multilingual on target side.""" src = "en" tgt = "ROMANCE" src_text = [ ">>fr<< Don't spend so much time watching TV.", ">>pt<< Your message has been sent.", ">>es<< He's two years older than me.", ] expected_text = [ "Ne passez pas autant de temps à regarder la télé.", "A sua mensagem foi enviada.", "Es dos años más viejo que yo.", ] @slow def test_batch_generation_en_ROMANCE_multi(self): self._assert_generated_batch_equal_expected() @slow def test_pipeline(self): pipeline = TranslationPipeline(self.model, self.tokenizer, framework="tf") output = pipeline(self.src_text) self.assertEqual(self.expected_text, [x["translation_text"] for x in output])
AdaMix/tests/test_modeling_tf_marian.py/0
{ "file_path": "AdaMix/tests/test_modeling_tf_marian.py", "repo_id": "AdaMix", "token_count": 7767 }
75
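AbstractMarianIntegrationTest above builds checkpoint names of the form "Helsinki-NLP/opus-mt-{src}-{tgt}" and decodes with beam search. An illustrative sketch for the en-ROMANCE direction exercised in TestMarian_en_ROMANCE (assumes tensorflow, sentencepiece, and network access; not part of the test file):

from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

model_name = "Helsinki-NLP/opus-mt-en-ROMANCE"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name)

# Target-language prefix tokens (e.g. >>fr<<) select the output language, as in the test.
src_text = [">>fr<< Don't spend so much time watching TV."]
batch = tokenizer(src_text, padding=True, return_tensors="tf")
generated_ids = model.generate(
    batch.input_ids, attention_mask=batch.attention_mask, num_beams=2, max_length=128
)
print(tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True))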
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Testing suite for the PyTorch Wav2Vec2 model. """ import math import unittest from tests.test_modeling_common import floats_tensor, ids_tensor, random_attention_mask from transformers import is_torch_available from transformers.testing_utils import require_datasets, require_soundfile, require_torch, slow, torch_device from .test_configuration_common import ConfigTester from .test_modeling_common import ModelTesterMixin, _config_zero_init if is_torch_available(): import torch from transformers import Wav2Vec2Config, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2Model, Wav2Vec2Processor from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices class Wav2Vec2ModelTester: def __init__( self, parent, batch_size=13, seq_length=1024, # speech is longer is_training=False, hidden_size=16, feat_extract_norm="group", feat_extract_dropout=0.0, feat_extract_activation="gelu", conv_dim=(32, 32, 32), conv_stride=(4, 4, 4), conv_kernel=(8, 8, 8), conv_bias=False, num_conv_pos_embeddings=16, num_conv_pos_embedding_groups=2, num_hidden_layers=4, num_attention_heads=2, hidden_dropout_prob=0.1, # this is most likely not correctly set yet intermediate_size=20, layer_norm_eps=1e-5, hidden_act="gelu", initializer_range=0.02, vocab_size=32, do_stable_layer_norm=False, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.hidden_size = hidden_size self.feat_extract_norm = feat_extract_norm self.feat_extract_dropout = feat_extract_dropout self.feat_extract_activation = feat_extract_activation self.conv_dim = conv_dim self.conv_stride = conv_stride self.conv_kernel = conv_kernel self.conv_bias = conv_bias self.num_conv_pos_embeddings = num_conv_pos_embeddings self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_dropout_prob = hidden_dropout_prob self.intermediate_size = intermediate_size self.layer_norm_eps = layer_norm_eps self.hidden_act = hidden_act self.initializer_range = initializer_range self.vocab_size = vocab_size self.do_stable_layer_norm = do_stable_layer_norm self.scope = scope output_seq_length = self.seq_length for kernel, stride in zip(self.conv_kernel, self.conv_stride): output_seq_length = (output_seq_length - (kernel - 1)) / stride self.output_seq_length = int(math.ceil(output_seq_length)) self.encoder_seq_length = self.output_seq_length def prepare_config_and_inputs(self): input_values = floats_tensor([self.batch_size, self.seq_length], self.vocab_size) attention_mask = random_attention_mask([self.batch_size, self.seq_length]) config = Wav2Vec2Config( hidden_size=self.hidden_size, feat_extract_norm=self.feat_extract_norm, feat_extract_dropout=self.feat_extract_dropout, feat_extract_activation=self.feat_extract_activation, conv_dim=self.conv_dim, 
conv_stride=self.conv_stride, conv_kernel=self.conv_kernel, conv_bias=self.conv_bias, num_conv_pos_embeddings=self.num_conv_pos_embeddings, num_conv_pos_embedding_groups=self.num_conv_pos_embedding_groups, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, hidden_dropout_prob=self.hidden_dropout_prob, intermediate_size=self.intermediate_size, layer_norm_eps=self.layer_norm_eps, hidden_act=self.hidden_act, initializer_range=self.initializer_range, vocab_size=self.vocab_size, ) return config, input_values, attention_mask def create_and_check_model(self, config, input_values, attention_mask): model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() result = model(input_values, attention_mask=attention_mask) self.parent.assertEqual( result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, self.hidden_size) ) def create_and_check_batch_inference(self, config, input_values, *args): # test does not pass for models making use of `group_norm` # check: https://github.com/pytorch/fairseq/issues/3227 model = Wav2Vec2Model(config=config) model.to(torch_device) model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 batch_outputs = model(input_values, attention_mask=attention_mask).last_hidden_state for i in range(input_values.shape[0]): input_slice = input_values[i : i + 1, : input_lengths[i]] output = model(input_slice).last_hidden_state batch_output = batch_outputs[i : i + 1, : output.shape[1]] self.parent.assertTrue(torch.allclose(output, batch_output, atol=1e-3)) def check_ctc_loss(self, config, input_values, *args): model = Wav2Vec2ForCTC(config=config) model.to(torch_device) # make sure that dropout is disabled model.eval() input_values = input_values[:3] attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.bool) input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 attention_mask[i, input_lengths[i] :] = 0.0 model.config.ctc_loss_reduction = "sum" sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss model.config.ctc_loss_reduction = "mean" mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss self.parent.assertTrue(abs(labels.shape[0] * labels.shape[1] * mean_loss.item() - sum_loss.item()) < 1e-3) def check_training(self, config, input_values, *args): config.ctc_zero_infinity = True model = Wav2Vec2ForCTC(config=config) model.to(torch_device) model.train() # freeze feature encoder model.freeze_feature_extractor() input_values = input_values[:3] input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]] max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths)) labels = ids_tensor((input_values.shape[0], max(max_length_labels) - 2), model.config.vocab_size) # pad input for i in range(len(input_lengths)): input_values[i, input_lengths[i] :] = 0.0 if max_length_labels[i] < labels.shape[-1]: # it's important that we make sure that target lenghts are at least # 
one shorter than logit lenghts to prevent -inf labels[i, max_length_labels[i] - 1 :] = -100 loss = model(input_values, labels=labels).loss self.parent.assertFalse(torch.isinf(loss).item()) loss.backward() def prepare_config_and_inputs_for_common(self): config, input_values, attention_mask = self.prepare_config_and_inputs() inputs_dict = {"input_values": input_values, "attention_mask": attention_mask} return config, inputs_dict @require_torch class Wav2Vec2ModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM, ) if is_torch_available() else () ) test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = Wav2Vec2ModelTester(self) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) # Wav2Vec2 has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # Wav2Vec2 cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # Wav2Vec2 has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "conv.weight" in name or "masked_spec_embed" in name: self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg="Parameter {} of model {} seems not properly initialized".format(name, model_class), ) else: self.assertIn( ((param.data.mean() * 
1e9).round() / 1e9).item(), [0.0, 1.0], msg="Parameter {} of model {} seems not properly initialized".format(name, model_class), ) @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @require_torch class Wav2Vec2RobustModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = (Wav2Vec2ForCTC, Wav2Vec2Model, Wav2Vec2ForMaskedLM) if is_torch_available() else () test_pruning = False test_headmasking = False test_torchscript = False def setUp(self): self.model_tester = Wav2Vec2ModelTester( self, conv_stride=(3, 3, 3), feat_extract_norm="layer", do_stable_layer_norm=True ) self.config_tester = ConfigTester(self, config_class=Wav2Vec2Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_batched_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_batch_inference(*config_and_inputs) def test_ctc_loss_inference(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_ctc_loss(*config_and_inputs) def test_train(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.check_training(*config_and_inputs) # Wav2Vec2 has no inputs_embeds def test_inputs_embeds(self): pass # `input_ids` is renamed to `input_values` def test_forward_signature(self): pass # Wav2Vec2 cannot resize token embeddings # since it has no tokens embeddings def test_resize_tokens_embeddings(self): pass # Wav2Vec2 has no inputs_embeds # and thus the `get_input_embeddings` fn # is not implemented def test_model_common_attributes(self): pass def test_retain_grad_hidden_states_attentions(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True # no need to test all models as different heads yield the same functionality model_class = self.all_model_classes[0] model = model_class(config) model.to(torch_device) # set layer drop to 0 model.config.layerdrop = 0.0 input_values = inputs_dict["input_values"] input_lengths = torch.tensor( [input_values.shape[1] for _ in range(input_values.shape[0])], dtype=torch.long, device=torch_device ) output_lengths = model._get_feat_extract_output_lengths(input_lengths) labels = ids_tensor((input_values.shape[0], output_lengths[0] - 2), self.model_tester.vocab_size) inputs_dict["attention_mask"] = torch.ones_like(inputs_dict["attention_mask"]) inputs_dict["labels"] = labels outputs = model(**inputs_dict) output = outputs[0] # Encoder-/Decoder-only models hidden_states = outputs.hidden_states[0] attentions = outputs.attentions[0] hidden_states.retain_grad() attentions.retain_grad() output.flatten()[0].backward(retain_graph=True) self.assertIsNotNone(hidden_states.grad) self.assertIsNotNone(attentions.grad) def test_initialization(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() configs_no_init = _config_zero_init(config) for model_class in self.all_model_classes: model = model_class(config=configs_no_init) for name, param in model.named_parameters(): if param.requires_grad: if "conv.weight" in name or "masked_spec_embed" in name: self.assertTrue( -1.0 <= ((param.data.mean() * 1e9).round() / 1e9).item() <= 1.0, msg="Parameter {} of model {} seems not properly 
initialized".format(name, model_class), ) else: self.assertIn( ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg="Parameter {} of model {} seems not properly initialized".format(name, model_class), ) @slow def test_model_from_pretrained(self): model = Wav2Vec2Model.from_pretrained("facebook/wav2vec2-base-960h") self.assertIsNotNone(model) @require_torch class Wav2Vec2UtilsTest(unittest.TestCase): def test_compute_mask_indices(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 1 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length for _ in range(batch_size)]) attention_mask = torch.ones((batch_size, sequence_length), device=torch_device, dtype=torch.long) attention_mask[:, -sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) self.assertListEqual(mask.sum(axis=-1).tolist(), [mask_prob * sequence_length // 2 for _ in range(batch_size)]) def test_compute_mask_indices_overlap(self): batch_size = 4 sequence_length = 60 mask_prob = 0.5 mask_length = 4 mask = _compute_mask_indices((batch_size, sequence_length), mask_prob, mask_length) # because of overlap there is a range of possible masks for batch_sum in mask.sum(axis=-1): self.assertIn( int(batch_sum), list(range(int(mask_prob // mask_length * sequence_length), int(mask_prob * sequence_length))), ) attention_mask = torch.ones((batch_size, sequence_length), device=torch_device, dtype=torch.long) attention_mask[:, -sequence_length // 2 :] = 0 mask = _compute_mask_indices( (batch_size, sequence_length), mask_prob, mask_length, attention_mask=attention_mask ) # because of overlap there is a range of possible masks for batch_sum in mask.sum(axis=-1): self.assertIn( int(batch_sum), list( range(int(mask_prob // mask_length * sequence_length // 2), int(mask_prob * sequence_length // 2)) ), ) @require_torch @slow @require_datasets @require_soundfile class Wav2Vec2ModelIntegrationTest(unittest.TestCase): def _load_datasamples(self, num_samples): from datasets import load_dataset import soundfile as sf ids = [f"1272-141231-000{i}" for i in range(num_samples)] # map files to raw def map_to_array(batch): speech, _ = sf.read(batch["file"]) batch["speech"] = speech return batch ds = load_dataset("patrickvonplaten/librispeech_asr_dummy", "clean", split="validation") ds = ds.filter(lambda x: x["id"] in ids).sort("id").map(map_to_array) return ds["speech"][:num_samples] def test_inference_ctc_normal(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(1) input_values = processor(input_speech, return_tensors="pt").input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = ["a man said to the universe sir i exist"] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_normal_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h") model.to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True) input_speech = self._load_datasamples(2) inputs = processor(input_speech, 
return_tensors="pt", padding=True, truncation=True) input_values = inputs.input_values.to(torch_device) with torch.no_grad(): logits = model(input_values).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight lowing cloth that was the only garment he wore", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS) def test_inference_ctc_robust_batched(self): model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-large-960h-lv60-self").to(torch_device) processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-large-960h-lv60-self", do_lower_case=True) input_speech = self._load_datasamples(4) inputs = processor(input_speech, return_tensors="pt", padding=True, truncation=True) input_values = inputs.input_values.to(torch_device) attention_mask = inputs.attention_mask.to(torch_device) with torch.no_grad(): logits = model(input_values, attention_mask=attention_mask).logits predicted_ids = torch.argmax(logits, dim=-1) predicted_trans = processor.batch_decode(predicted_ids) EXPECTED_TRANSCRIPTIONS = [ "a man said to the universe sir i exist", "sweat covered brion's body trickling into the tight loin cloth that was the only garment he wore", "the cut on his chest still dripping blood the ache of his overstrained eyes even the soaring arena around him with the thousands of spectators were trivialities not worth thinking about", "his instant panic was followed by a small sharp blow high on his chest", ] self.assertListEqual(predicted_trans, EXPECTED_TRANSCRIPTIONS)
AdaMix/tests/test_modeling_wav2vec2.py/0
{ "file_path": "AdaMix/tests/test_modeling_wav2vec2.py", "repo_id": "AdaMix", "token_count": 10374 }
76
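Wav2Vec2ModelIntegrationTest above transcribes LibriSpeech clips with "facebook/wav2vec2-base-960h" via a greedy argmax over the CTC logits. The sketch below shows the same pipeline on a placeholder waveform; the zero-filled speech array is only a stand-in for the 16 kHz audio the test loads with soundfile:

import numpy as np
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h", do_lower_case=True)
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")
model.eval()

# Placeholder for one second of 16 kHz mono audio; replace with real samples.
speech = np.zeros(16000, dtype=np.float32)
input_values = processor(speech, return_tensors="pt").input_values
with torch.no_grad():
    logits = model(input_values).logits
predicted_ids = torch.argmax(logits, dim=-1)
print(processor.batch_decode(predicted_ids))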
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import BertGenerationTokenizer from transformers.file_utils import cached_property from transformers.testing_utils import require_sentencepiece, require_torch, slow from .test_tokenization_common import TokenizerTesterMixin SPIECE_UNDERLINE = "▁" SAMPLE_VOCAB = os.path.join(os.path.dirname(os.path.abspath(__file__)), "fixtures/test_sentencepiece.model") @require_sentencepiece class BertGenerationTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BertGenerationTokenizer def setUp(self): super().setUp() tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True) tokenizer.save_pretrained(self.tmpdirname) def test_full_tokenizer(self): tokenizer = BertGenerationTokenizer(SAMPLE_VOCAB, keep_accents=True) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual( tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382], ) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual( ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4], ) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def big_tokenizer(self): return BertGenerationTokenizer.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") @slow def test_tokenization_base_easy_symbols(self): symbols = "Hello World!" original_tokenizer_encodings = [18536, 2260, 101] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @slow def test_tokenization_base_hard_symbols(self): symbols = 'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) " [ ] ! : - . 
Also we will add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth' original_tokenizer_encodings = [ 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 34324, 497, 391, 408, 11342, 1244, 385, 100, 938, 985, 456, 574, 362, 12597, 3200, 3129, 1172, ] self.assertListEqual(original_tokenizer_encodings, self.big_tokenizer.encode(symbols)) @require_torch @slow def test_torch_encode_plus_sent_to_model(self): import torch from transformers import BertGenerationConfig, BertGenerationEncoder # Build sequence first_ten_tokens = list(self.big_tokenizer.get_vocab().keys())[:10] sequence = " ".join(first_ten_tokens) encoded_sequence = self.big_tokenizer.encode_plus(sequence, return_tensors="pt", return_token_type_ids=False) batch_encoded_sequence = self.big_tokenizer.batch_encode_plus( [sequence + " " + sequence], return_tensors="pt", return_token_type_ids=False ) config = BertGenerationConfig() model = BertGenerationEncoder(config) assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size with torch.no_grad(): model(**encoded_sequence) model(**batch_encoded_sequence)
AdaMix/tests/test_tokenization_bert_generation.py/0
{ "file_path": "AdaMix/tests/test_tokenization_bert_generation.py", "repo_id": "AdaMix", "token_count": 3378 }
77
# coding=utf-8 # Copyright 2018 Google T5 Authors and HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import SPIECE_UNDERLINE, BatchEncoding, T5Tokenizer, T5TokenizerFast from transformers.file_utils import cached_property, is_tf_available, is_torch_available from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers from .test_tokenization_common import TokenizerTesterMixin SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model") if is_torch_available(): FRAMEWORK = "pt" elif is_tf_available(): FRAMEWORK = "tf" else: FRAMEWORK = "jax" @require_sentencepiece @require_tokenizers class T5TokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = T5Tokenizer rust_tokenizer_class = T5TokenizerFast test_rust_tokenizer = True def setUp(self): super().setUp() # We have a SentencePiece fixture for testing tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokenizer.save_pretrained(self.tmpdirname) def test_full_tokenizer(self): tokenizer = T5Tokenizer(SAMPLE_VOCAB) tokens = tokenizer.tokenize("This is a test") self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382]) tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.") self.assertListEqual( tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "9", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "é", ".", ], ) ids = tokenizer.convert_tokens_to_ids(tokens) self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4]) back_tokens = tokenizer.convert_ids_to_tokens(ids) self.assertListEqual( back_tokens, [ SPIECE_UNDERLINE + "I", SPIECE_UNDERLINE + "was", SPIECE_UNDERLINE + "b", "or", "n", SPIECE_UNDERLINE + "in", SPIECE_UNDERLINE + "", "<unk>", "2", "0", "0", "0", ",", SPIECE_UNDERLINE + "and", SPIECE_UNDERLINE + "this", SPIECE_UNDERLINE + "is", SPIECE_UNDERLINE + "f", "al", "s", "<unk>", ".", ], ) @cached_property def t5_base_tokenizer(self): return T5Tokenizer.from_pretrained("t5-base") @cached_property def t5_base_tokenizer_fast(self): return T5TokenizerFast.from_pretrained("t5-base") def get_tokenizer(self, **kwargs) -> T5Tokenizer: return self.tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs) def get_rust_tokenizer(self, **kwargs) -> T5TokenizerFast: return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, pad_token=None, **kwargs) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "I was born in 92000, and this is falsé." 
tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_eos_treatment(self): tokenizer = self.t5_base_tokenizer batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"]) batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""]) self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"]) def test_prepare_batch(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, tokenizer.eos_token_id] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) self.assertIsInstance(batch, BatchEncoding) if FRAMEWORK != "jax": result = list(batch.input_ids.numpy()[0]) else: result = list(batch.input_ids.tolist()[0]) self.assertListEqual(expected_src_tokens, result) self.assertEqual((2, 9), batch.input_ids.shape) self.assertEqual((2, 9), batch.attention_mask.shape) def test_empty_target_text(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."] batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK) # check if input_ids are returned and no decoder_input_ids self.assertIn("input_ids", batch) self.assertIn("attention_mask", batch) self.assertNotIn("decoder_input_ids", batch) self.assertNotIn("decoder_attention_mask", batch) def test_max_length(self): tokenizer = self.t5_base_tokenizer tgt_text = [ "Summary of the text.", "Another summary.", ] with tokenizer.as_target_tokenizer(): targets = tokenizer( tgt_text, max_length=32, padding="max_length", truncation=True, return_tensors=FRAMEWORK ) self.assertEqual(32, targets["input_ids"].shape[1]) def test_outputs_not_longer_than_maxlen(self): tokenizer = self.t5_base_tokenizer batch = tokenizer( ["I am a small frog" * 1000, "I am a small frog"], padding=True, truncation=True, return_tensors=FRAMEWORK ) self.assertIsInstance(batch, BatchEncoding) self.assertEqual(batch.input_ids.shape, (2, 512)) def test_eos_in_input(self): tokenizer = self.t5_base_tokenizer src_text = ["A long paragraph for summarization. </s>"] tgt_text = ["Summary of the text. 
</s>"] expected_src_tokens = [71, 307, 8986, 21, 4505, 1635, 1707, 5, 1] expected_tgt_tokens = [20698, 13, 8, 1499, 5, 1] batch = tokenizer(src_text) with tokenizer.as_target_tokenizer(): targets = tokenizer(tgt_text) self.assertEqual(expected_src_tokens, batch["input_ids"][0]) self.assertEqual(expected_tgt_tokens, targets["input_ids"][0]) def test_token_type_ids(self): src_text_1 = ["A first paragraph for summarization."] src_text_2 = ["A second paragraph for summarization."] fast_token_type_ids = self.t5_base_tokenizer_fast( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids slow_token_type_ids = self.t5_base_tokenizer( src_text_1, src_text_2, add_special_tokens=True, return_token_type_ids=True ).token_type_ids self.assertEqual(slow_token_type_ids, fast_token_type_ids) self.assertEqual(len(slow_token_type_ids[0]), 18) def test_fast_and_slow_same_result(self): src_text = "<pad> Today is <unk> nice day </s>" tgt_ids = [0, 1960, 19, 2, 1245, 239, 1] tgt_text = "<pad> Today is<unk> nice day</s>" fast_ids = self.t5_base_tokenizer_fast(src_text, add_special_tokens=False).input_ids slow_ids = self.t5_base_tokenizer(src_text, add_special_tokens=False).input_ids self.assertEqual(tgt_ids, fast_ids) self.assertEqual(tgt_ids, slow_ids) fast_text = self.t5_base_tokenizer_fast.decode(fast_ids) slow_text = self.t5_base_tokenizer.decode(fast_ids) self.assertEqual(tgt_text, fast_text) self.assertEqual(tgt_text, slow_text)
AdaMix/tests/test_tokenization_t5.py/0
{ "file_path": "AdaMix/tests/test_tokenization_t5.py", "repo_id": "AdaMix", "token_count": 4402 }
78
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import numpy import pkg_resources from transformers.testing_utils import TestCasePlus from transformers.utils.versions import require_version, require_version_core, require_version_examples numpy_ver = numpy.__version__ python_ver = ".".join([str(x) for x in sys.version_info[:3]]) class DependencyVersionCheckTest(TestCasePlus): def test_core(self): # lt + different version strings require_version_core("numpy<1000.4.5") require_version_core("numpy<1000.4") require_version_core("numpy<1000") # le require_version_core("numpy<=1000.4.5") require_version_core(f"numpy<={numpy_ver}") # eq require_version_core(f"numpy=={numpy_ver}") # ne require_version_core("numpy!=1000.4.5") # ge require_version_core("numpy>=1.0") require_version_core("numpy>=1.0.0") require_version_core(f"numpy>={numpy_ver}") # gt require_version_core("numpy>1.0.0") # requirement w/o version require_version_core("numpy") # unmet requirements due to version conflict for req in ["numpy==1.0.0", "numpy>=1000.0.0", f"numpy<{numpy_ver}"]: try: require_version_core(req) except pkg_resources.VersionConflict as e: self.assertIn(f"{req} is required", str(e)) self.assertIn("but found", str(e)) # unmet requirements due to missing module for req in ["numpipypie>1", "numpipypie2"]: try: require_version_core(req) except pkg_resources.DistributionNotFound as e: self.assertIn(f"The '{req}' distribution was not found and is required by this application", str(e)) self.assertIn("Try: pip install transformers -U", str(e)) # bogus requirements formats: # 1. whole thing for req in ["numpy??1.0.0", "numpy1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("requirement needs to be in the pip package format", str(e)) # 2. only operators for req in ["numpy=1.0.0", "numpy == 1.00", "numpy<>1.0.0", "numpy><1.00", "numpy>>1.0.0"]: try: require_version_core(req) except ValueError as e: self.assertIn("need one of ", str(e)) def test_examples(self): # the main functionality is tested in `test_core`, this is just the hint check try: require_version_examples("numpy>1000.4.5") except pkg_resources.VersionConflict as e: self.assertIn("is required", str(e)) self.assertIn("pip install -r examples/requirements.txt", str(e)) def test_python(self): # matching requirement require_version("python>=3.6.0") # not matching requirements for req in ["python>9.9.9", "python<3.0.0"]: try: require_version_core(req) except pkg_resources.VersionConflict as e: self.assertIn(f"{req} is required", str(e)) self.assertIn(f"but found python=={python_ver}", str(e))
AdaMix/tests/test_versions_utils.py/0
{ "file_path": "AdaMix/tests/test_versions_utils.py", "repo_id": "AdaMix", "token_count": 1673 }
79
--- - step: name: Execute python examples/text-classification/run_glue.py image: pytorch/pytorch:nightly-devel-cuda10.0-cudnn7 command: - python /valohai/repository/utils/download_glue_data.py --data_dir=/glue_data - pip install -e . - pip install -r examples/requirements.txt - python examples/text-classification/run_glue.py --do_train --data_dir=/glue_data/{parameter-value:task_name} {parameters} parameters: - name: model_type pass-as: --model_type={v} type: string default: bert - name: model_name_or_path pass-as: --model_name_or_path={v} type: string default: bert-base-uncased - name: task_name pass-as: --task_name={v} type: string default: MRPC - name: max_seq_length pass-as: --max_seq_length={v} description: The maximum total input sequence length after tokenization. Sequences longer than this will be truncated, sequences shorter will be padded. type: integer default: 128 - name: per_gpu_train_batch_size pass-as: --per_gpu_train_batch_size={v} description: Batch size per GPU/CPU for training. type: integer default: 8 - name: per_gpu_eval_batch_size pass-as: --per_gpu_eval_batch_size={v} description: Batch size per GPU/CPU for evaluation. type: integer default: 8 - name: gradient_accumulation_steps pass-as: --gradient_accumulation_steps={v} description: Number of updates steps to accumulate before performing a backward/update pass. type: integer default: 1 - name: learning_rate pass-as: --learning_rate={v} description: The initial learning rate for Adam. type: float default: 0.00005 - name: adam_epsilon pass-as: --adam_epsilon={v} description: Epsilon for Adam optimizer. type: float default: 0.00000001 - name: max_grad_norm pass-as: --max_grad_norm={v} description: Max gradient norm. type: float default: 1.0 - name: num_train_epochs pass-as: --num_train_epochs={v} description: Total number of training epochs to perform. type: integer default: 3 - name: max_steps pass-as: --max_steps={v} description: If > 0, set total number of training steps to perform. Override num_train_epochs. type: integer default: -1 - name: warmup_steps pass-as: --warmup_steps={v} description: Linear warmup over warmup_steps. type: integer default: -1 - name: logging_steps pass-as: --logging_steps={v} description: Log every X updates steps. type: integer default: 25 - name: save_steps pass-as: --save_steps={v} description: Save checkpoint every X updates steps. type: integer default: -1 - name: output_dir pass-as: --output_dir={v} type: string default: /valohai/outputs - name: evaluation_strategy description: The evaluation strategy to use. type: string default: steps
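# Note (illustrative): Valohai expands each `pass-as` template above with the
# parameter value and substitutes the resulting flags for {parameters} in the
# step command, e.g. the default model_type becomes "--model_type=bert" and
# task_name also fills {parameter-value:task_name} in the --data_dir path.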
AdaMix/valohai.yaml/0
{ "file_path": "AdaMix/valohai.yaml", "repo_id": "AdaMix", "token_count": 1425 }
80
# AirSim Drone Racing Lab <img src="https://github.com/madratman/airsim_neurips_gifs/blob/master/imgs/neurips_b99_3_drones.gif?raw=true" width="275"> <img src="https://github.com/madratman/airsim_neurips_gifs/blob/master/imgs/neurips_soccer_field_8_drones.gif?raw=true" width="275"> <img src="https://github.com/madratman/airsim_neurips_gifs/blob/master/imgs/neurips_zhangjiajie_4_drones.gif?raw=true" width="275"> ## Quickstart - [Linux and Windows Binaries](https://github.com/microsoft/AirSim-Drone-Racing-Lab/releases) - Python API: - [`pip install airsimdroneracinglab`](https://pypi.org/project/airsimdroneracinglab/) - [Documentation](https://microsoft.github.io/AirSim-Drone-Racing-Lab/) Note: If you use this repository in your research, please cite our pre-print, [AirSim Drone Racing Lab](https://arxiv.org/abs/2003.05654). ``` @article{madaan2020airsim, title={AirSim Drone Racing Lab}, author={Madaan, Ratnesh and Gyde, Nicholas and Vemprala, Sai and Brown, Matthew and Nagami, Keiko and Taubner, Tim and Cristofalo, Eric and Scaramuzza, Davide and Schwager, Mac and Kapoor, Ashish}, journal={arXiv preprint arXiv:2003.05654}, year={2020} } ``` ## Getting Binaries - [Linux](docs/using_binaries.md#linux) - [Windows](docs/using_binaries.md#windows) - [Docker on Linux](docs/docker.md) ## Using AirSim Drone Racing Lab - [AirSim Drone Racing Lab Overview](docs/adrl_overview.md) - [API Overview](docs/api_overview.md) - [Baselines](docs/baselines.md) - [Race Monitoring](docs/race_monitoring.md) - [Documentation](https://microsoft.github.io/AirSim-Drone-Racing-Lab) ## Questions Please open a Github Issue on **this** repository (not [AirSim](https://github.com/microsoft/AirSim)) for any technical questions associated with AirSim Drone Racing Lab.
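## Python API quickstart (sketch)

A minimal, untested sketch of how the Python API is typically used once a downloaded binary is running. The level name and vehicle name below are assumptions drawn from the baseline scripts; see the API documentation linked above for authoritative usage.

```
import airsimdroneracinglab as adrl

# Connect to a running ADRL binary (start the downloaded binary first).
client = adrl.MultirotorClient()
client.confirmConnection()

# Level and vehicle names are assumptions - check the docs for the levels
# shipped with your binary release.
client.simLoadLevel("Soccer_Field_Easy")
client.enableApiControl(vehicle_name="drone_1")
client.arm(vehicle_name="drone_1")
client.takeoffAsync(vehicle_name="drone_1").join()
```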
AirSim-Drone-Racing-Lab/README.md/0
{ "file_path": "AirSim-Drone-Racing-Lab/README.md", "repo_id": "AirSim-Drone-Racing-Lab", "token_count": 643 }
81
# Learning Visuomotor Policies for Aerial Navigation Using Cross-Modal Representations

![Teaser](figs/giphy.gif)

This repository provides a code base to evaluate and train models from the paper "*Learning Visuomotor Policies for Aerial Navigation Using Cross-Modal Representations*".

ArXiv pre-print: [https://arxiv.org/abs/1909.06993](https://arxiv.org/abs/1909.06993)

Paper video: https://youtu.be/VKc3A5HlUU8

## License and Citation
This project is licensed under the terms of the MIT license. By using the software, you are agreeing to the terms of the [license agreement](LICENSE).

If you use this code in your research, please cite us as follows:

```
@article{bonatti2020learning,
  title={Learning Visuomotor Policies for Aerial Navigation Using Cross-Modal Representations},
  author={Bonatti, Rogerio and Madaan, Ratnesh and Vineet, Vibhav and Scherer, Sebastian and Kapoor, Ashish},
  journal={arXiv preprint arXiv:1909.06993},
  year={2020}
}
```

## Recommended system
Recommended system (tested):
- Ubuntu 18.04
- Python 2.7.15

Python packages used by the example provided and their recommended versions:
- [airsimdroneracingvae](https://pypi.org/project/airsimdroneracingvae/)==1.0.0
- tensorflow==2.0.0-beta1
- msgpack-rpc-python==0.4.1
- numpy==1.16.4
- matplotlib==2.1.1
- scikit-learn==0.20.4
- scipy==1.2.2
- pandas==0.24.2

## Downloading the drone racing files
In order to train the models and run Airsim you first need to download all image datasets, behavior cloning datasets, network weights and Airsim binaries:
- Download all files and datasets: [Drone Racing files v. 1.0](https://drive.google.com/drive/folders/1NKk_qmLhBW-coqouHrRBPgUkvV-GntSd?usp=sharing)
- Extract all individual files in the folders
- Place the `settings.json` file inside `~/Documents/AirSim` on your computer

## Training and testing the cross-modal VAE representation
In order to train the cross-modal representations you can either use the downloaded image dataset from the previous step, or generate the data yourself using Airsim.

![Teaser](figs/arch.png)

### Training with downloaded dataset
- Go to folder `cmvae`, and inside file `train_cmvae.py` edit variable `data_dir` to the correct path of the extracted dataset on your computer. The default value is the directory with 1K images, but for final training you will need more images, such as the 50K or 300K datasets
- Also, edit variable `output_dir` to the place where you want the models to be saved
- Run
```
train_cmvae.py
```
- Network weights will be saved every 5 epochs by default, and you can check loss values with tensorboard or by looking at the terminal
- Once the network is trained you can evaluate it using another script, which will automatically plot histograms of errors, image reconstructions and latent space interpolations:
```
eval_cmvae.py
```

### Generating your own dataset with Airsim
You may want to generate a custom dataset for training your cross-modal VAE.
Here are the steps to do it:
- Start the Airsim environment from the binary file:
```
$ cd /yourpath/all_files/airsim_binaries/vae_env
$ ./AirSimExe.sh -windowed
```
- If it asks if you want the car model, click `No`
- Inside the file `datagen/img_generator/main.py` first change the desired number of samples and the saved dataset path
- Run the script for generating data:
```
main.py  # inside datagen/img_generator
```
- Once the dataset is generated, follow the previous scripts for training the CM-VAE

## Generating imitation learning data for racing
In order to train the behavior cloning networks you can either use the downloaded image-action pairs dataset or generate the data yourself using Airsim.

![Teaser](figs/process_low.png)

### Training with downloaded dataset
- Go to folder `imitation_learning`, and inside file `train_bc.py` edit variables `base_path`, `data_dir_list`, and `output_dir`. By default you will be using downloaded datasets with 0m to 3m of random gate displacement amplitude over a course with 8m of nominal radius
- Edit the variables relative to the training mode (full end-to-end, latent representation, or regression as latent representation) and the weights path for the latent representations (not applicable for full end-to-end learning)
- Run the script for training the behavior cloning policies:
```
train_bc.py
```

### Generating your own imitation learning dataset with Airsim
You may want to generate a custom dataset for training your behavior cloning policies. Here are the steps to do it:
- Start the Airsim environment from the binary file (not the same one used for generating images for the cross-modal representation!):
```
$ cd /yourpath/all_files/airsim_binaries/recording_env
$ ./AirSimExe.sh -windowed
```
- Inside the file `datagen/action_generator/src/soccer_datagen.py` change the desired meta-parameters (number of gates, track radius, gate displacement noise, etc)
- Run the script for generating data:
```
soccer_datagen.py
```
- Once you're satisfied with the motion, turn off the trajectory visualization parameter `viz_traj`. Otherwise the recorded images will show the motion line
- Once the quad is flying, press `r` on your keyboard to start recording images. Velocities will be automatically recorded. Both are saved inside `~/Documents/AirSim`

Now you'll need to process the raw recording so that you can match the time-stamps from velocity commands and images into a cohesive dataset. To do it:
- Inside `/Documents/AirSim`, copy the recorded files (`moveOnSpline_vel_cmd.txt`, the `images` folder and the `images.txt` file) into a new directory, for example `/all_files/il_datasets/bc_test`.
- In `datagen/action_generator/src/data_processor.py`, modify variable `base_path` to `/all_files/il_datasets/bc_test`. Then run:
```
data_processor.py
```
- Finally, you can train with `train_bc.py` following the previous steps. You can combine different datasets with different noise levels to train the same policy

## Deploying the trained policies
Now you can deploy the trained policies in AirSim, following these steps:
- Start the Airsim environment from the correct binary file:
```
$ cd /yourpath/all_files/airsim_binaries/vae_env
$ ./AirSimExe.sh -windowed
```
- In file `imitation_learning/bc_navigation.py`, modify `policy_type` and `gate_noise`. Then run:
```
bc_navigation.py
```

The policies trained in AirSim using the cross-modal representations can be transferred directly to real-world applications. Please check out the paper and video to see more results from real-life deployment.
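For orientation, the sketch below shows the perception-action loop that deployment builds conceptually: the CM-VAE encoder compresses the camera image into a low-dimensional latent vector, and the behavior-cloning network maps that vector to a velocity command. Class names follow `imitation_learning/train_bc.py` in this repository; the checkpoint paths are placeholders and the exact wiring inside `bc_navigation.py` may differ.

```
# Conceptual sketch only - names follow train_bc.py, checkpoint paths are placeholders
import racing_models

cmvae_model = racing_models.cmvae.CmvaeDirect(n_z=10, gate_dim=4, res=64, trainable_model=True)
cmvae_model.load_weights('/path/to/cmvae_model_40.ckpt')
cmvae_model.trainable = False

bc_model = racing_models.bc_latent.BcLatent()
bc_model.load_weights('/path/to/bc_model_100.ckpt')

def image_to_velocity(image_batch):
    z, _, _ = cmvae_model.encode(image_batch)   # image -> cross-modal latent vector
    return bc_model(z)                          # latent vector -> velocity command
```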
![](figs/main_lowres.png) # Contributing This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA) declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.opensource.microsoft.com. When you submit a pull request, a CLA bot will automatically determine whether you need to provide a CLA and decorate the PR appropriately (e.g., status check, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA. This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [[email protected]](mailto:[email protected]) with any additional questions or comments.
AirSim-Drone-Racing-VAE-Imitation/README.md/0
{ "file_path": "AirSim-Drone-Racing-VAE-Imitation/README.md", "repo_id": "AirSim-Drone-Racing-VAE-Imitation", "token_count": 2060 }
82
import tensorflow as tf import os import sys curr_dir = os.path.dirname(os.path.abspath(__file__)) # imports import_path = os.path.join(curr_dir, '..') sys.path.insert(0, import_path) import racing_models import racing_utils ########################################### # DEFINE TRAINING META PARAMETERS base_path = '/home/rb/all_files' data_dir_list = ['/home/rb/all_files/il_datasets/bc_v5_n0', '/home/rb/all_files/il_datasets/bc_v5_n1', '/home/rb/all_files/il_datasets/bc_v5_n2', '/home/rb/all_files/il_datasets/bc_v5_n3'] output_dir = '/home/rb/all_files/model_outputs/bc_test' training_mode = 'latent' # 'full' or 'latent' or 'reg' cmvae_weights_path = '/home/rb/all_files/model_outputs/cmvae_con/cmvae_model_40.ckpt' # cmvae_weights_path = '/home/rb/all_files/model_outputs/cmvae_unc/cmvae_model_65.ckpt' # cmvae_weights_path = '/home/rb/all_files/model_outputs/cmvae_img/cmvae_model_45.ckpt' # training_mode = 'reg' # 'full' or 'latent' or 'reg' # reg_weights_path = '/home/rb/all_files/model_outputs/reg/reg_model_25.ckpt' # training_mode = 'full' # 'full' or 'latent' or 'reg' # no auxiliary feature extraction weights n_z = 10 batch_size = 32 epochs = 400 img_res = 64 max_size = None # default is None learning_rate = 1e-2 # 1e-2 for latent, 1e-3 for full ########################################### # CUSTOM FUNCTIONS @tf.function def reset_metrics(): train_loss_rec_v.reset_states() test_loss_rec_v.reset_states() @tf.function def compute_loss(labels, predictions): recon_loss = tf.losses.mean_squared_error(labels, predictions) return recon_loss @tf.function def train(images, labels, epoch, training_mode): with tf.GradientTape() as tape: if training_mode == 'full': predictions = bc_model(images) elif training_mode == 'latent': z, _, _ = cmvae_model.encode(images) predictions = bc_model(z) elif training_mode == 'reg': z = reg_model(images) predictions = bc_model(z) recon_loss = tf.reduce_mean(compute_loss(labels, predictions)) gradients = tape.gradient(recon_loss, bc_model.trainable_variables) optimizer.apply_gradients(zip(gradients, bc_model.trainable_variables)) train_loss_rec_v(recon_loss) @tf.function def test(images, labels, training_mode): if training_mode == 'full': predictions = bc_model(images) elif training_mode == 'latent': z, _, _ = cmvae_model.encode(images) predictions = bc_model(z) elif training_mode == 'reg': z = reg_model(images) predictions = bc_model(z) recon_loss = tf.reduce_mean(compute_loss(labels, predictions)) test_loss_rec_v(recon_loss) ########################################### os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' # 0 = all messages are logged (default behavior) # 1 = INFO messages are not printed # 2 = INFO and WARNING messages are not printed # 3 = INFO, WARNING, and ERROR messages are not printed # allow growth is possible using an env var in tf2.0 os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' # load dataset print('Starting dataset') # train_ds, test_ds = racing_utils.dataset_utils.create_dataset_txt(data_dir, batch_size, img_res, data_mode='train') train_ds, test_ds = racing_utils.dataset_utils.create_dataset_multiple_sources(data_dir_list, batch_size, img_res, data_mode='train', base_path=base_path) print('Done with dataset') # create models if training_mode == 'full': bc_model = racing_models.bc_full.BcFull() elif training_mode == 'latent': # cmvae_model = racing_models.cmvae.Cmvae(n_z=n_z, gate_dim=4, res=img_res, trainable_model=True) cmvae_model = racing_models.cmvae.CmvaeDirect(n_z=n_z, gate_dim=4, res=img_res, trainable_model=True) 
cmvae_model.load_weights(cmvae_weights_path) cmvae_model.trainable = False bc_model = racing_models.bc_latent.BcLatent() elif training_mode == 'reg': reg_model = model = racing_models.dronet.Dronet(num_outputs=4, include_top=True) reg_model.load_weights(reg_weights_path) reg_model.trainable = False bc_model = racing_models.bc_latent.BcLatent() optimizer = tf.keras.optimizers.Adam(lr=learning_rate) # define metrics train_loss_rec_v = tf.keras.metrics.Mean(name='train_loss_rec_v') test_loss_rec_v = tf.keras.metrics.Mean(name='test_loss_rec_v') metrics_writer = tf.summary.create_file_writer(output_dir) # check if output folder exists if not os.path.isdir(output_dir): os.makedirs(output_dir) # train print('Start training ...') flag = True for epoch in range(epochs): # print('MODE NOW: {}'.format(mode)) for train_images, train_labels in train_ds: train(train_images, train_labels, epoch, training_mode) if flag: bc_model.summary() flag = False for test_images, test_labels in test_ds: test(test_images, test_labels, training_mode) # save model if epoch % 10 == 0 and epoch > 0: print('Saving weights to {}'.format(output_dir)) bc_model.save_weights(os.path.join(output_dir, "bc_model_{}.ckpt".format(epoch))) with metrics_writer.as_default(): tf.summary.scalar('train_loss_rec_gate', train_loss_rec_v.result(), step=epoch) tf.summary.scalar('test_loss_rec_gate', test_loss_rec_v.result(), step=epoch) print('Epoch {} | Train L_gate: {} | Test L_gate: {}' .format(epoch, train_loss_rec_v.result(), test_loss_rec_v.result())) reset_metrics() # reset all the accumulators of metrics print('bla')
AirSim-Drone-Racing-VAE-Imitation/imitation_learning/train_bc.py/0
{ "file_path": "AirSim-Drone-Racing-VAE-Imitation/imitation_learning/train_bc.py", "repo_id": "AirSim-Drone-Racing-VAE-Imitation", "token_count": 2264 }
83
from utils import AirSimSettingsCreator AirSimSettingsCreator().write_airsim_neurips_baseline_settings_file()
AirSim-NeurIPS2019-Drone-Racing/baselines/generate_settings_file.py/0
{ "file_path": "AirSim-NeurIPS2019-Drone-Racing/baselines/generate_settings_file.py", "repo_id": "AirSim-NeurIPS2019-Drone-Racing", "token_count": 33 }
84
# ------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License in the project root for # license information. # -------------------------------------------------------------------------- VERSION = "0.1.0"
ApplicationInsights-Python/azure-monitor-events-extension/azure/monitor/events/extension/_version.py/0
{ "file_path": "ApplicationInsights-Python/azure-monitor-events-extension/azure/monitor/events/extension/_version.py", "repo_id": "ApplicationInsights-Python", "token_count": 52 }
85
#!/bin/bash set -e get_latest_release() { curl --silent "https://api.github.com/repos/$1/releases/latest" | grep '"tag_name":' | sed -E 's/.*"v([^"]+)".*/\1/' } VERSION=${1:-"$(get_latest_release cli/cli)"} INSTALL_DIR=${2:-"$HOME/.local/bin"} CMD=gh NAME="GitHub CLI" echo -e "\e[34m»»» 📦 \e[32mInstalling \e[33m$NAME \e[35mv$VERSION\e[0m ..." mkdir -p "$INSTALL_DIR" curl -sSL "https://github.com/cli/cli/releases/download/v${VERSION}/gh_${VERSION}_linux_amd64.tar.gz" -o /tmp/gh.tar.gz tar -zxvf /tmp/gh.tar.gz --strip-components 2 -C "$INSTALL_DIR" "gh_${VERSION}_linux_amd64/bin/gh" > /dev/null chmod +x "$INSTALL_DIR/gh" rm -rf /tmp/gh.tar.gz echo -e "\n\e[34m»»» 💾 \e[32mInstalled to: \e[33m$(which $CMD)" echo -e "\e[34m»»» 💡 \e[32mVersion details: \e[39m$($CMD --version)"
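# Usage examples (based on the positional defaults above; the version number is illustrative):
#   ./gh.sh                        # install the latest GitHub CLI release into ~/.local/bin
#   ./gh.sh 2.40.1                 # install a specific version
#   ./gh.sh 2.40.1 /usr/local/bin  # install a specific version into a custom directory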
AzureTRE/.devcontainer/scripts/gh.sh/0
{ "file_path": "AzureTRE/.devcontainer/scripts/gh.sh", "repo_id": "AzureTRE", "token_count": 384 }
86
.PHONY: bootstrap-init mgmt-deploy mgmt-destroy build-api-image push-api-image deploy-tre destroy-tre letsencrypt .DEFAULT_GOAL := help SHELL:=/bin/bash MAKEFILE_FULLPATH := $(abspath $(lastword $(MAKEFILE_LIST))) MAKEFILE_DIR := $(dir $(MAKEFILE_FULLPATH)) IMAGE_NAME_PREFIX?="microsoft/azuretre" ACR_DOMAIN_SUFFIX?=`az cloud show --query suffixes.acrLoginServerEndpoint --output tsv` ACR_NAME?=`echo "$${ACR_NAME}" | tr A-Z a-z` ACR_FQDN?="${ACR_NAME}${ACR_DOMAIN_SUFFIX}" FULL_IMAGE_NAME_PREFIX:=${ACR_FQDN}/${IMAGE_NAME_PREFIX} LINTER_REGEX_INCLUDE?=all # regular expression used to specify which files to include in local linting (defaults to "all") E2E_TESTS_NUMBER_PROCESSES_DEFAULT=4 # can be overridden in e2e_tests/.env target_title = @echo -e "\n\e[34m»»» 🧩 \e[96m$(1)\e[0m..." all: bootstrap mgmt-deploy images tre-deploy ## 🚀 Provision all the application resources from beginning to end tre-deploy: deploy-core build-and-deploy-ui firewall-install db-migrate show-core-output ## 🚀 Provision TRE using existing images images: build-and-push-api build-and-push-resource-processor build-and-push-airlock-processor ## 📦 Build and push all images build-and-push-api: build-api-image push-api-image build-and-push-resource-processor: build-resource-processor-vm-porter-image push-resource-processor-vm-porter-image build-and-push-airlock-processor: build-airlock-processor push-airlock-processor help: ## 💬 This help message :) @grep -E '[a-zA-Z_-]+:.*?## .*$$' $(firstword $(MAKEFILE_LIST)) | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-25s\033[0m %s\n", $$1, $$2}' # to move your environment from the single 'core' deployment (which includes the firewall) # toward the shared services model, where it is split out - run the following make target before a tre-deploy # This will remove + import the resource state into a shared service migrate-firewall-state: prepare-tf-state bootstrap: $(call target_title, "Bootstrap Terraform") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && cd ${MAKEFILE_DIR}/devops/terraform && ./bootstrap.sh mgmt-deploy: $(call target_title, "Deploying management infrastructure") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && cd ${MAKEFILE_DIR}/devops/terraform && ./deploy.sh mgmt-destroy: $(call target_title, "Destroying management infrastructure") \ . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && cd ${MAKEFILE_DIR}/devops/terraform && ./destroy.sh # A recipe for building images. Parameters: # 1. Image name suffix # 2. Version file path # 3. Docker file path # 4. Docker context path # Example: $(call build_image,"api","./api_app/_version.py","api_app/Dockerfile","./api_app/") # The CI_CACHE_ACR_NAME is an optional container registry used for caching in addition to what's in ACR_NAME define build_image $(call target_title, "Building $(1) Image") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && . 
${MAKEFILE_DIR}/devops/scripts/set_docker_sock_permission.sh \ && source <(grep = $(2) | sed 's/ *= */=/g') \ && az acr login -n ${ACR_NAME} \ && if [ -n "$${CI_CACHE_ACR_NAME:-}" ]; then \ az acr login -n $${CI_CACHE_ACR_NAME}; \ ci_cache="--cache-from $${CI_CACHE_ACR_NAME}${ACR_DOMAIN_SUFFIX}/${IMAGE_NAME_PREFIX}/$(1):$${__version__}"; fi \ && docker build -t ${FULL_IMAGE_NAME_PREFIX}/$(1):$${__version__} --build-arg BUILDKIT_INLINE_CACHE=1 \ --cache-from ${FULL_IMAGE_NAME_PREFIX}/$(1):$${__version__} $${ci_cache:-} -f $(3) $(4) endef build-api-image: $(call build_image,"api","${MAKEFILE_DIR}/api_app/_version.py","${MAKEFILE_DIR}/api_app/Dockerfile","${MAKEFILE_DIR}/api_app/") build-resource-processor-vm-porter-image: $(call build_image,"resource-processor-vm-porter","${MAKEFILE_DIR}/resource_processor/_version.py","${MAKEFILE_DIR}/resource_processor/vmss_porter/Dockerfile","${MAKEFILE_DIR}/resource_processor/") build-airlock-processor: $(call build_image,"airlock-processor","${MAKEFILE_DIR}/airlock_processor/_version.py","${MAKEFILE_DIR}/airlock_processor/Dockerfile","${MAKEFILE_DIR}/airlock_processor/") # A recipe for pushing images. Parameters: # 1. Image name suffix # 2. Version file path # Example: $(call push_image,"api","./api_app/_version.py") define push_image $(call target_title, "Pushing $(1) Image") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && . ${MAKEFILE_DIR}/devops/scripts/set_docker_sock_permission.sh \ && source <(grep = $(2) | sed 's/ *= */=/g') \ && az acr login -n ${ACR_NAME} \ && docker push "${FULL_IMAGE_NAME_PREFIX}/$(1):$${__version__}" endef push-api-image: $(call push_image,"api","${MAKEFILE_DIR}/api_app/_version.py") push-resource-processor-vm-porter-image: $(call push_image,"resource-processor-vm-porter","${MAKEFILE_DIR}/resource_processor/_version.py") push-airlock-processor: $(call push_image,"airlock-processor","${MAKEFILE_DIR}/airlock_processor/_version.py") # # These targets are for a graceful migration of Firewall # # from terraform state in Core to a Shared Service. # # See https://github.com/microsoft/AzureTRE/issues/1177 prepare-tf-state: $(call target_title, "Preparing terraform state") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && pushd ${MAKEFILE_DIR}/core/terraform > /dev/null && ../../shared_services/firewall/terraform/remove_state.sh && popd > /dev/null \ && pushd ${MAKEFILE_DIR}/templates/shared_services/firewall/terraform > /dev/null && ./import_state.sh && popd > /dev/null # / End migration targets deploy-core: tre-start $(call target_title, "Deploying TRE") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && rm -fr ~/.config/tre/environment.json \ && if [[ "$${TF_LOG}" == "DEBUG" ]]; \ then echo "TF DEBUG set - output supressed - see tflogs container for log file" && cd ${MAKEFILE_DIR}/core/terraform/ \ && ./deploy.sh 1>/dev/null 2>/dev/null; \ else cd ${MAKEFILE_DIR}/core/terraform/ && ./deploy.sh; fi; letsencrypt: $(call target_title, "Requesting LetsEncrypt SSL certificate") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,certbot,env \ && pushd ${MAKEFILE_DIR}/core/terraform/ > /dev/null && . ./outputs.sh && popd > /dev/null \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${MAKEFILE_DIR}/core/private.env \ && ${MAKEFILE_DIR}/core/terraform/scripts/letsencrypt.sh tre-start: ## ⏩ Start the TRE Service $(call target_title, "Starting TRE") \ && . 
${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && ${MAKEFILE_DIR}/devops/scripts/control_tre.sh start tre-stop: ## ⛔ Stop the TRE Service $(call target_title, "Stopping TRE") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && ${MAKEFILE_DIR}/devops/scripts/control_tre.sh stop tre-destroy: ## 🧨 Destroy the TRE Service $(call target_title, "Destroying TRE") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && . ${MAKEFILE_DIR}/devops/scripts/destroy_env_no_terraform.sh terraform-deploy: $(call target_title, "Deploying ${DIR} with Terraform") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && . ${MAKEFILE_DIR}/devops/scripts/load_and_validate_env.sh \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${DIR}/.env \ && cd ${DIR}/terraform/ && ./deploy.sh terraform-import: $(call target_title, "Importing ${DIR} with Terraform") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && cd ${DIR}/terraform/ && ./import.sh terraform-destroy: $(call target_title, "Destroying ${DIR} Service") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && . ${MAKEFILE_DIR}/devops/scripts/load_and_validate_env.sh \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${DIR}/.env \ && cd ${DIR}/terraform/ && ./destroy.sh # This will validate all files, not only the changed ones as the CI version does. lint: ## 🧹 Lint all files $(call target_title, "Linting") @terraform fmt -check -recursive -diff @# LOG_LEVEL=NOTICE reduces noise but it might also seem like the process is stuck - it's not... @docker run --name superlinter --pull=always --rm \ -e RUN_LOCAL=true \ -e LOG_LEVEL=NOTICE \ -e VALIDATE_MARKDOWN=true \ -e VALIDATE_PYTHON_FLAKE8=true \ -e VALIDATE_YAML=true \ -e VALIDATE_TERRAFORM_TFLINT=true \ -e VALIDATE_JAVA=true \ -e JAVA_FILE_NAME=checkstyle.xml \ -e VALIDATE_BASH=true \ -e VALIDATE_BASH_EXEC=true \ -e VALIDATE_GITHUB_ACTIONS=true \ -e VALIDATE_DOCKERFILE_HADOLINT=true \ -e VALIDATE_TSX=true \ -e VALIDATE_TYPESCRIPT_ES=true \ -e FILTER_REGEX_INCLUDE=${LINTER_REGEX_INCLUDE} \ -v $${LOCAL_WORKSPACE_FOLDER}:/tmp/lint \ github/super-linter:slim-v5.0.0 lint-docs: LINTER_REGEX_INCLUDE='./docs/.*\|./mkdocs.yml' $(MAKE) lint # check-params is called at the end since it needs the bundle image, # so we build it first and then run the check. bundle-build: $(call target_title, "Building ${DIR} bundle with Porter") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh porter,env \ && . ${MAKEFILE_DIR}/devops/scripts/set_docker_sock_permission.sh \ && cd ${DIR} \ && if [ -d terraform ]; then terraform -chdir=terraform init -backend=false; terraform -chdir=terraform validate; fi \ && FULL_IMAGE_NAME_PREFIX=${FULL_IMAGE_NAME_PREFIX} IMAGE_NAME_PREFIX=${IMAGE_NAME_PREFIX} \ ${MAKEFILE_DIR}/devops/scripts/bundle_runtime_image_build.sh \ && porter build \ $(MAKE) bundle-check-params bundle-install: bundle-check-params $(call target_title, "Deploying ${DIR} with Porter") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh porter,env \ && . ${MAKEFILE_DIR}/devops/scripts/load_and_validate_env.sh \ && cd ${DIR} \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh .env \ && porter parameters apply parameters.json \ && porter credentials apply ${MAKEFILE_DIR}/resource_processor/vmss_porter/aad_auth_local_debugging.json \ && porter credentials apply ${MAKEFILE_DIR}/resource_processor/vmss_porter/arm_auth_local_debugging.json \ && . 
${MAKEFILE_DIR}/devops/scripts/porter_local_env.sh \ && porter install --autobuild-disabled --parameter-set $$(yq ".name" porter.yaml) \ --credential-set arm_auth \ --credential-set aad_auth \ --debug # Validates that the parameters file is synced with the bundle. # The file is used when installing the bundle from a local machine. # We remove arm_use_msi on both sides since it shouldn't take effect locally anyway. bundle-check-params: $(call target_title, "Checking bundle parameters in ${DIR}") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,porter \ && cd ${DIR} \ && if [ ! -f "parameters.json" ]; then echo "Error - please create a parameters.json file."; exit 1; fi \ && if [ "$$(jq -r '.name' parameters.json)" != "$$(yq eval '.name' porter.yaml)" ]; then echo "Error - ParameterSet name isn't equal to bundle's name."; exit 1; fi \ && if ! porter explain --autobuild-disabled > /dev/null; then echo "Error - porter explain issue!"; exit 1; fi \ && comm_output=$$(set -o pipefail && comm -3 --output-delimiter=: <(porter explain --autobuild-disabled -ojson | jq -r '.parameters[].name | select (. != "arm_use_msi")' | sort) <(jq -r '.parameters[].name | select(. != "arm_use_msi")' parameters.json | sort)) \ && if [ ! -z "$${comm_output}" ]; \ then echo -e "*** Add to params ***:*** Remove from params ***\n$$comm_output" | column -t -s ":"; exit 1; \ else echo "parameters.json file up-to-date."; fi bundle-uninstall: $(call target_title, "Uninstalling ${DIR} with Porter") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh porter,env \ && . ${MAKEFILE_DIR}/devops/scripts/load_and_validate_env.sh \ && cd ${DIR} \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh .env \ && porter parameters apply parameters.json \ && porter credentials apply ${MAKEFILE_DIR}/resource_processor/vmss_porter/aad_auth_local_debugging.json \ && porter credentials apply ${MAKEFILE_DIR}/resource_processor/vmss_porter/arm_auth_local_debugging.json \ && porter uninstall --autobuild-disabled --parameter-set $$(yq ".name" porter.yaml) \ --credential-set arm_auth \ --credential-set aad_auth \ --debug bundle-custom-action: $(call target_title, "Performing:${ACTION} ${DIR} with Porter") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh porter,env \ && . ${MAKEFILE_DIR}/devops/scripts/load_and_validate_env.sh \ && cd ${DIR} && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh .env \ && porter parameters apply parameters.json \ && porter credentials apply ${MAKEFILE_DIR}/resource_processor/vmss_porter/aad_auth_local_debugging.json \ && porter credentials apply ${MAKEFILE_DIR}/resource_processor/vmss_porter/arm_auth_local_debugging.json \ && porter invoke --autobuild-disabled --action ${ACTION} --parameter-set $$(yq ".name" porter.yaml) \ --credential-set arm_auth \ --credential-set aad_auth \ --debug bundle-publish: $(call target_title, "Publishing ${DIR} bundle with Porter") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh porter,env \ && . ${MAKEFILE_DIR}/devops/scripts/set_docker_sock_permission.sh \ && az acr login --name ${ACR_NAME} \ && cd ${DIR} \ && FULL_IMAGE_NAME_PREFIX=${FULL_IMAGE_NAME_PREFIX} \ ${MAKEFILE_DIR}/devops/scripts/bundle_runtime_image_push.sh \ && porter publish --registry "${ACR_FQDN}" --force bundle-register: $(call target_title, "Registering ${DIR} bundle") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh porter,env \ && . 
${MAKEFILE_DIR}/devops/scripts/set_docker_sock_permission.sh \ && az acr login --name ${ACR_NAME} \ && ${MAKEFILE_DIR}/devops/scripts/ensure_cli_signed_in.sh $${TRE_URL} \ && cd ${DIR} \ && ${MAKEFILE_DIR}/devops/scripts/register_bundle_with_api.sh --acr-name "${ACR_NAME}" --bundle-type "$${BUNDLE_TYPE}" \ --current --verify \ --workspace-service-name "$${WORKSPACE_SERVICE_NAME}" workspace_bundle: $(MAKE) bundle-build bundle-publish bundle-register \ DIR="${MAKEFILE_DIR}/templates/workspaces/${BUNDLE}" BUNDLE_TYPE=workspace workspace_service_bundle: $(MAKE) bundle-build bundle-publish bundle-register \ DIR="${MAKEFILE_DIR}/templates/workspace_services/${BUNDLE}" BUNDLE_TYPE=workspace_service shared_service_bundle: $(MAKE) bundle-build bundle-publish bundle-register \ DIR="${MAKEFILE_DIR}/templates/shared_services/${BUNDLE}" BUNDLE_TYPE=shared_service user_resource_bundle: $(MAKE) bundle-build bundle-publish bundle-register \ DIR="${MAKEFILE_DIR}/templates/workspace_services/${WORKSPACE_SERVICE}/user_resources/${BUNDLE}" BUNDLE_TYPE=user_resource WORKSPACE_SERVICE_NAME=tre-service-${WORKSPACE_SERVICE} bundle-publish-register-all: ${MAKEFILE_DIR}/devops/scripts/publish_and_register_all_bundles.sh deploy-shared-service: $(call target_title, "Deploying ${DIR} shared service") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh porter,env \ && ${MAKEFILE_DIR}/devops/scripts/ensure_cli_signed_in.sh $${TRE_URL} \ && cd ${DIR} \ && ${MAKEFILE_DIR}/devops/scripts/deploy_shared_service.sh $${PROPS} firewall-install: $(MAKE) bundle-build bundle-publish bundle-register deploy-shared-service \ DIR=${MAKEFILE_DIR}/templates/shared_services/firewall/ BUNDLE_TYPE=shared_service static-web-upload: $(call target_title, "Uploading to static website") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && pushd ${MAKEFILE_DIR}/core/terraform/ > /dev/null && . ./outputs.sh && popd > /dev/null \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${MAKEFILE_DIR}/core/private.env \ && ${MAKEFILE_DIR}/devops/scripts/upload_static_web.sh build-and-deploy-ui: $(call target_title, "Build and deploy UI") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && pushd ${MAKEFILE_DIR}/core/terraform/ > /dev/null && . ./outputs.sh && popd > /dev/null \ && . 
${MAKEFILE_DIR}/devops/scripts/load_env.sh ${MAKEFILE_DIR}/core/private.env \ && if [ "$${DEPLOY_UI}" != "false" ]; then ${MAKEFILE_DIR}/devops/scripts/build_deploy_ui.sh; else echo "UI Deploy skipped as DEPLOY_UI is false"; fi \ prepare-for-e2e: $(MAKE) workspace_bundle BUNDLE=base $(MAKE) workspace_service_bundle BUNDLE=guacamole $(MAKE) shared_service_bundle BUNDLE=gitea $(MAKE) user_resource_bundle WORKSPACE_SERVICE=guacamole BUNDLE=guacamole-azure-windowsvm $(MAKE) user_resource_bundle WORKSPACE_SERVICE=guacamole BUNDLE=guacamole-azure-linuxvm test-e2e-smoke: ## 🧪 Run E2E smoke tests $(call target_title, "Running E2E smoke tests") && \ $(MAKE) test-e2e-custom SELECTOR=smoke test-e2e-extended: ## 🧪 Run E2E extended tests $(call target_title, "Running E2E extended tests") && \ $(MAKE) test-e2e-custom SELECTOR=extended test-e2e-extended-aad: ## 🧪 Run E2E extended AAD tests $(call target_title, "Running E2E extended AAD tests") && \ $(MAKE) test-e2e-custom SELECTOR=extended_aad test-e2e-shared-services: ## 🧪 Run E2E shared service tests $(call target_title, "Running E2E shared service tests") && \ $(MAKE) test-e2e-custom SELECTOR=shared_services test-e2e-custom: ## 🧪 Run E2E tests with custom selector (SELECTOR=) $(call target_title, "Running E2E tests with custom selector ${SELECTOR}") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env,auth \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${MAKEFILE_DIR}/e2e_tests/.env \ && cd ${MAKEFILE_DIR}/e2e_tests \ && \ if [[ -n "$${E2E_TESTS_NUMBER_PROCESSES}" && "$${E2E_TESTS_NUMBER_PROCESSES}" -ne 1 ]]; then \ python -m pytest -n "$${E2E_TESTS_NUMBER_PROCESSES}" -m "${SELECTOR}" --verify $${IS_API_SECURED:-true} --junit-xml "pytest_e2e_$${SELECTOR// /_}.xml"; \ elif [[ "$${E2E_TESTS_NUMBER_PROCESSES}" -eq 1 ]]; then \ python -m pytest -m "${SELECTOR}" --verify $${IS_API_SECURED:-true} --junit-xml "pytest_e2e_$${SELECTOR// /_}.xml"; \ else \ python -m pytest -n "${E2E_TESTS_NUMBER_PROCESSES_DEFAULT}" -m "${SELECTOR}" --verify $${IS_API_SECURED:-true} --junit-xml "pytest_e2e_$${SELECTOR// /_}.xml"; fi setup-local-debugging: ## 🛠️ Setup local debugging $(call target_title,"Setting up the ability to debug the API and Resource Processor") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && pushd ${MAKEFILE_DIR}/core/terraform/ > /dev/null && . ./outputs.sh && popd > /dev/null \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${MAKEFILE_DIR}/core/private.env \ && . ${MAKEFILE_DIR}/devops/scripts/setup_local_debugging.sh auth: ## 🔐 Create the necessary Azure Active Directory assets $(call target_title,"Setting up Azure Active Directory") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && ${MAKEFILE_DIR}/devops/scripts/create_aad_assets.sh show-core-output: $(call target_title,"Display TRE core output") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh env \ && pushd ${MAKEFILE_DIR}/core/terraform/ > /dev/null && terraform show && popd > /dev/null api-healthcheck: $(call target_title,"Checking API Health") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${MAKEFILE_DIR}/core/private.env \ && ${MAKEFILE_DIR}/devops/scripts/api_healthcheck.sh db-migrate: api-healthcheck ## 🗄️ Run database migrations $(call target_title,"Migrating Cosmos Data") \ && . ${MAKEFILE_DIR}/devops/scripts/check_dependencies.sh nodocker,env \ && pushd ${MAKEFILE_DIR}/core/terraform/ > /dev/null && . 
./outputs.sh && popd > /dev/null \ && . ${MAKEFILE_DIR}/devops/scripts/load_env.sh ${MAKEFILE_DIR}/core/private.env \ && . ${MAKEFILE_DIR}/devops/scripts/get_access_token.sh \ && . ${MAKEFILE_DIR}/devops/scripts/migrate_state_store.sh --tre_url $${TRE_URL} --insecure
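# Example invocations (all targets are defined above):
#   make all                           # bootstrap, deploy management infra, build images and deploy the TRE
#   make images                        # build and push the API, resource processor and airlock processor images
#   make workspace_bundle BUNDLE=base  # build, publish and register the base workspace bundle
#   make test-e2e-smoke                # run the end-to-end smoke tests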
AzureTRE/Makefile/0
{ "file_path": "AzureTRE/Makefile", "repo_id": "AzureTRE", "token_count": 7647 }
87
__version__ = "0.7.1"
AzureTRE/airlock_processor/_version.py/0
{ "file_path": "AzureTRE/airlock_processor/_version.py", "repo_id": "AzureTRE", "token_count": 12 }
88
__version__ = "0.18.6"
AzureTRE/api_app/_version.py/0
{ "file_path": "AzureTRE/api_app/_version.py", "repo_id": "AzureTRE", "token_count": 12 }
89
from datetime import datetime

from dateutil.relativedelta import relativedelta
from fastapi import APIRouter, Depends, Query, HTTPException, status
from fastapi.responses import JSONResponse
from typing import Optional
from pydantic import UUID4

from models.schemas.costs import get_cost_report_responses, get_workspace_cost_report_responses
from core import config
from api.helpers import get_repository
from db.repositories.shared_services import SharedServiceRepository
from db.repositories.user_resources import UserResourceRepository
from db.repositories.workspace_services import WorkspaceServiceRepository
from db.repositories.workspaces import WorkspaceRepository
from models.domain.costs import CostReport, GranularityEnum, WorkspaceCostReport
from resources import strings
from services.authentication import get_current_admin_user, get_current_workspace_owner_or_tre_admin
from services.cost_service import CostService, ServiceUnavailable, SubscriptionNotSupported, TooManyRequests, WorkspaceDoesNotExist, cost_service_factory
from services.logging import logger

costs_core_router = APIRouter(dependencies=[Depends(get_current_admin_user)])
costs_workspace_router = APIRouter(dependencies=[Depends(get_current_workspace_owner_or_tre_admin)])


def validate_report_period(from_date: Optional[datetime], to_date: Optional[datetime]):
    if from_date is None and to_date is None:
        # valid option, month to date report
        return

    if to_date is None or (from_date is not None and from_date >= to_date):
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=strings.API_GET_COSTS_TO_DATE_NEED_TO_BE_LATER_THEN_FROM_DATE)
    if from_date is None:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=strings.API_GET_COSTS_FROM_DATE_NEED_TO_BE_BEFORE_TO_DATE)
    if relativedelta(to_date, from_date).years > 0:
        raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=strings.API_GET_COSTS_MAX_TIME_PERIOD)


class CostsQueryParams:
    def __init__(
        self,
        from_date: Optional[datetime] = Query(default=None, description="The start date to pull data from, required if to_date is set, otherwise the report will return month to date (iso-8601, UTC)."),
        to_date: Optional[datetime] = Query(default=None, description="The end date to pull data to, required if from_date is set, otherwise the report will return month to date (iso-8601, UTC)."),
        granularity: GranularityEnum = Query(default="None", description="The granularity of rows in the query.")
    ):
        self.from_date = from_date
        self.to_date = to_date
        self.granularity = granularity


@costs_core_router.get("/costs", response_model=CostReport, name=strings.API_GET_COSTS, responses=get_cost_report_responses())
async def costs(
        params: CostsQueryParams = Depends(),
        cost_service: CostService = Depends(cost_service_factory),
        workspace_repo=Depends(get_repository(WorkspaceRepository)),
        shared_services_repo=Depends(get_repository(SharedServiceRepository))) -> CostReport:
    validate_report_period(params.from_date, params.to_date)
    try:
        return await cost_service.query_tre_costs(
            config.TRE_ID, params.granularity, params.from_date, params.to_date, workspace_repo, shared_services_repo)
    except SubscriptionNotSupported:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.API_GET_COSTS_SUBSCRIPTION_NOT_SUPPORTED)
    except TooManyRequests as e:
        return JSONResponse(content={
            "error": {
                "code": "429",
                "message": strings.API_GET_COSTS_TOO_MANY_REQUESTS,
                "retry-after": str(e.retry_after)
            }}, status_code=429, headers={"Retry-After": str(e.retry_after)})
    except ServiceUnavailable as e:
        return JSONResponse(content={
            "error": {
                "code": "503",
                "message": strings.API_GET_COSTS_SERVICE_UNAVAILABLE,
                "retry-after": str(e.retry_after)
            }}, status_code=503, headers={"Retry-After": str(e.retry_after)})
    except Exception:
        logger.exception("Failed to query Azure TRE costs")
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=strings.API_GET_COSTS_INTERNAL_SERVER_ERROR)


@costs_workspace_router.get("/workspaces/{workspace_id}/costs", response_model=WorkspaceCostReport,
                            name=strings.API_GET_WORKSPACE_COSTS,
                            dependencies=[Depends(get_current_workspace_owner_or_tre_admin)],
                            responses=get_workspace_cost_report_responses())
async def workspace_costs(workspace_id: UUID4, params: CostsQueryParams = Depends(),
                          cost_service: CostService = Depends(cost_service_factory),
                          workspace_repo=Depends(get_repository(WorkspaceRepository)),
                          workspace_services_repo=Depends(get_repository(WorkspaceServiceRepository)),
                          user_resource_repo=Depends(get_repository(UserResourceRepository))) -> WorkspaceCostReport:
    validate_report_period(params.from_date, params.to_date)
    try:
        return await cost_service.query_tre_workspace_costs(
            str(workspace_id), params.granularity, params.from_date, params.to_date,
            workspace_repo, workspace_services_repo, user_resource_repo)
    except WorkspaceDoesNotExist:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST)
    except SubscriptionNotSupported:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.API_GET_COSTS_SUBSCRIPTION_NOT_SUPPORTED)
    except TooManyRequests as e:
        return JSONResponse(content={
            "error": {
                "code": "429",
                "message": strings.API_GET_COSTS_TOO_MANY_REQUESTS,
                "retry-after": str(e.retry_after)
            }}, status_code=429, headers={"Retry-After": str(e.retry_after)})
    except ServiceUnavailable as e:
        return JSONResponse(content={
            "error": {
                "code": "503",
                "message": strings.API_GET_COSTS_SERVICE_UNAVAILABLE,
                "retry-after": str(e.retry_after)
            }}, status_code=503, headers={"Retry-After": str(e.retry_after)})
    except Exception:
        logger.exception("Failed to query Azure TRE costs")
        raise HTTPException(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=strings.API_GET_COSTS_INTERNAL_SERVER_ERROR)
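# Example (illustrative only): querying these endpoints from a client.
# The base URL, route prefix and granularity value are assumptions - they depend
# on how the routers above are mounted and on the TRE deployment configuration.
#
#   GET {TRE_URL}/costs?from_date=2023-01-01T00:00:00Z&to_date=2023-01-31T00:00:00Z&granularity=Daily
#   GET {TRE_URL}/workspaces/{workspace_id}/costs
#
# Both calls require an "Authorization: Bearer <token>" header for a user holding
# the TRE admin (or workspace owner) role enforced by the router dependencies above.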
AzureTRE/api_app/api/routes/costs.py/0
{ "file_path": "AzureTRE/api_app/api/routes/costs.py", "repo_id": "AzureTRE", "token_count": 3055 }
90
import uuid
from typing import List, Optional, Tuple

from pydantic import parse_obj_as

from db.repositories.resources_history import ResourceHistoryRepository
from models.domain.resource_template import ResourceTemplate
from models.domain.authentication import User
from db.repositories.resource_templates import ResourceTemplateRepository
from db.repositories.resources import ResourceRepository, IS_NOT_DELETED_CLAUSE
from db.repositories.operations import OperationRepository
from models.domain.workspace_service import WorkspaceService
from models.schemas.resource import ResourcePatch
from models.schemas.workspace_service import WorkspaceServiceInCreate
from db.errors import ResourceIsNotDeployed, EntityDoesNotExist
from models.domain.resource import ResourceType


class WorkspaceServiceRepository(ResourceRepository):
    @classmethod
    async def create(cls):
        cls = WorkspaceServiceRepository()
        await super().create()
        return cls

    @staticmethod
    def workspace_services_query(workspace_id: str):
        return f'SELECT * FROM c WHERE c.resourceType = "{ResourceType.WorkspaceService}" AND c.workspaceId = "{workspace_id}"'

    @staticmethod
    def active_workspace_services_query(workspace_id: str):
        return f'SELECT * FROM c WHERE {IS_NOT_DELETED_CLAUSE} AND c.resourceType = "{ResourceType.WorkspaceService}" AND c.workspaceId = "{workspace_id}"'

    async def get_active_workspace_services_for_workspace(self, workspace_id: str) -> List[WorkspaceService]:
        """
        returns list of "non-deleted" workspace services linked to this workspace
        """
        query = WorkspaceServiceRepository.active_workspace_services_query(workspace_id)
        workspace_services = await self.query(query=query)
        return parse_obj_as(List[WorkspaceService], workspace_services)

    async def get_deployed_workspace_service_by_id(self, workspace_id: str, service_id: str, operations_repo: OperationRepository) -> WorkspaceService:
        workspace_service = await self.get_workspace_service_by_id(workspace_id, service_id)

        if (not await operations_repo.resource_has_deployed_operation(resource_id=service_id)):
            raise ResourceIsNotDeployed

        return workspace_service

    async def get_workspace_service_by_id(self, workspace_id: str, service_id: str) -> WorkspaceService:
        query = self.workspace_services_query(workspace_id) + f' AND c.id = "{service_id}"'
        workspace_services = await self.query(query=query)
        if not workspace_services:
            raise EntityDoesNotExist
        return parse_obj_as(WorkspaceService, workspace_services[0])

    def get_workspace_service_spec_params(self):
        return self.get_resource_base_spec_params()

    async def create_workspace_service_item(self, workspace_service_input: WorkspaceServiceInCreate, workspace_id: str, user_roles: Optional[List[str]] = None) -> Tuple[WorkspaceService, ResourceTemplate]:
        full_workspace_service_id = str(uuid.uuid4())

        template = await self.validate_input_against_template(workspace_service_input.templateName, workspace_service_input, ResourceType.WorkspaceService, user_roles)

        # we don't want something in the input to overwrite the system parameters, so dict.update can't work.
        resource_spec_parameters = {**workspace_service_input.properties, **self.get_workspace_service_spec_params()}

        workspace_service = WorkspaceService(
            id=full_workspace_service_id,
            workspaceId=workspace_id,
            templateName=workspace_service_input.templateName,
            templateVersion=template.version,
            properties=resource_spec_parameters,
            resourcePath=f'/workspaces/{workspace_id}/workspace-services/{full_workspace_service_id}',
            etag=''
        )

        return workspace_service, template

    async def patch_workspace_service(self, workspace_service: WorkspaceService, workspace_service_patch: ResourcePatch, etag: str, resource_template_repo: ResourceTemplateRepository, resource_history_repo: ResourceHistoryRepository, user: User, force_version_update: bool) -> Tuple[WorkspaceService, ResourceTemplate]:
        # get workspace service template
        workspace_service_template = await resource_template_repo.get_template_by_name_and_version(workspace_service.templateName, workspace_service.templateVersion, ResourceType.WorkspaceService)
        return await self.patch_resource(workspace_service, workspace_service_patch, workspace_service_template, etag, resource_template_repo, resource_history_repo, user, force_version_update)
AzureTRE/api_app/db/repositories/workspace_services.py/0
{ "file_path": "AzureTRE/api_app/db/repositories/workspace_services.py", "repo_id": "AzureTRE", "token_count": 1530 }
91
from enum import Enum


class RequestAction(str, Enum):
    Install = "install"
    UnInstall = "uninstall"
    Upgrade = "upgrade"
AzureTRE/api_app/models/domain/request_action.py/0
{ "file_path": "AzureTRE/api_app/models/domain/request_action.py", "repo_id": "AzureTRE", "token_count": 47 }
92
from typing import List

from pydantic import BaseModel, Field

from models.domain.operation import Operation


def get_sample_operation(operation_id: str) -> dict:
    return {
        "id": operation_id,
        "resourceId": "933ad738-7265-4b5f-9eae-a1a62928772e",
        "resourcePath": "/workspaces/933ad738-7265-4b5f-9eae-a1a62928772e",
        "resourceVersion": 0,
        "status": "awaiting_deployment",
        "action": "install",
        "message": "",
        "createdWhen": 1642611942.423857,
        "updatedWhen": 1642611942.423857,
        "steps": [
            {
                "stepId": "main",
                "stepTitle": "deployment for main",
                "resourceId": "933ad738-7265-4b5f-9eae-a1a62928772e",
                "resourceTemplateName": "tre-workspace-base",
                "resourceType": "workspace",
                "resourceAction": "install",
                "status": "awaiting_deployment",
                "updatedWhen": 1642611942.423857
            }
        ]
    }


class OperationInResponse(BaseModel):
    operation: Operation

    class Config:
        schema_extra = {
            "example": {
                "operation": get_sample_operation("7ac667f0-fd3f-4a6c-815b-82d0cb7a2132")
            }
        }


class OperationInList(BaseModel):
    operations: List[Operation] = Field([], title="Operations")

    class Config:
        schema_extra = {
            "example": {
                "operations": [
                    get_sample_operation("7ac667f0-fd3f-4a6c-815b-82d0cb7a2132"),
                    get_sample_operation("640488fe-9408-4b9f-a239-3b03bc0c5df0")
                ]
            }
        }
AzureTRE/api_app/models/schemas/operation.py/0
{ "file_path": "AzureTRE/api_app/models/schemas/operation.py", "repo_id": "AzureTRE", "token_count": 882 }
93
# RG
CORE_RESOURCE_GROUP_NAME = "rg-{}"
WORKSPACE_RESOURCE_GROUP_NAME = "rg-{}-ws-{}"

IMPORT_TYPE = "import"
EXPORT_TYPE = "export"

# Import
STORAGE_ACCOUNT_NAME_IMPORT_EXTERNAL = "stalimex{}"
STORAGE_ACCOUNT_NAME_IMPORT_INPROGRESS = "stalimip{}"
STORAGE_ACCOUNT_NAME_IMPORT_APPROVED = "stalimappws{}"
STORAGE_ACCOUNT_NAME_IMPORT_REJECTED = "stalimrej{}"
STORAGE_ACCOUNT_NAME_IMPORT_BLOCKED = "stalimblocked{}"

# Export
STORAGE_ACCOUNT_NAME_EXPORT_INTERNAL = "stalexintws{}"
STORAGE_ACCOUNT_NAME_EXPORT_INPROGRESS = "stalexipws{}"
STORAGE_ACCOUNT_NAME_EXPORT_APPROVED = "stalexapp{}"
STORAGE_ACCOUNT_NAME_EXPORT_REJECTED = "stalexrejws{}"
STORAGE_ACCOUNT_NAME_EXPORT_BLOCKED = "stalexblockedws{}"
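
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch showing how these naming templates are typically filled in.
# The identifiers below ("mytre", "ab12") are hypothetical placeholders; the real
# values are supplied elsewhere in the TRE code base.
if __name__ == "__main__":
    tre_id = "mytre"             # hypothetical TRE identifier
    short_workspace_id = "ab12"  # hypothetical short workspace suffix

    print(CORE_RESOURCE_GROUP_NAME.format(tre_id))                            # e.g. rg-mytre
    print(WORKSPACE_RESOURCE_GROUP_NAME.format(tre_id, short_workspace_id))   # e.g. rg-mytre-ws-ab12
    print(STORAGE_ACCOUNT_NAME_IMPORT_EXTERNAL.format(tre_id))                # e.g. stalimexmytre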
AzureTRE/api_app/resources/constants.py/0
{ "file_path": "AzureTRE/api_app/resources/constants.py", "repo_id": "AzureTRE", "token_count": 313 }
94
from abc import abstractmethod
from typing import List

from fastapi.security import OAuth2AuthorizationCodeBearer

from models.domain.workspace import Workspace, WorkspaceRole
from models.domain.authentication import User, RoleAssignment


class AuthConfigValidationError(Exception):
    """Raised when the input auth information is invalid"""


class AccessService(OAuth2AuthorizationCodeBearer):
    @abstractmethod
    def extract_workspace_auth_information(self, data: dict) -> dict:
        pass

    @abstractmethod
    def get_identity_role_assignments(self, user_id: str) -> dict:
        pass

    @abstractmethod
    def get_workspace_role_assignment_details(self, workspace: Workspace) -> dict:
        pass

    @staticmethod
    @abstractmethod
    def get_workspace_role(user: User, workspace: Workspace, user_role_assignments: List[RoleAssignment]) -> WorkspaceRole:
        pass
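
# --- Illustrative sketch (not part of the original module) ---
# A minimal, hypothetical concrete implementation showing how the abstract
# interface above is meant to be satisfied. The real implementation in the TRE
# wraps an identity provider (Microsoft Entra ID); the return values below are
# placeholders only, and WorkspaceRole.NoRole is assumed to be a member of the
# WorkspaceRole enum.
class StubAccessService(AccessService):
    def extract_workspace_auth_information(self, data: dict) -> dict:
        # echo back only the field(s) a workspace would need for auth
        return {"client_id": data.get("client_id")}

    def get_identity_role_assignments(self, user_id: str) -> dict:
        return {}

    def get_workspace_role_assignment_details(self, workspace: Workspace) -> dict:
        return {}

    @staticmethod
    def get_workspace_role(user: User, workspace: Workspace, user_role_assignments: List[RoleAssignment]) -> WorkspaceRole:
        return WorkspaceRole.NoRole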
AzureTRE/api_app/services/access_service.py/0
{ "file_path": "AzureTRE/api_app/services/access_service.py", "repo_id": "AzureTRE", "token_count": 290 }
95
import pytest
from httpx import AsyncClient
from starlette.status import HTTP_422_UNPROCESSABLE_ENTITY

pytestmark = pytest.mark.asyncio


async def test_frw_validation_error_format(app):
    @app.get("/wrong_path/{param}")
    def route_for_test(param: int) -> None:  # pragma: no cover
        pass

    async with AsyncClient(base_url="http://testserver", app=app) as client:
        response = await client.get("/wrong_path/asd")
        assert response.status_code == HTTP_422_UNPROCESSABLE_ENTITY
        assert "error" in response.text
AzureTRE/api_app/tests_ma/test_api/test_errors/test_422_error.py/0
{ "file_path": "AzureTRE/api_app/tests_ma/test_api/test_errors/test_422_error.py", "repo_id": "AzureTRE", "token_count": 203 }
96
from mock import patch
import pytest
from azure.identity.aio import (
    DefaultAzureCredential as DefaultAzureCredentialASync,
    ManagedIdentityCredential as ManagedIdentityCredentialASync
)

from core.credentials import get_credential_async

pytestmark = pytest.mark.asyncio


@patch("core.credentials.MANAGED_IDENTITY_CLIENT_ID", "mocked_client_id")
async def test_get_credential_async_with_managed_identity_client_id():
    credential = await get_credential_async()
    assert isinstance(credential.credentials[0], ManagedIdentityCredentialASync)


async def test_get_credential_async_without_managed_identity_client_id():
    credential = await get_credential_async()
    assert isinstance(credential, DefaultAzureCredentialASync)
AzureTRE/api_app/tests_ma/test_core/test_credentials.py/0
{ "file_path": "AzureTRE/api_app/tests_ma/test_core/test_credentials.py", "repo_id": "AzureTRE", "token_count": 261 }
97
from unittest.mock import AsyncMock from mock import patch, MagicMock import pytest import pytest_asyncio from db.errors import EntityDoesNotExist, ResourceIsNotDeployed from db.repositories.workspace_services import WorkspaceServiceRepository from db.repositories.operations import OperationRepository from models.domain.resource import ResourceType from models.domain.workspace_service import WorkspaceService from models.schemas.workspace_service import WorkspaceServiceInCreate pytestmark = pytest.mark.asyncio WORKSPACE_ID = "abc000d3-82da-4bfc-b6e9-9a7853ef753e" SERVICE_ID = "000000d3-82da-4bfc-b6e9-9a7853ef753e" @pytest_asyncio.fixture async def workspace_service_repo(): with patch('api.dependencies.database.Database.get_container_proxy', return_value=MagicMock()): workspace_repo = await WorkspaceServiceRepository().create() yield workspace_repo @pytest_asyncio.fixture async def operations_repo(): with patch('api.dependencies.database.Database.get_container_proxy', return_value=MagicMock()): operations_repo = await OperationRepository().create() yield operations_repo @pytest.fixture def basic_workspace_service_request(): return WorkspaceServiceInCreate(templateName="workspace-service-type", properties={"display_name": "test", "description": "test", "tre_id": "test"}) @pytest.fixture def workspace_service(): workspace_service = WorkspaceService( id=SERVICE_ID, templateVersion="0.1.0", etag='', properties={}, templateName="my-workspace-service", resourcePath="test" ) return workspace_service async def test_get_active_workspace_services_for_workspace_queries_db(workspace_service_repo): workspace_service_repo.query = AsyncMock(return_value=[]) await workspace_service_repo.get_active_workspace_services_for_workspace(WORKSPACE_ID) workspace_service_repo.query.assert_called_once_with(query=WorkspaceServiceRepository.active_workspace_services_query(WORKSPACE_ID)) async def test_get_deployed_workspace_service_by_id_raises_resource_is_not_deployed_if_not_deployed(workspace_service_repo, workspace_service, operations_repo): service = workspace_service workspace_service_repo.get_workspace_service_by_id = AsyncMock(return_value=service) operations_repo.resource_has_deployed_operation = AsyncMock(return_value=False) with pytest.raises(ResourceIsNotDeployed): await workspace_service_repo.get_deployed_workspace_service_by_id(WORKSPACE_ID, SERVICE_ID, operations_repo) async def test_get_deployed_workspace_service_by_id_return_workspace_service_if_deployed(workspace_service_repo, workspace_service, operations_repo): service = workspace_service workspace_service_repo.get_workspace_service_by_id = AsyncMock(return_value=service) operations_repo.resource_has_deployed_operation = AsyncMock(return_value=True) actual_service = await workspace_service_repo.get_deployed_workspace_service_by_id(WORKSPACE_ID, SERVICE_ID, operations_repo) assert actual_service == service async def test_get_workspace_service_by_id_raises_entity_does_not_exist_if_no_available_services(workspace_service_repo): workspace_service_repo.query = AsyncMock(return_value=[]) with pytest.raises(EntityDoesNotExist): await workspace_service_repo.get_workspace_service_by_id(WORKSPACE_ID, SERVICE_ID) async def test_get_workspace_service_by_id_queries_db(workspace_service_repo, workspace_service): workspace_service_repo.query = AsyncMock(return_value=[workspace_service]) expected_query = f'SELECT * FROM c WHERE c.resourceType = "workspace-service" AND c.workspaceId = "{WORKSPACE_ID}" AND c.id = "{SERVICE_ID}"' await 
workspace_service_repo.get_workspace_service_by_id(WORKSPACE_ID, SERVICE_ID) workspace_service_repo.query.assert_called_once_with(query=expected_query) @patch('db.repositories.workspace_services.WorkspaceServiceRepository.validate_input_against_template') @patch('core.config.TRE_ID', "9876") async def test_create_workspace_service_item_creates_a_workspace_with_the_right_values(validate_input_mock, workspace_service_repo, basic_workspace_service_request, basic_workspace_service_template): workspace_service_to_create = basic_workspace_service_request resource_template = basic_workspace_service_template resource_template.required = ["display_name", "description"] validate_input_mock.return_value = basic_workspace_service_template workspace_service, _ = await workspace_service_repo.create_workspace_service_item(workspace_service_to_create, WORKSPACE_ID) assert workspace_service.templateName == basic_workspace_service_request.templateName assert workspace_service.resourceType == ResourceType.WorkspaceService assert workspace_service.workspaceId == WORKSPACE_ID assert len(workspace_service.properties["tre_id"]) > 0 # need to make sure request doesn't override system param assert workspace_service.properties["tre_id"] != "test" @patch('db.repositories.workspace_services.WorkspaceServiceRepository.validate_input_against_template', side_effect=ValueError) async def test_create_workspace_item_raises_value_error_if_template_is_invalid(_, workspace_service_repo, basic_workspace_service_request): workspace_service_to_create = basic_workspace_service_request with pytest.raises(ValueError): await workspace_service_repo.create_workspace_service_item(workspace_service_to_create, WORKSPACE_ID)
AzureTRE/api_app/tests_ma/test_db/test_repositories/test_workpaces_service_repository.py/0
{ "file_path": "AzureTRE/api_app/tests_ma/test_db/test_repositories/test_workpaces_service_repository.py", "repo_id": "AzureTRE", "token_count": 1880 }
98
### Get health status (no auth)
GET {{baseUrl}}/health
Accept: application/json
AzureTRE/api_http_requests/API Other Endpoints.http/0
{ "file_path": "AzureTRE/api_http_requests/API Other Endpoints.http", "repo_id": "AzureTRE", "token_count": 24 }
99
import sys from typing import Union import click import json import msal import os from httpx import Client, Response from logging import Logger from pathlib import Path from tre.authentication import get_auth_token_client_credentials, get_public_client_application class ApiException(click.ClickException): """An exception that Click can handle and show to the user containing API call error info.""" # Use exit code 2 for API errors that are JSON exit_code = 2 def __init__(self, message: str) -> None: super().__init__(message) self.message = message def show(self, file=None) -> None: # Write (JSON) message stdout without any extra info to allow callers to parse it click.echo(self.message, file=file) class ApiClient: def __init__(self, base_url: str, verify: bool): self.base_url = base_url self.verify = verify @staticmethod def get_api_client_from_config() -> "ApiClient": config_path = Path("~/.config/tre/environment.json").expanduser() if not config_path.exists(): raise click.ClickException( "You need to log in (tre login) before calling this command" ) config_text = config_path.read_text(encoding="utf-8") config = json.loads(config_text) if os.getenv("TRECLI_BASE_URL"): base_url = os.getenv("TRECLI_BASE_URL") click.echo(f"Using API base URL '{base_url}' (overridden by TRECLI_BASE_URL)", err=True) else: base_url = config["base-url"] login_method = config["login-method"] if login_method == "client-credentials": return ClientCredentialsApiClient( base_url, config["verify"], config["client-id"], config["client-secret"], config["aad-tenant-id"], config["api-scope"] ) elif login_method == "device-code": return DeviceCodeApiClient( base_url, config["verify"], config["token-cache-file"], config["client-id"], config["aad-tenant-id"], config["api-scope"] ) else: raise click.ClickException(f"Unhandled login method: {login_method}") @staticmethod def get_api_metadata(api_base_url: str) -> "Union[dict[str, str], None]": with Client() as client: url = f"{api_base_url}/api/.metadata" response = client.get(url) if response.status_code == 200: response_json = response.json() return response_json else: return None def call_api( self, log: Logger, method: str, url: str, headers: "dict[str, str]" = {}, json_data=None, scope_id: str = None, throw_on_error: bool = True, params: "Union[dict[str, str], None]" = None ) -> Response: with Client(verify=self.verify) as client: headers = headers.copy() headers['Authorization'] = f"Bearer {self.get_auth_token(log, scope_id)}" response = client.request(method, f'{self.base_url}{url}', headers=headers, json=json_data, params=params) if throw_on_error and response.is_error: error_info = { 'status_code': response.status_code, 'body': response.text, } raise ApiException(message=json.dumps(error_info, indent=2)) return response def get_workspace_scope(self, log, workspace_id: str) -> str: workspace_response = self.call_api( log, "GET", f'/api/workspaces/{workspace_id}', ) workspace_json = workspace_response.json() workspace_scope = workspace_json["workspace"]["properties"]["scope_id"] return workspace_scope def get_auth_token() -> str: pass class ClientCredentialsApiClient(ApiClient): def __init__(self, base_url: str, verify: bool, client_id: str, client_secret: str, aad_tenant_id: str, scope: str): while base_url.endswith("/"): base_url = base_url[0:-1] super().__init__(base_url, verify) self._client_id = client_id self._client_secret = client_secret self._aad_tenant_id = aad_tenant_id self._scope = scope def get_auth_token(self, log, scope): return 
get_auth_token_client_credentials(log, self._client_id, self._client_secret, self._aad_tenant_id, scope or self._scope, self.verify) class DeviceCodeApiClient(ApiClient): def __init__(self, base_url: str, verify: bool, token_cache_file: str, client_id: str, aad_tenant_id: str, scope: str): super().__init__(base_url, verify) self._token_cache_file = token_cache_file self._client_id = client_id self._aad_tenant_id = aad_tenant_id self._scope = scope def get_auth_token(self, log, scope): effective_scope = scope or self._scope cache = msal.SerializableTokenCache() if os.path.exists(self._token_cache_file): cache.deserialize(open(self._token_cache_file, "r").read()) app = get_public_client_application(self._client_id, self._aad_tenant_id, cache) accounts = app.get_accounts() if accounts: auth_result = app.acquire_token_silent(scopes=[effective_scope], account=accounts[0]) try: auth_result = app.acquire_token_silent(scopes=[effective_scope], account=accounts[0]) except Exception: auth_result = app.acquire_token_for_client(scopes=[effective_scope]) if cache.has_state_changed: with open(self._token_cache_file, "w") as cache_file: cache_file.write(cache.serialize()) if auth_result is not None: if "access_token" in auth_result: token = auth_result["access_token"] return token else: raise click.ClickException(f"Failed to get access_token: ${str(auth_result)}") if sys.stdin.isatty() or sys.stdout.isatty(): # We have TTY - try interactive acquire :-) click.echo(f"No cached token - initiating device code flow for scope '{effective_scope}'", err=True) flow = app.initiate_device_flow(scopes=[effective_scope]) if "user_code" not in flow: raise click.ClickException("unable to initiate device flow") click.echo(flow['message'], err=True) auth_result = app.acquire_token_by_device_flow(flow) if cache.has_state_changed: with open(self._token_cache_file, "w") as cache_file: cache_file.write(cache.serialize()) if auth_result is not None: if "access_token" in auth_result: token = auth_result["access_token"] return token else: raise click.ClickException(f"Failed to get access_token: ${str(auth_result)}") raise RuntimeError(f"Failed to get auth token for scope '{scope}'") def get_workspace_scope(self, log, workspace_id: str) -> str: # device code flow wants "/user_impersonation" suffix, but client creds doesn't # Override here to append workspace_scope = super().get_workspace_scope(log, workspace_id) return workspace_scope + "/user_impersonation"
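
# --- Illustrative usage (not part of the original module) ---
# A minimal sketch of how the client above is typically driven: load the
# configuration written by a prior `tre login`, then issue an authenticated
# request. The "/api/workspaces" endpoint is only an example path.
if __name__ == "__main__":
    import logging

    log = logging.getLogger(__name__)
    client = ApiClient.get_api_client_from_config()  # reads ~/.config/tre/environment.json
    response = client.call_api(log, "GET", "/api/workspaces")
    print(response.json())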
AzureTRE/cli/tre/api_client.py/0
{ "file_path": "AzureTRE/cli/tre/api_client.py", "repo_id": "AzureTRE", "token_count": 3736 }
100
import logging

import click

from tre.commands.operation import get_operation_id_completion, operation_show
from tre.output import output_option, query_option

from .contexts import pass_shared_service_operation_context, SharedServiceOperationContext


def operation_id_completion(ctx: click.Context, param: click.Parameter, incomplete: str):
    log = logging.getLogger(__name__)
    parent_ctx = ctx.parent
    workspace_id = parent_ctx.params["workspace_id"]
    list_url = f'/api/workspaces/{workspace_id}/operations'
    return get_operation_id_completion(ctx, log, list_url, param, incomplete)


@click.group(name="operation", invoke_without_command=True, help="Perform actions on an operation")
@click.argument('operation_id', required=True, type=click.UUID, shell_complete=operation_id_completion)
@click.pass_context
def shared_service_operation(ctx: click.Context, operation_id) -> None:
    ctx.obj = SharedServiceOperationContext.add_operation_id_to_context_obj(ctx, operation_id)


@click.command(name="show", help="SharedService operation")
@click.option('--no-wait', help="If an operation is in progress, do not wait for it to complete", flag_value=True, default=False)
@output_option()
@query_option()
@pass_shared_service_operation_context
def shared_service_operation_show(shared_service_operation_context: SharedServiceOperationContext, no_wait, output_format, query, suppress_output: bool = False):
    log = logging.getLogger(__name__)

    shared_service_id = shared_service_operation_context.shared_service_id
    if shared_service_id is None:
        raise click.UsageError('Missing shared_service ID')
    operation_id = shared_service_operation_context.operation_id
    if operation_id is None:
        raise click.UsageError('Missing operation ID')

    operation_url = f'/api/shared-services/{shared_service_id}/operations/{operation_id}'
    operation_show(log, operation_url, no_wait, suppress_output, output_format=output_format, query=query)


shared_service_operation.add_command(shared_service_operation_show)
AzureTRE/cli/tre/commands/shared_services/operation.py/0
{ "file_path": "AzureTRE/cli/tre/commands/shared_services/operation.py", "repo_id": "AzureTRE", "token_count": 675 }
101
import logging

import click

from tre.api_client import ApiClient
from tre.commands.operation import operations_list
from tre.output import output_option, query_option

from .contexts import UserResourceContext, pass_user_resource_context


@click.group(name="operations", help="List operations ")
def user_resource_operations():
    pass


@click.command(name="list", help="List user resource operations")
@output_option()
@query_option()
@pass_user_resource_context
def user_resource_operations_list(user_resource_operation_context: UserResourceContext, output_format, query):
    log = logging.getLogger(__name__)

    workspace_id = user_resource_operation_context.workspace_id
    if workspace_id is None:
        raise click.UsageError('Missing workspace ID')
    workspace_service_id = user_resource_operation_context.workspace_service_id
    if workspace_service_id is None:
        raise click.UsageError('Missing workspace-service ID')
    user_resource_id = user_resource_operation_context.user_resource_id
    if user_resource_id is None:
        raise click.UsageError('Missing user-resource ID')

    operations_url = f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}/user-resources/{user_resource_id}/operations'

    client = ApiClient.get_api_client_from_config()
    workspace_scope = client.get_workspace_scope(log, workspace_id)

    operations_list(log, operations_url, output_format, query, scope_id=workspace_scope)


user_resource_operations.add_command(user_resource_operations_list)
AzureTRE/cli/tre/commands/workspaces/workspace_services/user_resources/operations.py/0
{ "file_path": "AzureTRE/cli/tre/commands/workspaces/workspace_services/user_resources/operations.py", "repo_id": "AzureTRE", "token_count": 483 }
102
output "event_grid_status_changed_topic_endpoint" { value = azurerm_eventgrid_topic.status_changed.endpoint } output "event_grid_airlock_notification_topic_endpoint" { value = azurerm_eventgrid_topic.airlock_notification.endpoint } output "service_bus_step_result_queue" { value = azurerm_servicebus_queue.step_result.name } output "event_grid_status_changed_topic_resource_id" { value = azurerm_eventgrid_topic.status_changed.id } output "event_grid_airlock_notification_topic_resource_id" { value = azurerm_eventgrid_topic.airlock_notification.id }
AzureTRE/core/terraform/airlock/outputs.tf/0
{ "file_path": "AzureTRE/core/terraform/airlock/outputs.tf", "repo_id": "AzureTRE", "token_count": 198 }
103
#!/bin/bash set -o errexit set -o pipefail set -o nounset # set -o xtrace terraform_wrapper_path="../../devops/scripts/terraform_wrapper.sh" # This variables are loaded in for us # shellcheck disable=SC2154 terraform init -input=false -backend=true -reconfigure \ -backend-config="resource_group_name=${TF_VAR_mgmt_resource_group_name}" \ -backend-config="storage_account_name=${TF_VAR_mgmt_storage_account_name}" \ -backend-config="container_name=${TF_VAR_terraform_state_container_name}" \ -backend-config="key=${TRE_ID}" echo "*** Migrating TF Resources... ***" # terraform show might fail if provider schema has changed. Since we don't call apply at this stage a refresh is needed terraform refresh # 1. Check we have a root_module in state # 2. Grab the Resource ID # 3. Delete the old resource from state # 4. Import the new resource type in using the existing Azure Resource ID terraform_show_json=$(terraform show -json) # azurerm_app_service_plan -> azurerm_service_plan core_app_service_plan_id=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_app_service_plan.core") | .values.id') if [ -n "${core_app_service_plan_id}" ]; then echo "Migrating ${core_app_service_plan_id}" terraform state rm azurerm_app_service_plan.core if [[ $(az resource list --query "[?id=='${core_app_service_plan_id}'] | length(@)") == 0 ]]; then echo "The resource doesn't exist on Azure. Skipping importing it back to state." else terraform import azurerm_service_plan.core "${core_app_service_plan_id}" fi fi # azurerm_app_service -> azurerm_linux_web_app api_app_service_id=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_app_service.api") | .values.id') if [ -n "${api_app_service_id}" ]; then echo "Migrating ${api_app_service_id}" terraform state rm azurerm_app_service.api if [[ $(az resource list --query "[?id=='${api_app_service_id}'] | length(@)") == 0 ]]; then echo "The resource doesn't exist on Azure. Skipping importing it back to state." 
else terraform import azurerm_linux_web_app.api "${api_app_service_id}" fi fi # app insights via -> native tf resource app_insights_via_arm=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.child_modules != null) .values.root_module.child_modules[] | select (.address=="module.azure_monitor") | .resources[] | select(.address=="module.azure_monitor.azurerm_resource_group_template_deployment.app_insights_core") | .values.id') if [ -n "${app_insights_via_arm}" ]; then echo "Migrating ${app_insights_via_arm}" PLAN_FILE="tfplan$$" TS=$(date +"%s") LOG_FILE="${TS}-tre-core-migrate.log" # This variables are loaded in for us # shellcheck disable=SC2154 "${terraform_wrapper_path}" \ -g "${TF_VAR_mgmt_resource_group_name}" \ -s "${TF_VAR_mgmt_storage_account_name}" \ -n "${TF_VAR_terraform_state_container_name}" \ -k "${TRE_ID}" \ -l "${LOG_FILE}" \ -c "terraform plan -target module.azure_monitor.azurerm_resource_group_template_deployment.app_insights_core -target module.azure_monitor.azurerm_resource_group_template_deployment.ampls_core -out ${PLAN_FILE} && \ terraform apply -input=false -auto-approve ${PLAN_FILE}" fi # support downgrading core app service plan core_plan=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_service_plan.core") | .values.id') api_diag=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_monitor_diagnostic_setting.webapp_api") | .values.id') if [ -n "${core_plan}" ] && [ -n "${api_diag}" ]; then set +o errexit terraform plan -target "azurerm_service_plan.core" -detailed-exitcode plan_exit_code=$? set -o errexit if [ "${plan_exit_code}" == "2" ]; then echo "Migrating ${api_diag}" PLAN_FILE="tfplan$$" TS=$(date +"%s") LOG_FILE="${TS}-tre-core-migrate.log" # This variables are loaded in for us # shellcheck disable=SC2154 "${terraform_wrapper_path}" \ -g "${TF_VAR_mgmt_resource_group_name}" \ -s "${TF_VAR_mgmt_storage_account_name}" \ -n "${TF_VAR_terraform_state_container_name}" \ -k "${TRE_ID}" \ -l "${LOG_FILE}" \ -c "terraform plan -destroy -target azurerm_monitor_diagnostic_setting.webapp_api -out ${PLAN_FILE} && \ terraform apply -input=false -auto-approve ${PLAN_FILE}" fi fi # remove app insights profiler storage account app_insights_byo_storage=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.child_modules != null) .values.root_module.child_modules[] | select (.address=="module.azure_monitor") | .resources[] | select(.address=="module.azure_monitor.azurerm_resource_group_template_deployment.app_insights_byo_storage") | .values.id') if [ -n "${app_insights_byo_storage}" ]; then echo "Removing state of app_insights_byo_storage" terraform state rm module.azure_monitor.azurerm_resource_group_template_deployment.app_insights_byo_storage fi # airlock inline vnet integration (instead of via swift) airlock_vnet_integration=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.child_modules != null) .values.root_module.child_modules[] | select (.address=="module.airlock_resources") | .resources[] | select(.address=="module.airlock_resources.azurerm_app_service_virtual_network_swift_connection.airlock_integrated_vnet") | .values.id') if [ -n "${airlock_vnet_integration}" ]; then echo "Migrating ${airlock_vnet_integration}" PLAN_FILE="tfplan$$" TS=$(date +"%s") LOG_FILE="${TS}-tre-core-migrate.log" # This variables are loaded 
in for us # shellcheck disable=SC2154 "${terraform_wrapper_path}" \ -g "${TF_VAR_mgmt_resource_group_name}" \ -s "${TF_VAR_mgmt_storage_account_name}" \ -n "${TF_VAR_terraform_state_container_name}" \ -k "${TRE_ID}" \ -l "${LOG_FILE}" \ -c "terraform plan -target module.airlock_resources.azurerm_app_service_virtual_network_swift_connection.airlock_integrated_vnet -out ${PLAN_FILE} && \ terraform apply -input=false -auto-approve ${PLAN_FILE}" fi # api inline vnet integration (instead of via swift) api_vnet_integration=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_app_service_virtual_network_swift_connection.api_integrated_vnet") | .values.id') if [ -n "${api_vnet_integration}" ]; then echo "Migrating ${api_vnet_integration}" PLAN_FILE="tfplan$$" TS=$(date +"%s") LOG_FILE="${TS}-tre-core-migrate.log" # This variables are loaded in for us # shellcheck disable=SC2154 "${terraform_wrapper_path}" \ -g "${TF_VAR_mgmt_resource_group_name}" \ -s "${TF_VAR_mgmt_storage_account_name}" \ -n "${TF_VAR_terraform_state_container_name}" \ -k "${TRE_ID}" \ -l "${LOG_FILE}" \ -c "terraform plan -target azurerm_app_service_virtual_network_swift_connection.api_integrated_vnet -out ${PLAN_FILE} && \ terraform apply -input=false -auto-approve ${PLAN_FILE}" fi # support changing the resource processor subnet size rp_subnet=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.child_modules != null) .values.root_module.child_modules[] | select (.address=="module.network") | .resources[] | select(.address=="module.network.azurerm_subnet.resource_processor") | .values.id') if [ -n "${rp_subnet}" ]; then set +o errexit terraform plan -target "module.network.azurerm_subnet.resource_processor" -detailed-exitcode plan_exit_code=$? set -o errexit if [ "${plan_exit_code}" == "2" ]; then echo "Migrating ${rp_subnet}" PLAN_FILE="tfplan$$" TS=$(date +"%s") LOG_FILE="${TS}-tre-core-migrate-rp-subnet.log" # This variables are loaded in for us # shellcheck disable=SC2154 "${terraform_wrapper_path}" \ -g "${TF_VAR_mgmt_resource_group_name}" \ -s "${TF_VAR_mgmt_storage_account_name}" \ -n "${TF_VAR_terraform_state_container_name}" \ -k "${TRE_ID}" \ -l "${LOG_FILE}" \ -c "terraform plan -destroy -target module.resource_processor_vmss_porter[0].azurerm_linux_virtual_machine_scale_set.vm_linux \ -target azurerm_private_endpoint.sbpe \ -target azurerm_private_endpoint.mongo \ -out ${PLAN_FILE} && \ terraform apply -input=false -auto-approve ${PLAN_FILE}" fi fi # DNS Zones migration. We can't use a moved block due the the vars being used. nexus_dns_zone=$(echo "${terraform_show_json}" \ | jq -r 'select(.values.root_module.child_modules != null) .values.root_module.child_modules[] | select (.address=="module.network") | .resources[] | select(.address=="module.network.azurerm_private_dns_zone.nexus") | .values.id') if [ -n "${nexus_dns_zone}" ]; then terraform state rm module.network.azurerm_private_dns_zone.nexus terraform import azurerm_private_dns_zone.non_core[\""nexus-${TRE_ID}.${LOCATION}.cloudapp.azure.com"\"] "${nexus_dns_zone}" fi # Additional DNS Zones migration. We changed the name for the nexus dns zone hence we need to apply the change. 
NEXUS_DNS_NAME="nexus-${TRE_ID}.${LOCATION}.cloudapp.azure.com" nexus_dns_zone_changed=$(echo "${terraform_show_json}" \ | jq -r --arg nexus_dns_name "$NEXUS_DNS_NAME" 'select(.values.root_module.resources != null) .values.root_module.resources[] | select (.address=="azurerm_private_dns_zone.non_core[\""+$nexus_dns_name+"\"]") | .values.id') if [ -n "${nexus_dns_zone_changed}" ]; then terraform state rm azurerm_private_dns_zone.non_core[\""nexus-${TRE_ID}.${LOCATION}.cloudapp.azure.com"\"] terraform import azurerm_private_dns_zone.nexus "${nexus_dns_zone_changed}" fi # this isn't a classic migration, but impacts how terraform handles the deployment in the next phase state_store_serverless=$(echo "${terraform_show_json}" \ | jq 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_cosmosdb_account.tre_db_account") | any(.values.capabilities[]; .name=="EnableServerless")') # false = resource EXITS in the state WITHOUT the serverless capability. # true = exists with the capability, empty value = resource doesn't exist. if [ "${state_store_serverless}" == "false" ]; then echo "Identified CosmosDB with defined throughput." TF_VAR_is_cosmos_defined_throughput="true" export TF_VAR_is_cosmos_defined_throughput fi # prep for migration of azurerm_servicebus_namespace_network_rule_set https://github.com/microsoft/AzureTRE/pull/3858 # as described https://github.com/hashicorp/terraform-provider-azurerm/issues/23954 state_store_servicebus_network_rule_set=$(echo "${terraform_show_json}" \ | jq 'select(.values.root_module.resources != null) | .values.root_module.resources[] | select(.address=="azurerm_servicebus_namespace_network_rule_set.servicebus_network_rule_set") | .values.id') if [ -n "${state_store_servicebus_network_rule_set}" ]; then echo "Removing state of azurerm_servicebus_namespace_network_rule_set" terraform state rm azurerm_servicebus_namespace_network_rule_set.servicebus_network_rule_set fi echo "*** Migration is done. ***"
AzureTRE/core/terraform/migrate.sh/0
{ "file_path": "AzureTRE/core/terraform/migrate.sh", "repo_id": "AzureTRE", "token_count": 4282 }
104
#!/bin/bash

cat << EOF > 'validation.txt'
${CERTBOT_VALIDATION}
EOF

# shellcheck disable=SC2016
az storage blob upload \
    --account-name "${STORAGE_ACCOUNT}" \
    --auth-mode login \
    --container-name '$web' \
    --file 'validation.txt' \
    --name ".well-known/acme-challenge/${CERTBOT_TOKEN}" \
    --no-progress \
    --only-show-errors

sleep 10s
AzureTRE/core/terraform/scripts/auth-hook.sh/0
{ "file_path": "AzureTRE/core/terraform/scripts/auth-hook.sh", "repo_id": "AzureTRE", "token_count": 153 }
105
#!/bin/bash

# This script is designed to be `source`d to create reusable helper functions

# Grants admin consent for the given app permission.
#
# Parameters:
#   1. principalId is the object ID of the service principal/managed application
#   2. resourceId is the object ID of the resource service principal (can in some cases be the same as principalId)
#   3. appRoleId is the ID of the permission
function grant_admin_consent() {
  principalId=$1
  resourceId=$2
  appRoleId=$3

  local msGraphUri=""
  msGraphUri="$(az cloud show --query endpoints.microsoftGraphResourceId --output tsv)/v1.0"

  # test if enabled to avoid "Permission being assigned already exists on the object" error
  is_enabled=$(az rest --method GET \
    --uri "${msGraphUri}/servicePrincipals/${principalId}/appRoleAssignments" -o json \
    | jq -r ".value | map( select(.appRoleId==\"${appRoleId}\") ) | length")

  if [[ "$is_enabled" != "1" ]]; then
    data=$(jq -c . << JSON
{
  "principalId": "${principalId}",
  "resourceId": "${resourceId}",
  "appRoleId": "${appRoleId}"
}
JSON
)
    az rest --method POST --uri "${msGraphUri}/servicePrincipals/${principalId}/appRoleAssignments" --body "${data}"
  fi
}
AzureTRE/devops/scripts/aad/grant_admin_consent.sh/0
{ "file_path": "AzureTRE/devops/scripts/aad/grant_admin_consent.sh", "repo_id": "AzureTRE", "token_count": 479 }
106
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace

base_url="${TRE_URL}"
cli_env_file="${HOME}/.config/tre/environment.json"

# Are we already signed in?
already_signed_in=0
got_token_from_cli=$(tre get-token >/dev/null 2>&1; echo $?)
if [[ $got_token_from_cli == "0" ]]; then
  if [ -f "${cli_env_file}" ] && [ "${base_url}" == "$(jq -r '."base-url"' "${cli_env_file}")" ]; then
    already_signed_in=1
  fi
fi

if [ $already_signed_in == 1 ]; then
  echo "CLI already signed in"
else
  if [ -n "${TEST_ACCOUNT_CLIENT_ID:-}" ] && [ -n "${TEST_ACCOUNT_CLIENT_SECRET:-}" ] && [ -n "${AAD_TENANT_ID:-}" ] && [ -n "${API_CLIENT_ID:-}" ]; then
    # Use client credentials flow with TEST_ACCOUNT_CLIENT_ID/SECRET
    echo "Using TEST_ACCOUNT_CLIENT_ID to sign in to tre CLI"
    tre login client-credentials \
      --base-url "${base_url}" \
      --client-id "$TEST_ACCOUNT_CLIENT_ID" \
      --client-secret "$TEST_ACCOUNT_CLIENT_SECRET" \
      --aad-tenant-id "$AAD_TENANT_ID" \
      --api-scope "api://${API_CLIENT_ID}" \
      --no-verify # skip SSL verification in case certs aren't set up
  else
    # Use resource owner password credentials flow with USERNAME/PASSWORD
    echo "tre CLI not already signed in and missing one of TEST_ACCOUNT_CLIENT_ID, TEST_ACCOUNT_CLIENT_SECRET, AAD_TENANT_ID or API_CLIENT_ID"
    exit 1
  fi
fi
AzureTRE/devops/scripts/ensure_cli_signed_in.sh/0
{ "file_path": "AzureTRE/devops/scripts/ensure_cli_signed_in.sh", "repo_id": "AzureTRE", "token_count": 590 }
107
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset
# Uncomment this line to see each command for debugging (careful: this will show secrets!)
# set -o xtrace

# Find all terraform folders and create/upgrade lock files.
# Run from root folder
find . -type d -name terraform -not -path "*/.cnab/*" -exec echo In Dir: {} \; -exec terraform -chdir={} init -upgrade=true -backend=false \;
AzureTRE/devops/scripts/upgrade_lock_files.sh/0
{ "file_path": "AzureTRE/devops/scripts/upgrade_lock_files.sh", "repo_id": "AzureTRE", "token_count": 132 }
108
<?xml version="1.0" encoding="UTF-8" standalone="no"?> <svg viewBox="0 0 34 34" class="" role="presentation" focusable="false" id="FxSymbol0-03d" data-type="1" version="1.1" sodipodi:docname="azure-tre-logo.svg" inkscape:version="1.1.2 (b8e25be833, 2022-02-05)" xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape" xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd" xmlns="http://www.w3.org/2000/svg" xmlns:svg="http://www.w3.org/2000/svg"> <sodipodi:namedview id="namedview91" pagecolor="#ffffff" bordercolor="#666666" borderopacity="1.0" inkscape:pageshadow="2" inkscape:pageopacity="0.0" inkscape:pagecheckerboard="0" showgrid="false" inkscape:zoom="23.529412" inkscape:cx="16.97875" inkscape:cy="17" inkscape:window-width="1920" inkscape:window-height="1009" inkscape:window-x="-8" inkscape:window-y="-8" inkscape:window-maximized="1" inkscape:current-layer="FxSymbol0-03d" /> <g inkscape:groupmode="layer" id="layer1" inkscape:label="Layer 1"> <path d="m 23.10482,25.158823 -4.140171,-4.115167 -0.110432,-0.108348 a 3.7046921,3.7046921 0 0 1 0.108349,-5.231993 l 1.521049,-1.525216 -1.17725,-1.712743 A 1.0001419,1.0001419 0 0 1 19.13134,11.898609 V 3.5057511 A 0.4979873,0.4979873 0 0 1 19.629327,3.0077638 H 20.09606 A 0.99805822,0.99805822 0 0 0 21.094118,2.0097056 V 1.5596418 A 0.9959746,0.9959746 0 0 0 20.09606,0.5720017 H 9.036158 A 0.99805822,0.99805822 0 0 0 8.0381,1.5700599 v 0.4396457 a 1.0001419,1.0001419 0 0 0 0.998058,0.9980582 h 0.466733 a 0.50007093,0.50007093 0 0 1 0.500071,0.4979873 V 11.877773 A 1.01056,1.01056 0 0 1 9.827937,12.442436 L 1.1350373,25.1109 c -0.4542311,0.662594 0.020836,1.946109 0.8334515,1.946109 H 23.10482 Z" fill="url(#bb7d2fa1-5df1-4286-a784-52ab4f60600a)" id="path3822" style="fill:url(#bb7d2fa1-5df1-4286-a784-52ab4f60600a);stroke-width:2.08363" /> <rect x="58.224522" y="15.158711" width="0" height="0" rx="0" fill="#faa21d" id="rect3830" ry="0" /> <rect x="-13.999269" y="54.715313" width="0" height="0" rx="0" fill="#faa21d" id="rect3828" ry="0" transform="rotate(-90)" /> <path d="m 19.060496,20.95406 a 2.7587246,2.7587246 0 0 1 -0.110432,-0.110432 3.5421691,3.5421691 0 0 1 -0.04584,-4.871526 l -1.45854,-2.106549 A 1.6314814,1.6314814 0 0 1 17.160227,12.944589 V 8.804419 A 0.76677542,0.76677542 0 0 0 16.393451,8.037644 H 12.642919 A 0.76885905,0.76885905 0 0 0 11.87406,8.804419 v 3.877633 a 2.4232604,2.4232604 0 0 1 -0.416725,1.36686 l -6.60302,9.626368 a 0.57508157,0.57508157 0 0 0 0.472984,0.900127 h 17.371213 z" class="msportalfx-svg-c01" fill="#ffffff" id="path3832" style="stroke-width:2.08363" /> </g> <g id="g75" transform="translate(-2.40968,0.817001)"> <title id="title51" /> <path d="m 19.305,3.55 c -12.6433333,-2.59333333 -6.321667,-1.2966667 0,0 z" fill="#e62323" id="path59" sodipodi:nodetypes="cc" /> <path d="m -2.8285,28.8005 c 2.11233333,-19.427 1.0561667,-9.7135 0,0 z" fill="#ff7381" id="path61" sodipodi:nodetypes="cc" /> <path d="m 34,19.908 c 0,5.667 -6.8,10.162 -8.273,11.088 a 0.604,0.604 0 0 1 -0.567,0 C 23.63,30.089 16.83,25.575 16.83,19.908 v -6.743 a 0.548,0.548 0 0 1 0.529,-0.548 c 5.289,-0.132 4.061,-2.456 8.028,-2.456 3.966,0 2.739,2.324 8.027,2.456 a 0.548,0.548 0 0 1 0.53,0.548 z" fill="#dfa500" id="path67" /> <path d="M 25.387,20.626 V 11.03 c 3.645,0 2.512,2.135 7.366,2.267 a 0.491,0.491 0 0 1 0.491,0.491 v 6.762 z m 0,0 h -7.82 c 0.472,4.854 6.27,8.707 7.555,9.444 h 0.227 z" fill="url(#bb7d2fa1-5df1-4286-a784-52ab4f60605b)" id="path69" style="fill:url(#bb7d2fa1-5df1-4286-a784-52ab4f60605b)" /> <path d="m 
18.02,13.297 c 4.854,-0.132 3.778,-2.267 7.367,-2.267 v 9.596 h -7.82 v -6.838 a 0.49,0.49 0 0 1 0.453,-0.491 z m 15.187,7.329 h -7.82 v 9.595 h 0.208 c 1.36,-0.868 7.14,-4.646 7.612,-9.595 z" fill="#ffd400" id="path71" /> <defs id="defs73" /> </g> <defs id="defs88"> <linearGradient id="bb7d2fa1-5df1-4286-a784-52ab4f60605b" x1="25.405" y1="11.03" x2="25.405" y2="30.221" gradientUnits="userSpaceOnUse"> <stop stop-color="#FFD70F" id="stop77" /> <stop offset=".12" stop-color="#FC1" id="stop79" /> <stop offset=".44" stop-color="#FEB517" id="stop81" /> <stop offset=".75" stop-color="#FEA61A" id="stop83" /> <stop offset="1" stop-color="#FEA11B" id="stop85" /> </linearGradient> <linearGradient id="bb7d2fa1-5df1-4286-a784-52ab4f60600b" x1="13.259" y1="5.3629999" x2="13.259" y2="17.405001" gradientUnits="userSpaceOnUse" gradientTransform="translate(36.21832,0.43950054)"> <stop offset="1" stop-color="#ffd400" id="stop3843" /> </linearGradient> <linearGradient id="bb7d2fa1-5df1-4286-a784-52ab4f60600a" x1="6.1360002" y1="0.66500002" x2="6.2740002" y2="13.402" gradientUnits="userSpaceOnUse" gradientTransform="matrix(2.0836289,0,0,2.0836289,-1.0215184,-0.6781755)"> <stop offset="0" stop-color="#5ea0ef" id="stop3836" /> <stop offset="1" stop-color="#0078d4" id="stop3838" /> </linearGradient> </defs> </svg>
AzureTRE/docs/assets/azure-tre-logo.svg/0
{ "file_path": "AzureTRE/docs/assets/azure-tre-logo.svg", "repo_id": "AzureTRE", "token_count": 3310 }
109
# Network Architecture The Trusted Research Environment (TRE) network topology is based on [hub-spoke](https://docs.microsoft.com/en-us/azure/architecture/reference-architectures/hybrid-networking/hub-spoke). The TRE Core VNET ([Azure Virtual Network](https://docs.microsoft.com/en-us/azure/virtual-network/virtual-networks-overview)) is the central hub and each workspace is a spoke. ![Network architecture](../assets/network-architecture.png) Azure TRE VNETs are segregated allowing limited traffic between the TRE Core VNET and Workspace VNETs. The security rules are managed by `nsg-ws` network security group. See [workspace network security groups (NSG)](#workspaces) further down. The Core VNET is further divided into subnets. | <div style="width:200px">Subnet</div> | Description | | -------| ----------- | | `AzureBastionSubnet` | A dedicated subnet for Azure Bastion hosts. | | `AppGwSubnet` | Subnet for Azure Application Gateway controlling ingress traffic. | | `AzureFirewallSubnet` | Subnet for Azure Firewall controlling egress traffic. | | `ResourceProcessorSubnet` | Subnet for VMSS used by the Composition Service to host Docker containers to execute Porter bundles that deploys Workspaces. | | `WebAppSubnet` | Subnet for TRE API. | | `SharedSubnet` | Shared Services subnet for all things shared by TRE Core and Workspaces. Such as Source Mirror Shared Service and Package Mirror Shared Service. | All subnets (Core and Workspace subnets) have a default route which directs egress traffic to the Azure Firewall to ensure only explicitly allowed destinations on the Internet to be accessed. There are a couple of exceptions: - `AzureFirewallSubnet` as it hosts the Azure Firewall which routes traffic to the Internet. - `AzureBastionSubnet` as it hosts [Azure Bastion](https://azure.microsoft.com/en-us/services/azure-bastion) which is the management jump box within the VNET with Internet access. - `AppGwSubnet` as it hosts the Azure Application Gateway which has to be able to a ping the health endpoints e.g. TRE API. ## Ingress and egress Ingress traffic from the Internet is only allowed through the Application Gateway, which forwards HTTPS (port 443) call to the TRE API in the `WebAppSubnet`. Egress traffic is routed through the Azure Firewall with a few exceptions and by default all ingress and egress traffic is denied except explicitly allowed. The explicitly allowed egress traffic is described here: - [Resource Processor](../tre-developers/resource-processor.md#network-requirements) - [TRE API](../tre-developers/api.md#network-requirements) - [Gitea Shared Service](../tre-templates/shared-services/gitea.md#network-requirements) - [Nexus Shared Service](../tre-templates/shared-services/nexus.md#network-requirements) ## Azure Monitor Azure Monitor resources are secured using [Azure Monitor Private Link Scope (AMPLS)](https://docs.microsoft.com/azure/azure-monitor/logs/private-link-security) keeping all traffic inside the Microsoft Azure backbone network. The Azure Monitor resources and their network configuration is defined in `/core/terraform/azure-monitor` folder and the required private DNS zones in file `/core/terraform/network/dns_zones.tf`. ## Network security groups ### TRE Core Network security groups (NSG), and their security rules for TRE core resources are defined in `/core/terraform/network/network_security_groups.tf`. 
| Network security group | Associated subnet(s) |
| ---------------------- | -------------------- |
| `nsg-bastion-subnet` | `AzureBastionSubnet` |
| `nsg-app-gw` | `AppGwSubnet` |
| `nsg-default-rules` | `ResourceProcessorSubnet`, `SharedSubnet`, `WebAppSubnet` |

### Workspaces

Azure TRE VNETs are segregated, allowing only limited traffic between the TRE Core VNET and Workspace VNETs. The rules that manage and limit the traffic between the TRE Core VNET and Workspace VNETs are defined by the `nsg-ws` network security group:

- Inbound traffic from the TRE Core VNET to a workspace is allowed for [Azure Bastion](https://docs.microsoft.com/en-us/azure/bastion/bastion-overview) (ports 22, 3389).
- All other inbound traffic from Core to a workspace is denied.
- Outbound traffic from a workspace to `SharedSubnet` is allowed.
- Outbound traffic to the Internet is allowed on HTTPS port 443 (next hop Azure Firewall).
- All other outbound traffic is denied.

Each of these rules can be managed per workspace.

!!! caution
    In Azure, traffic between subnets is allowed unless explicitly denied.
AzureTRE/docs/azure-tre-overview/networking.md/0
{ "file_path": "AzureTRE/docs/azure-tre-overview/networking.md", "repo_id": "AzureTRE", "token_count": 1211 }
110
# TRE Client UX ## Name The Client Identity is typically called `<TRE_ID> UX` within the Microsoft Entra ID Portal. ## Purpose This identity is used by any public facing client application so that user impersonation can occur to the Core API and any Workspace Applications. ## Application Roles This application does not have any roles defined. ## Permissions | Name | Type* | Admin consent required | TRE usage | | --- | -- | -----| --------- | |offline_access|Delegated|No|Allows the app to see and update the data you gave it access to, even when users are not currently using the app. | |openid|Delegated|No|Allows users to sign in to the app with their work or school accounts and allows the app to see basic user profile information.| |TRE API/user_impersonation|Delegated|No|Flow the authenticated user to the TRE API when needed.| |Workspace API/user_impersonation|Delegated|No|Flow the authenticated user to the Workspace API when needed.| '*' See the difference between [delegated and application permission](https://docs.microsoft.com/graph/auth/auth-concepts#delegated-and-application-permissions) types. See [Microsoft Graph permissions reference](https://docs.microsoft.com/graph/permissions-reference) for more details. ## Clients This identity should only be used by client applications. Currently this is the React UI and the Swagger UI. ## How to create This identity is created when you create the API. For completeness, you can run the following script Example on how to run the script: ```bash ./devops/scripts/aad/create_api_application.sh \ --name <TRE_ID> \ --tre-url "https://<TRE_ID>.<LOCATION>.cloudapp.azure.com" \ --admin-consent \ --automation-clientid <TEST_ACCOUNT_CLIENT_ID> ``` | Argument | Description | | -------- | ----------- | | `--name` | The prefix of the name of the app registrations. `TRE` will give you `TRE API`. | | `--tre-url` | Used to construct auth redirection URLs for the UI and Swagger app. Use the values of the [environment variables](../environment-variables.md) `TRE_ID` and `LOCATION` in the URL. Reply URL for the localhost, `http://localhost:8000/api/docs/oauth2-redirect`, will be added by default. | | `--admin-consent` | Grants admin consent for the app registrations. This is required for them to function properly, but requires Microsoft Entra ID admin privileges. | | `--automation-clientid` | This is an optional parameter but will create an application with test users with permission to use the `TRE API` and `TRE Swagger UI` | | `--reset-password` | Optional, default is 0. This flag has no relevance when creating the UX as there is no password for the Microsoft Entra ID Application. | ## Redirect URLs The following Redirect URIs will be added to the application * `https://<TRE ID>.<Azure location>.cloudapp.azure.com` * `http://localhost:8000/docs/oauth2-redirect` - For local testing ## Environment Variables | Variable | Description | Location | | -------- | ----------- | -------- | |SWAGGER_UI_CLIENT_ID|The Client Id|`./config.yaml`|
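
## Example: acquiring a token

The snippet below is a minimal, illustrative sketch of how a client application registered with this identity could obtain an access token for the TRE API using the device code flow with MSAL for Python. The client ID, tenant ID and API client ID are placeholders; substitute the values from your own deployment.

```python
import msal

SWAGGER_UI_CLIENT_ID = "<UX client id>"   # placeholder
AAD_TENANT_ID = "<tenant id>"             # placeholder
API_CLIENT_ID = "<TRE API client id>"     # placeholder

app = msal.PublicClientApplication(
    SWAGGER_UI_CLIENT_ID,
    authority=f"https://login.microsoftonline.com/{AAD_TENANT_ID}",
)

# Start the device code flow for the TRE API user_impersonation scope
flow = app.initiate_device_flow(scopes=[f"api://{API_CLIENT_ID}/user_impersonation"])
print(flow["message"])  # tells the user which URL to visit and which code to enter

result = app.acquire_token_by_device_flow(flow)
if "access_token" in result:
    print("Token acquired for the TRE API")
```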
AzureTRE/docs/tre-admins/identities/client.md/0
{ "file_path": "AzureTRE/docs/tre-admins/identities/client.md", "repo_id": "AzureTRE", "token_count": 850 }
111
# Installing base workspace

## Publishing and registering the base workspace bundle

Run the following in a terminal to build, publish and register the base workspace bundle:

```cmd
make workspace_bundle BUNDLE=base
```

This will prepare the template for use with your TRE.

## Create Base Workspace

Workspaces can be easily created via the AzureTRE UI.

Open a browser and navigate to: `https://<TRE_ID>.<LOCATION>.cloudapp.azure.com/` (replace TRE_ID and LOCATION with values from previous steps). It will require you to log in; make sure you log in with a user who is a TREAdmin.

1. Select Workspaces -> Create New:
![Create workspace main](../../assets/create-workspace-main.png)

1. Click on Create under Base Workspace:
![Create workspace](../../assets/create-workspace.png)

1. Fill in the details for your workspace:
    - General information such as name and description
    - [Optional] Update values for Shared Storage Quota, App Service Plan (SKU) and Address space if needed
    - Workspace Authentication Type - this determines whether you'd like TRE to create an app registration for the workspace automatically, or whether you wish to provide an existing one that you've created manually. To read about how to create it manually, read the [Creating an Application Client for base workspace](#creating-an-application-client-for-base-workspace) section below.

1. After filling in the details, press submit.
![Create workspace - Fill Details](../../assets/create-workspace-fill-details.png)

1. Select go to resource to see its status:
![Create Workspace In Progress](../../assets/create-workspace-in-progress.png)

1. Navigate to Operation and wait until the status changes to deployed:
![Create Workspace Status](../../assets/create-workspace-status.png)

The workspace is now ready to use.

## Creating an Application Client for base workspace

As explained in the [auth guide](../auth.md), every workspace has a corresponding app registration which, if you haven't run `make auth`, can be created using the helper script `./devops/scripts/aad/create_workspace_application.sh`. For example:

```bash
./devops/scripts/aad/create_workspace_application.sh \
  --name "${TRE_ID} - workspace 1" \
  --admin-consent \
  --ux-clientid "${SWAGGER_UI_CLIENT_ID}" \
  --automation-clientid "${TEST_ACCOUNT_CLIENT_ID}" \
  --application-admin-clientid "${APPLICATION_ADMIN_CLIENT_ID}"
```

!!! caution
    If you're using a separate tenant for Microsoft Entra ID app registrations to the one where you've deployed the TRE infrastructure resources, ensure you've signed into that tenant in the `az cli` before running the above command. See **Using a separate Microsoft Entra ID tenant** in [Setup Auth configuration](./setup-auth-entities.md) for more details.

Running the script will report `WORKSPACE_API_CLIENT_ID` and `WORKSPACE_API_CLIENT_SECRET` for the generated app. Set these under the authentication section in `config.yaml` so that automated testing will work. You also need to use `WORKSPACE_API_CLIENT_ID` and `WORKSPACE_API_CLIENT_SECRET` in the form.

## Next steps

* [Installing a workspace service & user resources](./ui-install-ws-and-ur.md)
AzureTRE/docs/tre-admins/setup-instructions/ui-install-base-workspace.md/0
{ "file_path": "AzureTRE/docs/tre-admins/setup-instructions/ui-install-base-workspace.md", "repo_id": "AzureTRE", "token_count": 886 }
112
# TRE Web User Interface This project contains a React-based web UI which covers the core aspects of a TRE, for researchers and workspace owners. ## Chosen UI Stack + Components The UI is built upon several popular web frameworks: - React v18 (created via create-react-app, with all build configurations left as defaults) - Typescript - React Router v6 for client side routing - Fluent UI [Fluent UI Docs](https://developer.microsoft.com/en-us/fluentui#/controls/web) - MSAL v2: Microsoft Entra ID authentication [msal-react docs](https://github.com/AzureAD/microsoft-authentication-library-for-js/tree/dev/lib/msal-react) ### Folder structure ```text ui ├── app - Root of the React application │ ├── build - Location of compiled files after build process │ ├── public - Location for static HTML to bootstrap the app │ ├── src - All .tsx components │ ├── index.tsx - Entry point for the app │ ├── App.tsx - Wrapper and routing for the app │ └── config.source.json - JSON file to be used as source file for autogenerated config ``` ### AuthN + AuthZ For further details on the auth setup, see [Auth](../tre-admins/auth.md). As stated above, Microsoft Entra ID is used for Authentication and Authorization. There are 3 Microsoft Entra ID apps involved here: - **TRE UX**. This is the app that the user authenticates against. Once authenticated, the client will request an access token for the `TRE Api`. - **TRE Api**. In the access token response from this app we get the user's role membership for TRE-level roles (`TREAdmin` / `TREUser`). Based on these role memberships, aspects of the UI will be made available. If the user is in a `TREAdmin` role, they will see buttons to create workspaces for instance. When the user navigates into a Workspace, the client will request an access token for that `Workspace App`. - **Workspace App(s)**. Each TRE workspace will have a workspace app registration. The Application Id URI for each workspace app is stored in the Workspace resource object in Cosmos, and the client uses this URI to gain an access token for that particular workspace. Workspace app registrations may be reused across multiple workspaces in development scenarios. From this access token we can find the Workspace-level roles the user is in (`WorkspaceOwner` / `WorkspaceResearcher`). These are in turn used to show/hide features of the UI. ### React Contexts The React Context API is a clean way to handle a limited amount of global state, and is used for a few scenarios in this project: - TRE Roles Context: A context provides details of the base TRE roles a user is in, which can be consumed anywhere throughout the app - Workspace Context: Tracks the currently selected Workspace, and the roles the user is in for that Workspace. This context is used for nested components to be able to authenticate against the correct Microsoft Entra ID App via `workspaceCtx.workspaceApplicationIdURI`. - Create Form Context: A context to control the Create / Update form behaviour. - Notifications Context: Tracks all the in-progress operations currently running. For each operation, the Notifications panel also uses this context to broadcast Component 'actions' which are subscribed to by downstream components. This way, a resource component does not have to track it's own changes, and can be 'told' by the Notifications Context whether it should refresh / lock etc. 
### Custom Hooks

Hooks are used throughout the project, and a couple of custom hooks were written to abstract common logic:

- `useAuthApiCall`: Encapsulates an authenticated `fetch` request and provides a simple interface for downstream components to use.
- `useComponentManager`: Subscribes to changes broadcast from the Notifications panel, via the context. A component can simply add this hook to start subscribing to changes and react accordingly.

A usage sketch of `useAuthApiCall` is included at the end of this page.

## Deployment

The UI is deployed as part of the `tre-deploy` make target (unless you set `deploy_ui=false` under the tre defaults section in your `config.yaml` file).

To re-deploy _just_ the UI (after an initial deploy), run `make build-and-deploy-ui` from the root of the dev container. This will:

- Use the environment variables from your deployment to create a `config.json` file for the UI
- Build the source code, via `yarn build`
- Deploy the code to Azure blob storage, where it will be statically served behind the App Gateway that also fronts the API.
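As promised above, here is a rough usage sketch of `useAuthApiCall`. The import path, the `HttpMethod` enum and the call signature shown here are illustrative assumptions only; check `src/hooks` for the real definitions.

```tsx
import React, { useEffect, useState } from "react";
// Hypothetical import path and signature; see src/hooks for the actual hook.
import { useAuthApiCall, HttpMethod } from "./hooks/useAuthApiCall";

export const WorkspaceList: React.FC = () => {
  const apiCall = useAuthApiCall();
  const [workspaces, setWorkspaces] = useState<Array<any>>([]);

  useEffect(() => {
    const loadWorkspaces = async () => {
      // The hook acquires an access token via MSAL before issuing the request,
      // so components never handle tokens directly.
      const result = await apiCall("workspaces", HttpMethod.Get);
      setWorkspaces(result.workspaces);
    };
    loadWorkspaces();
  }, [apiCall]);

  return (
    <ul>
      {workspaces.map((ws) => (
        <li key={ws.id}>{ws.properties.display_name}</li>
      ))}
    </ul>
  );
};
```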
AzureTRE/docs/tre-developers/ui.md/0
{ "file_path": "AzureTRE/docs/tre-developers/ui.md", "repo_id": "AzureTRE", "token_count": 1179 }
113
# Azure Health Data Services Workspace Service

See [Azure Health Data Services Documentation](https://learn.microsoft.com/en-us/azure/healthcare-apis/healthcare-apis-overview).

## Prerequisites

- [A base workspace deployed](https://microsoft.github.io/AzureTRE/tre-templates/workspaces/base/)

## Azure Healthcare Workspace

Each Azure Health Data Services workspace service creates a [Healthcare Workspace](https://learn.microsoft.com/en-us/azure/healthcare-apis/workspace-overview). In addition, when creating this workspace service you can choose to deploy [FHIR](https://learn.microsoft.com/en-us/azure/healthcare-apis/fhir/) and [DICOM](https://learn.microsoft.com/en-us/azure/healthcare-apis/dicom/) instances within the newly created healthcare workspace.

![Healthcare Service](images/hs_details.png)

## Authentication

Learn more about authentication and application roles in [this doc](https://learn.microsoft.com/en-us/azure/healthcare-apis/authentication-authorization). Make sure to assign the required roles to your users/apps and follow the guidelines to retrieve a token.

Note: If you are using a separate tenant for authentication, follow [this documentation](https://learn.microsoft.com/en-us/azure/healthcare-apis/azure-api-for-fhir/configure-local-rbac) to assign users/apps to your FHIR instance.
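As a hedged illustration of what "retrieve a token" can look like in practice, the sketch below uses `@azure/identity` to request a token scoped to a FHIR service and calls its `Patient` endpoint. The service URL is a placeholder and the scope format is an assumption based on the linked authentication documentation; adjust both to match your deployed instance.

```typescript
import { DefaultAzureCredential } from "@azure/identity";

// Placeholder URL: substitute the FHIR service deployed in your Health Data Services workspace.
const fhirUrl = "https://<health-workspace>-<fhir-service>.fhir.azurehealthcareapis.com";

async function listPatients(): Promise<void> {
  const credential = new DefaultAzureCredential();

  // Assumption: the token is scoped to the FHIR service itself ("<service-url>/.default").
  const accessToken = await credential.getToken(`${fhirUrl}/.default`);

  const response = await fetch(`${fhirUrl}/Patient`, {
    headers: {
      Authorization: `Bearer ${accessToken.token}`,
      "Content-Type": "application/fhir+json",
    },
  });

  console.log(await response.json());
}

listPatients().catch(console.error);
```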
AzureTRE/docs/tre-templates/workspace-services/health_services.md/0
{ "file_path": "AzureTRE/docs/tre-templates/workspace-services/health_services.md", "repo_id": "AzureTRE", "token_count": 372 }
114
import os
import pytest
import asyncio
import logging

from azure.core.exceptions import ResourceNotFoundError
from azure.storage.blob import ContainerClient

from airlock.request import post_request, get_request, upload_blob_using_sas, wait_for_status
from resources.resource import get_resource, post_resource
from resources.workspace import get_workspace_auth_details
from airlock import strings as airlock_strings

from e2e_tests.conftest import get_workspace_owner_token
from helpers import get_admin_token

pytestmark = pytest.mark.asyncio

LOGGER = logging.getLogger(__name__)
BLOB_FILE_PATH = "./test_airlock_sample.txt"
BLOB_NAME = os.path.basename(BLOB_FILE_PATH)


async def submit_airlock_import_request(workspace_path: str, workspace_owner_token: str, verify: bool):
    LOGGER.info("Creating airlock import request")
    payload = {
        "type": airlock_strings.IMPORT,
        "businessJustification": "some business justification"
    }

    request_result = await post_request(payload, f'/api{workspace_path}/requests', workspace_owner_token, verify, 201)
    assert request_result["airlockRequest"]["type"] == airlock_strings.IMPORT
    assert request_result["airlockRequest"]["businessJustification"] == "some business justification"
    assert request_result["airlockRequest"]["status"] == airlock_strings.DRAFT_STATUS
    request_id = request_result["airlockRequest"]["id"]

    # get container link
    LOGGER.info("Getting airlock request container URL")
    request_result = await get_request(f'/api{workspace_path}/requests/{request_id}/link', workspace_owner_token, verify, 200)
    container_url = request_result["containerUrl"]

    # upload blob
    # currently there's no elegant way to check if the container was created yet because it's an async process
    # it would be better to create another draft_in_progress step and wait for the request to change to draft state before
    # uploading the blob
    i = 1
    blob_uploaded = False
    wait_time = 30
    while not blob_uploaded:
        LOGGER.info(f"try #{i} to upload a blob to container [{container_url}]")

        try:
            await asyncio.sleep(5)
            upload_response = await upload_blob_using_sas(BLOB_FILE_PATH, container_url)
            if "etag" in upload_response:
                blob_uploaded = True
            else:
                raise Exception("upload failed")
        except ResourceNotFoundError:
            i += 1
            LOGGER.info(f"sleeping for {wait_time} sec until container would be created")
            await asyncio.sleep(wait_time)
            pass
        except Exception as e:
            LOGGER.error(f"upload blob failed with exception: {e}")
            raise e

    # submit request
    LOGGER.info("Submitting airlock request")
    request_result = await post_request(None, f'/api{workspace_path}/requests/{request_id}/submit', workspace_owner_token, verify, 200)
    assert request_result["airlockRequest"]["status"] == airlock_strings.SUBMITTED_STATUS

    await wait_for_status(airlock_strings.IN_REVIEW_STATUS, workspace_owner_token, workspace_path, request_id, verify)

    return request_id, container_url


@pytest.mark.timeout(50 * 60)
@pytest.mark.airlock
async def test_airlock_review_vm_flow(setup_test_workspace, setup_test_airlock_import_review_workspace_and_guacamole_service, verify):
    workspace_path, workspace_id = setup_test_workspace
    workspace_owner_token = await get_workspace_owner_token(workspace_id, verify)

    _, import_review_workspace_id, _, import_review_workspace_service_id = setup_test_airlock_import_review_workspace_and_guacamole_service

    # Preparation: Update the research workspace so that it has the import review details
    patch_payload = {
        "properties": {
            "enable_airlock": True,
            "configure_review_vms": True,
            "airlock_review_config": {
                "import": {
                    "import_vm_workspace_id": import_review_workspace_id,
                    "import_vm_workspace_service_id": import_review_workspace_service_id,
                    "import_vm_user_resource_template_name": "tre-service-guacamole-import-reviewvm"
                },
                "export": {
                    "export_vm_workspace_service_id": "",
                    "export_vm_user_resource_template_name": "tre-service-guacamole-export-reviewvm"
                }
            }
        }
    }

    # Get workspace to get the etag
    workspace = await get_resource(f"/api{workspace_path}", workspace_owner_token, verify)

    admin_token = await get_admin_token(verify)
    await post_resource(
        payload=patch_payload,
        endpoint=f"/api{workspace_path}",
        access_token=admin_token,
        verify=verify,
        method="PATCH",
        etag=workspace["workspace"]["_etag"],
    )
    LOGGER.info("Workspace Airlock Review configuration set up")

    # IMPORT FLOW
    # Submit the request
    request_id, _ = await submit_airlock_import_request(workspace_path, workspace_owner_token, verify)
    LOGGER.info(f'Airlock Request ID {request_id} has been created')

    # Create a review VM
    admin_token = await get_admin_token(verify)
    import_workspace_owner_token, _ = await get_workspace_auth_details(admin_token=admin_token, workspace_id=import_review_workspace_id, verify=verify)
    user_resource_path, user_resource_id = await post_resource(
        payload={},
        endpoint=f"/api{workspace_path}/requests/{request_id}/review-user-resource",
        access_token=workspace_owner_token,
        verify=verify,
        method="POST",
        wait=True,
        access_token_for_wait=import_workspace_owner_token  # needs a different token as it is created in a separate workspace
    )
    LOGGER.info(f"Airlock Review VM has been created: {user_resource_path}")

    # Approve request
    LOGGER.info("Approving airlock request")
    payload = {
        "approval": "True",
        "decisionExplanation": "the reason why this request was approved/rejected"
    }
    request_result = await post_request(payload, f'/api{workspace_path}/requests/{request_id}/review', workspace_owner_token, verify, 200)
    assert request_result["airlockRequest"]["reviews"][0]["decisionExplanation"] == "the reason why this request was approved/rejected"

    await wait_for_status(airlock_strings.APPROVED_STATUS, workspace_owner_token, workspace_path, request_id, verify)
    LOGGER.info("Airlock request has been approved")

    # Check that deletion for user resource has started
    user_resource = await get_resource(f"/api{user_resource_path}", import_workspace_owner_token, verify)
    assert user_resource["userResource"]["deploymentStatus"] == "updating"
    LOGGER.info("Review VM has started deletion successfully")

    # EXPORT FLOW
    # We can't test the export flow as we can't fully create an export request without special networking setup


@pytest.mark.airlock
@pytest.mark.extended
@pytest.mark.timeout(35 * 60)
async def test_airlock_flow(setup_test_workspace, verify) -> None:
    # 1. Get the workspace set up
    workspace_path, workspace_id = setup_test_workspace
    workspace_owner_token = await get_workspace_owner_token(workspace_id, verify)

    # 2. create and submit airlock request
    request_id, container_url = await submit_airlock_import_request(workspace_path, workspace_owner_token, verify)

    # 3. approve request
    LOGGER.info("Approving airlock request")
    payload = {
        "approval": "True",
        "decisionExplanation": "the reason why this request was approved/rejected"
    }
    request_result = await post_request(payload, f'/api{workspace_path}/requests/{request_id}/review', workspace_owner_token, verify, 200)
    assert request_result["airlockRequest"]["reviews"][0]["decisionExplanation"] == "the reason why this request was approved/rejected"

    await wait_for_status(airlock_strings.APPROVED_STATUS, workspace_owner_token, workspace_path, request_id, verify)

    # 4. check the file has been deleted from the source
    # NOTE: We should really be checking that the file is deleted from in progress location too,
    # but doing that will require setting up network access to in-progress storage account
    try:
        container_client = ContainerClient.from_container_url(container_url=container_url)
        # We expect the container to eventually be deleted too, but sometimes this async operation takes some time.
        # Checking that at least there are no blobs within the container
        for _ in container_client.list_blobs():
            container_url_without_sas = container_url.split("?")[0]
            assert False, f"The source blob in container {container_url_without_sas} should be deleted"
    except ResourceNotFoundError:
        # Expecting this exception
        pass

    # 5. get a link to the blob in the approved location.
    # For a full E2E we should try to download it, but can't without special networking setup.
    # So at the very least we check that we get the link for it.
    request_result = await get_request(f'/api{workspace_path}/requests/{request_id}/link', workspace_owner_token, verify, 200)
    container_url = request_result["containerUrl"]

    # 6. create airlock export request
    LOGGER.info("Creating airlock export request")
    justification = "another business justification"
    payload = {
        "type": airlock_strings.EXPORT,
        "businessJustification": justification
    }
    request_result = await post_request(payload, f'/api{workspace_path}/requests', workspace_owner_token, verify, 201)
    assert request_result["airlockRequest"]["type"] == airlock_strings.EXPORT
    assert request_result["airlockRequest"]["businessJustification"] == justification
    assert request_result["airlockRequest"]["status"] == airlock_strings.DRAFT_STATUS
    request_id = request_result["airlockRequest"]["id"]

    # 7. get container link
    LOGGER.info("Getting airlock request container URL")
    request_result = await get_request(f'/api{workspace_path}/requests/{request_id}/link', workspace_owner_token, verify, 200)
    container_url = request_result["containerUrl"]

    # we can't test the export flow any further since we don't have the network
    # access to upload the file from within the workspace.
AzureTRE/e2e_tests/test_airlock.py/0
{ "file_path": "AzureTRE/e2e_tests/test_airlock.py", "repo_id": "AzureTRE", "token_count": 3682 }
115
import os import uuid from azure.servicebus import ServiceBusClient, ServiceBusMessage CREATE_WORKSPACE_REQUEST_DATA_FILE = "createWorkspaceRequestData.json" def send_service_bus_message(service_bus_connection_string, service_bus_queue_name, correlation_id): with open(CREATE_WORKSPACE_REQUEST_DATA_FILE, "r") as file: data = file.read().replace('\n', '') service_bus_client = ServiceBusClient.from_connection_string( conn_str=service_bus_connection_string, logging_enable=True) with service_bus_client: queue_sender = service_bus_client.get_queue_sender(queue_name=service_bus_queue_name) with queue_sender: message = ServiceBusMessage(body=data, correlation_id=correlation_id) queue_sender.send_messages(message) print(f"Service Bus message sent to queue: {data}") if __name__ == "__main__": try: service_bus_connection_string = os.environ["SERVICE_BUS_CONNECTION_STRING"] resource_request_queue_name = os.environ["SERVICE_BUS_RESOURCE_REQUEST_QUEUE"] correlation_id = str(uuid.uuid4()) print(f"Service Bus queue name: {resource_request_queue_name}") print(f"Generated correlation ID: {correlation_id}") send_service_bus_message(service_bus_connection_string, resource_request_queue_name, correlation_id) except Exception as e: print(f"Failed to send a Service Bus message: {type(e).__name__}: {e}")
AzureTRE/resource_processor/test_tools/service_bus_message_sender/send_message_to_servicebus.py/0
{ "file_path": "AzureTRE/resource_processor/test_tools/service_bus_message_sender/send_message_to_servicebus.py", "repo_id": "AzureTRE", "token_count": 556 }
116
resource "azurerm_network_interface" "jumpbox_nic" { name = "nic-vm-${var.tre_id}" resource_group_name = data.azurerm_resource_group.rg.name location = data.azurerm_resource_group.rg.location tags = local.tre_shared_service_tags ip_configuration { name = "internalIPConfig" subnet_id = data.azurerm_subnet.shared.id private_ip_address_allocation = "Dynamic" } lifecycle { ignore_changes = [tags] } } resource "random_password" "password" { length = 16 lower = true min_lower = 1 upper = true min_upper = 1 numeric = true min_numeric = 1 special = true min_special = 1 override_special = "_%@" } resource "azurerm_windows_virtual_machine" "jumpbox" { name = "vm-${var.tre_id}" resource_group_name = data.azurerm_resource_group.rg.name location = data.azurerm_resource_group.rg.location network_interface_ids = [azurerm_network_interface.jumpbox_nic.id] size = var.admin_jumpbox_vm_sku allow_extension_operations = true admin_username = "adminuser" admin_password = random_password.password.result tags = local.tre_shared_service_tags source_image_reference { publisher = "MicrosoftWindowsDesktop" offer = "windows-10" sku = "win10-21h2-pro-g2" version = "latest" } os_disk { name = "vm-dsk-${var.tre_id}" caching = "ReadWrite" storage_account_type = "Standard_LRS" } lifecycle { ignore_changes = [tags] } } resource "azurerm_key_vault_secret" "jumpbox_credentials" { name = "${azurerm_windows_virtual_machine.jumpbox.name}-jumpbox-password" value = random_password.password.result key_vault_id = data.azurerm_key_vault.keyvault.id tags = local.tre_shared_service_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_virtual_machine_extension" "antimalware" { virtual_machine_id = azurerm_windows_virtual_machine.jumpbox.id name = "${azurerm_windows_virtual_machine.jumpbox.name}-AntimalwareExtension" publisher = "Microsoft.Azure.Security" type = "IaaSAntimalware" type_handler_version = "1.3" auto_upgrade_minor_version = true tags = local.tre_shared_service_tags settings = jsonencode({ "AntimalwareEnabled" = true }) lifecycle { ignore_changes = [tags] } }
AzureTRE/templates/shared_services/admin-vm/terraform/admin-jumpbox.tf/0
{ "file_path": "AzureTRE/templates/shared_services/admin-vm/terraform/admin-jumpbox.tf", "repo_id": "AzureTRE", "token_count": 1252 }
117
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/shared_services/airlock_notifier/template_schema.json", "type": "object", "title": "Airlock Notifier Shared Service", "description": "A shared service notifying on Airlock operations", "required": ["smtp_server_address", "smtp_username", "smtpPassword", "smtp_from_email"], "properties": { "smtp_server_address": { "$id": "#/properties/smtp_server_address", "type": "string", "title": "SMTP Server Address", "description": "SMTP Server Address", "updateable": false }, "smtp_username": { "$id": "#/properties/smtp_username", "type": "string", "title": "SMTP Username", "description": "SMTP Username", "updateable": false }, "smtpPassword": { "$id": "#/properties/smtpPassword", "type": "string", "title": "SMTP Password", "description": "SMTP Password", "updateable": false, "sensitive": true }, "smtp_from_email": { "$id": "#/properties/smtp_from_email", "type": "string", "title": "SMTP From Email", "description": "The notification emails will be sent from this address", "updateable": false }, "tre_url": { "$id": "#/properties/tre_url", "type": "string", "title": "TRE URL", "description": "If your TRE URL is different from ${TRE_ID}.${LOCATION}.cloudapp.azure.com, please enter it here", "updateable": false, "pattern": "^(https:|http:|www\\.)\\S*", "examples": [ "https://mytre.westeurope.cloudapp.azure.com" ] }, "smtp_server_enable_ssl": { "$id": "#/properties/smtp_server_enable_ssl", "type": "boolean", "title": "SMTP SSL Enabled", "updateable": false, "default": true }, "smtp_server_port": { "$id": "#/properties/smtp_server_port", "type": "integer", "title": "SMTP Server Port", "updateable": false, "default": 25 } }, "pipeline": { "install": [ { "stepId": "main" } ], "upgrade": [ { "stepId": "main" } ], "uninstall": [ { "stepId": "main" } ] } }
AzureTRE/templates/shared_services/airlock_notifier/template_schema.json/0
{ "file_path": "AzureTRE/templates/shared_services/airlock_notifier/template_schema.json", "repo_id": "AzureTRE", "token_count": 1008 }
118
--- schemaVersion: 1.0.0 name: tre-shared-service-cyclecloud version: 0.5.5 description: "An Azure TRE Shared Service Template for Azure Cyclecloud" registry: azuretre dockerfile: Dockerfile.tmpl credentials: - name: azure_tenant_id env: ARM_TENANT_ID - name: azure_subscription_id env: ARM_SUBSCRIPTION_ID - name: azure_client_id env: ARM_CLIENT_ID - name: azure_client_secret env: ARM_CLIENT_SECRET parameters: - name: tre_id type: string - name: id type: string description: "An Id for this installation" env: id - name: azure_environment type: string default: "AzureCloud" description: "Used by Azure CLI to set the Azure environment" - name: tfstate_resource_group_name type: string description: "Resource group containing the Terraform state storage account" - name: tfstate_storage_account_name type: string description: "The name of the Terraform state storage account" - name: tfstate_container_name env: tfstate_container_name type: string default: "tfstate" description: "The name of the Terraform state storage container" - name: arm_use_msi env: ARM_USE_MSI type: boolean default: false - name: arm_environment env: ARM_ENVIRONMENT type: string default: "public" outputs: - name: connection_uri type: string applyTo: - install - upgrade mixins: - exec - terraform: clientVersion: 1.3.6 - az: clientVersion: 2.37.0 install: - terraform: description: "Deploy Cyclecloud shared service" vars: tre_id: ${ bundle.parameters.tre_id } arm_client_id: ${ bundle.credentials.azure_client_id } arm_client_secret: ${ bundle.credentials.azure_client_secret } arm_tenant_id: ${ bundle.credentials.azure_tenant_id } arm_use_msi: ${ bundle.parameters.arm_use_msi } tre_resource_id: ${ bundle.parameters.id } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: ${ bundle.parameters.id } outputs: - name: connection_uri upgrade: - terraform: description: "Update Cyclecloud shared service" vars: tre_id: ${ bundle.parameters.tre_id } arm_client_id: ${ bundle.credentials.azure_client_id } arm_client_secret: ${ bundle.credentials.azure_client_secret } arm_tenant_id: ${ bundle.credentials.azure_tenant_id } arm_use_msi: ${ bundle.parameters.arm_use_msi } tre_resource_id: ${ bundle.parameters.id } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: ${ bundle.parameters.id } outputs: - name: connection_uri uninstall: - terraform: description: "Delete the Cyclecloud shared service" vars: tre_id: ${ bundle.parameters.tre_id } arm_client_id: ${ bundle.credentials.azure_client_id } arm_client_secret: ${ bundle.credentials.azure_client_secret } arm_tenant_id: ${ bundle.credentials.azure_tenant_id } arm_use_msi: ${ bundle.parameters.arm_use_msi } tre_resource_id: ${ bundle.parameters.id } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: ${ bundle.parameters.id } start: - terraform: arguments: - "output" description: "Get resource ID from Terraform 
outputs" backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: ${ bundle.parameters.id } outputs: - name: azure_resource_id - az: description: "Set Azure Cloud Environment" arguments: - cloud - set flags: name: ${ bundle.parameters.azure_environment } - az: description: "Login to Azure" arguments: - login flags: identity: username: ${ bundle.credentials.azure_client_id } - az: description: "Start the VM" arguments: - vm - start flags: ids: ${ bundle.outputs.azure_resource_id } stop: - terraform: arguments: - "output" description: "Get VM hostname and rg from Terraform outputs" backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: ${ bundle.parameters.id } outputs: - name: azure_resource_id - az: description: "Set Azure Cloud Environment" arguments: - cloud - set flags: name: ${ bundle.parameters.azure_environment } - az: description: "Login to Azure" arguments: - login flags: identity: username: ${ bundle.credentials.azure_client_id } - az: description: "Stop the VM" arguments: - vm - deallocate flags: ids: ${ bundle.outputs.azure_resource_id }
AzureTRE/templates/shared_services/cyclecloud/porter.yaml/0
{ "file_path": "AzureTRE/templates/shared_services/cyclecloud/porter.yaml", "repo_id": "AzureTRE", "token_count": 2416 }
119
# This file is maintained automatically by "terraform init". # Manual edits may be lost in future updates. provider "registry.terraform.io/databricks/databricks" { version = "1.5.0" constraints = "1.5.0" hashes = [ "h1:UJe5L/BteOU7M5ewRLzuUjiewYFLF695eLp3hMKVR6M=", "zh:0fa9ca13d977a8dcb46254f07c9be731891468f5b423f09cb51da97eaace8e2b", "zh:3a648e4f8ece8aab05acfc7759b4e4cd153ecd29b3ed0e00d7f1a3a19911f7d8", "zh:3b052b98b5e22ae4e81e4b667ae5cee9a68bb1750d22546ae9eff16c8d6a294a", "zh:4320b165218cb39f0ad313d483bba20d0de9e48db0c1467fd0e3a0afb2c02012", "zh:588c9fdbf35ca9c430cafb5dbd90f34a165744e3514212d0f2c07a3387d8b339", "zh:b50f8eb38b556ddfa24a76b4113e8a84b778a9a0bb4b4ba5fdc3edca59198d2a", "zh:ca5186443ac672f5566d9c9b5727f55124a8642dd3949e973790b9195e6b306a", "zh:db817409b94c34c9b9b5e109751eff7fbca90d08b407a099630c8ec79b6c6d4b", "zh:edf04424c68db603bf2473e2f14f3e3ad217feb84fc2c7debb6641d15886f8e3", "zh:ef374f84c41fe529bff1ec3274eb7fe5dd8184c5e71f3e6d99a6adaff6eab82e", ] } provider "registry.terraform.io/hashicorp/azurerm" { version = "3.40.0" constraints = "3.40.0" hashes = [ "h1:/Jbhw/zNAsDYDoASaG6w+0KZyay9BkUVOpR8b7m0CsA=", "zh:00fa6dc05bf2643c6a3c741edb7d88263698086835a8a613f1d7bd76d1b918fd", "zh:0da9b788e773272a7aa9d59bd9e3d5842edd4acc8c3895bea469e66dc14205a0", "zh:25a8c39d1f042fc7c83ba9dd745c3569ea9e577fadb57563a575fb115ac2b9f1", "zh:4423666dbeae8bc22c6e8898ffbb88745681dc27668ca9104b665dd7f3d7292c", "zh:78c07308e7407b558d15737a98fb5eaf15529d297fc3798de6a7d61e0466e2e3", "zh:894aca7e6f4f331ee8eb51957a180dc03d399d2b1727e0d7842e9b3f022a8c6a", "zh:bb0e620c2161b4c4892a6f50b1c4c69ed70f66bb5e92543a03d79d0e4b1d9441", "zh:c7d8e6a791159ca63b30908c9efe72ab65f60d64b30f0c1eb5a64972f4994844", "zh:d04c11bfd346c1ac34d16bbdca70b23b006e822f6beb236b85375e8343888eb4", "zh:f4edea9660327c7c70a823d786fd1b1c1b186c8759770447f63da72f23e1a73c", "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c", "zh:f986e268949cf445ff53a66af48a87c6f6dba5964e8a5b1dc0ea02afabdd71f7", ] }
AzureTRE/templates/shared_services/databricks-auth/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/templates/shared_services/databricks-auth/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 1286 }
120
data "azurerm_subnet" "firewall" { name = "AzureFirewallSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "firewall_management" { name = "AzureFirewallManagementSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "shared" { name = "SharedSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "resource_processor" { name = "ResourceProcessorSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "web_app" { name = "WebAppSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "airlock_processor" { name = "AirlockProcessorSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "airlock_storage" { name = "AirlockStorageSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_subnet" "airlock_events" { name = "AirlockEventsSubnet" virtual_network_name = "vnet-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_log_analytics_workspace" "tre" { name = "log-${var.tre_id}" resource_group_name = local.core_resource_group_name } data "azurerm_resource_group" "rg" { name = local.core_resource_group_name } data "azurerm_ip_group" "resource_processor" { name = "ipg-resource-processor" resource_group_name = local.core_resource_group_name } data "azurerm_ip_group" "shared" { name = "ipg-shared" resource_group_name = local.core_resource_group_name } data "azurerm_ip_group" "web" { name = "ipg-web-app" resource_group_name = local.core_resource_group_name } data "azurerm_ip_group" "airlock_processor" { name = "ipg-airlock-processor" resource_group_name = local.core_resource_group_name } data "azurerm_ip_group" "referenced" { for_each = toset(distinct(flatten( [for collection in concat(local.api_driven_network_rule_collection, local.api_driven_application_rule_collection) : [for rule in collection.rules : try(rule.source_ip_groups_in_core, [])] ] ))) name = each.value resource_group_name = local.core_resource_group_name }
AzureTRE/templates/shared_services/firewall/terraform/data.tf/0
{ "file_path": "AzureTRE/templates/shared_services/firewall/terraform/data.tf", "repo_id": "AzureTRE", "token_count": 1169 }
121
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/shared_services/gitea/template_schema.json", "type": "object", "title": "Gitea - Git Mirror", "description": "The Gitea shared service is typically used for mirroring external Git repositories.", "required": [], "properties": { "display_name": { "type": "string", "title": "Name for the workspace service", "description": "The name of the workspace service to be displayed to users", "default": "Git Mirror", "updateable": true }, "description": { "type": "string", "title": "Description of the workspace service", "description": "Description of the workspace service", "default": "Access mirrored Git repositories.", "updateable": true }, "overview": { "type": "string", "title": "Workspace Service Overview", "description": "Long form description of the workspace service, in markdown syntax", "default": "The Gitea shared service is used for mirroring external Git repositories. For instructions on how to create Git mirrors see [https://docs.gitea.com/usage/repo-mirror](https://docs.gitea.com/usage/repo-mirror).", "updateable": true }, "sql_sku": { "$id": "#/properties/sql_sku", "type": "string", "title": "MySQL server SKU", "description": "MySQL server SKU", "updateable": true, "enum": [ "B | 4GB 2vCores", "GP | 8GB 2vCores", "BC | 16GB 2vCores" ], "default": "B | 4GB 2vCores" }, "is_exposed_externally": { "$id": "#/properties/is_exposed_externally", "type": "boolean", "title": "Expose externally", "description": "Is the Gitea accessible from outside of the TRE network.", "default": false } }, "uiSchema": { "is_exposed_externally": { "classNames": "tre-hidden" } }, "pipeline": { "install": [ { "stepId": "main" }, { "stepId": "260421b3-7308-491f-b531-e007cdc0ff46", "stepTitle": "Add gitea rule collection to firewall", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_web_app_subnet_gitea_v2", "action": "Allow", "rules": [ { "name": "nexus-package-sources", "description": "Nexus Package Sources", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": "{{ resource.properties.gitea_allowed_fqdns_list }}", "source_addresses": "{{ resource.properties.address_prefixes }}" } ] } } ] } ], "upgrade": [ { "stepId": "main" }, { "stepId": "360421b3-7308-491f-b531-e007cdc0ff47", "stepTitle": "Update gitea rule collection in firewall", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "arc_web_app_subnet_gitea_v2", "action": "Allow", "rules": [ { "name": "nexus-package-sources", "description": "Nexus Package Sources", "protocols": [ { "port": "443", "type": "Https" }, { "port": "80", "type": "Http" } ], "target_fqdns": "{{ resource.properties.gitea_allowed_fqdns_list }}", "source_addresses": "{{ resource.properties.address_prefixes }}" } ] } } ] } ], "uninstall": [ { "stepId": "460421b3-7308-491f-b531-e007cdc0ff48", "stepTitle": "Remove gitea rule collection from firewall", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": 
"rule_collections", "type": "array", "arraySubstitutionAction": "remove", "arrayMatchField": "name", "value": { "name": "arc_web_app_subnet_gitea_v2" } } ] }, { "stepId": "main" } ] } }
AzureTRE/templates/shared_services/gitea/template_schema.json/0
{ "file_path": "AzureTRE/templates/shared_services/gitea/template_schema.json", "repo_id": "AzureTRE", "token_count": 2787 }
122
#!/bin/bash set -o pipefail set -o nounset # set -o xtrace if [ -z "$1" ] then echo 'Nexus password needs to be passed as argument' fi timeout=300 echo 'Checking for ./nexus_repos_config directory...' while [ ! -d "$(dirname "${BASH_SOURCE[0]}")"/nexus_repos_config ]; do # Wait for ./nexus_repos_config with json config files to be copied into vm if [ $timeout == 0 ]; then echo 'ERROR - Timeout while waiting for nexus_repos_config directory' exit 1 fi sleep 1 ((timeout--)) done # Create proxy for each .json file for filename in "$(dirname "${BASH_SOURCE[0]}")"/nexus_repos_config/*.json; do echo "Found config file: $filename. Sending to Nexus..." # Check if apt proxy base_type=$( jq .baseType "$filename" | sed 's/"//g') repo_type=$( jq .repoType "$filename" | sed 's/"//g') repo_name=$(jq .name "$filename" | sed 's/"//g') base_url=http://localhost/service/rest/v1/repositories/$base_type/$repo_type config_timeout=300 status_code=1 while [ "$status_code" != 201 ]; do status_code=$(curl -iu admin:"$1" -XPOST \ "$base_url" \ -H 'accept: application/json' \ -H 'Content-Type: application/json' \ -d @"$filename" \ -k -s -w "%{http_code}" -o /dev/null) echo "Response received from Nexus: $status_code" if [ $config_timeout == 0 ]; then echo "ERROR - Timeout while trying to configure $repo_name" exit 1 elif [ "$status_code" != 201 ]; then sleep 1 ((config_timeout--)) fi done done # Configure realms required for repo authentication echo 'Configuring realms...' status_code=$(curl -iu admin:"$1" -XPUT \ 'http://localhost/service/rest/v1/security/realms/active' \ -H 'accept: application/json' \ -H 'Content-Type: application/json' \ -d @"$(dirname "${BASH_SOURCE[0]}")"/nexus_realms_config.json \ -k -s -w "%{http_code}" -o /dev/null) echo "Response received from Nexus: $status_code"
AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/configure_nexus_repos.sh/0
{ "file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/scripts/configure_nexus_repos.sh", "repo_id": "AzureTRE", "token_count": 782 }
123
resource "azurerm_storage_account" "aml" { name = local.storage_name location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name account_tier = "Standard" account_replication_type = "GRS" tags = local.tre_workspace_service_tags network_rules { default_action = "Deny" } lifecycle { ignore_changes = [tags] } } data "azurerm_private_dns_zone" "blobcore" { name = module.terraform_azurerm_environment_configuration.private_links["privatelink.blob.core.windows.net"] resource_group_name = local.core_resource_group_name } data "azurerm_private_dns_zone" "filecore" { name = module.terraform_azurerm_environment_configuration.private_links["privatelink.file.core.windows.net"] resource_group_name = local.core_resource_group_name } resource "azurerm_private_endpoint" "blobpe" { name = "pe-${local.storage_name}" location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name subnet_id = azurerm_subnet.aml.id tags = local.tre_workspace_service_tags lifecycle { ignore_changes = [tags] } private_dns_zone_group { name = "private-dns-zone-group" private_dns_zone_ids = [data.azurerm_private_dns_zone.blobcore.id] } private_service_connection { name = "dnsgroup-blob${local.storage_name}" private_connection_resource_id = azurerm_storage_account.aml.id is_manual_connection = false subresource_names = ["Blob"] } } resource "azurerm_private_endpoint" "filepe" { name = "pe-file-${local.storage_name}" location = data.azurerm_resource_group.ws.location resource_group_name = data.azurerm_resource_group.ws.name subnet_id = azurerm_subnet.aml.id tags = local.tre_workspace_service_tags lifecycle { ignore_changes = [tags] } private_dns_zone_group { name = "dnsgroup-files-${local.storage_name}" private_dns_zone_ids = [data.azurerm_private_dns_zone.filecore.id] } private_service_connection { name = "dnsgroup-file-${var.tre_id}" private_connection_resource_id = azurerm_storage_account.aml.id is_manual_connection = false subresource_names = ["file"] } depends_on = [ azurerm_private_endpoint.blobpe ] }
AzureTRE/templates/workspace_services/azureml/terraform/storage.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/azureml/terraform/storage.tf", "repo_id": "AzureTRE", "token_count": 1166 }
124
ID=__CHANGE_ME__ WORKSPACE_ID=__CHANGE_ME__ AZURE_LOCATION=__CHANGE_ME__ HOST_SUBNET_ADDRESS_PREFIX=__CHANGE_ME__ CONTAINER_SUBNET_ADDRESS_PREFIX=__CHANGE_ME__
AzureTRE/templates/workspace_services/databricks/.env.sample/0
{ "file_path": "AzureTRE/templates/workspace_services/databricks/.env.sample", "repo_id": "AzureTRE", "token_count": 80 }
125
#!/usr/bin/env sh echo >&2 "oauth exited. code=${1}" # terminate other services to exit from the container exec s6-svscanctl -t /var/run/s6/services
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/oauth/finish/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/oauth/finish", "repo_id": "AzureTRE", "token_count": 54 }
126
/** * */ package org.apache.guacamole.auth.azuretre;
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/package-info.java/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/package-info.java", "repo_id": "AzureTRE", "token_count": 23 }
127
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/workspace_services/guacamole/template_schema.json", "type": "object", "title": "Apache Guacamole - Virtual Desktop Service", "description": "Enables Windows and Linux virtual machines to be accessed via Apache Guacamole.", "required": [], "properties": { "display_name": { "type": "string", "title": "Name for the workspace service", "description": "The name of the workspace service to be displayed to users", "default": "Virtual Desktops", "updateable": true }, "description": { "type": "string", "title": "Description of the workspace service", "description": "Description of the workspace service", "default": "Access Windows and Linux virtual machines via Apache Guacamole", "updateable": true }, "overview": { "type": "string", "title": "Workspace Service Overview", "description": "Long form description of the workspace service, in markdown syntax", "default": "Access Windows and Linux virtual machines via Apache Guacamole. Documentation for using this service can be found here: [https://guacamole.apache.org/doc/gug/using-guacamole.html](https://guacamole.apache.org/doc/gug/using-guacamole.html)", "updateable": true }, "guac_disable_copy": { "$id": "#/properties/guac_disable_copy", "type": "boolean", "title": "Disable 'Copy'", "description": "Disable Copy functionality", "updateable": true }, "guac_disable_paste": { "$id": "#/properties/guac_disable_paste", "type": "boolean", "title": "Disable 'Paste'", "description": "Disable Paste functionality", "updateable": true }, "guac_enable_drive": { "$id": "#/properties/guac_enable_drive", "type": "boolean", "title": "Enable Drive", "description": "Enable mounted drive", "updateable": true }, "guac_disable_download": { "$id": "#/properties/guac_disable_download", "type": "boolean", "title": "Disable files download", "description": "Disable files download", "updateable": true, "default": true }, "guac_disable_upload": { "$id": "#/properties/guac_disable_upload", "type": "boolean", "title": "Disable files upload", "description": "Disable files upload", "updateable": true, "default": true }, "is_exposed_externally": { "$id": "#/properties/is_exposed_externally", "type": "boolean", "title": "Expose externally", "description": "Is the Guacamole service exposed outside of the vnet", "default": true } }, "pipeline": { "install": [ { "stepId": "main" }, { "stepId": "12ba0dad-ea6c-4d0d-9255-d316212f5ffa", "stepTitle": "Add Guacamole URI as AAD redirect URI", "resourceType": "workspace", "resourceAction": "upgrade", "properties": [ { "name": "aad_redirect_uris", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "{{ resource.id }}", "value": "{{ resource.properties.authentication_callback_uri }}" } } ] }, { "stepId": "260421b3-7308-491f-b531-e007cdc0ff46", "stepTitle": "Add network firewall rules for guacamole", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "network_rule_collections", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "nrc_svc_{{ resource.id }}_guacamole", "action": "Allow", "rules": [ { "name": "AzureAD", "description": "AAD access for authNZ", "source_addresses": "{{ resource.properties.web_apps_addresses }}", "destination_addresses": [ "AzureActiveDirectory" ], "destination_ports": [ "*" ], "protocols": [ "TCP" ] } ] } } ] } ], 
"upgrade": [ { "stepId": "main" }, { "stepId": "741c7ff2-eff5-47b2-bf62-2b410d65c96b", "stepTitle": "Update Guacamole URI in AAD", "resourceType": "workspace", "resourceAction": "upgrade", "properties": [ { "name": "aad_redirect_uris", "type": "array", "arraySubstitutionAction": "replace", "arrayMatchField": "name", "value": { "name": "{{ resource.id }}", "value": "{{ resource.properties.authentication_callback_uri }}" } } ] } ], "uninstall": [ { "stepId": "758d8043-4455-45ee-b9b5-a0586bb9f6cf", "stepTitle": "Remove network firewall rules for guacamole", "resourceTemplateName": "tre-shared-service-firewall", "resourceType": "shared-service", "resourceAction": "upgrade", "properties": [ { "name": "network_rule_collections", "type": "array", "arraySubstitutionAction": "remove", "arrayMatchField": "name", "value": { "name": "nrc_svc_{{ resource.id }}_guacamole" } } ] }, { "stepId": "9a1d6b95-26c8-4165-8010-573dd4e2b45c", "stepTitle": "Update guacamole URI to AAD redirect URI", "resourceType": "workspace", "resourceAction": "upgrade", "properties": [ { "name": "aad_redirect_uris", "type": "array", "arraySubstitutionAction": "remove", "arrayMatchField": "name", "value": { "name": "{{ resource.id }}" } } ] }, { "stepId": "main" } ] } }
AzureTRE/templates/workspace_services/guacamole/template_schema.json/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/template_schema.json", "repo_id": "AzureTRE", "token_count": 3143 }
128
variable "workspace_id" { type = string } variable "tre_id" { type = string } variable "parent_service_id" { type = string } variable "tre_resource_id" { type = string } variable "image" { type = string } variable "vm_size" { type = string } variable "shared_storage_access" { type = bool } variable "shared_storage_name" { type = string } variable "image_gallery_id" { type = string default = "" }
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/variables.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/variables.tf", "repo_id": "AzureTRE", "token_count": 152 }
129
variable "workspace_id" { type = string description = "TRE workspace ID" } variable "aad_authority_url" { type = string description = "Active directory" } variable "tre_id" { type = string description = "TRE ID" } variable "tre_resource_id" { type = string description = "Resource ID" } variable "deploy_fhir" { type = bool description = "Indicates if FHIR should be created in the Azure Health Data Services Workspace." } variable "fhir_kind" { type = string description = "FHIR version that will be deployed." } variable "deploy_dicom" { type = bool description = "Indicates if DICOM should be created in the Azure Health Data Services Workspace." } variable "auth_tenant_id" { type = string description = "Used to authenticate into the AAD Tenant to get app role members" } variable "auth_client_id" { type = string description = "Used to authenticate into the AAD Tenant to get app role members" } variable "auth_client_secret" { type = string description = "Used to authenticate into the AAD Tenant to get app role members" } variable "arm_environment" { type = string }
AzureTRE/templates/workspace_services/health-services/terraform/variables.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/health-services/terraform/variables.tf", "repo_id": "AzureTRE", "token_count": 423 }
130
# Azure Provider source and version being used terraform { required_providers { azurerm = { source = "hashicorp/azurerm" version = "=2.97.0" } random = { source = "hashicorp/random" version = "=3.4.2" } local = { source = "hashicorp/local" version = "=2.4.0" } } backend "azurerm" { } } provider "azurerm" { features {} } module "terraform_azurerm_environment_configuration" { source = "git::https://github.com/microsoft/terraform-azurerm-environment-configuration.git?ref=0.2.0" arm_environment = var.arm_environment } data "azurerm_resource_group" "ws" { name = "rg-${local.workspace_resource_name_suffix}" } data "azurerm_virtual_network" "ws" { name = "vnet-${local.workspace_resource_name_suffix}" resource_group_name = data.azurerm_resource_group.ws.name } data "azurerm_subnet" "web_apps" { name = "WebAppsSubnet" virtual_network_name = data.azurerm_virtual_network.ws.name resource_group_name = data.azurerm_virtual_network.ws.resource_group_name } data "azurerm_subnet" "services" { name = "ServicesSubnet" virtual_network_name = data.azurerm_virtual_network.ws.name resource_group_name = data.azurerm_virtual_network.ws.resource_group_name }
AzureTRE/templates/workspace_services/innereye/terraform/main.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/innereye/terraform/main.tf", "repo_id": "AzureTRE", "token_count": 562 }
131
--- schemaVersion: 1.0.0 name: tre-service-mlflow version: 0.7.7 description: "An Azure TRE service for MLflow machine learning lifecycle" dockerfile: Dockerfile.tmpl registry: azuretre custom: runtime_image: name: mlflow-server build: version_file: mlflow-server/version.txt docker_file: mlflow-server/docker/Dockerfile docker_context: mlflow-server credentials: - name: azure_tenant_id env: ARM_TENANT_ID - name: azure_subscription_id env: ARM_SUBSCRIPTION_ID - name: azure_client_id env: ARM_CLIENT_ID - name: azure_client_secret env: ARM_CLIENT_SECRET parameters: - name: workspace_id type: string - name: tre_id type: string - name: id type: string description: "Resource ID for this installation" - name: mgmt_acr_name type: string env: mgmt_acr_name description: "The devops ACR name" - name: mgmt_resource_group_name type: string description: "Resource group containing the devops ACR" env: MGMT_RESOURCE_GROUP_NAME - name: tfstate_resource_group_name type: string description: "Resource group containing the Terraform state storage account" - name: tfstate_storage_account_name type: string description: "The name of the Terraform state storage account" - name: tfstate_container_name env: tfstate_container_name type: string default: "tfstate" description: "The name of the Terraform state storage container" - name: arm_use_msi env: ARM_USE_MSI type: boolean default: false - name: arm_environment env: ARM_ENVIRONMENT type: string default: "public" outputs: - name: connection_uri type: string applyTo: - install - upgrade - name: is_exposed_externally type: boolean applyTo: - install - upgrade mixins: - exec - terraform: clientVersion: 1.3.6 install: - terraform: description: "Deploy workspace service" vars: workspace_id: ${ bundle.parameters.workspace_id } tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name } mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: tre-service-mlflow-${ bundle.parameters.id } outputs: - name: connection_uri - name: is_exposed_externally upgrade: - terraform: description: "Deploy workspace service" vars: workspace_id: ${ bundle.parameters.workspace_id } tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name } mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: tre-service-mlflow-${ bundle.parameters.id } outputs: - name: connection_uri - name: is_exposed_externally uninstall: - terraform: description: "Tear down workspace service" vars: workspace_id: ${ bundle.parameters.workspace_id } tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name } mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ 
bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: tre-service-mlflow-${ bundle.parameters.id }
AzureTRE/templates/workspace_services/mlflow/porter.yaml/0
{ "file_path": "AzureTRE/templates/workspace_services/mlflow/porter.yaml", "repo_id": "AzureTRE", "token_count": 1715 }
132
--- schemaVersion: 1.0.0 name: tre-workspace-service-mysql version: 1.0.1 description: "A MySQL workspace service" registry: azuretre dockerfile: Dockerfile.tmpl credentials: - name: azure_tenant_id env: ARM_TENANT_ID - name: azure_subscription_id env: ARM_SUBSCRIPTION_ID - name: azure_client_id env: ARM_CLIENT_ID - name: azure_client_secret env: ARM_CLIENT_SECRET parameters: - name: workspace_id type: string - name: tre_id type: string # the following are added automatically by the resource processor - name: id type: string description: "Resource ID" env: id - name: tfstate_resource_group_name type: string description: "Resource group containing the Terraform state storage account" - name: tfstate_storage_account_name type: string description: "The name of the Terraform state storage account" - name: tfstate_container_name env: tfstate_container_name type: string default: "tfstate" description: "The name of the Terraform state storage container" - name: arm_use_msi env: ARM_USE_MSI type: boolean default: false - name: arm_environment env: ARM_ENVIRONMENT type: string default: "public" - name: sql_sku type: string default: "B | 4GB 2vCores" - name: storage_mb type: integer default: 5120 - name: db_name type: string default: tredb mixins: - exec - terraform: clientVersion: 1.3.6 outputs: - name: mysql_fqdn type: string applyTo: - install - upgrade install: - terraform: description: "Deploy MySQL workspace service" vars: workspace_id: ${ bundle.parameters.workspace_id } tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } sql_sku: ${ bundle.parameters.sql_sku } storage_mb: ${ bundle.parameters.storage_mb } db_name: ${ bundle.parameters.db_name } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: tre-workspace-service-mysql-${ bundle.parameters.id } outputs: - name: mysql_fqdn upgrade: - terraform: description: "Upgrade MySQL workspace service" vars: workspace_id: ${ bundle.parameters.workspace_id } tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } sql_sku: ${ bundle.parameters.sql_sku } storage_mb: ${ bundle.parameters.storage_mb } db_name: ${ bundle.parameters.db_name } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: tre-workspace-service-mysql-${ bundle.parameters.id } outputs: - name: mysql_fqdn uninstall: - terraform: description: "Tear down MySQL workspace service" vars: workspace_id: ${ bundle.parameters.workspace_id } tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } sql_sku: ${ bundle.parameters.sql_sku } storage_mb: ${ bundle.parameters.storage_mb } db_name: ${ bundle.parameters.db_name } arm_environment: ${ bundle.parameters.arm_environment } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: tre-workspace-service-mysql-${ bundle.parameters.id }
AzureTRE/templates/workspace_services/mysql/porter.yaml/0
{ "file_path": "AzureTRE/templates/workspace_services/mysql/porter.yaml", "repo_id": "AzureTRE", "token_count": 1578 }
133
#!/bin/bash set -o errexit set -o pipefail set -o nounset psql -v ON_ERROR_STOP=1 -e "$OHDSI_ADMIN_CONNECTION_STRING" -f "../sql/atlas_create_security.sql" psql -v ON_ERROR_STOP=1 -e "$OHDSI_ADMIN_CONNECTION_STRING" -f "../sql/atlas_default_roles.sql" count=1 for i in ${ATLAS_USERS//,/ } do if [ "$(("$count" % 2))" -eq "1" ]; then username=$i else # shellcheck disable=SC2016 atlaspw=$(htpasswd -bnBC 4 "" "$i" | tr -d ':\n' | sed 's/$2y/$2a/') psql -v ON_ERROR_STOP=1 -e "$OHDSI_ADMIN_CONNECTION_STRING" -c "insert into webapi_security.security (email,password) values ('$username', E'$atlaspw');" # this step adds some required rows/ids in the db curl "$WEB_API_URL/user/login/db" --data-urlencode "login=$username" --data-urlencode "password=$i" --fail if [ "$count" = "2" ]; then psql -v ON_ERROR_STOP=1 -e "$OHDSI_ADMIN_CONNECTION_STRING" -c "insert into webapi.sec_user_role (user_id, role_id) values ((select id from webapi.sec_user where login='$username'),2);" #admin role else psql -v ON_ERROR_STOP=1 -e "$OHDSI_ADMIN_CONNECTION_STRING" -c "insert into webapi.sec_user_role (user_id, role_id) values ((select id from webapi.sec_user where login='$username'),10);" #atlas user role fi fi ((count++)) done
AzureTRE/templates/workspace_services/ohdsi/scripts/atlas_security.sh/0
{ "file_path": "AzureTRE/templates/workspace_services/ohdsi/scripts/atlas_security.sh", "repo_id": "AzureTRE", "token_count": 598 }
134
resource "terraform_data" "add_data_source" { count = var.configure_data_source ? 1 : 0 triggers_replace = { postgres_database_id = azurerm_postgresql_flexible_server_database.db.id } provisioner "local-exec" { environment = { OHDSI_WEB_API_URL = local.ohdsi_webapi_fqdn OHDSI_WEB_API_USER = "admin" OHDSI_WEB_API_PASSWORD = azurerm_key_vault_secret.atlas_security_admin_password.value DIALECT = local.dialects[local.data_source_config.dialect] SOURCE_NAME = local.data_source_config.source_name SOURCE_KEY = local.data_source_config.source_key CONNECTION_STRING = local.data_source_config.connection_string USERNAME = local.data_source_config.username PASSWORD = local.data_source_config.password DAIMON_CDM = try(local.data_source_daimons.daimon_cdm, null) DAIMON_VOCABULARY = try(local.data_source_daimons.daimon_vocabulary, null) DAIMON_RESULTS = local.results_schema_name DAIMON_CEM = try(local.data_source_daimons.daimon_cem, null) DAIMON_CEM_RESULTS = try(local.data_source_daimons.daimon_cem_results, null) DAIMON_TEMP = local.temp_schema_name } command = "../scripts/add_data_source.sh" } depends_on = [terraform_data.deployment_atlas_security] }
AzureTRE/templates/workspace_services/ohdsi/terraform/data_source.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/ohdsi/terraform/data_source.tf", "repo_id": "AzureTRE", "token_count": 673 }
135
# HACK: PR #3769: Remove file when base workspace release updated output "vnet_id" { value = azurerm_virtual_network.ws.id }
AzureTRE/templates/workspaces/airlock-import-review/terraform/network_output.terraform/0
{ "file_path": "AzureTRE/templates/workspaces/airlock-import-review/terraform/network_output.terraform", "repo_id": "AzureTRE", "token_count": 43 }
136
# 'Approved' storage account
resource "azurerm_storage_account" "sa_import_approved" {
  name                            = local.import_approved_storage_name
  location                        = var.location
  resource_group_name             = var.ws_resource_group_name
  account_tier                    = "Standard"
  account_replication_type        = "LRS"
  allow_nested_items_to_be_public = false

  # Important! we rely on the fact that the blob created events are issued when the creation of the blobs is done.
  # This is true ONLY when Hierarchical Namespace is DISABLED
  is_hns_enabled = false

  network_rules {
    default_action = var.enable_local_debugging ? "Allow" : "Deny"
    bypass         = ["AzureServices"]
  }

  tags = merge(
    var.tre_workspace_tags,
    {
      description = "airlock;import;approved"
    }
  )

  lifecycle { ignore_changes = [tags] }
}

resource "azurerm_private_endpoint" "import_approved_pe" {
  name                = "pe-sa-import-approved-blob-${var.short_workspace_id}"
  location            = var.location
  resource_group_name = var.ws_resource_group_name
  subnet_id           = var.services_subnet_id
  tags                = var.tre_workspace_tags

  lifecycle { ignore_changes = [tags] }

  private_dns_zone_group {
    name                 = "private-dns-zone-group-sa-import-approved"
    private_dns_zone_ids = [data.azurerm_private_dns_zone.blobcore.id]
  }

  private_service_connection {
    name                           = "psc-sa-import-approved-${var.short_workspace_id}"
    private_connection_resource_id = azurerm_storage_account.sa_import_approved.id
    is_manual_connection           = false
    subresource_names              = ["Blob"]
  }
}

# 'Drop' location for export
resource "azurerm_storage_account" "sa_export_internal" {
  name                            = local.export_internal_storage_name
  location                        = var.location
  resource_group_name             = var.ws_resource_group_name
  account_tier                    = "Standard"
  account_replication_type        = "LRS"
  allow_nested_items_to_be_public = false

  # Important! we rely on the fact that the blob created events are issued when the creation of the blobs is done.
  # This is true ONLY when Hierarchical Namespace is DISABLED
  is_hns_enabled = false

  network_rules {
    default_action = var.enable_local_debugging ? "Allow" : "Deny"
    bypass         = ["AzureServices"]
  }

  tags = merge(
    var.tre_workspace_tags,
    {
      description = "airlock;export;internal"
    }
  )

  lifecycle { ignore_changes = [tags] }
}

resource "azurerm_private_endpoint" "export_internal_pe" {
  name                = "pe-sa-export-int-blob-${var.short_workspace_id}"
  location            = var.location
  resource_group_name = var.ws_resource_group_name
  subnet_id           = var.services_subnet_id
  tags                = var.tre_workspace_tags

  lifecycle { ignore_changes = [tags] }

  private_dns_zone_group {
    name                 = "private-dns-zone-group-sa-export-int"
    private_dns_zone_ids = [data.azurerm_private_dns_zone.blobcore.id]
  }

  private_service_connection {
    name                           = "psc-sa-export-int-${var.short_workspace_id}"
    private_connection_resource_id = azurerm_storage_account.sa_export_internal.id
    is_manual_connection           = false
    subresource_names              = ["Blob"]
  }
}

# 'In-progress' location for export
resource "azurerm_storage_account" "sa_export_inprogress" {
  name                            = local.export_inprogress_storage_name
  location                        = var.location
  resource_group_name             = var.ws_resource_group_name
  account_tier                    = "Standard"
  account_replication_type        = "LRS"
  allow_nested_items_to_be_public = false

  # Important! we rely on the fact that the blob created events are issued when the creation of the blobs is done.
  # This is true ONLY when Hierarchical Namespace is DISABLED
  is_hns_enabled = false

  tags = merge(
    var.tre_workspace_tags,
    {
      description = "airlock;export;inprogress"
    }
  )

  lifecycle { ignore_changes = [tags] }
}

resource "azurerm_storage_account_network_rules" "sa_export_inprogress_rules" {
  storage_account_id = azurerm_storage_account.sa_export_inprogress.id

  # The Airlock processor is unable to copy blobs from the export-inprogress storage account when the only method of access from the Airlock processor is a private endpoint in the core VNet,
  # so we need to allow the Airlock processor subnet to access this storage account without using a private endpoint.
  # https://github.com/microsoft/AzureTRE/issues/2098
  virtual_network_subnet_ids = [var.airlock_processor_subnet_id]
  default_action             = var.enable_local_debugging ? "Allow" : "Deny"
  bypass                     = ["AzureServices"]
}

resource "azurerm_private_endpoint" "export_inprogress_pe" {
  name                = "pe-sa-export-ip-blob-${var.short_workspace_id}"
  location            = var.location
  resource_group_name = var.ws_resource_group_name
  subnet_id           = var.services_subnet_id
  tags                = var.tre_workspace_tags

  lifecycle { ignore_changes = [tags] }

  private_dns_zone_group {
    name                 = "private-dns-zone-group-sa-export-ip"
    private_dns_zone_ids = [data.azurerm_private_dns_zone.blobcore.id]
  }

  private_service_connection {
    name                           = "psc-sa-export-ip-${var.short_workspace_id}"
    private_connection_resource_id = azurerm_storage_account.sa_export_inprogress.id
    is_manual_connection           = false
    subresource_names              = ["Blob"]
  }
}

# 'Rejected' location for export
resource "azurerm_storage_account" "sa_export_rejected" {
  name                            = local.export_rejected_storage_name
  location                        = var.location
  resource_group_name             = var.ws_resource_group_name
  account_tier                    = "Standard"
  account_replication_type        = "LRS"
  allow_nested_items_to_be_public = false

  # Important! we rely on the fact that the blob created events are issued when the creation of the blobs is done.
  # This is true ONLY when Hierarchical Namespace is DISABLED
  is_hns_enabled = false

  network_rules {
    default_action = var.enable_local_debugging ? "Allow" : "Deny"
    bypass         = ["AzureServices"]
  }

  tags = merge(
    var.tre_workspace_tags,
    {
      description = "airlock;export;rejected"
    }
  )

  lifecycle { ignore_changes = [tags] }
}

resource "azurerm_private_endpoint" "export_rejected_pe" {
  name                = "pe-sa-export-rej-blob-${var.short_workspace_id}"
  location            = var.location
  resource_group_name = var.ws_resource_group_name
  subnet_id           = var.services_subnet_id
  tags                = var.tre_workspace_tags

  lifecycle { ignore_changes = [tags] }

  private_dns_zone_group {
    name                 = "private-dns-zone-group-sa-export-rej"
    private_dns_zone_ids = [data.azurerm_private_dns_zone.blobcore.id]
  }

  private_service_connection {
    name                           = "psc-sa-export-rej-${var.short_workspace_id}"
    private_connection_resource_id = azurerm_storage_account.sa_export_rejected.id
    is_manual_connection           = false
    subresource_names              = ["Blob"]
  }
}

# 'Blocked' location for export
resource "azurerm_storage_account" "sa_export_blocked" {
  name                            = local.export_blocked_storage_name
  location                        = var.location
  resource_group_name             = var.ws_resource_group_name
  account_tier                    = "Standard"
  account_replication_type        = "LRS"
  allow_nested_items_to_be_public = false

  # Important! we rely on the fact that the blob created events are issued when the creation of the blobs is done.
  # This is true ONLY when Hierarchical Namespace is DISABLED
  is_hns_enabled = false

  network_rules {
    default_action = var.enable_local_debugging ? "Allow" : "Deny"
    bypass         = ["AzureServices"]
  }

  tags = merge(
    var.tre_workspace_tags,
    {
      description = "airlock;export;blocked"
    }
  )

  lifecycle { ignore_changes = [tags] }
}

resource "azurerm_private_endpoint" "export_blocked_pe" {
  name                = "pe-sa-export-blocked-blob-${var.short_workspace_id}"
  location            = var.location
  resource_group_name = var.ws_resource_group_name
  subnet_id           = var.services_subnet_id
  tags                = var.tre_workspace_tags

  lifecycle { ignore_changes = [tags] }

  private_dns_zone_group {
    name                 = "private-dns-zone-group-sa-export-blocked"
    private_dns_zone_ids = [data.azurerm_private_dns_zone.blobcore.id]
  }

  private_service_connection {
    name                           = "psc-sa-export-blocked-${var.short_workspace_id}"
    private_connection_resource_id = azurerm_storage_account.sa_export_blocked.id
    is_manual_connection           = false
    subresource_names              = ["Blob"]
  }
}

# we can't use for_each due to the data object
resource "azurerm_role_assignment" "airlock_blob_data_contributor" {
  count                = length(local.airlock_blob_data_contributor)
  scope                = local.airlock_blob_data_contributor[count.index]
  role_definition_name = "Storage Blob Data Contributor"
  principal_id         = data.azurerm_user_assigned_identity.airlock_id.principal_id
}

# This might be considered redundant since we give Virtual Machine Contributor
# at the subscription level, but best to be explicit.
resource "azurerm_role_assignment" "api_sa_data_contributor" {
  count                = length(local.api_sa_data_contributor)
  scope                = local.api_sa_data_contributor[count.index]
  role_definition_name = "Storage Blob Data Contributor"
  principal_id         = data.azurerm_user_assigned_identity.api_id.principal_id
}
AzureTRE/templates/workspaces/base/terraform/airlock/storage_accounts.tf/0
{ "file_path": "AzureTRE/templates/workspaces/base/terraform/airlock/storage_accounts.tf", "repo_id": "AzureTRE", "token_count": 4266 }
137
resource "azurerm_virtual_network" "ws" { name = "vnet-${local.workspace_resource_name_suffix}" location = var.location resource_group_name = var.ws_resource_group_name address_space = local.address_spaces tags = var.tre_workspace_tags lifecycle { ignore_changes = [tags] } } resource "azurerm_subnet" "services" { name = "ServicesSubnet" virtual_network_name = azurerm_virtual_network.ws.name resource_group_name = var.ws_resource_group_name address_prefixes = [local.services_subnet_address_prefix] # notice that private endpoints do not adhere to NSG rules private_endpoint_network_policies_enabled = false private_link_service_network_policies_enabled = true } resource "azurerm_subnet" "webapps" { name = "WebAppsSubnet" virtual_network_name = azurerm_virtual_network.ws.name resource_group_name = var.ws_resource_group_name address_prefixes = [local.webapps_subnet_address_prefix] # notice that private endpoints do not adhere to NSG rules private_endpoint_network_policies_enabled = false private_link_service_network_policies_enabled = true delegation { name = "delegation" service_delegation { name = "Microsoft.Web/serverFarms" actions = ["Microsoft.Network/virtualNetworks/subnets/action"] } } depends_on = [ # meant to resolve AnotherOperation errors with one operation in the vnet at a time azurerm_subnet.services ] } resource "azurerm_virtual_network_peering" "ws_core_peer" { name = "ws-core-peer-${local.workspace_resource_name_suffix}" resource_group_name = var.ws_resource_group_name virtual_network_name = azurerm_virtual_network.ws.name remote_virtual_network_id = data.azurerm_virtual_network.core.id triggers = { remote_address_space = join(",", data.azurerm_virtual_network.core.address_space) } # meant to resolve AnotherOperation errors with one operation in the vnet at a time depends_on = [ azurerm_subnet.webapps ] } moved { from = azurerm_virtual_network_peering.ws-core-peer to = azurerm_virtual_network_peering.ws_core_peer } resource "azurerm_virtual_network_peering" "core_ws_peer" { name = "core-ws-peer-${local.workspace_resource_name_suffix}" resource_group_name = local.core_resource_group_name virtual_network_name = local.core_vnet remote_virtual_network_id = azurerm_virtual_network.ws.id triggers = { remote_address_space = join(",", azurerm_virtual_network.ws.address_space) } # meant to resolve AnotherOperation errors with one operation in the vnet at a time depends_on = [ azurerm_virtual_network_peering.ws_core_peer ] } moved { from = azurerm_virtual_network_peering.core-ws-peer to = azurerm_virtual_network_peering.core_ws_peer } resource "azurerm_subnet_route_table_association" "rt_services_subnet_association" { route_table_id = data.azurerm_route_table.rt.id subnet_id = azurerm_subnet.services.id depends_on = [ # meant to resolve AnotherOperation errors with one operation in the vnet at a time azurerm_virtual_network_peering.core_ws_peer ] } resource "azurerm_subnet_route_table_association" "rt_webapps_subnet_association" { route_table_id = data.azurerm_route_table.rt.id subnet_id = azurerm_subnet.webapps.id depends_on = [ # meant to resolve AnotherOperation errors with one operation in the vnet at a time azurerm_subnet_route_table_association.rt_services_subnet_association ] } module "terraform_azurerm_environment_configuration" { source = "git::https://github.com/microsoft/terraform-azurerm-environment-configuration.git?ref=0.2.0" arm_environment = var.arm_environment }
AzureTRE/templates/workspaces/base/terraform/network/network.tf/0
{ "file_path": "AzureTRE/templates/workspaces/base/terraform/network/network.tf", "repo_id": "AzureTRE", "token_count": 1451 }
138
import { Configuration, PublicClientApplication } from "@azure/msal-browser";
import config from "./config.json";

// MSAL configuration
const configuration: Configuration = {
  auth: {
    clientId: config.rootClientId,
    authority: `${config.activeDirectoryUri}/${config.rootTenantId}`,
    redirectUri: `${window.location.protocol}//${window.location.hostname}:${window.location.port}`,
    postLogoutRedirectUri: `${window.location.protocol}//${window.location.hostname}:${window.location.port}/logout`
  }
};

export const pca = new PublicClientApplication(configuration);
AzureTRE/ui/app/src/authConfig.ts/0
{ "file_path": "AzureTRE/ui/app/src/authConfig.ts", "repo_id": "AzureTRE", "token_count": 201 }
139
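For context, a minimal sketch of how the exported `pca` instance from the file above is typically consumed in a React entry point. This is an illustration only, not a file from the dump: the `App` component, the entry-point layout, and the `@azure/msal-react` package are assumptions here.

// Hypothetical index.tsx-style entry point; MsalProvider and App are assumed for illustration.
import React from "react";
import ReactDOM from "react-dom";
import { MsalProvider } from "@azure/msal-react";
import { pca } from "./authConfig";
import { App } from "./App";

// Wrapping the tree in MsalProvider lets child components use useMsal() / useIsAuthenticated().
ReactDOM.render(
  <React.StrictMode>
    <MsalProvider instance={pca}>
      <App />
    </MsalProvider>
  </React.StrictMode>,
  document.getElementById("root")
);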
import React, { useCallback, useContext, useState } from 'react';
import { ComponentAction, VMPowerStates, Resource } from '../../models/resource';
import { Callout, DefaultPalette, FontWeights, IconButton, IStackStyles, IStyle, mergeStyleSets, PrimaryButton, Shimmer, Stack, Text, TooltipHost } from '@fluentui/react';
import { useNavigate } from 'react-router-dom';
import moment from 'moment';
import { ResourceContextMenu } from './ResourceContextMenu';
import { useComponentManager } from '../../hooks/useComponentManager';
import { StatusBadge } from './StatusBadge';
import { actionsDisabledStates, successStates } from '../../models/operation';
import { PowerStateBadge } from './PowerStateBadge';
import { ResourceType } from '../../models/resourceType';
import { WorkspaceContext } from '../../contexts/WorkspaceContext';
import { CostsTag } from './CostsTag';
import { ConfirmCopyUrlToClipboard } from './ConfirmCopyUrlToClipboard';
import { AppRolesContext } from '../../contexts/AppRolesContext';
import { SecuredByRole } from './SecuredByRole';
import { RoleName, WorkspaceRoleName } from '../../models/roleNames';

interface ResourceCardProps {
  resource: Resource,
  itemId: number,
  selectResource?: (resource: Resource) => void,
  onUpdate: (resource: Resource) => void,
  onDelete: (resource: Resource) => void,
  readonly?: boolean;
  isExposedExternally?: boolean;
}

export const ResourceCard: React.FunctionComponent<ResourceCardProps> = (props: ResourceCardProps) => {
  const [loading] = useState(false);
  const [showCopyUrl, setShowCopyUrl] = useState(false);
  const [showInfo, setShowInfo] = useState(false);
  const workspaceCtx = useContext(WorkspaceContext);

  const latestUpdate = useComponentManager(
    props.resource,
    (r: Resource) => { props.onUpdate(r); },
    (r: Resource) => { props.onDelete(r); }
  );

  const navigate = useNavigate();

  const costTagRolesByResourceType = {
    [ResourceType.Workspace]: [RoleName.TREAdmin, WorkspaceRoleName.WorkspaceOwner],
    [ResourceType.SharedService]: [RoleName.TREAdmin],
    [ResourceType.WorkspaceService]: [WorkspaceRoleName.WorkspaceOwner],
    [ResourceType.UserResource]: [WorkspaceRoleName.WorkspaceOwner] // when implemented WorkspaceRoleName.WorkspaceResearcher]
  };
  const costsTagsRoles = costTagRolesByResourceType[props.resource.resourceType];

  const goToResource = useCallback(() => {
    const { resource } = props;
    const { resourceType, resourcePath, id } = resource;
    // shared services are accessed from the root and the workspace, have to handle the URL differently
    const resourceUrl = (ResourceType.SharedService === resourceType) && (workspaceCtx.workspace.id) ? id : resourcePath;
    props.selectResource?.(resource);
    navigate(resourceUrl);
  }, [navigate, props, workspaceCtx.workspace]);

  let connectUri = props.resource.properties && props.resource.properties.connection_uri;

  const shouldDisable = () => {
    return latestUpdate.componentAction === ComponentAction.Lock
      || actionsDisabledStates.includes(props.resource.deploymentStatus)
      || !props.resource.isEnabled
      || (props.resource.azureStatus?.powerState && props.resource.azureStatus.powerState !== VMPowerStates.Running);
  };

  const resourceStatus = latestUpdate.operation?.status ? latestUpdate.operation.status : props.resource.deploymentStatus;

  // Decide what to show as the top-right header badge
  let headerBadge = <></>;
  if (
    latestUpdate.componentAction !== ComponentAction.Lock
    && props.resource.azureStatus?.powerState
    && successStates.includes(resourceStatus)
    && props.resource.isEnabled
  ) {
    headerBadge = <PowerStateBadge state={props.resource.azureStatus.powerState} />;
  } else {
    headerBadge = <StatusBadge resource={props.resource} status={resourceStatus} />;
  }

  const appRoles = useContext(AppRolesContext);
  const authNotProvisioned = props.resource.resourceType === ResourceType.Workspace && !props.resource.properties.scope_id;
  const enableClickOnCard = !authNotProvisioned || appRoles.roles.includes(RoleName.TREAdmin);
  const workspaceId = props.resource.resourceType === ResourceType.Workspace ? props.resource.id : "";
  const cardStyles = enableClickOnCard ? clickableCardStyles : noNavCardStyles;

  return (
    <>
      {
        loading ?
          <Stack styles={noNavCardStyles}>
            <Stack.Item style={headerStyles}>
              <Shimmer width="70%" />
            </Stack.Item>
            <Stack.Item grow={3} style={bodyStyles}>
              <br />
              <Shimmer />
              <br />
              <Shimmer />
            </Stack.Item>
            <Stack.Item style={footerStyles}>
              <Shimmer />
            </Stack.Item>
          </Stack>
          :
          <TooltipHost
            content={authNotProvisioned ? "Authentication has not yet been provisioned for this resource." : ""}
            id={`card-${props.resource.id}`}
            styles={{ root: { width: '100%' } }}
          >
            <Stack
              styles={cardStyles}
              aria-labelledby={`card-${props.resource.id}`}
              onClick={() => { if (enableClickOnCard) goToResource(); }}
            >
              <Stack horizontal>
                <Stack.Item grow={5} style={headerStyles}>{props.resource.properties.display_name}</Stack.Item>
                {headerBadge}
              </Stack>
              <Stack.Item grow={3} style={bodyStyles}>
                <Text>{props.resource.properties.description}</Text>
              </Stack.Item>
              <Stack horizontal style={footerStyles}>
                <Stack.Item grow>
                  <Stack horizontal>
                    <Stack.Item>
                      <IconButton
                        iconProps={{ iconName: 'Info' }}
                        id={`item-${props.itemId}`}
                        onClick={(e) => {
                          // Stop onClick triggering parent handler
                          e.stopPropagation();
                          setShowInfo(!showInfo);
                        }}
                      />
                    </Stack.Item>
                    <Stack.Item>
                      {
                        !props.readonly &&
                        <ResourceContextMenu
                          resource={props.resource}
                          componentAction={latestUpdate.componentAction}
                        />
                      }
                    </Stack.Item>
                  </Stack>
                </Stack.Item>
                <SecuredByRole
                  allowedAppRoles={costsTagsRoles}
                  allowedWorkspaceRoles={costsTagsRoles}
                  workspaceId={workspaceId}
                  element={
                    <CostsTag resourceId={props.resource.id} />
                  }
                />
                {
                  connectUri &&
                  <PrimaryButton
                    onClick={(e) => {
                      e.stopPropagation();
                      props.isExposedExternally === false ? setShowCopyUrl(true) : window.open(connectUri);
                    }}
                    disabled={shouldDisable()}
                    title={shouldDisable() ? 'Resource must be enabled, successfully deployed & powered on to connect' : 'Connect to resource'}
                    className={styles.button}
                  >
                    Connect
                  </PrimaryButton>
                }
                {
                  showCopyUrl &&
                  <ConfirmCopyUrlToClipboard onDismiss={() => setShowCopyUrl(false)} resource={props.resource} />
                }
              </Stack>
            </Stack>
          </TooltipHost>
      }
      {
        showInfo &&
        <Callout
          className={styles.callout}
          ariaLabelledBy={`item-${props.itemId}-label`}
          ariaDescribedBy={`item-${props.itemId}-description`}
          role="dialog"
          gapSpace={0}
          target={`#item-${props.itemId}`}
          onDismiss={() => setShowInfo(false)}
          setInitialFocus
        >
          <Text block variant="xLarge" className={styles.title} id={`item-${props.itemId}-label`}>
            {props.resource.templateName} ({props.resource.templateVersion})
          </Text>
          <Text block variant="small" id={`item-${props.itemId}-description`}>
            <Stack>
              <Stack.Item>
                <Stack horizontal tokens={{ childrenGap: 5 }}>
                  <Stack.Item style={calloutKeyStyles}>Resource Id:</Stack.Item>
                  <Stack.Item style={calloutValueStyles}>{props.resource.id}</Stack.Item>
                </Stack>
                <Stack horizontal tokens={{ childrenGap: 5 }}>
                  <Stack.Item style={calloutKeyStyles}>Last Modified By:</Stack.Item>
                  <Stack.Item style={calloutValueStyles}>{props.resource.user.name}</Stack.Item>
                </Stack>
                <Stack horizontal tokens={{ childrenGap: 5 }}>
                  <Stack.Item style={calloutKeyStyles}>Last Updated:</Stack.Item>
                  <Stack.Item style={calloutValueStyles}>{moment.unix(props.resource.updatedWhen).toDate().toDateString()}</Stack.Item>
                </Stack>
              </Stack.Item>
            </Stack>
          </Text>
        </Callout>
      }
    </>
  );
};

const baseCardStyles: IStyle = {
  width: '100%',
  borderRadius: '5px',
  boxShadow: '0 1.6px 3.6px 0 rgba(0,0,0,.132),0 .3px .9px 0 rgba(0,0,0,.108)',
  backgroundColor: DefaultPalette.white,
  padding: 10
};

const noNavCardStyles: IStackStyles = {
  root: {
    ...baseCardStyles
  }
};

const clickableCardStyles: IStackStyles = {
  root: {
    ...baseCardStyles,
    "&:hover": {
      transition: 'all .2s ease-in-out',
      transform: 'scale(1.02)',
      cursor: 'pointer'
    }
  }
};

const headerStyles: React.CSSProperties = {
  padding: '5px 10px',
  fontSize: '1.2rem',
};

const bodyStyles: React.CSSProperties = {
  padding: '10px 10px',
  minHeight: '40px'
};

const footerStyles: React.CSSProperties = {
  minHeight: '30px',
  alignItems: 'center'
};

const calloutKeyStyles: React.CSSProperties = {
  width: 160
};

const calloutValueStyles: React.CSSProperties = {
  width: 180
};

const styles = mergeStyleSets({
  button: {
    width: 130,
    margin: 10
  },
  callout: {
    width: 350,
    padding: '20px 24px',
  },
  title: {
    marginBottom: 12,
    fontWeight: FontWeights.semilight
  },
  link: {
    display: 'block',
    marginTop: 20,
  }
});
AzureTRE/ui/app/src/components/shared/ResourceCard.tsx/0
{ "file_path": "AzureTRE/ui/app/src/components/shared/ResourceCard.tsx", "repo_id": "AzureTRE", "token_count": 4374 }
140
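As a usage illustration only (not a file from the dump above), here is a minimal sketch of how a parent list might render the `ResourceCard` component, wiring its required `onUpdate` and `onDelete` callbacks into local state. The `ResourceCardList` component, its props, and the state helpers are assumptions for illustration.

// Hypothetical parent component; not part of the AzureTRE source shown above.
import React, { useState } from "react";
import { Stack } from "@fluentui/react";
import { Resource } from "../../models/resource";
import { ResourceCard } from "./ResourceCard";

export const ResourceCardList: React.FunctionComponent<{ initialResources: Array<Resource> }> = (props) => {
  const [resources, setResources] = useState(props.initialResources);

  // Replace a resource in place when a card reports an update, or drop it when deleted.
  const updateResource = (r: Resource) => setResources(prev => prev.map(x => (x.id === r.id ? r : x)));
  const removeResource = (r: Resource) => setResources(prev => prev.filter(x => x.id !== r.id));

  return (
    <Stack tokens={{ childrenGap: 10 }}>
      {resources.map((resource, i) => (
        <ResourceCard
          key={resource.id}
          resource={resource}
          itemId={i}
          onUpdate={updateResource}
          onDelete={removeResource}
        />
      ))}
    </Stack>
  );
};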