## The relevant files are currently on a shared Google
## drive at https://drive.google.com/drive/folders/1kC0I2UGl2ltrluI9NqDjaQJGw5iliw_J
## Monitor for changes and eventually migrate to nlp dataset
curl -L 'https://drive.google.com/uc?export=download&id=1Jjhbal535VVz2ap4v4r_rN1UEHTdLK5P' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > train.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1ZfRcQThdtAR5PPRjIDtrVP7BtXSCUBbm' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > dev.txt.tmp
curl -L 'https://drive.google.com/uc?export=download&id=1u9mb7kNJHWQCWyweMDRMuTFoOHOfeBTH' \
| grep -v "^#" | cut -f 2,3 | tr '\t' ' ' > test.txt.tmp

export MAX_LENGTH=128
export BERT_MODEL=bert-base-multilingual-cased

python3 scripts/preprocess.py train.txt.tmp $BERT_MODEL $MAX_LENGTH > train.txt
python3 scripts/preprocess.py dev.txt.tmp $BERT_MODEL $MAX_LENGTH > dev.txt
python3 scripts/preprocess.py test.txt.tmp $BERT_MODEL $MAX_LENGTH > test.txt

cat train.txt dev.txt test.txt | cut -d " " -f 2 | grep -v "^$" | sort | uniq > labels.txt

export OUTPUT_DIR=germeval-model
export BATCH_SIZE=32
export NUM_EPOCHS=3
export SAVE_STEPS=750
export SEED=1

python3 run_ner.py \
--task_type NER \
--data_dir . \
--labels ./labels.txt \
--model_name_or_path $BERT_MODEL \
--output_dir $OUTPUT_DIR \
--max_seq_length $MAX_LENGTH \
--num_train_epochs $NUM_EPOCHS \
--per_gpu_train_batch_size $BATCH_SIZE \
--save_steps $SAVE_STEPS \
--seed $SEED \
--do_train \
--do_eval \
--do_predict
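The `scripts/preprocess.py` helper invoked above is not included in this excerpt. As a rough sketch of what such a step typically does — re-segmenting the CoNLL-style token stream so that no example exceeds `MAX_LENGTH` subword tokens after tokenization — it can be thought of as follows; the real script may differ in details:

```python
# Sketch of a MAX_LENGTH-aware preprocessing pass over a CoNLL-style file
# ("token label" per line, blank line between sentences). Illustrative only;
# the actual scripts/preprocess.py may differ.
import sys

from transformers import AutoTokenizer

dataset, model_name_or_path, max_len = sys.argv[1], sys.argv[2], int(sys.argv[3])

tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
max_len -= tokenizer.num_special_tokens_to_add()  # leave room for [CLS]/[SEP]

subword_len_counter = 0
with open(dataset, "rt") as f_p:
    for line in f_p:
        line = line.rstrip()
        if not line:  # sentence boundary: reset the running subword count
            print(line)
            subword_len_counter = 0
            continue
        token = line.split()[0]
        current_subwords_len = len(tokenizer.tokenize(token))
        if current_subwords_len == 0:  # e.g. tokens made of control characters
            continue
        if subword_len_counter + current_subwords_len > max_len:
            print("")  # force a new example before the budget is exceeded
            print(line)
            subword_len_counter = current_subwords_len
            continue
        subword_len_counter += current_subwords_len
        print(line)
```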
AdaMix/examples/legacy/token-classification/run.sh/0
{ "file_path": "AdaMix/examples/legacy/token-classification/run.sh", "repo_id": "AdaMix", "token_count": 643 }
# Text Summarization with Pretrained Encoders

This folder contains part of the code necessary to reproduce the results on abstractive summarization from the article [Text Summarization with Pretrained Encoders](https://arxiv.org/pdf/1908.08345.pdf) by [Yang Liu](https://nlp-yang.github.io/) and [Mirella Lapata](https://homepages.inf.ed.ac.uk/mlap/). It can also be used to summarize any document.

The original code can be found in Yang Liu's [github repository](https://github.com/nlpyang/PreSumm).

The model is loaded with the pre-trained weights for the abstractive summarization model trained on the CNN/Daily Mail dataset, first with an extractive and then with an abstractive objective.

## Setup

```
git clone https://github.com/huggingface/transformers && cd transformers
pip install .
pip install nltk py-rouge
cd examples/seq2seq/bertabs
```

## Reproduce the authors' ROUGE score

To reproduce the authors' results on the CNN/Daily Mail dataset, first download both the CNN and Daily Mail datasets [from Kyunghyun Cho's website](https://cs.nyu.edu/~kcho/DMQA/) (the links next to "Stories") into the same folder. Then uncompress the archives by running:

```bash
tar -xvf cnn_stories.tgz && tar -xvf dailymail_stories.tgz
```

And move all the stories to the same folder. We will refer to the path where you uncompressed both archives as `$DATA_PATH`.

Then run the following in the same folder as `run_summarization.py`:

```bash
python run_summarization.py \
    --documents_dir $DATA_PATH \
    --summaries_output_dir $SUMMARIES_PATH \ # optional
    --no_cuda false \
    --batch_size 4 \
    --min_length 50 \
    --max_length 200 \
    --beam_size 5 \
    --alpha 0.95 \
    --block_trigram true \
    --compute_rouge true
```

The script executes on GPU if one is available and if `no_cuda` is not set to `true`. Inference on multiple GPUs is not supported yet. The ROUGE scores will be displayed in the console at the end of evaluation and written to a `rouge_scores.txt` file. The script takes about 30 hours with a single Tesla V100 GPU and a batch size of 10 (300,000 texts to summarize).

## Summarize any text

Put the documents that you would like to summarize in a folder (the path to which is referred to as `$DATA_PATH` below) and run the following in the same folder as `run_summarization.py`:

```bash
python run_summarization.py \
    --documents_dir $DATA_PATH \
    --summaries_output_dir $SUMMARIES_PATH \ # optional
    --no_cuda false \
    --batch_size 4 \
    --min_length 50 \
    --max_length 200 \
    --beam_size 5 \
    --alpha 0.95 \
    --block_trigram true
```

You may want to play around with `min_length`, `max_length` and `alpha` to suit your use case. If you want to compute ROUGE on another dataset you will need to tweak the stories/summaries import in `utils_summarization.py` and tell it where to fetch the reference summaries.
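If you adapt `utils_summarization.py` to a new dataset, the main piece to replicate is how it separates each document from its reference summary. For the CNN/Daily Mail `.story` files, summary sentences are the lines following `@highlight` markers; a minimal, illustrative loader (the function name and details here are assumptions, not the repository's actual code) could look like:

```python
# Illustrative sketch: split a CNN/Daily Mail .story file into article lines
# and the reference-summary lines flagged by "@highlight". The real parsing
# in utils_summarization.py may differ.
from typing import List, Tuple


def split_story(raw_story: str) -> Tuple[List[str], List[str]]:
    story_lines: List[str] = []
    summary_lines: List[str] = []
    next_is_highlight = False
    for line in raw_story.splitlines():
        line = line.strip()
        if not line:
            continue
        if line == "@highlight":
            # the non-empty line after each @highlight marker is a summary sentence
            next_is_highlight = True
        elif next_is_highlight:
            summary_lines.append(line)
            next_is_highlight = False
        else:
            story_lines.append(line)
    return story_lines, summary_lines


if __name__ == "__main__":
    with open("example.story", encoding="utf-8") as f:  # hypothetical input file
        story, summary = split_story(f.read())
    print(len(story), "article lines,", len(summary), "summary sentences")
```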
AdaMix/examples/research_projects/bertabs/README.md/0
{ "file_path": "AdaMix/examples/research_projects/bertabs/README.md", "repo_id": "AdaMix", "token_count": 908 }
from __future__ import absolute_import, division, print_function import argparse import glob import logging import os import random import time import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset from torch.utils.data.distributed import DistributedSampler from tqdm import tqdm, trange import transformers from src.modeling_highway_bert import DeeBertForSequenceClassification from src.modeling_highway_roberta import DeeRobertaForSequenceClassification from transformers import ( WEIGHTS_NAME, AdamW, BertConfig, BertTokenizer, RobertaConfig, RobertaTokenizer, get_linear_schedule_with_warmup, ) from transformers import glue_compute_metrics as compute_metrics from transformers import glue_convert_examples_to_features as convert_examples_to_features from transformers import glue_output_modes as output_modes from transformers import glue_processors as processors from transformers.trainer_utils import is_main_process try: from torch.utils.tensorboard import SummaryWriter except ImportError: from tensorboardX import SummaryWriter logger = logging.getLogger(__name__) MODEL_CLASSES = { "bert": (BertConfig, DeeBertForSequenceClassification, BertTokenizer), "roberta": (RobertaConfig, DeeRobertaForSequenceClassification, RobertaTokenizer), } def set_seed(args): random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if args.n_gpu > 0: torch.cuda.manual_seed_all(args.seed) def get_wanted_result(result): if "spearmanr" in result: print_result = result["spearmanr"] elif "f1" in result: print_result = result["f1"] elif "mcc" in result: print_result = result["mcc"] elif "acc" in result: print_result = result["acc"] else: raise ValueError("Primary metric unclear in the results") return print_result def train(args, train_dataset, model, tokenizer, train_highway=False): """ Train the model """ if args.local_rank in [-1, 0]: tb_writer = SummaryWriter() args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu) train_sampler = RandomSampler(train_dataset) if args.local_rank == -1 else DistributedSampler(train_dataset) train_dataloader = DataLoader(train_dataset, sampler=train_sampler, batch_size=args.train_batch_size) if args.max_steps > 0: t_total = args.max_steps args.num_train_epochs = args.max_steps // (len(train_dataloader) // args.gradient_accumulation_steps) + 1 else: t_total = len(train_dataloader) // args.gradient_accumulation_steps * args.num_train_epochs # Prepare optimizer and schedule (linear warmup and decay) no_decay = ["bias", "LayerNorm.weight"] if train_highway: optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if ("highway" in n) and (not any(nd in n for nd in no_decay)) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if ("highway" in n) and (any(nd in n for nd in no_decay)) ], "weight_decay": 0.0, }, ] else: optimizer_grouped_parameters = [ { "params": [ p for n, p in model.named_parameters() if ("highway" not in n) and (not any(nd in n for nd in no_decay)) ], "weight_decay": args.weight_decay, }, { "params": [ p for n, p in model.named_parameters() if ("highway" not in n) and (any(nd in n for nd in no_decay)) ], "weight_decay": 0.0, }, ] optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon) scheduler = get_linear_schedule_with_warmup( optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=t_total ) if args.fp16: try: from apex import amp except ImportError: 
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, optimizer = amp.initialize(model, optimizer, opt_level=args.fp16_opt_level) # multi-gpu training (should be after apex fp16 initialization) if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) # Train! logger.info("***** Running training *****") logger.info(" Num examples = %d", len(train_dataset)) logger.info(" Num Epochs = %d", args.num_train_epochs) logger.info(" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size) logger.info( " Total train batch size (w. parallel, distributed & accumulation) = %d", args.train_batch_size * args.gradient_accumulation_steps * (torch.distributed.get_world_size() if args.local_rank != -1 else 1), ) logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", t_total) global_step = 0 tr_loss, logging_loss = 0.0, 0.0 model.zero_grad() train_iterator = trange(int(args.num_train_epochs), desc="Epoch", disable=args.local_rank not in [-1, 0]) set_seed(args) # Added here for reproductibility (even between python 2 and 3) for _ in train_iterator: epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0]) for step, batch in enumerate(epoch_iterator): model.train() batch = tuple(t.to(args.device) for t in batch) inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet"] else None ) # XLM, DistilBERT and RoBERTa don't use segment_ids inputs["train_highway"] = train_highway outputs = model(**inputs) loss = outputs[0] # model outputs are always tuple in transformers (see doc) if args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() tr_loss += loss.item() if (step + 1) % args.gradient_accumulation_steps == 0: if args.fp16: torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm) optimizer.step() scheduler.step() # Update learning rate schedule model.zero_grad() global_step += 1 if args.local_rank in [-1, 0] and args.logging_steps > 0 and global_step % args.logging_steps == 0: # Log metrics if ( args.local_rank == -1 and args.evaluate_during_training ): # Only evaluate when single GPU otherwise metrics may not average well results = evaluate(args, model, tokenizer) for key, value in results.items(): tb_writer.add_scalar("eval_{}".format(key), value, global_step) tb_writer.add_scalar("lr", scheduler.get_lr()[0], global_step) tb_writer.add_scalar("loss", (tr_loss - logging_loss) / args.logging_steps, global_step) logging_loss = tr_loss if args.local_rank in [-1, 0] and args.save_steps > 0 and global_step % args.save_steps == 0: # Save model checkpoint output_dir = os.path.join(args.output_dir, "checkpoint-{}".format(global_step)) if not os.path.exists(output_dir): os.makedirs(output_dir) model_to_save = ( model.module if hasattr(model, 
"module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(output_dir) torch.save(args, os.path.join(output_dir, "training_args.bin")) logger.info("Saving model checkpoint to %s", output_dir) if args.max_steps > 0 and global_step > args.max_steps: epoch_iterator.close() break if args.max_steps > 0 and global_step > args.max_steps: train_iterator.close() break if args.local_rank in [-1, 0]: tb_writer.close() return global_step, tr_loss / global_step def evaluate(args, model, tokenizer, prefix="", output_layer=-1, eval_highway=False): # Loop to handle MNLI double evaluation (matched, mis-matched) eval_task_names = ("mnli", "mnli-mm") if args.task_name == "mnli" else (args.task_name,) eval_outputs_dirs = (args.output_dir, args.output_dir + "-MM") if args.task_name == "mnli" else (args.output_dir,) results = {} for eval_task, eval_output_dir in zip(eval_task_names, eval_outputs_dirs): eval_dataset = load_and_cache_examples(args, eval_task, tokenizer, evaluate=True) if not os.path.exists(eval_output_dir) and args.local_rank in [-1, 0]: os.makedirs(eval_output_dir) args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu) # Note that DistributedSampler samples randomly eval_sampler = SequentialSampler(eval_dataset) if args.local_rank == -1 else DistributedSampler(eval_dataset) eval_dataloader = DataLoader(eval_dataset, sampler=eval_sampler, batch_size=args.eval_batch_size) # multi-gpu eval if args.n_gpu > 1: model = torch.nn.DataParallel(model) # Eval! logger.info("***** Running evaluation {} *****".format(prefix)) logger.info(" Num examples = %d", len(eval_dataset)) logger.info(" Batch size = %d", args.eval_batch_size) eval_loss = 0.0 nb_eval_steps = 0 preds = None out_label_ids = None exit_layer_counter = {(i + 1): 0 for i in range(model.num_layers)} st = time.time() for batch in tqdm(eval_dataloader, desc="Evaluating"): model.eval() batch = tuple(t.to(args.device) for t in batch) with torch.no_grad(): inputs = {"input_ids": batch[0], "attention_mask": batch[1], "labels": batch[3]} if args.model_type != "distilbert": inputs["token_type_ids"] = ( batch[2] if args.model_type in ["bert", "xlnet"] else None ) # XLM, DistilBERT and RoBERTa don't use segment_ids if output_layer >= 0: inputs["output_layer"] = output_layer outputs = model(**inputs) if eval_highway: exit_layer_counter[outputs[-1]] += 1 tmp_eval_loss, logits = outputs[:2] eval_loss += tmp_eval_loss.mean().item() nb_eval_steps += 1 if preds is None: preds = logits.detach().cpu().numpy() out_label_ids = inputs["labels"].detach().cpu().numpy() else: preds = np.append(preds, logits.detach().cpu().numpy(), axis=0) out_label_ids = np.append(out_label_ids, inputs["labels"].detach().cpu().numpy(), axis=0) eval_time = time.time() - st logger.info("Eval time: {}".format(eval_time)) eval_loss = eval_loss / nb_eval_steps if args.output_mode == "classification": preds = np.argmax(preds, axis=1) elif args.output_mode == "regression": preds = np.squeeze(preds) result = compute_metrics(eval_task, preds, out_label_ids) results.update(result) if eval_highway: logger.info("Exit layer counter: {}".format(exit_layer_counter)) actual_cost = sum([l * c for l, c in exit_layer_counter.items()]) full_cost = len(eval_dataloader) * model.num_layers logger.info("Expected saving: {}".format(actual_cost / full_cost)) if args.early_exit_entropy >= 0: save_fname = ( args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/entropy_{}.npy".format(args.early_exit_entropy) ) if not 
os.path.exists(os.path.dirname(save_fname)): os.makedirs(os.path.dirname(save_fname)) print_result = get_wanted_result(result) np.save(save_fname, np.array([exit_layer_counter, eval_time, actual_cost / full_cost, print_result])) logger.info("Entropy={}\tResult={:.2f}".format(args.early_exit_entropy, 100 * print_result)) output_eval_file = os.path.join(eval_output_dir, prefix, "eval_results.txt") with open(output_eval_file, "w") as writer: logger.info("***** Eval results {} *****".format(prefix)) for key in sorted(result.keys()): logger.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) return results def load_and_cache_examples(args, task, tokenizer, evaluate=False): if args.local_rank not in [-1, 0] and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache processor = processors[task]() output_mode = output_modes[task] # Load data features from cache or dataset file cached_features_file = os.path.join( args.data_dir, "cached_{}_{}_{}_{}".format( "dev" if evaluate else "train", list(filter(None, args.model_name_or_path.split("/"))).pop(), str(args.max_seq_length), str(task), ), ) if os.path.exists(cached_features_file) and not args.overwrite_cache: logger.info("Loading features from cached file %s", cached_features_file) features = torch.load(cached_features_file) else: logger.info("Creating features from dataset file at %s", args.data_dir) label_list = processor.get_labels() if task in ["mnli", "mnli-mm"] and args.model_type in ["roberta"]: # HACK(label indices are swapped in RoBERTa pretrained model) label_list[1], label_list[2] = label_list[2], label_list[1] examples = ( processor.get_dev_examples(args.data_dir) if evaluate else processor.get_train_examples(args.data_dir) ) features = convert_examples_to_features( examples, tokenizer, label_list=label_list, max_length=args.max_seq_length, output_mode=output_mode, ) if args.local_rank in [-1, 0]: logger.info("Saving features into cached file %s", cached_features_file) torch.save(features, cached_features_file) if args.local_rank == 0 and not evaluate: torch.distributed.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache # Convert to Tensors and build dataset all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long) all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long) if features[0].token_type_ids is None: # For RoBERTa (a potential bug!) all_token_type_ids = torch.tensor([[0] * args.max_seq_length for f in features], dtype=torch.long) else: all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long) if output_mode == "classification": all_labels = torch.tensor([f.label for f in features], dtype=torch.long) elif output_mode == "regression": all_labels = torch.tensor([f.label for f in features], dtype=torch.float) dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels) return dataset def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. 
Should contain the .tsv files (or other data files) for the task.", ) parser.add_argument( "--model_type", default=None, type=str, required=True, help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys()), ) parser.add_argument( "--model_name_or_path", default=None, type=str, required=True, help="Path to pre-trained model or shortcut name.", ) parser.add_argument( "--task_name", default=None, type=str, required=True, help="The name of the task to train selected in the list: " + ", ".join(processors.keys()), ) parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--plot_data_dir", default="./plotting/", type=str, required=False, help="The directory to store data for plotting figures.", ) # Other parameters parser.add_argument( "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name" ) parser.add_argument( "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from huggingface.co", ) parser.add_argument( "--max_seq_length", default=128, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_eval", action="store_true", help="Whether to run eval on the dev set.") parser.add_argument( "--evaluate_during_training", action="store_true", help="Rul evaluation during training at each logging step." ) parser.add_argument( "--do_lower_case", action="store_true", help="Set this flag if you are using an uncased model." ) parser.add_argument("--eval_each_highway", action="store_true", help="Set this flag to evaluate each highway.") parser.add_argument( "--eval_after_first_stage", action="store_true", help="Set this flag to evaluate after training only bert (not highway).", ) parser.add_argument("--eval_highway", action="store_true", help="Set this flag if it's evaluating highway models") parser.add_argument("--per_gpu_train_batch_size", default=8, type=int, help="Batch size per GPU/CPU for training.") parser.add_argument( "--per_gpu_eval_batch_size", default=8, type=int, help="Batch size per GPU/CPU for evaluation." ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.") parser.add_argument( "--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform." ) parser.add_argument( "--max_steps", default=-1, type=int, help="If > 0: set total number of training steps to perform. 
Override num_train_epochs.", ) parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--early_exit_entropy", default=-1, type=float, help="Entropy threshold for early exit.") parser.add_argument("--logging_steps", type=int, default=50, help="Log every X updates steps.") parser.add_argument("--save_steps", type=int, default=50, help="Save checkpoint every X updates steps.") parser.add_argument( "--eval_all_checkpoints", action="store_true", help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number", ) parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA when available") parser.add_argument( "--overwrite_output_dir", action="store_true", help="Overwrite the content of the output directory" ) parser.add_argument( "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets" ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank") parser.add_argument("--server_ip", type=str, default="", help="For distant debugging.") parser.add_argument("--server_port", type=str, default="", help="For distant debugging.") args = parser.parse_args() if ( os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir ): raise ValueError( "Output directory ({}) already exists and is not empty. 
Use --overwrite_output_dir to overcome.".format( args.output_dir ) ) # Setup distant debugging if needed if args.server_ip and args.server_port: # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script import ptvsd print("Waiting for debugger attach") ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True) ptvsd.wait_for_attach() # Setup CUDA, GPU & distributed training if args.local_rank == -1 or args.no_cuda: device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") args.n_gpu = torch.cuda.device_count() else: # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.n_gpu = 1 args.device = device # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s", args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16, ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() # Set seed set_seed(args) # Prepare GLUE task args.task_name = args.task_name.lower() if args.task_name not in processors: raise ValueError("Task not found: %s" % (args.task_name)) processor = processors[args.task_name]() args.output_mode = output_modes[args.task_name] label_list = processor.get_labels() num_labels = len(label_list) # Load pretrained model and tokenizer if args.local_rank not in [-1, 0]: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab args.model_type = args.model_type.lower() config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type] config = config_class.from_pretrained( args.config_name if args.config_name else args.model_name_or_path, num_labels=num_labels, finetuning_task=args.task_name, cache_dir=args.cache_dir if args.cache_dir else None, ) tokenizer = tokenizer_class.from_pretrained( args.tokenizer_name if args.tokenizer_name else args.model_name_or_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir if args.cache_dir else None, ) model = model_class.from_pretrained( args.model_name_or_path, from_tf=bool(".ckpt" in args.model_name_or_path), config=config, cache_dir=args.cache_dir if args.cache_dir else None, ) if args.model_type == "bert": model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) model.bert.init_highway_pooler() elif args.model_type == "roberta": model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) model.roberta.init_highway_pooler() else: raise NotImplementedError() if args.local_rank == 0: torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab model.to(args.device) logger.info("Training/evaluation parameters %s", args) # Training if args.do_train: train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, evaluate=False) global_step, tr_loss = train(args, train_dataset, model, tokenizer) logger.info(" global_step = %s, average loss = %s", 
global_step, tr_loss) if args.eval_after_first_stage: result = evaluate(args, model, tokenizer, prefix="") print_result = get_wanted_result(result) train(args, train_dataset, model, tokenizer, train_highway=True) # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained() if args.do_train and (args.local_rank == -1 or torch.distributed.get_rank() == 0): # Create output directory if needed if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]: os.makedirs(args.output_dir) logger.info("Saving model checkpoint to %s", args.output_dir) # Save a trained model, configuration and tokenizer using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` model_to_save = ( model.module if hasattr(model, "module") else model ) # Take care of distributed/parallel training model_to_save.save_pretrained(args.output_dir) tokenizer.save_pretrained(args.output_dir) # Good practice: save your training arguments together with the trained model torch.save(args, os.path.join(args.output_dir, "training_args.bin")) # Load a trained model and vocabulary that you have fine-tuned model = model_class.from_pretrained(args.output_dir) tokenizer = tokenizer_class.from_pretrained(args.output_dir) model.to(args.device) # Evaluation results = {} if args.do_eval and args.local_rank in [-1, 0]: tokenizer = tokenizer_class.from_pretrained(args.output_dir, do_lower_case=args.do_lower_case) checkpoints = [args.output_dir] if args.eval_all_checkpoints: checkpoints = list( os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + "/**/" + WEIGHTS_NAME, recursive=True)) ) logger.info("Evaluate the following checkpoints: %s", checkpoints) for checkpoint in checkpoints: global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else "" prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else "" model = model_class.from_pretrained(checkpoint) if args.model_type == "bert": model.bert.encoder.set_early_exit_entropy(args.early_exit_entropy) elif args.model_type == "roberta": model.roberta.encoder.set_early_exit_entropy(args.early_exit_entropy) else: raise NotImplementedError() model.to(args.device) result = evaluate(args, model, tokenizer, prefix=prefix, eval_highway=args.eval_highway) print_result = get_wanted_result(result) logger.info("Result: {}".format(print_result)) if args.eval_each_highway: last_layer_results = print_result each_layer_results = [] for i in range(model.num_layers): logger.info("\n") _result = evaluate( args, model, tokenizer, prefix=prefix, output_layer=i, eval_highway=args.eval_highway ) if i + 1 < model.num_layers: each_layer_results.append(get_wanted_result(_result)) each_layer_results.append(last_layer_results) save_fname = args.plot_data_dir + "/" + args.model_name_or_path[2:] + "/each_layer.npy" if not os.path.exists(os.path.dirname(save_fname)): os.makedirs(os.path.dirname(save_fname)) np.save(save_fname, np.array(each_layer_results)) info_str = "Score of each layer:" for i in range(model.num_layers): info_str += " {:.2f}".format(100 * each_layer_results[i]) logger.info(info_str) result = dict((k + "_{}".format(global_step), v) for k, v in result.items()) results.update(result) return results if __name__ == "__main__": main()
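For context on the `--early_exit_entropy` flag used throughout the script above: DeeBERT exits at an intermediate ("highway") classifier once its prediction entropy drops below the threshold. The decision rule can be sketched as follows — an illustration of the idea, not the repository's `modeling_highway_bert` implementation:

```python
# Sketch of the entropy-gated early-exit rule behind --early_exit_entropy.
# Illustrative only; the actual DeeBERT highway code may differ.
import torch


def prediction_entropy(logits: torch.Tensor) -> torch.Tensor:
    """Shannon entropy of the softmax distribution, one value per example."""
    probs = torch.softmax(logits, dim=-1)
    return -(probs * torch.log(probs.clamp_min(1e-12))).sum(dim=-1)


def should_exit(highway_logits: torch.Tensor, early_exit_entropy: float) -> bool:
    # A confident intermediate classifier (low entropy) triggers an early exit,
    # skipping the remaining transformer layers.
    return prediction_entropy(highway_logits).mean().item() < early_exit_entropy


if __name__ == "__main__":
    confident = torch.tensor([[5.0, -3.0]])  # near one-hot -> low entropy
    uncertain = torch.tensor([[0.1, 0.0]])   # near uniform -> high entropy
    print(should_exit(confident, 0.3), should_exit(uncertain, 0.3))
```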
AdaMix/examples/research_projects/deebert/run_glue_deebert.py/0
{ "file_path": "AdaMix/examples/research_projects/deebert/run_glue_deebert.py", "repo_id": "AdaMix", "token_count": 13849 }
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Training the distilled model. Supported architectures include: BERT -> DistilBERT, RoBERTa -> DistilRoBERTa, GPT2 -> DistilGPT2. """ import argparse import json import os import pickle import shutil import numpy as np import torch from distiller import Distiller from lm_seqs_dataset import LmSeqsDataset from transformers import ( BertConfig, BertForMaskedLM, BertTokenizer, DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer, GPT2Config, GPT2LMHeadModel, GPT2Tokenizer, RobertaConfig, RobertaForMaskedLM, RobertaTokenizer, ) from utils import git_log, init_gpu_params, logger, set_seed MODEL_CLASSES = { "distilbert": (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer), "roberta": (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer), "bert": (BertConfig, BertForMaskedLM, BertTokenizer), "gpt2": (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer), } def sanity_checks(args): """ A bunch of args sanity checks to perform even starting... """ assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0) assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0) if args.mlm: assert os.path.isfile(args.token_counts) assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"]) else: assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"]) assert args.teacher_type == args.student_type or ( args.student_type == "distilbert" and args.teacher_type == "bert" ) assert os.path.isfile(args.student_config) if args.student_pretrained_weights is not None: assert os.path.isfile(args.student_pretrained_weights) if args.freeze_token_type_embds: assert args.student_type in ["roberta"] assert args.alpha_ce >= 0.0 assert args.alpha_mlm >= 0.0 assert args.alpha_clm >= 0.0 assert args.alpha_mse >= 0.0 assert args.alpha_cos >= 0.0 assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0 def freeze_pos_embeddings(student, args): if args.student_type == "roberta": student.roberta.embeddings.position_embeddings.weight.requires_grad = False elif args.student_type == "gpt2": student.transformer.wpe.weight.requires_grad = False def freeze_token_type_embeddings(student, args): if args.student_type == "roberta": student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False def main(): parser = argparse.ArgumentParser(description="Training") parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.") parser.add_argument( "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)" ) parser.add_argument( "--data_file", type=str, required=True, help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.", ) parser.add_argument( "--student_type", type=str, choices=["distilbert", "roberta", "gpt2"], required=True, help="The student 
type (DistilBERT, RoBERTa).", ) parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.") parser.add_argument( "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint." ) parser.add_argument( "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)." ) parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.") parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.") parser.add_argument( "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0." ) parser.add_argument( "--alpha_mlm", default=0.0, type=float, help="Linear weight for the MLM loss. Must be >=0. Should be used in coonjunction with `mlm` flag.", ) parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.") parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.") parser.add_argument( "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0." ) parser.add_argument( "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM." ) parser.add_argument( "--mlm_mask_prop", default=0.15, type=float, help="Proportion of tokens for which we need to make a prediction.", ) parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.") parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.") parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.") parser.add_argument( "--mlm_smoothing", default=0.7, type=float, help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).", ) parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.") parser.add_argument( "--restrict_ce_to_mask", action="store_true", help="If true, compute the distilation loss only the [MLM] prediction distribution.", ) parser.add_argument( "--freeze_pos_embs", action="store_true", help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.", ) parser.add_argument( "--freeze_token_type_embds", action="store_true", help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.", ) parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.") parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).") parser.add_argument( "--group_by_size", action="store_false", help="If true, group sequences that have similar length into the same batch. 
Default is true.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=50, help="Gradient accumulation for larger training batches.", ) parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.") parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight deay if we apply some.") parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.") parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.") parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.") parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit", ) parser.add_argument( "--fp16_opt_level", type=str, default="O1", help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']." "See details at https://nvidia.github.io/apex/amp.html", ) parser.add_argument("--gpus", type=int, default=1, help="Number of GPUs in the node.") parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank") parser.add_argument("--seed", type=int, default=56, help="Random seed") parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.") parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.") args = parser.parse_args() sanity_checks(args) # ARGS # init_gpu_params(args) set_seed(args) if args.is_master: if os.path.exists(args.dump_path): if not args.force: raise ValueError( f"Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite it" "Use `--force` if you want to overwrite it" ) else: shutil.rmtree(args.dump_path) if not os.path.exists(args.dump_path): os.makedirs(args.dump_path) logger.info(f"Experiment will be dumped and logged in {args.dump_path}") # SAVE PARAMS # logger.info(f"Param: {args}") with open(os.path.join(args.dump_path, "parameters.json"), "w") as f: json.dump(vars(args), f, indent=4) git_log(args.dump_path) student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type] teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type] # TOKENIZER # tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name) special_tok_ids = {} for tok_name, tok_symbol in tokenizer.special_tokens_map.items(): idx = tokenizer.all_special_tokens.index(tok_symbol) special_tok_ids[tok_name] = tokenizer.all_special_ids[idx] logger.info(f"Special tokens {special_tok_ids}") args.special_tok_ids = special_tok_ids args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name] # DATA LOADER # logger.info(f"Loading data from {args.data_file}") with open(args.data_file, "rb") as fp: data = pickle.load(fp) if args.mlm: logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)") with open(args.token_counts, "rb") as fp: counts = pickle.load(fp) token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing for idx in special_tok_ids.values(): token_probs[idx] = 0.0 # do not predict special tokens token_probs = torch.from_numpy(token_probs) else: token_probs = None train_lm_seq_dataset = LmSeqsDataset(params=args, data=data) logger.info("Data loader created.") # STUDENT # logger.info(f"Loading student config from 
{args.student_config}") stu_architecture_config = student_config_class.from_pretrained(args.student_config) stu_architecture_config.output_hidden_states = True if args.student_pretrained_weights is not None: logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}") student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config) else: student = student_model_class(stu_architecture_config) if args.n_gpu > 0: student.to(f"cuda:{args.local_rank}") logger.info("Student loaded.") # TEACHER # teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True) if args.n_gpu > 0: teacher.to(f"cuda:{args.local_rank}") logger.info(f"Teacher loaded from {args.teacher_name}.") # FREEZING # if args.freeze_pos_embs: freeze_pos_embeddings(student, args) if args.freeze_token_type_embds: freeze_token_type_embeddings(student, args) # SANITY CHECKS # assert student.config.vocab_size == teacher.config.vocab_size assert student.config.hidden_size == teacher.config.hidden_size assert student.config.max_position_embeddings == teacher.config.max_position_embeddings if args.mlm: assert token_probs.size(0) == stu_architecture_config.vocab_size # DISTILLER # torch.cuda.empty_cache() distiller = Distiller( params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher ) distiller.train() logger.info("Let's go get some drinks.") if __name__ == "__main__": main()
AdaMix/examples/research_projects/distillation/train.py/0
{ "file_path": "AdaMix/examples/research_projects/distillation/train.py", "repo_id": "AdaMix", "token_count": 5123 }
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Masked Version of BERT. It replaces the `torch.nn.Linear` layers with :class:`~emmental.MaskedLinear` and add an additional parameters in the forward pass to compute the adaptive mask. Built on top of `transformers.models.bert.modeling_bert`""" import logging import math import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss from emmental import MaskedBertConfig from emmental.modules import MaskedLinear from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward from transformers.modeling_utils import PreTrainedModel, prune_linear_layer from transformers.models.bert.modeling_bert import ACT2FN, BertLayerNorm, load_tf_weights_in_bert logger = logging.getLogger(__name__) class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids=None, token_type_ids=None, position_ids=None, inputs_embeds=None): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] device = input_ids.device if input_ids is not None else inputs_embeds.device if position_ids is None: position_ids = torch.arange(seq_length, dtype=torch.long, device=device) position_ids = position_ids.unsqueeze(0).expand(input_shape) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) embeddings = inputs_embeds + position_embeddings + token_type_embeddings embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super().__init__() if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"): raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads) ) self.output_attentions = config.output_attentions self.num_attention_heads = config.num_attention_heads self.attention_head_size = 
int(config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size self.query = MaskedLinear( config.hidden_size, self.all_head_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.key = MaskedLinear( config.hidden_size, self.all_head_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.value = MaskedLinear( config.hidden_size, self.all_head_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) def transpose_for_scores(self, x): new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) x = x.view(*new_x_shape) return x.permute(0, 2, 1, 3) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): mixed_query_layer = self.query(hidden_states, threshold=threshold) # If this is instantiated as a cross-attention module, the keys # and values come from an encoder; the attention mask needs to be # such that the encoder's padding tokens are not attended to. if encoder_hidden_states is not None: mixed_key_layer = self.key(encoder_hidden_states, threshold=threshold) mixed_value_layer = self.value(encoder_hidden_states, threshold=threshold) attention_mask = encoder_attention_mask else: mixed_key_layer = self.key(hidden_states, threshold=threshold) mixed_value_layer = self.value(hidden_states, threshold=threshold) query_layer = self.transpose_for_scores(mixed_query_layer) key_layer = self.transpose_for_scores(mixed_key_layer) value_layer = self.transpose_for_scores(mixed_value_layer) # Take the dot product between "query" and "key" to get the raw attention scores. attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2)) attention_scores = attention_scores / math.sqrt(self.attention_head_size) if attention_mask is not None: # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) # Mask heads if we want to if head_mask is not None: attention_probs = attention_probs * head_mask context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) outputs = (context_layer, attention_probs) if self.output_attentions else (context_layer,) return outputs class BertSelfOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = MaskedLinear( config.hidden_size, config.hidden_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor, threshold): hidden_states = self.dense(hidden_states, threshold=threshold) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super().__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) self.pruned_heads = set() def prune_heads(self, heads): if len(heads) == 0: return mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size) heads = set(heads) - self.pruned_heads # Convert to set and remove already pruned heads for head in heads: # Compute how many pruned heads are before the head and move the index accordingly head = head - sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.self.query = prune_linear_layer(self.self.query, index) self.self.key = prune_linear_layer(self.self.key, index) self.self.value = prune_linear_layer(self.self.value, index) self.output.dense = prune_linear_layer(self.output.dense, index, dim=1) # Update hyper params and store pruned heads self.self.num_attention_heads = self.self.num_attention_heads - len(heads) self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads self.pruned_heads = self.pruned_heads.union(heads) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): self_outputs = self.self( hidden_states, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask, threshold=threshold, ) attention_output = self.output(self_outputs[0], hidden_states, threshold=threshold) outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them return outputs class BertIntermediate(nn.Module): def __init__(self, config): super().__init__() self.dense = MaskedLinear( config.hidden_size, config.intermediate_size, pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) if isinstance(config.hidden_act, str): self.intermediate_act_fn = ACT2FN[config.hidden_act] else: self.intermediate_act_fn = config.hidden_act def forward(self, hidden_states, threshold): hidden_states = self.dense(hidden_states, threshold=threshold) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super().__init__() self.dense = MaskedLinear( config.intermediate_size, config.hidden_size, 
pruning_method=config.pruning_method, mask_init=config.mask_init, mask_scale=config.mask_scale, ) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor, threshold): hidden_states = self.dense(hidden_states, threshold=threshold) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertLayer(nn.Module): def __init__(self, config): super().__init__() self.attention = BertAttention(config) self.is_decoder = config.is_decoder if self.is_decoder: self.crossattention = BertAttention(config) self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): self_attention_outputs = self.attention(hidden_states, attention_mask, head_mask, threshold=threshold) attention_output = self_attention_outputs[0] outputs = self_attention_outputs[1:] # add self attentions if we output attention weights if self.is_decoder and encoder_hidden_states is not None: cross_attention_outputs = self.crossattention( attention_output, attention_mask, head_mask, encoder_hidden_states, encoder_attention_mask ) attention_output = cross_attention_outputs[0] outputs = outputs + cross_attention_outputs[1:] # add cross attentions if we output attention weights intermediate_output = self.intermediate(attention_output, threshold=threshold) layer_output = self.output(intermediate_output, attention_output, threshold=threshold) outputs = (layer_output,) + outputs return outputs class BertEncoder(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states self.layer = nn.ModuleList([BertLayer(config) for _ in range(config.num_hidden_layers)]) def forward( self, hidden_states, attention_mask=None, head_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): all_hidden_states = () all_attentions = () for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) layer_outputs = layer_module( hidden_states, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, threshold=threshold, ) hidden_states = layer_outputs[0] if self.output_attentions: all_attentions = all_attentions + (layer_outputs[1],) # Add last layer if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) outputs = (hidden_states,) if self.output_hidden_states: outputs = outputs + (all_hidden_states,) if self.output_attentions: outputs = outputs + (all_attentions,) return outputs # last-layer hidden state, (all hidden states), (all attentions) class BertPooler(nn.Module): def __init__(self, config): super().__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class MaskedBertPreTrainedModel(PreTrainedModel): """An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. 
""" config_class = MaskedBertConfig load_tf_weights = load_tf_weights_in_bert base_model_prefix = "bert" def _init_weights(self, module): """ Initialize the weights """ if isinstance(module, (nn.Linear, nn.Embedding)): # Slightly different from the TF version which uses truncated_normal for initialization # cf https://github.com/pytorch/pytorch/pull/5617 module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() MASKED_BERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~emmental.MaskedBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ MASKED_BERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1`` corresponds to a `sentence B` token `What are token type IDs? <../glossary.html#token-type-ids>`_ position_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`_ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Sequence of hidden-states at the output of the last layer of the encoder. 
Used in the cross-attention if the model is configured as a decoder. encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. """ @add_start_docstrings( "The bare Masked Bert Model transformer outputting raw hidden-states without any specific head on top.", MASKED_BERT_START_DOCSTRING, ) class MaskedBertModel(MaskedBertPreTrainedModel): """ The `MaskedBertModel` class replicates the :class:`~transformers.BertModel` class and adds specific inputs to compute the adaptive mask on the fly. Note that we freeze the embeddings modules from their pre-trained values. """ def __init__(self, config): super().__init__(config) self.config = config self.embeddings = BertEmbeddings(config) self.embeddings.requires_grad_(requires_grad=False) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, value): self.embeddings.word_embeddings = value def _prune_heads(self, heads_to_prune): """Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.encoder.layer[layer].attention.prune_heads(heads) @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, encoder_hidden_states=None, encoder_attention_mask=None, threshold=None, ): r""" threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Return: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`): Sequence of hidden-states at the output of the last layer of the model. pooler_output (:obj:`torch.FloatTensor`: of shape :obj:`(batch_size, hidden_size)`): Last layer hidden-state of the first token of the sequence (classification token) further processed by a Linear layer and a Tanh activation function. The Linear layer weights are trained from the next sentence prediction (classification) objective during pre-training. This output is usually *not* a good summary of the semantic content of the input, you're often better with averaging or pooling the sequence of hidden-states for the whole input sequence. hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if attention_mask is None: attention_mask = torch.ones(input_shape, device=device) if token_type_ids is None: token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. if attention_mask.dim() == 3: extended_attention_mask = attention_mask[:, None, :, :] elif attention_mask.dim() == 2: # Provided a padding mask of dimensions [batch_size, seq_length] # - if the model is a decoder, apply a causal mask in addition to the padding mask # - if the model is an encoder, make the mask broadcastable to [batch_size, num_heads, seq_length, seq_length] if self.config.is_decoder: batch_size, seq_length = input_shape seq_ids = torch.arange(seq_length, device=device) causal_mask = seq_ids[None, None, :].repeat(batch_size, seq_length, 1) <= seq_ids[None, :, None] causal_mask = causal_mask.to( attention_mask.dtype ) # causal and attention masks must have same type with pytorch version < 1.3 extended_attention_mask = causal_mask[:, None, :, :] * attention_mask[:, None, None, :] else: extended_attention_mask = attention_mask[:, None, None, :] else: raise ValueError( "Wrong shape for input_ids (shape {}) or attention_mask (shape {})".format( input_shape, attention_mask.shape ) ) # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
        extended_attention_mask = extended_attention_mask.to(dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)

            if encoder_attention_mask.dim() == 3:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
            elif encoder_attention_mask.dim() == 2:
                encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
            else:
                raise ValueError(
                    "Wrong shape for encoder_hidden_shape (shape {}) or encoder_attention_mask (shape {})".format(
                        encoder_hidden_shape, encoder_attention_mask.shape
                    )
                )

            encoder_extended_attention_mask = encoder_extended_attention_mask.to(
                dtype=next(self.parameters()).dtype
            )  # fp16 compatibility
            encoder_extended_attention_mask = (1.0 - encoder_extended_attention_mask) * -10000.0
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if head_mask is not None:
            if head_mask.dim() == 1:
                head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
                head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)
            elif head_mask.dim() == 2:
                head_mask = (
                    head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
                )  # We can specify head_mask for each layer
            head_mask = head_mask.to(
                dtype=next(self.parameters()).dtype
            )  # switch to float if needed + fp16 compatibility
        else:
            head_mask = [None] * self.config.num_hidden_layers

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            threshold=threshold,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        outputs = (sequence_output, pooled_output,) + encoder_outputs[
            1:
        ]  # add hidden_states and attentions if they are here
        return outputs  # sequence_output, pooled_output, (hidden_states), (attentions)


@add_start_docstrings(
    """Masked Bert Model transformer with a sequence classification/regression head on top (a linear layer on top of
    the pooled output) e.g. for GLUE tasks.
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForSequenceClassification(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = MaskedBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, self.config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, threshold=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ..., config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss), If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy). threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`label` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: if self.num_labels == 1: # We are doing regression loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), logits, (hidden_states), (attentions) @add_start_docstrings( """Masked Bert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. 
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForMultipleChoice(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.bert = MaskedBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.init_weights() @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, threshold=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension of the input tensors. (see `input_ids` above) threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. classification_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ num_choices = input_ids.shape[1] input_ids = input_ids.view(-1, input_ids.size(-1)) attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) pooled_output = outputs[1] pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, num_choices) outputs = (reshaped_logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) outputs = (loss,) + outputs return outputs # (loss), reshaped_logits, (hidden_states), (attentions) @add_start_docstrings( """Masked Bert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. 
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForTokenClassification(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = MaskedBertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, labels=None, threshold=None, ): r""" labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`) Classification scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) sequence_output = outputs[0] sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) outputs = (logits,) + outputs[2:] # add hidden states and attention if they are here if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels) active_labels = torch.where( active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels) ) loss = loss_fct(active_logits, active_labels) else: loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) outputs = (loss,) + outputs return outputs # (loss), scores, (hidden_states), (attentions) @add_start_docstrings( """Masked Bert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). 
""", MASKED_BERT_START_DOCSTRING, ) class MaskedBertForQuestionAnswering(MaskedBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.num_labels = config.num_labels self.bert = MaskedBertModel(config) self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels) self.init_weights() @add_start_docstrings_to_model_forward(MASKED_BERT_INPUTS_DOCSTRING) def forward( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, start_positions=None, end_positions=None, threshold=None, ): r""" start_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. end_positions (:obj:`torch.LongTensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. threshold (:obj:`float`): Threshold value (see :class:`~emmental.MaskedLinear`). Returns: :obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~emmental.MaskedBertConfig`) and inputs: loss (:obj:`torch.FloatTensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length,)`): Span-end scores (before SoftMax). hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. 
""" outputs = self.bert( input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, threshold=threshold, ) sequence_output = outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) outputs = ( start_logits, end_logits, ) + outputs[2:] if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 outputs = (total_loss,) + outputs return outputs # (loss), start_logits, end_logits, (hidden_states), (attentions)
AdaMix/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py/0
{ "file_path": "AdaMix/examples/research_projects/movement-pruning/emmental/modeling_bert_masked.py", "repo_id": "AdaMix", "token_count": 20030 }
38
import torch class ClassificationHead(torch.nn.Module): """Classification Head for transformer encoders""" def __init__(self, class_size, embed_size): super().__init__() self.class_size = class_size self.embed_size = embed_size # self.mlp1 = torch.nn.Linear(embed_size, embed_size) # self.mlp2 = (torch.nn.Linear(embed_size, class_size)) self.mlp = torch.nn.Linear(embed_size, class_size) def forward(self, hidden_state): # hidden_state = F.relu(self.mlp1(hidden_state)) # hidden_state = self.mlp2(hidden_state) logits = self.mlp(hidden_state) return logits
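

# A minimal usage sketch, added for illustration; the sizes are assumptions
# (PPLM pairs this head with GPT-2 medium, whose hidden size is 1024).
if __name__ == "__main__":
    head = ClassificationHead(class_size=5, embed_size=1024)
    hidden_state = torch.randn(8, 1024)  # a batch of 8 pooled transformer hidden states
    logits = head(hidden_state)
    print(logits.shape)  # torch.Size([8, 5])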
AdaMix/examples/research_projects/pplm/pplm_classification_head.py/0
{ "file_path": "AdaMix/examples/research_projects/pplm/pplm_classification_head.py", "repo_id": "AdaMix", "token_count": 283 }
39
""" This script reads DPR retriever training data and parses each datapoint. We save a line per datapoint. Each line consists of the query followed by a tab-separated list of Wikipedia page titles constituting positive contexts for a given query. """ import argparse import json from tqdm import tqdm def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data", ) parser.add_argument( "--evaluation_set", type=str, help="where to store parsed evaluation_set file", ) parser.add_argument( "--gold_data_path", type=str, help="where to store parsed gold_data_path file", ) args = parser.parse_args() with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open( args.gold_data_path, "w" ) as gold_file: dpr_records = json.load(src_file) for dpr_record in tqdm(dpr_records): question = dpr_record["question"] contexts = [context["title"] for context in dpr_record["positive_ctxs"]] eval_file.write(question + "\n") gold_file.write("\t".join(contexts) + "\n") if __name__ == "__main__": main()
AdaMix/examples/research_projects/rag/parse_dpr_relevance_data.py/0
{ "file_path": "AdaMix/examples/research_projects/rag/parse_dpr_relevance_data.py", "repo_id": "AdaMix", "token_count": 559 }
40
#!/usr/bin/env bash export PYTHONPATH="../":"${PYTHONPATH}" export WANDB_PROJECT=dmar export MAX_LEN=128 export m=sshleifer/student_marian_en_ro_6_1 python finetune.py \ --learning_rate=3e-4 \ --do_train \ --fp16 \ --data_dir wmt_en_ro \ --max_source_length $MAX_LEN --max_target_length $MAX_LEN --val_max_target_length $MAX_LEN --test_max_target_length $MAX_LEN \ --freeze_encoder --freeze_embeds \ --train_batch_size=48 --eval_batch_size=64 \ --tokenizer_name $m --model_name_or_path $m --num_train_epochs=1 \ --warmup_steps 500 --logger_name wandb --gpus 1 \ --fp16_opt_level=O1 --task translation \ "$@"
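
# Example invocation (a sketch): any extra flags are forwarded to finetune.py
# through the trailing "$@", e.g. an output directory (the path is an assumption):
#   ./dynamic_bs_example.sh --output_dir dmar_en_ro_model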
AdaMix/examples/research_projects/seq2seq-distillation/dynamic_bs_example.sh/0
{ "file_path": "AdaMix/examples/research_projects/seq2seq-distillation/dynamic_bs_example.sh", "repo_id": "AdaMix", "token_count": 267 }
41
## Fine-tuning Wav2Vec2

The `run_training.py` script allows one to fine-tune pretrained Wav2Vec2 models that can be found [here](https://huggingface.co/models?search=facebook/wav2vec2).

This fine-tuning script can also be run as a Google Colab notebook [TODO: here]( ).

The script is actively maintained by [Patrick von Platen](https://github.com/patrickvonplaten).
Feel free to ask a question on the [Forum](https://discuss.huggingface.co/) or post an issue on [GitHub](https://github.com/huggingface/transformers/issues/new/choose), adding `@patrickvonplaten` as a tag.
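
As a quick sanity check before fine-tuning, a pretrained checkpoint can be used for inference directly. Below is a minimal sketch; the checkpoint name is one example, and the random `speech` array merely stands in for a real 1-D waveform sampled at 16 kHz:

```python
import numpy as np
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

processor = Wav2Vec2Processor.from_pretrained("facebook/wav2vec2-base-960h")
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h")

# Stand-in input: one second of noise. Replace with a real 16 kHz waveform,
# e.g. loaded via soundfile or torchaudio.
speech = np.random.randn(16_000).astype(np.float32)

inputs = processor(speech, sampling_rate=16_000, return_tensors="pt")
with torch.no_grad():
    logits = model(inputs.input_values).logits

predicted_ids = torch.argmax(logits, dim=-1)
transcription = processor.batch_decode(predicted_ids)
print(transcription)
```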
AdaMix/examples/research_projects/wav2vec2/README.md/0
{ "file_path": "AdaMix/examples/research_projects/wav2vec2/README.md", "repo_id": "AdaMix", "token_count": 184 }
42
{ "fp16": { "enabled": true, "loss_scale": 0, "loss_scale_window": 1000, "initial_scale_power": 32, "hysteresis": 2, "min_loss_scale": 1 }, "zero_optimization": { "stage": 2, "allgather_partitions": true, "allgather_bucket_size": 2e8, "overlap_comm": true, "reduce_scatter": true, "reduce_bucket_size": 2e8, "contiguous_gradients": true, "cpu_offload": true }, "optimizer": { "type": "AdamW", "params": { "lr": 3e-5, "betas": [0.8, 0.999], "eps": 1e-8, "weight_decay": 3e-7 } }, "scheduler": { "type": "WarmupLR", "params": { "warmup_min_lr": 0, "warmup_max_lr": 3e-5, "warmup_num_steps": 500 } }, "steps_per_print": 2000, "wall_clock_breakdown": false }
AdaMix/examples/tests/deepspeed/ds_config.json/0
{ "file_path": "AdaMix/examples/tests/deepspeed/ds_config.json", "repo_id": "AdaMix", "token_count": 551 }
43
#!/usr/bin/env python # coding=utf-8 # Copyright 2020 The HuggingFace Team All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Fine-tuning the library models for token classification. """ # You can also adapt this script on your own token classification task and datasets. Pointers for this are left as # comments. import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import transformers from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, DataCollatorForTokenClassification, HfArgumentParser, PreTrainedTokenizerFast, Trainer, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.4.0") logger = logging.getLogger(__name__) @dataclass class ModelArguments: """ Arguments pertaining to which model/config/tokenizer we are going to fine-tune from. """ model_name_or_path: str = field( metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ) config_name: Optional[str] = field( default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"} ) tokenizer_name: Optional[str] = field( default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} ) cache_dir: Optional[str] = field( default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"}, ) model_revision: str = field( default="main", metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."}, ) use_auth_token: bool = field( default=False, metadata={ "help": "Will use the token generated when running `transformers-cli login` (necessary to use this script " "with private models)." }, ) @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. 
""" task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) pad_to_max_length: bool = field( default=False, metadata={ "help": "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." }, ) label_all_tokens: bool = field( default=False, metadata={ "help": "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) def __post_init__(self): if self.dataset_name is None and self.train_file is None and self.validation_file is None: raise ValueError("Need either a dataset name or a training/validation file.") else: if self.train_file is not None: extension = self.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if self.validation_file is not None: extension = self.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." self.task_name = self.task_name.lower() def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. 
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)

    # Log a small summary on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info("Training/evaluation parameters %s", training_args)

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
    # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
    # (the dataset will be downloaded automatically from the datasets Hub).
    #
    # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
    # 'text' is found. You can easily tweak this behavior (see below).
    #
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name)
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
        extension = data_args.train_file.split(".")[-1]
        datasets = load_dataset(extension, data_files=data_files)
    # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
    # https://huggingface.co/docs/datasets/loading_datasets.html.
if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["validation"].column_names features = datasets["validation"].features text_column_name = "tokens" if "tokens" in column_names else column_names[0] label_column_name = ( f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1] ) # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], padding=padding, truncation=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). is_split_into_words=True, ) labels = [] for i, label in enumerate(examples[label_column_name]): word_ids = tokenized_inputs.word_ids(batch_index=i) previous_word_idx = None label_ids = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) # We set the label for the first token of each word. 
elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100) previous_word_idx = word_idx labels.append(label_ids) tokenized_inputs["labels"] = labels return tokenized_inputs if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) train_dataset = train_dataset.map( tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) eval_dataset = eval_dataset.map( tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) test_dataset = test_dataset.map( tokenize_and_align_labels, batched=True, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator data_collator = DataCollatorForTokenClassification(tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: if last_checkpoint is not None: checkpoint = last_checkpoint elif os.path.isdir(model_args.model_name_or_path): checkpoint = model_args.model_name_or_path else: checkpoint = None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if 
data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
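

# Example invocation (a sketch; the model and dataset names are assumptions):
#
#   python run_ner.py \
#       --model_name_or_path bert-base-uncased \
#       --dataset_name conll2003 \
#       --output_dir /tmp/conll2003-ner \
#       --do_train \
#       --do_eval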
AdaMix/examples/token-classification/run_ner.py/0
{ "file_path": "AdaMix/examples/token-classification/run_ner.py", "repo_id": "AdaMix", "token_count": 8373 }
44
<jupyter_start><jupyter_text>How can I leverage State-of-the-Art Natural Language Models with only one line of code? Newly introduced in transformers v2.3.0, **pipelines** provides a high-level, easy-to-use API for doing inference over a variety of downstream tasks, including:
- ***Sentence Classification _(Sentiment Analysis)_***: Indicate if the overall sentence is either positive or negative, i.e. a *binary classification task* or *logistic regression task*.
- ***Token Classification (Named Entity Recognition, Part-of-Speech tagging)***: For each sub-entity _(*token*)_ in the input, assign it a label, i.e. a classification task.
- ***Question-Answering***: Provided a tuple (`question`, `context`), the model should find the span of text in `context` answering the `question`.
- ***Mask-Filling***: Suggests possible word(s) to fill the masked input with respect to the provided `context`.
- ***Summarization***: Summarizes the ``input`` article to a shorter article.
- ***Translation***: Translates the input from one language to another.
- ***Feature Extraction***: Maps the input to a higher, multi-dimensional space learned from the data.

Pipelines encapsulate the overall process of every NLP task:
1. *Tokenization*: Split the initial input into multiple sub-entities with ... properties (i.e. tokens).
2. *Inference*: Map every token into a more meaningful representation.
3. *Decoding*: Use the above representation to generate and/or extract the final output for the underlying task.

The overall API is exposed to the end-user through the `pipeline()` method with the following structure:

```python
from transformers import pipeline

# Using default model and tokenizer for the task
pipeline("<task-name>")

# Using a user-specified model
pipeline("<task-name>", model="<model_name>")

# Using custom model/tokenizer as str
pipeline('<task-name>', model='<model_name>', tokenizer='<tokenizer_name>')
```<jupyter_code>!pip install -q transformers
from __future__ import print_function
import ipywidgets as widgets
from transformers import pipeline<jupyter_output><empty_output><jupyter_text>1. Sentence Classification - Sentiment Analysis<jupyter_code>nlp_sentence_classif = pipeline('sentiment-analysis')
nlp_sentence_classif('Such nice weather outside!')<jupyter_output><empty_output><jupyter_text>2. Token Classification - Named Entity Recognition<jupyter_code>nlp_token_class = pipeline('ner')
nlp_token_class('Hugging Face is a French company based in New York.')<jupyter_output><empty_output><jupyter_text>3. Question Answering<jupyter_code>nlp_qa = pipeline('question-answering')
nlp_qa(context='Hugging Face is a French company based in New York.', question='Where is Hugging Face based?')<jupyter_output><empty_output><jupyter_text>4. Text Generation - Mask Filling<jupyter_code>nlp_fill = pipeline('fill-mask')
nlp_fill('Hugging Face is a French company based in ' + nlp_fill.tokenizer.mask_token)<jupyter_output><empty_output><jupyter_text>5. Summarization

Summarization is currently supported by `Bart` and `T5`.<jupyter_code>TEXT_TO_SUMMARIZE = """
New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County, New York.
A year later, she got married again in Westchester County, but to a different man and without divorcing her first husband.
Only 18 days after that marriage, she got hitched yet again. Then, Barrientos declared "I do" five more times, sometimes only within two weeks of each other.
In 2010, she married once more, this time in the Bronx. In an application for a marriage license, she stated it was her "first and only" marriage.
Barrientos, now 39, is facing two criminal counts of "offering a false instrument for filing in the first degree," referring to her false statements on the
2010 marriage license application, according to court documents.
Prosecutors said the marriages were part of an immigration scam.
On Friday, she pleaded not guilty at State Supreme Court in the Bronx, according to her attorney, Christopher Wright, who declined to comment further.
After leaving court, Barrientos was arrested and charged with theft of service and criminal trespass for allegedly sneaking into the New York subway through an emergency exit, said Detective
Annette Markowski, a police spokeswoman. In total, Barrientos has been married 10 times, with nine of her marriages occurring between 1999 and 2002.
All occurred either in Westchester County, Long Island, New Jersey or the Bronx. She is believed to still be married to four men, and at one time, she was married to eight men at once, prosecutors say.
Prosecutors said the immigration scam involved some of her husbands, who filed for permanent residence status shortly after the marriages.
Any divorces happened only after such filings were approved. It was unclear whether any of the men will be prosecuted.
The case was referred to the Bronx District Attorney\'s Office by Immigration and Customs Enforcement and the Department of Homeland Security\'s
Investigation Division. Seven of the men are from so-called "red-flagged" countries, including Egypt, Turkey, Georgia, Pakistan and Mali.
Her eighth husband, Rashid Rajput, was deported in 2006 to his native Pakistan after an investigation by the Joint Terrorism Task Force.
If convicted, Barrientos faces up to four years in prison. Her next court appearance is scheduled for May 18.
"""

summarizer = pipeline('summarization')
summarizer(TEXT_TO_SUMMARIZE)<jupyter_output>Couldn't reach server at 'https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/modelcard.json' to download model card file. Creating an empty model card.<jupyter_text>6. Translation

Translation is currently supported by `T5` for the language mappings English-to-French (`translation_en_to_fr`), English-to-German (`translation_en_to_de`) and English-to-Romanian (`translation_en_to_ro`).<jupyter_code># English to French
translator = pipeline('translation_en_to_fr')
translator("HuggingFace is a French company that is based in New York City. HuggingFace's mission is to solve NLP one commit at a time")

# English to German
translator = pipeline('translation_en_to_de')
translator("The history of natural language processing (NLP) generally started in the 1950s, although work can be found from earlier periods.")<jupyter_output><empty_output><jupyter_text>7. Text Generation

Text generation is currently supported by GPT-2, OpenAI-GPT, TransfoXL, XLNet, CTRL and Reformer.<jupyter_code>text_generator = pipeline("text-generation")
text_generator("Today is a beautiful day and I will")<jupyter_output><empty_output><jupyter_text>8. Projection - Features Extraction<jupyter_code>import numpy as np

nlp_features = pipeline('feature-extraction')
output = nlp_features('Hugging Face is a French company based in Paris')
np.array(output).shape  # (Samples, Tokens, Vector Size)<jupyter_output><empty_output><jupyter_text>Alright! Now you have a nice picture of what is possible through transformers' pipelines, and there is more
to come in future releases.
In the meantime, you can try the different pipelines with your own inputs.<jupyter_code>task = widgets.Dropdown(
    options=['sentiment-analysis', 'ner', 'fill_mask'],
    value='ner',
    description='Task:',
    disabled=False
)

input = widgets.Text(
    value='',
    placeholder='Enter something',
    description='Your input:',
    disabled=False
)

def forward(_):
    if len(input.value) > 0:
        if task.value == 'ner':
            output = nlp_token_class(input.value)
        elif task.value == 'sentiment-analysis':
            output = nlp_sentence_classif(input.value)
        else:
            if input.value.find('<mask>') == -1:
                output = nlp_fill(input.value + ' <mask>')
            else:
                output = nlp_fill(input.value)
        print(output)

input.on_submit(forward)
display(task, input)

context = widgets.Textarea(
    value='Einstein is famous for the general theory of relativity',
    placeholder='Enter something',
    description='Context:',
    disabled=False
)

query = widgets.Text(
    value='What is Einstein famous for?',
    placeholder='Enter something',
    description='Question:',
    disabled=False
)

def forward(_):
    if len(context.value) > 0 and len(query.value) > 0:
        output = nlp_qa(question=query.value, context=context.value)
        print(output)

query.on_submit(forward)
display(context, query)<jupyter_output><empty_output>
AdaMix/notebooks/03-pipelines.ipynb/0
{ "file_path": "AdaMix/notebooks/03-pipelines.ipynb", "repo_id": "AdaMix", "token_count": 2442 }
45
#!/usr/bin/env python
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Usage:
# ./gen-card-allenai-wmt19.py

import os
from pathlib import Path


def write_model_card(model_card_dir, src_lang, tgt_lang, model_name):

    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, nicht wahr?",
    }

    # BLEU scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "wmt19-de-en-6-6-base": [0, 38.37],
        "wmt19-de-en-6-6-big": [0, 39.90],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- allenai
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---

# FSMT

## Model description

This is a ported version of fairseq-based [wmt19 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.

For more details, please see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).

Two models are available:

* [wmt19-de-en-6-6-big](https://huggingface.co/allenai/wmt19-de-en-6-6-big)
* [wmt19-de-en-6-6-base](https://huggingface.co/allenai/wmt19-de-en-6-6-base)

## Intended uses & limitations

#### How to use

```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "allenai/{model_name}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)

input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded)  # {texts[tgt_lang]}
```

#### Limitations and bias


## Training data

Pretrained weights were left identical to the original model released by allenai. For more details, please see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results Here are the BLEU scores: model | transformers -------|--------- {model_name} | {scores[model_name][1]} The score was calculated using this code: ```bash git clone https://github.com/huggingface/transformers cd transformers export PAIR={pair} export DATA_DIR=data/$PAIR export SAVE_DIR=data/$PAIR export BS=8 export NUM_BEAMS=5 mkdir -p $DATA_DIR sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target echo $PAIR PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS ``` ## Data Sources - [training, etc.](http://www.statmt.org/wmt19/) - [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561) ### BibTeX entry and citation info ``` @misc{{kasai2020deep, title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}}, author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}}, year={{2020}}, eprint={{2006.10369}}, archivePrefix={{arXiv}}, primaryClass={{cs.CL}} }} ``` """ model_card_dir.mkdir(parents=True, exist_ok=True) path = os.path.join(model_card_dir, "README.md") print(f"Generating {path}") with open(path, "w", encoding="utf-8") as f: f.write(readme) # make sure we are under the root of the project repo_dir = Path(__file__).resolve().parent.parent.parent model_cards_dir = repo_dir / "model_cards" for model_name in ["wmt19-de-en-6-6-base", "wmt19-de-en-6-6-big"]: model_card_dir = model_cards_dir / "allenai" / model_name write_model_card(model_card_dir, src_lang="de", tgt_lang="en", model_name=model_name)
AdaMix/scripts/fsmt/gen-card-allenai-wmt19.py/0
{ "file_path": "AdaMix/scripts/fsmt/gen-card-allenai-wmt19.py", "repo_id": "AdaMix", "token_count": 1729 }
46
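Two editorial notes on the patterns used by the rows above and below. In the `gen-card-allenai-wmt19.py` script just above, the whole model card is produced by one large f-string, so literal braces (e.g. in the generated BibTeX entry) must be doubled. A minimal sketch of that escaping rule (the `name` variable here is illustrative, not part of the script):

```python
# Doubled braces in an f-string emit literal braces; single braces interpolate.
name = "kasai2020deep"  # hypothetical key, mirroring the BibTeX entry above
print(f"@misc{{{name},")  # -> @misc{kasai2020deep,
```

The next row is the top-level `transformers/__init__.py` (v4.4.2) vendored by AdaMix. Its header comment describes a lazy-import scheme: `_import_structure` maps each submodule to the object names it provides, and nothing is actually imported until an attribute is first accessed. The sketch below shows the same idea with a PEP 562 module-level `__getattr__`; it is a simplification under stated assumptions, not the library's actual `_BaseLazyModule`, and the package/submodule names are hypothetical:

```python
# Minimal sketch of lazy imports for a package __init__.py (Python 3.7+).
import importlib

_import_structure = {
    "models.bert": ["BertConfig", "BertModel"],
    "models.gpt2": ["GPT2Config", "GPT2Model"],
}

# Invert the mapping: object name -> defining submodule.
_object_to_module = {
    name: module for module, names in _import_structure.items() for name in names
}


def __getattr__(name):
    # Called only for attributes not already defined on the module (PEP 562),
    # so the real import happens on first access, e.g. `mypkg.BertModel`.
    if name in _object_to_module:
        module = importlib.import_module("." + _object_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
```

With this in place, `import mypkg` stays cheap, and a heavyweight backend is only imported when one of its names is actually requested.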
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module while preserving other warnings, so don't check this module at all.

# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# When adding a new object to this init, remember to add it twice: once inside the `_import_structure` dictionary and
# once inside the `if TYPE_CHECKING` branch. The `TYPE_CHECKING` branch should have import statements as usual, but
# they are only there for type checking. The `_import_structure` is a dictionary mapping submodules to lists of object
# names, and is used to defer the actual importing until the objects are requested. This way `import transformers`
# provides the names in the namespace without actually importing anything (and especially none of the backends).

__version__ = "4.4.2"

# Work around to update TensorFlow's absl.logging threshold, which alters the
# default Python logging output behavior when present.
# see: https://github.com/abseil/abseil-py/issues/99
# and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
try:
    import absl.logging
except ImportError:
    pass
else:
    absl.logging.set_verbosity("info")
    absl.logging.set_stderrthreshold("info")
    absl.logging._warn_preinit_stderr = False

from typing import TYPE_CHECKING

# Check the dependencies satisfy the minimal versions required.
from .
import dependency_versions_check from .file_utils import ( _BaseLazyModule, is_flax_available, is_sentencepiece_available, is_tf_available, is_tokenizers_available, is_torch_available, ) from .utils import logging logger = logging.get_logger(__name__) # pylint: disable=invalid-name # Base objects, independent of any specific backend _import_structure = { "configuration_utils": ["PretrainedConfig"], "data": [ "DataProcessor", "InputExample", "InputFeatures", "SingleSentenceClassificationProcessor", "SquadExample", "SquadFeatures", "SquadV1Processor", "SquadV2Processor", "glue_compute_metrics", "glue_convert_examples_to_features", "glue_output_modes", "glue_processors", "glue_tasks_num_labels", "squad_convert_examples_to_features", "xnli_compute_metrics", "xnli_output_modes", "xnli_processors", "xnli_tasks_num_labels", ], "file_utils": [ "CONFIG_NAME", "MODEL_CARD_NAME", "PYTORCH_PRETRAINED_BERT_CACHE", "PYTORCH_TRANSFORMERS_CACHE", "SPIECE_UNDERLINE", "TF2_WEIGHTS_NAME", "TF_WEIGHTS_NAME", "TRANSFORMERS_CACHE", "WEIGHTS_NAME", "TensorType", "add_end_docstrings", "add_start_docstrings", "cached_path", "is_apex_available", "is_datasets_available", "is_faiss_available", "is_flax_available", "is_psutil_available", "is_py3nvml_available", "is_sentencepiece_available", "is_sklearn_available", "is_tf_available", "is_tokenizers_available", "is_torch_available", "is_torch_tpu_available", ], "hf_argparser": ["HfArgumentParser"], "integrations": [ "is_comet_available", "is_optuna_available", "is_ray_available", "is_ray_tune_available", "is_tensorboard_available", "is_wandb_available", ], "modelcard": ["ModelCard"], "modeling_tf_pytorch_utils": [ "convert_tf_weight_name_to_pt_weight_name", "load_pytorch_checkpoint_in_tf2_model", "load_pytorch_model_in_tf2_model", "load_pytorch_weights_in_tf2_model", "load_tf2_checkpoint_in_pytorch_model", "load_tf2_model_in_pytorch_model", "load_tf2_weights_in_pytorch_model", ], "models": [], # Models "models.wav2vec2": [ "WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Wav2Vec2Config", "Wav2Vec2CTCTokenizer", "Wav2Vec2Tokenizer", "Wav2Vec2FeatureExtractor", "Wav2Vec2Processor", ], "models.m2m_100": ["M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP", "M2M100Config"], "models.speech_to_text": [ "SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig", "Speech2TextFeatureExtractor", ], "models.convbert": ["CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvBertConfig", "ConvBertTokenizer"], "models.albert": ["ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "AlbertConfig"], "models.auto": [ "ALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CONFIG_MAPPING", "MODEL_NAMES_MAPPING", "TOKENIZER_MAPPING", "AutoConfig", "AutoTokenizer", ], "models.bart": ["BartConfig", "BartTokenizer"], "models.barthez": [], "models.bert": [ "BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BasicTokenizer", "BertConfig", "BertTokenizer", "WordpieceTokenizer", ], "models.bert_generation": ["BertGenerationConfig"], "models.bert_japanese": ["BertJapaneseTokenizer", "CharacterTokenizer", "MecabTokenizer"], "models.bertweet": ["BertweetTokenizer"], "models.blenderbot": ["BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotConfig", "BlenderbotTokenizer"], "models.blenderbot_small": [ "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP", "BlenderbotSmallConfig", "BlenderbotSmallTokenizer", ], "models.camembert": ["CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "CamembertConfig"], "models.ctrl": ["CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP", "CTRLConfig", "CTRLTokenizer"], "models.deberta": ["DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaConfig", 
"DebertaTokenizer"], "models.deberta_v2": ["DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP", "DebertaV2Config"], "models.distilbert": ["DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DistilBertConfig", "DistilBertTokenizer"], "models.dpr": [ "DPR_PRETRAINED_CONFIG_ARCHIVE_MAP", "DPRConfig", "DPRContextEncoderTokenizer", "DPRQuestionEncoderTokenizer", "DPRReaderOutput", "DPRReaderTokenizer", ], "models.electra": ["ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP", "ElectraConfig", "ElectraTokenizer"], "models.encoder_decoder": ["EncoderDecoderConfig"], "models.flaubert": ["FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FlaubertConfig", "FlaubertTokenizer"], "models.fsmt": ["FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP", "FSMTConfig", "FSMTTokenizer"], "models.funnel": ["FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP", "FunnelConfig", "FunnelTokenizer"], "models.gpt2": ["GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPT2Config", "GPT2Tokenizer"], "models.herbert": ["HerbertTokenizer"], "models.ibert": ["IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "IBertConfig"], "models.layoutlm": ["LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "LayoutLMConfig", "LayoutLMTokenizer"], "models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"], "models.longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer"], "models.lxmert": ["LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LxmertConfig", "LxmertTokenizer"], "models.marian": ["MarianConfig"], "models.mbart": ["MBartConfig"], "models.mmbt": ["MMBTConfig"], "models.mobilebert": ["MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileBertConfig", "MobileBertTokenizer"], "models.mpnet": ["MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "MPNetConfig", "MPNetTokenizer"], "models.mt5": ["MT5Config"], "models.openai": ["OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OpenAIGPTConfig", "OpenAIGPTTokenizer"], "models.pegasus": ["PegasusConfig"], "models.phobert": ["PhobertTokenizer"], "models.prophetnet": ["PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "ProphetNetConfig", "ProphetNetTokenizer"], "models.rag": ["RagConfig", "RagRetriever", "RagTokenizer"], "models.reformer": ["REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "ReformerConfig"], "models.retribert": ["RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "RetriBertConfig", "RetriBertTokenizer"], "models.roberta": ["ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "RobertaConfig", "RobertaTokenizer"], "models.squeezebert": ["SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "SqueezeBertConfig", "SqueezeBertTokenizer"], "models.t5": ["T5_PRETRAINED_CONFIG_ARCHIVE_MAP", "T5Config"], "models.tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig", "TapasTokenizer"], "models.transfo_xl": [ "TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig", "TransfoXLCorpus", "TransfoXLTokenizer", ], "models.xlm": ["XLM_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMConfig", "XLMTokenizer"], "models.xlm_prophetnet": ["XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMProphetNetConfig"], "models.xlm_roberta": ["XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLMRobertaConfig"], "models.xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"], "pipelines": [ "Conversation", "ConversationalPipeline", "CsvPipelineDataFormat", "FeatureExtractionPipeline", "FillMaskPipeline", "JsonPipelineDataFormat", "NerPipeline", "PipedPipelineDataFormat", "Pipeline", "PipelineDataFormat", "QuestionAnsweringPipeline", "SummarizationPipeline", "TableQuestionAnsweringPipeline", "Text2TextGenerationPipeline", "TextClassificationPipeline", "TextGenerationPipeline", "TokenClassificationPipeline", 
"TranslationPipeline", "ZeroShotClassificationPipeline", "pipeline", ], "tokenization_utils": ["PreTrainedTokenizer"], "tokenization_utils_base": [ "AddedToken", "BatchEncoding", "CharSpan", "PreTrainedTokenizerBase", "SpecialTokensMixin", "TokenSpan", ], "feature_extraction_sequence_utils": ["SequenceFeatureExtractor", "BatchFeature"], "trainer_callback": [ "DefaultFlowCallback", "EarlyStoppingCallback", "PrinterCallback", "ProgressCallback", "TrainerCallback", "TrainerControl", "TrainerState", ], "trainer_utils": ["EvalPrediction", "IntervalStrategy", "SchedulerType", "set_seed"], "training_args": ["TrainingArguments"], "training_args_seq2seq": ["Seq2SeqTrainingArguments"], "training_args_tf": ["TFTrainingArguments"], "utils": ["logging"], } # sentencepiece-backed objects if is_sentencepiece_available(): _import_structure["models.albert"].append("AlbertTokenizer") _import_structure["models.barthez"].append("BarthezTokenizer") _import_structure["models.bert_generation"].append("BertGenerationTokenizer") _import_structure["models.camembert"].append("CamembertTokenizer") _import_structure["models.deberta_v2"].append("DebertaV2Tokenizer") _import_structure["models.m2m_100"].append("M2M100Tokenizer") _import_structure["models.marian"].append("MarianTokenizer") _import_structure["models.mbart"].append("MBartTokenizer") _import_structure["models.mbart"].append("MBart50Tokenizer") _import_structure["models.mt5"].append("MT5Tokenizer") _import_structure["models.pegasus"].append("PegasusTokenizer") _import_structure["models.reformer"].append("ReformerTokenizer") _import_structure["models.speech_to_text"].append("Speech2TextTokenizer") _import_structure["models.speech_to_text"].append("Speech2TextProcessor") _import_structure["models.t5"].append("T5Tokenizer") _import_structure["models.xlm_prophetnet"].append("XLMProphetNetTokenizer") _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizer") _import_structure["models.xlnet"].append("XLNetTokenizer") else: from .utils import dummy_sentencepiece_objects _import_structure["utils.dummy_sentencepiece_objects"] = [ name for name in dir(dummy_sentencepiece_objects) if not name.startswith("_") ] # tokenziers-backed objects if is_tokenizers_available(): # Fast tokenizers _import_structure["models.convbert"].append("ConvBertTokenizerFast") _import_structure["models.albert"].append("AlbertTokenizerFast") _import_structure["models.bart"].append("BartTokenizerFast") _import_structure["models.barthez"].append("BarthezTokenizerFast") _import_structure["models.bert"].append("BertTokenizerFast") _import_structure["models.camembert"].append("CamembertTokenizerFast") _import_structure["models.distilbert"].append("DistilBertTokenizerFast") _import_structure["models.dpr"].extend( ["DPRContextEncoderTokenizerFast", "DPRQuestionEncoderTokenizerFast", "DPRReaderTokenizerFast"] ) _import_structure["models.electra"].append("ElectraTokenizerFast") _import_structure["models.funnel"].append("FunnelTokenizerFast") _import_structure["models.gpt2"].append("GPT2TokenizerFast") _import_structure["models.herbert"].append("HerbertTokenizerFast") _import_structure["models.layoutlm"].append("LayoutLMTokenizerFast") _import_structure["models.led"].append("LEDTokenizerFast") _import_structure["models.longformer"].append("LongformerTokenizerFast") _import_structure["models.lxmert"].append("LxmertTokenizerFast") _import_structure["models.mbart"].append("MBartTokenizerFast") _import_structure["models.mbart"].append("MBart50TokenizerFast") 
_import_structure["models.mobilebert"].append("MobileBertTokenizerFast") _import_structure["models.mpnet"].append("MPNetTokenizerFast") _import_structure["models.mt5"].append("MT5TokenizerFast") _import_structure["models.openai"].append("OpenAIGPTTokenizerFast") _import_structure["models.pegasus"].append("PegasusTokenizerFast") _import_structure["models.reformer"].append("ReformerTokenizerFast") _import_structure["models.retribert"].append("RetriBertTokenizerFast") _import_structure["models.roberta"].append("RobertaTokenizerFast") _import_structure["models.squeezebert"].append("SqueezeBertTokenizerFast") _import_structure["models.t5"].append("T5TokenizerFast") _import_structure["models.xlm_roberta"].append("XLMRobertaTokenizerFast") _import_structure["models.xlnet"].append("XLNetTokenizerFast") _import_structure["tokenization_utils_fast"] = ["PreTrainedTokenizerFast"] if is_sentencepiece_available(): _import_structure["convert_slow_tokenizer"] = ["SLOW_TO_FAST_CONVERTERS", "convert_slow_tokenizer"] else: from .utils import dummy_tokenizers_objects _import_structure["utils.dummy_tokenizers_objects"] = [ name for name in dir(dummy_tokenizers_objects) if not name.startswith("_") ] # PyTorch-backed objects if is_torch_available(): _import_structure["benchmark.benchmark"] = ["PyTorchBenchmark"] _import_structure["benchmark.benchmark_args"] = ["PyTorchBenchmarkArguments"] _import_structure["data.data_collator"] = [ "DataCollator", "DataCollatorForLanguageModeling", "DataCollatorForPermutationLanguageModeling", "DataCollatorForSeq2Seq", "DataCollatorForSOP", "DataCollatorForTokenClassification", "DataCollatorForWholeWordMask", "DataCollatorWithPadding", "default_data_collator", ] _import_structure["data.datasets"] = [ "GlueDataset", "GlueDataTrainingArguments", "LineByLineTextDataset", "LineByLineWithRefDataset", "LineByLineWithSOPTextDataset", "SquadDataset", "SquadDataTrainingArguments", "TextDataset", "TextDatasetForNextSentencePrediction", ] _import_structure["generation_beam_search"] = ["BeamScorer", "BeamSearchScorer"] _import_structure["generation_logits_process"] = [ "HammingDiversityLogitsProcessor", "LogitsProcessor", "LogitsProcessorList", "LogitsWarper", "MinLengthLogitsProcessor", "NoBadWordsLogitsProcessor", "NoRepeatNGramLogitsProcessor", "PrefixConstrainedLogitsProcessor", "RepetitionPenaltyLogitsProcessor", "TemperatureLogitsWarper", "TopKLogitsWarper", "TopPLogitsWarper", ] _import_structure["generation_stopping_criteria"] = [ "StoppingCriteria", "StoppingCriteriaList", "MaxLengthCriteria", "MaxTimeCriteria", ] _import_structure["generation_utils"] = ["top_k_top_p_filtering"] _import_structure["modeling_utils"] = ["Conv1D", "PreTrainedModel", "apply_chunking_to_forward", "prune_layer"] # PyTorch models structure _import_structure["models.speech_to_text"].extend( [ "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST", "Speech2TextForConditionalGeneration", "Speech2TextModel", ] ) _import_structure["models.wav2vec2"].extend( [ "WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST", "Wav2Vec2ForCTC", "Wav2Vec2ForMaskedLM", "Wav2Vec2Model", "Wav2Vec2PreTrainedModel", ] ) _import_structure["models.m2m_100"].extend( [ "M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST", "M2M100ForConditionalGeneration", "M2M100Model", ] ) _import_structure["models.convbert"].extend( [ "CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "ConvBertForMaskedLM", "ConvBertForMultipleChoice", "ConvBertForQuestionAnswering", "ConvBertForSequenceClassification", "ConvBertForTokenClassification", "ConvBertLayer", "ConvBertModel", 
"ConvBertPreTrainedModel", "load_tf_weights_in_convbert", ] ) _import_structure["models.albert"].extend( [ "ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "AlbertForMaskedLM", "AlbertForMultipleChoice", "AlbertForPreTraining", "AlbertForQuestionAnswering", "AlbertForSequenceClassification", "AlbertForTokenClassification", "AlbertModel", "AlbertPreTrainedModel", "load_tf_weights_in_albert", ] ) _import_structure["models.auto"].extend( [ "MODEL_FOR_CAUSAL_LM_MAPPING", "MODEL_FOR_MASKED_LM_MAPPING", "MODEL_FOR_MULTIPLE_CHOICE_MAPPING", "MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "MODEL_FOR_PRETRAINING_MAPPING", "MODEL_FOR_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", "MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING", "MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", "MODEL_MAPPING", "MODEL_WITH_LM_HEAD_MAPPING", "AutoModel", "AutoModelForCausalLM", "AutoModelForMaskedLM", "AutoModelForMultipleChoice", "AutoModelForNextSentencePrediction", "AutoModelForPreTraining", "AutoModelForQuestionAnswering", "AutoModelForSeq2SeqLM", "AutoModelForSequenceClassification", "AutoModelForTableQuestionAnswering", "AutoModelForTokenClassification", "AutoModelWithLMHead", ] ) _import_structure["models.bart"].extend( [ "BART_PRETRAINED_MODEL_ARCHIVE_LIST", "BartForCausalLM", "BartForConditionalGeneration", "BartForQuestionAnswering", "BartForSequenceClassification", "BartModel", "BartPretrainedModel", "PretrainedBartModel", ] ) _import_structure["models.bert"].extend( [ "BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "BertForMaskedLM", "BertForMultipleChoice", "BertForNextSentencePrediction", "BertForPreTraining", "BertForQuestionAnswering", "BertForSequenceClassification", "BertForTokenClassification", "BertLayer", "BertLMHeadModel", "BertModel", "BertPreTrainedModel", "load_tf_weights_in_bert", ] ) _import_structure["models.bert_generation"].extend( [ "BertGenerationDecoder", "BertGenerationEncoder", "load_tf_weights_in_bert_generation", ] ) _import_structure["models.blenderbot"].extend( [ "BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotForConditionalGeneration", "BlenderbotModel", "BlenderbotForCausalLM", ] ) _import_structure["models.blenderbot_small"].extend( [ "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST", "BlenderbotSmallForConditionalGeneration", "BlenderbotSmallModel", "BlenderbotSmallForCausalLM", ] ) _import_structure["models.camembert"].extend( [ "CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "CamembertForCausalLM", "CamembertForMaskedLM", "CamembertForMultipleChoice", "CamembertForQuestionAnswering", "CamembertForSequenceClassification", "CamembertForTokenClassification", "CamembertModel", ] ) _import_structure["models.ctrl"].extend( [ "CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "CTRLForSequenceClassification", "CTRLLMHeadModel", "CTRLModel", "CTRLPreTrainedModel", ] ) _import_structure["models.deberta"].extend( [ "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaForSequenceClassification", "DebertaModel", "DebertaForMaskedLM", "DebertaPreTrainedModel", "DebertaForTokenClassification", "DebertaForQuestionAnswering", ] ) _import_structure["models.deberta_v2"].extend( [ "DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST", "DebertaV2ForSequenceClassification", "DebertaV2Model", "DebertaV2ForMaskedLM", "DebertaV2PreTrainedModel", "DebertaV2ForTokenClassification", "DebertaV2ForQuestionAnswering", ] ) _import_structure["models.distilbert"].extend( [ "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "DistilBertForMaskedLM", "DistilBertForMultipleChoice", 
"DistilBertForQuestionAnswering", "DistilBertForSequenceClassification", "DistilBertForTokenClassification", "DistilBertModel", "DistilBertPreTrainedModel", ] ) _import_structure["models.dpr"].extend( [ "DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", "DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", "DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", "DPRContextEncoder", "DPRPretrainedContextEncoder", "DPRPretrainedQuestionEncoder", "DPRPretrainedReader", "DPRQuestionEncoder", "DPRReader", ] ) _import_structure["models.electra"].extend( [ "ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "ElectraForMaskedLM", "ElectraForMultipleChoice", "ElectraForPreTraining", "ElectraForQuestionAnswering", "ElectraForSequenceClassification", "ElectraForTokenClassification", "ElectraModel", "ElectraPreTrainedModel", "load_tf_weights_in_electra", ] ) _import_structure["models.encoder_decoder"].append("EncoderDecoderModel") _import_structure["models.flaubert"].extend( [ "FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "FlaubertForMultipleChoice", "FlaubertForQuestionAnswering", "FlaubertForQuestionAnsweringSimple", "FlaubertForSequenceClassification", "FlaubertForTokenClassification", "FlaubertModel", "FlaubertWithLMHeadModel", ] ) _import_structure["models.fsmt"].extend(["FSMTForConditionalGeneration", "FSMTModel", "PretrainedFSMTModel"]) _import_structure["models.funnel"].extend( [ "FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "FunnelBaseModel", "FunnelForMaskedLM", "FunnelForMultipleChoice", "FunnelForPreTraining", "FunnelForQuestionAnswering", "FunnelForSequenceClassification", "FunnelForTokenClassification", "FunnelModel", "load_tf_weights_in_funnel", ] ) _import_structure["models.gpt2"].extend( [ "GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", "GPT2DoubleHeadsModel", "GPT2ForSequenceClassification", "GPT2LMHeadModel", "GPT2Model", "GPT2PreTrainedModel", "load_tf_weights_in_gpt2", ] ) _import_structure["models.ibert"].extend( [ "IBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "IBertForMaskedLM", "IBertForMultipleChoice", "IBertForQuestionAnswering", "IBertForSequenceClassification", "IBertForTokenClassification", "IBertModel", "IBertPreTrainedModel", ] ) _import_structure["models.layoutlm"].extend( [ "LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST", "LayoutLMForMaskedLM", "LayoutLMForSequenceClassification", "LayoutLMForTokenClassification", "LayoutLMModel", ] ) _import_structure["models.led"].extend( [ "LED_PRETRAINED_MODEL_ARCHIVE_LIST", "LEDForConditionalGeneration", "LEDForQuestionAnswering", "LEDForSequenceClassification", "LEDModel", ] ) _import_structure["models.longformer"].extend( [ "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "LongformerForMaskedLM", "LongformerForMultipleChoice", "LongformerForQuestionAnswering", "LongformerForSequenceClassification", "LongformerForTokenClassification", "LongformerModel", "LongformerSelfAttention", ] ) _import_structure["models.lxmert"].extend( [ "LxmertEncoder", "LxmertForPreTraining", "LxmertForQuestionAnswering", "LxmertModel", "LxmertPreTrainedModel", "LxmertVisualFeatureEncoder", "LxmertXLayer", ] ) _import_structure["models.marian"].extend(["MarianModel", "MarianMTModel", "MarianForCausalLM"]) _import_structure["models.mbart"].extend( [ "MBartForCausalLM", "MBartForConditionalGeneration", "MBartForQuestionAnswering", "MBartForSequenceClassification", "MBartModel", ] ) _import_structure["models.mmbt"].extend(["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]) _import_structure["models.mobilebert"].extend( [ "MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "MobileBertForMaskedLM", 
"MobileBertForMultipleChoice", "MobileBertForNextSentencePrediction", "MobileBertForPreTraining", "MobileBertForQuestionAnswering", "MobileBertForSequenceClassification", "MobileBertForTokenClassification", "MobileBertLayer", "MobileBertModel", "MobileBertPreTrainedModel", "load_tf_weights_in_mobilebert", ] ) _import_structure["models.mpnet"].extend( [ "MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", "MPNetForMaskedLM", "MPNetForMultipleChoice", "MPNetForQuestionAnswering", "MPNetForSequenceClassification", "MPNetForTokenClassification", "MPNetLayer", "MPNetModel", "MPNetPreTrainedModel", ] ) _import_structure["models.mt5"].extend(["MT5EncoderModel", "MT5ForConditionalGeneration", "MT5Model"]) _import_structure["models.openai"].extend( [ "OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", "OpenAIGPTDoubleHeadsModel", "OpenAIGPTForSequenceClassification", "OpenAIGPTLMHeadModel", "OpenAIGPTModel", "OpenAIGPTPreTrainedModel", "load_tf_weights_in_openai_gpt", ] ) _import_structure["models.pegasus"].extend( ["PegasusForConditionalGeneration", "PegasusModel", "PegasusForCausalLM"] ) _import_structure["models.prophetnet"].extend( [ "PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", "ProphetNetDecoder", "ProphetNetEncoder", "ProphetNetForCausalLM", "ProphetNetForConditionalGeneration", "ProphetNetModel", "ProphetNetPreTrainedModel", ] ) _import_structure["models.rag"].extend(["RagModel", "RagSequenceForGeneration", "RagTokenForGeneration"]) _import_structure["models.reformer"].extend( [ "REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "ReformerAttention", "ReformerForMaskedLM", "ReformerForQuestionAnswering", "ReformerForSequenceClassification", "ReformerLayer", "ReformerModel", "ReformerModelWithLMHead", ] ) _import_structure["models.retribert"].extend( ["RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "RetriBertModel", "RetriBertPreTrainedModel"] ) _import_structure["models.roberta"].extend( [ "ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "RobertaForCausalLM", "RobertaForMaskedLM", "RobertaForMultipleChoice", "RobertaForQuestionAnswering", "RobertaForSequenceClassification", "RobertaForTokenClassification", "RobertaModel", ] ) _import_structure["models.squeezebert"].extend( [ "SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "SqueezeBertForMaskedLM", "SqueezeBertForMultipleChoice", "SqueezeBertForQuestionAnswering", "SqueezeBertForSequenceClassification", "SqueezeBertForTokenClassification", "SqueezeBertModel", "SqueezeBertModule", "SqueezeBertPreTrainedModel", ] ) _import_structure["models.t5"].extend( [ "T5_PRETRAINED_MODEL_ARCHIVE_LIST", "T5EncoderModel", "T5ForConditionalGeneration", "T5Model", "T5PreTrainedModel", "load_tf_weights_in_t5", ] ) _import_structure["models.tapas"].extend( [ "TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST", "TapasForMaskedLM", "TapasForQuestionAnswering", "TapasForSequenceClassification", "TapasModel", ] ) _import_structure["models.transfo_xl"].extend( [ "TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "AdaptiveEmbedding", "TransfoXLForSequenceClassification", "TransfoXLLMHeadModel", "TransfoXLModel", "TransfoXLPreTrainedModel", "load_tf_weights_in_transfo_xl", ] ) _import_structure["models.xlm"].extend( [ "XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMForMultipleChoice", "XLMForQuestionAnswering", "XLMForQuestionAnsweringSimple", "XLMForSequenceClassification", "XLMForTokenClassification", "XLMModel", "XLMPreTrainedModel", "XLMWithLMHeadModel", ] ) _import_structure["models.xlm_prophetnet"].extend( [ "XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMProphetNetDecoder", "XLMProphetNetEncoder", "XLMProphetNetForCausalLM", 
"XLMProphetNetForConditionalGeneration", "XLMProphetNetModel", ] ) _import_structure["models.xlm_roberta"].extend( [ "XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "XLMRobertaForCausalLM", "XLMRobertaForMaskedLM", "XLMRobertaForMultipleChoice", "XLMRobertaForQuestionAnswering", "XLMRobertaForSequenceClassification", "XLMRobertaForTokenClassification", "XLMRobertaModel", ] ) _import_structure["models.xlnet"].extend( [ "XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "XLNetForMultipleChoice", "XLNetForQuestionAnswering", "XLNetForQuestionAnsweringSimple", "XLNetForSequenceClassification", "XLNetForTokenClassification", "XLNetLMHeadModel", "XLNetModel", "XLNetPreTrainedModel", "load_tf_weights_in_xlnet", ] ) _import_structure["optimization"] = [ "Adafactor", "AdamW", "get_constant_schedule", "get_constant_schedule_with_warmup", "get_cosine_schedule_with_warmup", "get_cosine_with_hard_restarts_schedule_with_warmup", "get_linear_schedule_with_warmup", "get_polynomial_decay_schedule_with_warmup", "get_scheduler", ] _import_structure["trainer"] = ["Trainer"] _import_structure["trainer_pt_utils"] = ["torch_distributed_zero_first"] _import_structure["trainer_seq2seq"] = ["Seq2SeqTrainer"] else: from .utils import dummy_pt_objects _import_structure["utils.dummy_pt_objects"] = [name for name in dir(dummy_pt_objects) if not name.startswith("_")] # TensorFlow-backed objects if is_tf_available(): _import_structure["benchmark.benchmark_args_tf"] = ["TensorFlowBenchmarkArguments"] _import_structure["benchmark.benchmark_tf"] = ["TensorFlowBenchmark"] _import_structure["generation_tf_utils"] = ["tf_top_k_top_p_filtering"] _import_structure["modeling_tf_utils"] = [ "TFPreTrainedModel", "TFSequenceSummary", "TFSharedEmbeddings", "shape_list", ] # TensorFlow models structure _import_structure["models.convbert"].extend( [ "TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFConvBertForMaskedLM", "TFConvBertForMultipleChoice", "TFConvBertForQuestionAnswering", "TFConvBertForSequenceClassification", "TFConvBertForTokenClassification", "TFConvBertLayer", "TFConvBertModel", "TFConvBertPreTrainedModel", ] ) _import_structure["models.albert"].extend( [ "TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAlbertForMaskedLM", "TFAlbertForMultipleChoice", "TFAlbertForPreTraining", "TFAlbertForQuestionAnswering", "TFAlbertForSequenceClassification", "TFAlbertForTokenClassification", "TFAlbertMainLayer", "TFAlbertModel", "TFAlbertPreTrainedModel", ] ) _import_structure["models.auto"].extend( [ "TF_MODEL_FOR_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_MASKED_LM_MAPPING", "TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING", "TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING", "TF_MODEL_FOR_PRETRAINING_MAPPING", "TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING", "TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING", "TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING", "TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING", "TF_MODEL_MAPPING", "TF_MODEL_WITH_LM_HEAD_MAPPING", "TFAutoModel", "TFAutoModelForCausalLM", "TFAutoModelForMaskedLM", "TFAutoModelForMultipleChoice", "TFAutoModelForPreTraining", "TFAutoModelForQuestionAnswering", "TFAutoModelForSeq2SeqLM", "TFAutoModelForSequenceClassification", "TFAutoModelForTokenClassification", "TFAutoModelWithLMHead", ] ) _import_structure["models.bart"].extend(["TFBartForConditionalGeneration", "TFBartModel", "TFBartPretrainedModel"]) _import_structure["models.bert"].extend( [ "TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFBertEmbeddings", "TFBertForMaskedLM", "TFBertForMultipleChoice", "TFBertForNextSentencePrediction", "TFBertForPreTraining", 
"TFBertForQuestionAnswering", "TFBertForSequenceClassification", "TFBertForTokenClassification", "TFBertLMHeadModel", "TFBertMainLayer", "TFBertModel", "TFBertPreTrainedModel", ] ) _import_structure["models.blenderbot"].extend(["TFBlenderbotForConditionalGeneration", "TFBlenderbotModel"]) _import_structure["models.blenderbot_small"].extend( ["TFBlenderbotSmallForConditionalGeneration", "TFBlenderbotSmallModel"] ) _import_structure["models.camembert"].extend( [ "TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCamembertForMaskedLM", "TFCamembertForMultipleChoice", "TFCamembertForQuestionAnswering", "TFCamembertForSequenceClassification", "TFCamembertForTokenClassification", "TFCamembertModel", ] ) _import_structure["models.ctrl"].extend( [ "TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFCTRLForSequenceClassification", "TFCTRLLMHeadModel", "TFCTRLModel", "TFCTRLPreTrainedModel", ] ) _import_structure["models.distilbert"].extend( [ "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDistilBertForMaskedLM", "TFDistilBertForMultipleChoice", "TFDistilBertForQuestionAnswering", "TFDistilBertForSequenceClassification", "TFDistilBertForTokenClassification", "TFDistilBertMainLayer", "TFDistilBertModel", "TFDistilBertPreTrainedModel", ] ) _import_structure["models.dpr"].extend( [ "TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", "TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST", "TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFDPRContextEncoder", "TFDPRPretrainedContextEncoder", "TFDPRPretrainedQuestionEncoder", "TFDPRPretrainedReader", "TFDPRQuestionEncoder", "TFDPRReader", ] ) _import_structure["models.electra"].extend( [ "TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFElectraForMaskedLM", "TFElectraForMultipleChoice", "TFElectraForPreTraining", "TFElectraForQuestionAnswering", "TFElectraForSequenceClassification", "TFElectraForTokenClassification", "TFElectraModel", "TFElectraPreTrainedModel", ] ) _import_structure["models.flaubert"].extend( [ "TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFlaubertForMultipleChoice", "TFFlaubertForQuestionAnsweringSimple", "TFFlaubertForSequenceClassification", "TFFlaubertForTokenClassification", "TFFlaubertModel", "TFFlaubertWithLMHeadModel", ] ) _import_structure["models.funnel"].extend( [ "TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFFunnelBaseModel", "TFFunnelForMaskedLM", "TFFunnelForMultipleChoice", "TFFunnelForPreTraining", "TFFunnelForQuestionAnswering", "TFFunnelForSequenceClassification", "TFFunnelForTokenClassification", "TFFunnelModel", ] ) _import_structure["models.gpt2"].extend( [ "TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST", "TFGPT2DoubleHeadsModel", "TFGPT2ForSequenceClassification", "TFGPT2LMHeadModel", "TFGPT2MainLayer", "TFGPT2Model", "TFGPT2PreTrainedModel", ] ) _import_structure["models.led"].extend(["TFLEDForConditionalGeneration", "TFLEDModel", "TFLEDPreTrainedModel"]) _import_structure["models.longformer"].extend( [ "TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLongformerForMaskedLM", "TFLongformerForMultipleChoice", "TFLongformerForQuestionAnswering", "TFLongformerForSequenceClassification", "TFLongformerForTokenClassification", "TFLongformerModel", "TFLongformerSelfAttention", ] ) _import_structure["models.lxmert"].extend( [ "TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFLxmertForPreTraining", "TFLxmertMainLayer", "TFLxmertModel", "TFLxmertPreTrainedModel", "TFLxmertVisualFeatureEncoder", ] ) _import_structure["models.marian"].extend(["TFMarianMTModel", "TFMarianModel"]) 
_import_structure["models.mbart"].extend(["TFMBartForConditionalGeneration", "TFMBartModel"]) _import_structure["models.mobilebert"].extend( [ "TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMobileBertForMaskedLM", "TFMobileBertForMultipleChoice", "TFMobileBertForNextSentencePrediction", "TFMobileBertForPreTraining", "TFMobileBertForQuestionAnswering", "TFMobileBertForSequenceClassification", "TFMobileBertForTokenClassification", "TFMobileBertMainLayer", "TFMobileBertModel", "TFMobileBertPreTrainedModel", ] ) _import_structure["models.mpnet"].extend( [ "TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFMPNetForMaskedLM", "TFMPNetForMultipleChoice", "TFMPNetForQuestionAnswering", "TFMPNetForSequenceClassification", "TFMPNetForTokenClassification", "TFMPNetMainLayer", "TFMPNetModel", "TFMPNetPreTrainedModel", ] ) _import_structure["models.mt5"].extend(["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]) _import_structure["models.openai"].extend( [ "TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST", "TFOpenAIGPTDoubleHeadsModel", "TFOpenAIGPTForSequenceClassification", "TFOpenAIGPTLMHeadModel", "TFOpenAIGPTMainLayer", "TFOpenAIGPTModel", "TFOpenAIGPTPreTrainedModel", ] ) _import_structure["models.pegasus"].extend(["TFPegasusForConditionalGeneration", "TFPegasusModel"]) _import_structure["models.rag"].extend( [ "TFRagModel", "TFRagSequenceForGeneration", "TFRagTokenForGeneration", ] ) _import_structure["models.roberta"].extend( [ "TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFRobertaForMaskedLM", "TFRobertaForMultipleChoice", "TFRobertaForQuestionAnswering", "TFRobertaForSequenceClassification", "TFRobertaForTokenClassification", "TFRobertaMainLayer", "TFRobertaModel", "TFRobertaPreTrainedModel", ] ) _import_structure["models.t5"].extend( [ "TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST", "TFT5EncoderModel", "TFT5ForConditionalGeneration", "TFT5Model", "TFT5PreTrainedModel", ] ) _import_structure["models.transfo_xl"].extend( [ "TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST", "TFAdaptiveEmbedding", "TFTransfoXLForSequenceClassification", "TFTransfoXLLMHeadModel", "TFTransfoXLMainLayer", "TFTransfoXLModel", "TFTransfoXLPreTrainedModel", ] ) _import_structure["models.xlm"].extend( [ "TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMForMultipleChoice", "TFXLMForQuestionAnsweringSimple", "TFXLMForSequenceClassification", "TFXLMForTokenClassification", "TFXLMMainLayer", "TFXLMModel", "TFXLMPreTrainedModel", "TFXLMWithLMHeadModel", ] ) _import_structure["models.xlm_roberta"].extend( [ "TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLMRobertaForMaskedLM", "TFXLMRobertaForMultipleChoice", "TFXLMRobertaForQuestionAnswering", "TFXLMRobertaForSequenceClassification", "TFXLMRobertaForTokenClassification", "TFXLMRobertaModel", ] ) _import_structure["models.xlnet"].extend( [ "TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST", "TFXLNetForMultipleChoice", "TFXLNetForQuestionAnsweringSimple", "TFXLNetForSequenceClassification", "TFXLNetForTokenClassification", "TFXLNetLMHeadModel", "TFXLNetMainLayer", "TFXLNetModel", "TFXLNetPreTrainedModel", ] ) _import_structure["optimization_tf"] = ["AdamWeightDecay", "GradientAccumulator", "WarmUp", "create_optimizer"] _import_structure["trainer_tf"] = ["TFTrainer"] else: from .utils import dummy_tf_objects _import_structure["utils.dummy_tf_objects"] = [name for name in dir(dummy_tf_objects) if not name.startswith("_")] # FLAX-backed objects if is_flax_available(): _import_structure["modeling_flax_utils"] = ["FlaxPreTrainedModel"] 
_import_structure["models.auto"].extend(["FLAX_MODEL_MAPPING", "FlaxAutoModel"]) _import_structure["models.bert"].extend(["FlaxBertForMaskedLM", "FlaxBertModel"]) _import_structure["models.roberta"].append("FlaxRobertaModel") else: from .utils import dummy_flax_objects _import_structure["utils.dummy_flax_objects"] = [ name for name in dir(dummy_flax_objects) if not name.startswith("_") ] # Direct imports for type-checking if TYPE_CHECKING: # Configuration from .configuration_utils import PretrainedConfig # Data from .data import ( DataProcessor, InputExample, InputFeatures, SingleSentenceClassificationProcessor, SquadExample, SquadFeatures, SquadV1Processor, SquadV2Processor, glue_compute_metrics, glue_convert_examples_to_features, glue_output_modes, glue_processors, glue_tasks_num_labels, squad_convert_examples_to_features, xnli_compute_metrics, xnli_output_modes, xnli_processors, xnli_tasks_num_labels, ) # Feature Extractor from .feature_extraction_utils import BatchFeature, SequenceFeatureExtractor # Files and general utilities from .file_utils import ( CONFIG_NAME, MODEL_CARD_NAME, PYTORCH_PRETRAINED_BERT_CACHE, PYTORCH_TRANSFORMERS_CACHE, SPIECE_UNDERLINE, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME, TRANSFORMERS_CACHE, WEIGHTS_NAME, TensorType, add_end_docstrings, add_start_docstrings, cached_path, is_apex_available, is_datasets_available, is_faiss_available, is_flax_available, is_psutil_available, is_py3nvml_available, is_sentencepiece_available, is_sklearn_available, is_tf_available, is_tokenizers_available, is_torch_available, is_torch_tpu_available, ) from .hf_argparser import HfArgumentParser # Integrations from .integrations import ( is_comet_available, is_optuna_available, is_ray_available, is_ray_tune_available, is_tensorboard_available, is_wandb_available, ) # Model Cards from .modelcard import ModelCard # TF 2.0 <=> PyTorch conversion utilities from .modeling_tf_pytorch_utils import ( convert_tf_weight_name_to_pt_weight_name, load_pytorch_checkpoint_in_tf2_model, load_pytorch_model_in_tf2_model, load_pytorch_weights_in_tf2_model, load_tf2_checkpoint_in_pytorch_model, load_tf2_model_in_pytorch_model, load_tf2_weights_in_pytorch_model, ) from .models.albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig from .models.auto import ( ALL_PRETRAINED_CONFIG_ARCHIVE_MAP, CONFIG_MAPPING, MODEL_NAMES_MAPPING, TOKENIZER_MAPPING, AutoConfig, AutoTokenizer, ) from .models.bart import BartConfig, BartTokenizer from .models.bert import ( BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BasicTokenizer, BertConfig, BertTokenizer, WordpieceTokenizer, ) from .models.bert_generation import BertGenerationConfig from .models.bert_japanese import BertJapaneseTokenizer, CharacterTokenizer, MecabTokenizer from .models.bertweet import BertweetTokenizer from .models.blenderbot import BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotConfig, BlenderbotTokenizer from .models.blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP, BlenderbotSmallConfig, BlenderbotSmallTokenizer, ) from .models.camembert import CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CamembertConfig from .models.convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertTokenizer from .models.ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig, CTRLTokenizer from .models.deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaTokenizer from .models.deberta_v2 import DEBERTA_V2_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaV2Config from .models.distilbert import 
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DistilBertConfig, DistilBertTokenizer from .models.dpr import ( DPR_PRETRAINED_CONFIG_ARCHIVE_MAP, DPRConfig, DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderOutput, DPRReaderTokenizer, ) from .models.electra import ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ElectraConfig, ElectraTokenizer from .models.encoder_decoder import EncoderDecoderConfig from .models.flaubert import FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, FlaubertConfig, FlaubertTokenizer from .models.fsmt import FSMT_PRETRAINED_CONFIG_ARCHIVE_MAP, FSMTConfig, FSMTTokenizer from .models.funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig, FunnelTokenizer from .models.gpt2 import GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2Config, GPT2Tokenizer from .models.herbert import HerbertTokenizer from .models.ibert import IBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, IBertConfig from .models.layoutlm import LAYOUTLM_PRETRAINED_CONFIG_ARCHIVE_MAP, LayoutLMConfig, LayoutLMTokenizer from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer from .models.lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig, LxmertTokenizer from .models.m2m_100 import M2M_100_PRETRAINED_CONFIG_ARCHIVE_MAP, M2M100Config from .models.marian import MarianConfig from .models.mbart import MBartConfig from .models.mmbt import MMBTConfig from .models.mobilebert import MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileBertConfig, MobileBertTokenizer from .models.mpnet import MPNET_PRETRAINED_CONFIG_ARCHIVE_MAP, MPNetConfig, MPNetTokenizer from .models.mt5 import MT5Config from .models.openai import OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OpenAIGPTConfig, OpenAIGPTTokenizer from .models.pegasus import PegasusConfig from .models.phobert import PhobertTokenizer from .models.prophetnet import PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ProphetNetConfig, ProphetNetTokenizer from .models.rag import RagConfig, RagRetriever, RagTokenizer from .models.reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig from .models.retribert import RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RetriBertConfig, RetriBertTokenizer from .models.roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaTokenizer from .models.speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig, Speech2TextFeatureExtractor, ) from .models.squeezebert import SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, SqueezeBertConfig, SqueezeBertTokenizer from .models.t5 import T5_PRETRAINED_CONFIG_ARCHIVE_MAP, T5Config from .models.tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig, TapasTokenizer from .models.transfo_xl import ( TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig, TransfoXLCorpus, TransfoXLTokenizer, ) from .models.wav2vec2 import ( WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, Wav2Vec2Config, Wav2Vec2CTCTokenizer, Wav2Vec2FeatureExtractor, Wav2Vec2Processor, Wav2Vec2Tokenizer, ) from .models.xlm import XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMConfig, XLMTokenizer from .models.xlm_prophetnet import XLM_PROPHETNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMProphetNetConfig from .models.xlm_roberta import XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLMRobertaConfig from .models.xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig # Pipelines from .pipelines import ( Conversation, ConversationalPipeline, CsvPipelineDataFormat, 
FeatureExtractionPipeline, FillMaskPipeline, JsonPipelineDataFormat, NerPipeline, PipedPipelineDataFormat, Pipeline, PipelineDataFormat, QuestionAnsweringPipeline, SummarizationPipeline, TableQuestionAnsweringPipeline, Text2TextGenerationPipeline, TextClassificationPipeline, TextGenerationPipeline, TokenClassificationPipeline, TranslationPipeline, ZeroShotClassificationPipeline, pipeline, ) # Tokenization from .tokenization_utils import PreTrainedTokenizer from .tokenization_utils_base import ( AddedToken, BatchEncoding, CharSpan, PreTrainedTokenizerBase, SpecialTokensMixin, TokenSpan, ) # Trainer from .trainer_callback import ( DefaultFlowCallback, EarlyStoppingCallback, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState, ) from .trainer_utils import EvalPrediction, IntervalStrategy, SchedulerType, set_seed from .training_args import TrainingArguments from .training_args_seq2seq import Seq2SeqTrainingArguments from .training_args_tf import TFTrainingArguments if is_sentencepiece_available(): from .models.albert import AlbertTokenizer from .models.barthez import BarthezTokenizer from .models.bert_generation import BertGenerationTokenizer from .models.camembert import CamembertTokenizer from .models.deberta_v2 import DebertaV2Tokenizer from .models.m2m_100 import M2M100Tokenizer from .models.marian import MarianTokenizer from .models.mbart import MBart50Tokenizer, MBartTokenizer from .models.mt5 import MT5Tokenizer from .models.pegasus import PegasusTokenizer from .models.reformer import ReformerTokenizer from .models.speech_to_text import Speech2TextProcessor, Speech2TextTokenizer from .models.t5 import T5Tokenizer from .models.xlm_prophetnet import XLMProphetNetTokenizer from .models.xlm_roberta import XLMRobertaTokenizer from .models.xlnet import XLNetTokenizer else: from .utils.dummy_sentencepiece_objects import * if is_tokenizers_available(): from .models.albert import AlbertTokenizerFast from .models.bart import BartTokenizerFast from .models.barthez import BarthezTokenizerFast from .models.bert import BertTokenizerFast from .models.camembert import CamembertTokenizerFast from .models.convbert import ConvBertTokenizerFast from .models.distilbert import DistilBertTokenizerFast from .models.dpr import DPRContextEncoderTokenizerFast, DPRQuestionEncoderTokenizerFast, DPRReaderTokenizerFast from .models.electra import ElectraTokenizerFast from .models.funnel import FunnelTokenizerFast from .models.gpt2 import GPT2TokenizerFast from .models.herbert import HerbertTokenizerFast from .models.layoutlm import LayoutLMTokenizerFast from .models.led import LEDTokenizerFast from .models.longformer import LongformerTokenizerFast from .models.lxmert import LxmertTokenizerFast from .models.mbart import MBart50TokenizerFast, MBartTokenizerFast from .models.mobilebert import MobileBertTokenizerFast from .models.mpnet import MPNetTokenizerFast from .models.mt5 import MT5TokenizerFast from .models.openai import OpenAIGPTTokenizerFast from .models.pegasus import PegasusTokenizerFast from .models.reformer import ReformerTokenizerFast from .models.retribert import RetriBertTokenizerFast from .models.roberta import RobertaTokenizerFast from .models.squeezebert import SqueezeBertTokenizerFast from .models.t5 import T5TokenizerFast from .models.xlm_roberta import XLMRobertaTokenizerFast from .models.xlnet import XLNetTokenizerFast from .tokenization_utils_fast import PreTrainedTokenizerFast if is_sentencepiece_available(): from .convert_slow_tokenizer import 
SLOW_TO_FAST_CONVERTERS, convert_slow_tokenizer else: from .utils.dummy_tokenizers_objects import * # Modeling if is_torch_available(): # Benchmarks from .benchmark.benchmark import PyTorchBenchmark from .benchmark.benchmark_args import PyTorchBenchmarkArguments from .data.data_collator import ( DataCollator, DataCollatorForLanguageModeling, DataCollatorForPermutationLanguageModeling, DataCollatorForSeq2Seq, DataCollatorForSOP, DataCollatorForTokenClassification, DataCollatorForWholeWordMask, DataCollatorWithPadding, default_data_collator, ) from .data.datasets import ( GlueDataset, GlueDataTrainingArguments, LineByLineTextDataset, LineByLineWithRefDataset, LineByLineWithSOPTextDataset, SquadDataset, SquadDataTrainingArguments, TextDataset, TextDatasetForNextSentencePrediction, ) from .generation_beam_search import BeamScorer, BeamSearchScorer from .generation_logits_process import ( HammingDiversityLogitsProcessor, LogitsProcessor, LogitsProcessorList, LogitsWarper, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) from .generation_utils import top_k_top_p_filtering from .modeling_utils import Conv1D, PreTrainedModel, apply_chunking_to_forward, prune_layer from .models.albert import ( ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, AlbertForMaskedLM, AlbertForMultipleChoice, AlbertForPreTraining, AlbertForQuestionAnswering, AlbertForSequenceClassification, AlbertForTokenClassification, AlbertModel, AlbertPreTrainedModel, load_tf_weights_in_albert, ) from .models.auto import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_MULTIPLE_CHOICE_MAPPING, MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForMultipleChoice, AutoModelForNextSentencePrediction, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTableQuestionAnswering, AutoModelForTokenClassification, AutoModelWithLMHead, ) from .models.bart import ( BART_PRETRAINED_MODEL_ARCHIVE_LIST, BartForCausalLM, BartForConditionalGeneration, BartForQuestionAnswering, BartForSequenceClassification, BartModel, BartPretrainedModel, PretrainedBartModel, ) from .models.bert import ( BERT_PRETRAINED_MODEL_ARCHIVE_LIST, BertForMaskedLM, BertForMultipleChoice, BertForNextSentencePrediction, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertLayer, BertLMHeadModel, BertModel, BertPreTrainedModel, load_tf_weights_in_bert, ) from .models.bert_generation import ( BertGenerationDecoder, BertGenerationEncoder, load_tf_weights_in_bert_generation, ) from .models.blenderbot import ( BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotForCausalLM, BlenderbotForConditionalGeneration, BlenderbotModel, ) from .models.blenderbot_small import ( BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST, BlenderbotSmallForCausalLM, BlenderbotSmallForConditionalGeneration, BlenderbotSmallModel, ) from .models.camembert import ( CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, CamembertForCausalLM, CamembertForMaskedLM, CamembertForMultipleChoice, 
CamembertForQuestionAnswering, CamembertForSequenceClassification, CamembertForTokenClassification, CamembertModel, ) from .models.convbert import ( CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, ConvBertForMaskedLM, ConvBertForMultipleChoice, ConvBertForQuestionAnswering, ConvBertForSequenceClassification, ConvBertForTokenClassification, ConvBertLayer, ConvBertModel, ConvBertPreTrainedModel, load_tf_weights_in_convbert, ) from .models.ctrl import ( CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, CTRLForSequenceClassification, CTRLLMHeadModel, CTRLModel, CTRLPreTrainedModel, ) from .models.deberta import ( DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaForMaskedLM, DebertaForQuestionAnswering, DebertaForSequenceClassification, DebertaForTokenClassification, DebertaModel, DebertaPreTrainedModel, ) from .models.deberta_v2 import ( DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST, DebertaV2ForMaskedLM, DebertaV2ForQuestionAnswering, DebertaV2ForSequenceClassification, DebertaV2ForTokenClassification, DebertaV2Model, DebertaV2PreTrainedModel, ) from .models.distilbert import ( DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, DistilBertForMaskedLM, DistilBertForMultipleChoice, DistilBertForQuestionAnswering, DistilBertForSequenceClassification, DistilBertForTokenClassification, DistilBertModel, DistilBertPreTrainedModel, ) from .models.dpr import ( DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, DPRContextEncoder, DPRPretrainedContextEncoder, DPRPretrainedQuestionEncoder, DPRPretrainedReader, DPRQuestionEncoder, DPRReader, ) from .models.electra import ( ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, ElectraForMaskedLM, ElectraForMultipleChoice, ElectraForPreTraining, ElectraForQuestionAnswering, ElectraForSequenceClassification, ElectraForTokenClassification, ElectraModel, ElectraPreTrainedModel, load_tf_weights_in_electra, ) from .models.encoder_decoder import EncoderDecoderModel from .models.flaubert import ( FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, FlaubertForMultipleChoice, FlaubertForQuestionAnswering, FlaubertForQuestionAnsweringSimple, FlaubertForSequenceClassification, FlaubertForTokenClassification, FlaubertModel, FlaubertWithLMHeadModel, ) from .models.fsmt import FSMTForConditionalGeneration, FSMTModel, PretrainedFSMTModel from .models.funnel import ( FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, FunnelBaseModel, FunnelForMaskedLM, FunnelForMultipleChoice, FunnelForPreTraining, FunnelForQuestionAnswering, FunnelForSequenceClassification, FunnelForTokenClassification, FunnelModel, load_tf_weights_in_funnel, ) from .models.gpt2 import ( GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, GPT2DoubleHeadsModel, GPT2ForSequenceClassification, GPT2LMHeadModel, GPT2Model, GPT2PreTrainedModel, load_tf_weights_in_gpt2, ) from .models.ibert import ( IBERT_PRETRAINED_MODEL_ARCHIVE_LIST, IBertForMaskedLM, IBertForMultipleChoice, IBertForQuestionAnswering, IBertForSequenceClassification, IBertForTokenClassification, IBertModel, ) from .models.layoutlm import ( LAYOUTLM_PRETRAINED_MODEL_ARCHIVE_LIST, LayoutLMForMaskedLM, LayoutLMForSequenceClassification, LayoutLMForTokenClassification, LayoutLMModel, ) from .models.led import ( LED_PRETRAINED_MODEL_ARCHIVE_LIST, LEDForConditionalGeneration, LEDForQuestionAnswering, LEDForSequenceClassification, LEDModel, ) from .models.longformer import ( LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, LongformerForMaskedLM, LongformerForMultipleChoice, LongformerForQuestionAnswering, LongformerForSequenceClassification, 
LongformerForTokenClassification, LongformerModel, LongformerSelfAttention, ) from .models.lxmert import ( LxmertEncoder, LxmertForPreTraining, LxmertForQuestionAnswering, LxmertModel, LxmertPreTrainedModel, LxmertVisualFeatureEncoder, LxmertXLayer, ) from .models.m2m_100 import M2M_100_PRETRAINED_MODEL_ARCHIVE_LIST, M2M100ForConditionalGeneration, M2M100Model from .models.marian import MarianForCausalLM, MarianModel, MarianMTModel from .models.mbart import ( MBartForCausalLM, MBartForConditionalGeneration, MBartForQuestionAnswering, MBartForSequenceClassification, MBartModel, ) from .models.mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings from .models.mobilebert import ( MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertLayer, MobileBertModel, MobileBertPreTrainedModel, load_tf_weights_in_mobilebert, ) from .models.mpnet import ( MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, MPNetForMaskedLM, MPNetForMultipleChoice, MPNetForQuestionAnswering, MPNetForSequenceClassification, MPNetForTokenClassification, MPNetLayer, MPNetModel, MPNetPreTrainedModel, ) from .models.mt5 import MT5EncoderModel, MT5ForConditionalGeneration, MT5Model from .models.openai import ( OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification, OpenAIGPTLMHeadModel, OpenAIGPTModel, OpenAIGPTPreTrainedModel, load_tf_weights_in_openai_gpt, ) from .models.pegasus import PegasusForCausalLM, PegasusForConditionalGeneration, PegasusModel from .models.prophetnet import ( PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, ProphetNetDecoder, ProphetNetEncoder, ProphetNetForCausalLM, ProphetNetForConditionalGeneration, ProphetNetModel, ProphetNetPreTrainedModel, ) from .models.rag import RagModel, RagSequenceForGeneration, RagTokenForGeneration from .models.reformer import ( REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, ReformerAttention, ReformerForMaskedLM, ReformerForQuestionAnswering, ReformerForSequenceClassification, ReformerLayer, ReformerModel, ReformerModelWithLMHead, ) from .models.retribert import RETRIBERT_PRETRAINED_MODEL_ARCHIVE_LIST, RetriBertModel, RetriBertPreTrainedModel from .models.roberta import ( ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, RobertaForCausalLM, RobertaForMaskedLM, RobertaForMultipleChoice, RobertaForQuestionAnswering, RobertaForSequenceClassification, RobertaForTokenClassification, RobertaModel, ) from .models.speech_to_text import ( SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST, Speech2TextForConditionalGeneration, Speech2TextModel, ) from .models.squeezebert import ( SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, SqueezeBertForMaskedLM, SqueezeBertForMultipleChoice, SqueezeBertForQuestionAnswering, SqueezeBertForSequenceClassification, SqueezeBertForTokenClassification, SqueezeBertModel, SqueezeBertModule, SqueezeBertPreTrainedModel, ) from .models.t5 import ( T5_PRETRAINED_MODEL_ARCHIVE_LIST, T5EncoderModel, T5ForConditionalGeneration, T5Model, T5PreTrainedModel, load_tf_weights_in_t5, ) from .models.tapas import ( TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, ) from .models.transfo_xl import ( TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, AdaptiveEmbedding, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel, TransfoXLPreTrainedModel, 
load_tf_weights_in_transfo_xl, ) from .models.wav2vec2 import ( WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, Wav2Vec2ForCTC, Wav2Vec2ForMaskedLM, Wav2Vec2Model, Wav2Vec2PreTrainedModel, ) from .models.xlm import ( XLM_PRETRAINED_MODEL_ARCHIVE_LIST, XLMForMultipleChoice, XLMForQuestionAnswering, XLMForQuestionAnsweringSimple, XLMForSequenceClassification, XLMForTokenClassification, XLMModel, XLMPreTrainedModel, XLMWithLMHeadModel, ) from .models.xlm_prophetnet import ( XLM_PROPHETNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLMProphetNetDecoder, XLMProphetNetEncoder, XLMProphetNetForCausalLM, XLMProphetNetForConditionalGeneration, XLMProphetNetModel, ) from .models.xlm_roberta import ( XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, XLMRobertaForCausalLM, XLMRobertaForMaskedLM, XLMRobertaForMultipleChoice, XLMRobertaForQuestionAnswering, XLMRobertaForSequenceClassification, XLMRobertaForTokenClassification, XLMRobertaModel, ) from .models.xlnet import ( XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, XLNetForMultipleChoice, XLNetForQuestionAnswering, XLNetForQuestionAnsweringSimple, XLNetForSequenceClassification, XLNetForTokenClassification, XLNetLMHeadModel, XLNetModel, XLNetPreTrainedModel, load_tf_weights_in_xlnet, ) # Optimization from .optimization import ( Adafactor, AdamW, get_constant_schedule, get_constant_schedule_with_warmup, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, get_scheduler, ) # Trainer from .trainer import Trainer from .trainer_pt_utils import torch_distributed_zero_first from .trainer_seq2seq import Seq2SeqTrainer else: from .utils.dummy_pt_objects import * # TensorFlow if is_tf_available(): from .benchmark.benchmark_args_tf import TensorFlowBenchmarkArguments # Benchmarks from .benchmark.benchmark_tf import TensorFlowBenchmark from .generation_tf_utils import tf_top_k_top_p_filtering from .modeling_tf_utils import TFPreTrainedModel, TFSequenceSummary, TFSharedEmbeddings, shape_list from .models.albert import ( TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFAlbertForMaskedLM, TFAlbertForMultipleChoice, TFAlbertForPreTraining, TFAlbertForQuestionAnswering, TFAlbertForSequenceClassification, TFAlbertForTokenClassification, TFAlbertMainLayer, TFAlbertModel, TFAlbertPreTrainedModel, ) from .models.auto import ( TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING, TFAutoModel, TFAutoModelForCausalLM, TFAutoModelForMaskedLM, TFAutoModelForMultipleChoice, TFAutoModelForPreTraining, TFAutoModelForQuestionAnswering, TFAutoModelForSeq2SeqLM, TFAutoModelForSequenceClassification, TFAutoModelForTokenClassification, TFAutoModelWithLMHead, ) from .models.bart import TFBartForConditionalGeneration, TFBartModel, TFBartPretrainedModel from .models.bert import ( TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFBertEmbeddings, TFBertForMaskedLM, TFBertForMultipleChoice, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertLMHeadModel, TFBertMainLayer, TFBertModel, TFBertPreTrainedModel, ) from .models.blenderbot import TFBlenderbotForConditionalGeneration, 
TFBlenderbotModel from .models.blenderbot_small import TFBlenderbotSmallForConditionalGeneration, TFBlenderbotSmallModel from .models.camembert import ( TF_CAMEMBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFCamembertForMaskedLM, TFCamembertForMultipleChoice, TFCamembertForQuestionAnswering, TFCamembertForSequenceClassification, TFCamembertForTokenClassification, TFCamembertModel, ) from .models.convbert import ( TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFConvBertForMaskedLM, TFConvBertForMultipleChoice, TFConvBertForQuestionAnswering, TFConvBertForSequenceClassification, TFConvBertForTokenClassification, TFConvBertLayer, TFConvBertModel, TFConvBertPreTrainedModel, ) from .models.ctrl import ( TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST, TFCTRLForSequenceClassification, TFCTRLLMHeadModel, TFCTRLModel, TFCTRLPreTrainedModel, ) from .models.distilbert import ( TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFDistilBertForMaskedLM, TFDistilBertForMultipleChoice, TFDistilBertForQuestionAnswering, TFDistilBertForSequenceClassification, TFDistilBertForTokenClassification, TFDistilBertMainLayer, TFDistilBertModel, TFDistilBertPreTrainedModel, ) from .models.dpr import ( TF_DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, TF_DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, TFDPRContextEncoder, TFDPRPretrainedContextEncoder, TFDPRPretrainedQuestionEncoder, TFDPRPretrainedReader, TFDPRQuestionEncoder, TFDPRReader, ) from .models.electra import ( TF_ELECTRA_PRETRAINED_MODEL_ARCHIVE_LIST, TFElectraForMaskedLM, TFElectraForMultipleChoice, TFElectraForPreTraining, TFElectraForQuestionAnswering, TFElectraForSequenceClassification, TFElectraForTokenClassification, TFElectraModel, TFElectraPreTrainedModel, ) from .models.flaubert import ( TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFFlaubertForMultipleChoice, TFFlaubertForQuestionAnsweringSimple, TFFlaubertForSequenceClassification, TFFlaubertForTokenClassification, TFFlaubertModel, TFFlaubertWithLMHeadModel, ) from .models.funnel import ( TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST, TFFunnelBaseModel, TFFunnelForMaskedLM, TFFunnelForMultipleChoice, TFFunnelForPreTraining, TFFunnelForQuestionAnswering, TFFunnelForSequenceClassification, TFFunnelForTokenClassification, TFFunnelModel, ) from .models.gpt2 import ( TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST, TFGPT2DoubleHeadsModel, TFGPT2ForSequenceClassification, TFGPT2LMHeadModel, TFGPT2MainLayer, TFGPT2Model, TFGPT2PreTrainedModel, ) from .models.led import TFLEDForConditionalGeneration, TFLEDModel, TFLEDPreTrainedModel from .models.longformer import ( TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST, TFLongformerForMaskedLM, TFLongformerForMultipleChoice, TFLongformerForQuestionAnswering, TFLongformerForSequenceClassification, TFLongformerForTokenClassification, TFLongformerModel, TFLongformerSelfAttention, ) from .models.lxmert import ( TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFLxmertForPreTraining, TFLxmertMainLayer, TFLxmertModel, TFLxmertPreTrainedModel, TFLxmertVisualFeatureEncoder, ) from .models.marian import TFMarian, TFMarianMTModel from .models.mbart import TFMBartForConditionalGeneration, TFMBartModel from .models.mobilebert import ( TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST, TFMobileBertForMaskedLM, TFMobileBertForMultipleChoice, TFMobileBertForNextSentencePrediction, TFMobileBertForPreTraining, TFMobileBertForQuestionAnswering, TFMobileBertForSequenceClassification, TFMobileBertForTokenClassification, TFMobileBertMainLayer, TFMobileBertModel, 
TFMobileBertPreTrainedModel, ) from .models.mpnet import ( TF_MPNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFMPNetForMaskedLM, TFMPNetForMultipleChoice, TFMPNetForQuestionAnswering, TFMPNetForSequenceClassification, TFMPNetForTokenClassification, TFMPNetMainLayer, TFMPNetModel, TFMPNetPreTrainedModel, ) from .models.mt5 import TFMT5EncoderModel, TFMT5ForConditionalGeneration, TFMT5Model from .models.openai import ( TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST, TFOpenAIGPTDoubleHeadsModel, TFOpenAIGPTForSequenceClassification, TFOpenAIGPTLMHeadModel, TFOpenAIGPTMainLayer, TFOpenAIGPTModel, TFOpenAIGPTPreTrainedModel, ) from .models.pegasus import TFPegasusForConditionalGeneration, TFPegasusModel from .models.rag import TFRagModel, TFRagSequenceForGeneration, TFRagTokenForGeneration from .models.roberta import ( TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFRobertaForMaskedLM, TFRobertaForMultipleChoice, TFRobertaForQuestionAnswering, TFRobertaForSequenceClassification, TFRobertaForTokenClassification, TFRobertaMainLayer, TFRobertaModel, TFRobertaPreTrainedModel, ) from .models.t5 import ( TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST, TFT5EncoderModel, TFT5ForConditionalGeneration, TFT5Model, TFT5PreTrainedModel, ) from .models.transfo_xl import ( TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST, TFAdaptiveEmbedding, TFTransfoXLForSequenceClassification, TFTransfoXLLMHeadModel, TFTransfoXLMainLayer, TFTransfoXLModel, TFTransfoXLPreTrainedModel, ) from .models.xlm import ( TF_XLM_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMForMultipleChoice, TFXLMForQuestionAnsweringSimple, TFXLMForSequenceClassification, TFXLMForTokenClassification, TFXLMMainLayer, TFXLMModel, TFXLMPreTrainedModel, TFXLMWithLMHeadModel, ) from .models.xlm_roberta import ( TF_XLM_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLMRobertaForMaskedLM, TFXLMRobertaForMultipleChoice, TFXLMRobertaForQuestionAnswering, TFXLMRobertaForSequenceClassification, TFXLMRobertaForTokenClassification, TFXLMRobertaModel, ) from .models.xlnet import ( TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST, TFXLNetForMultipleChoice, TFXLNetForQuestionAnsweringSimple, TFXLNetForSequenceClassification, TFXLNetForTokenClassification, TFXLNetLMHeadModel, TFXLNetMainLayer, TFXLNetModel, TFXLNetPreTrainedModel, ) # Optimization from .optimization_tf import AdamWeightDecay, GradientAccumulator, WarmUp, create_optimizer # Trainer from .trainer_tf import TFTrainer else: # Import the same objects as dummies to get them in the namespace. # They will raise an import error if the user tries to instantiate / use them. from .utils.dummy_tf_objects import * if is_flax_available(): from .modeling_flax_utils import FlaxPreTrainedModel from .models.auto import FLAX_MODEL_MAPPING, FlaxAutoModel from .models.bert import FlaxBertForMaskedLM, FlaxBertModel from .models.roberta import FlaxRobertaModel else: # Import the same objects as dummies to get them in the namespace. # They will raise an import error if the user tries to instantiate / use them. from .utils.dummy_flax_objects import * else: import importlib import os import sys class _LazyModule(_BaseLazyModule): """ Module class that surfaces all objects but only performs associated imports when the objects are requested. """ __file__ = globals()["__file__"] __path__ = [os.path.dirname(__file__)] def _get_module(self, module_name: str): return importlib.import_module("." + module_name, self.__name__) def __getattr__(self, name: str): # Special handling for the version, which is a constant from this module and not imported in a submodule. 
if name == "__version__": return __version__ return super().__getattr__(name) sys.modules[__name__] = _LazyModule(__name__, _import_structure) if not is_tf_available() and not is_torch_available() and not is_flax_available(): logger.warning( "None of PyTorch, TensorFlow >= 2.0, or Flax have been found. " "Models won't be available and only tokenizers, configuration " "and file/data utilities can be used." )
AdaMix/src/transformers/__init__.py/0
{ "file_path": "AdaMix/src/transformers/__init__.py", "repo_id": "AdaMix", "token_count": 43837 }
47
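The `_LazyModule` at the end of the file above is what keeps `import transformers` cheap: a submodule is only imported when one of its attributes is first accessed. Below is a minimal, self-contained sketch of that pattern; the package name and import structure are hypothetical, not the transformers implementation itself.

```python
# Minimal lazy-import sketch; `my_package` and its structure are illustrative only.
import importlib
from types import ModuleType


class LazyModule(ModuleType):
    """Defers submodule imports until an exported name is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute back to the submodule that defines it.
        self._name_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, name):
        module_name = self._name_to_module.get(name)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {name!r}")
        submodule = importlib.import_module("." + module_name, self.__name__)
        return getattr(submodule, name)


# Typical use at the bottom of a package __init__.py:
# import sys
# sys.modules[__name__] = LazyModule(__name__, {"models": ["MyModel"]})
```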
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import dataclasses import json from dataclasses import dataclass, field from time import time from typing import List from ..utils import logging logger = logging.get_logger(__name__) def list_field(default=None, metadata=None): return field(default_factory=lambda: default, metadata=metadata) @dataclass class BenchmarkArguments: """ BenchmarkArguments are arguments we use in our benchmark scripts **which relate to the benchmark runs themselves**. Using `HfArgumentParser` we can turn this class into argparse arguments to be able to specify them on the command line. """ models: List[str] = list_field( default=[], metadata={ "help": "Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version of all available models" }, ) batch_sizes: List[int] = list_field( default=[8], metadata={"help": "List of batch sizes for which memory and time performance will be evaluated"} ) sequence_lengths: List[int] = list_field( default=[8, 32, 128, 512], metadata={"help": "List of sequence lengths for which memory and time performance will be evaluated"}, ) inference: bool = field( default=True, metadata={"help": "Whether to benchmark inference of model. Inference can be disabled via --no-inference."}, ) cuda: bool = field( default=True, metadata={"help": "Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."}, ) tpu: bool = field( default=True, metadata={"help": "Whether to run on available tpu devices. TPU can be disabled via --no-tpu."} ) fp16: bool = field(default=False, metadata={"help": "Use FP16 to accelerate inference."}) training: bool = field(default=False, metadata={"help": "Benchmark training of model"}) verbose: bool = field(default=False, metadata={"help": "Verbose memory tracing"}) speed: bool = field( default=True, metadata={"help": "Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."}, ) memory: bool = field( default=True, metadata={ "help": "Whether to perform memory measurements. Memory measurements can be disabled via --no-memory" }, ) trace_memory_line_by_line: bool = field(default=False, metadata={"help": "Trace memory line by line"}) save_to_csv: bool = field(default=False, metadata={"help": "Save result to a CSV file"}) log_print: bool = field(default=False, metadata={"help": "Save all print statements in a log file"}) env_print: bool = field(default=False, metadata={"help": "Whether to print environment information"}) multi_process: bool = field( default=True, metadata={ "help": "Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled for debugging / testing and on TPU."
}, ) inference_time_csv_file: str = field( default=f"inference_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv."}, ) inference_memory_csv_file: str = field( default=f"inference_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv."}, ) train_time_csv_file: str = field( default=f"train_time_{round(time())}.csv", metadata={"help": "CSV filename used if saving time results to csv for training."}, ) train_memory_csv_file: str = field( default=f"train_memory_{round(time())}.csv", metadata={"help": "CSV filename used if saving memory results to csv for training."}, ) env_info_csv_file: str = field( default=f"env_info_{round(time())}.csv", metadata={"help": "CSV filename used if saving environment information."}, ) log_filename: str = field( default=f"log_{round(time())}.csv", metadata={"help": "Log filename used if print statements are saved in log."}, ) repeat: int = field(default=3, metadata={"help": "Times an experiment will be run."}) only_pretrain_model: bool = field( default=False, metadata={ "help": "Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain model weights." }, ) def to_json_string(self): """ Serializes this instance to a JSON string. """ return json.dumps(dataclasses.asdict(self), indent=2) @property def model_names(self): assert ( len(self.models) > 0 ), "Please make sure you provide at least one model name / model identifier, *e.g.* `--models bert-base-cased` or `args.models = ['bert-base-cased']." return self.models @property def do_multi_processing(self): if not self.multi_process: return False elif self.is_tpu: logger.info("Multiprocessing is currently not possible on TPU.") return False else: return True
AdaMix/src/transformers/benchmark/benchmark_args_utils.py/0
{ "file_path": "AdaMix/src/transformers/benchmark/benchmark_args_utils.py", "repo_id": "AdaMix", "token_count": 2073 }
48
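Because `BenchmarkArguments` is a dataclass built from `field`/`list_field`, it can be populated either from the command line or directly in code via `HfArgumentParser`. A hedged sketch follows; the model name and batch sizes below are arbitrary example values.

```python
# Sketch of consuming BenchmarkArguments; the argv values are illustrative only.
from transformers import HfArgumentParser
from transformers.benchmark.benchmark_args_utils import BenchmarkArguments

parser = HfArgumentParser(BenchmarkArguments)
# Passing an explicit argv keeps the example self-contained.
(benchmark_args,) = parser.parse_args_into_dataclasses(
    args=["--models", "bert-base-cased", "--batch_sizes", "8", "16"]
)
print(benchmark_args.model_names)   # ["bert-base-cased"]
print(benchmark_args.batch_sizes)   # [8, 16]
```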
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Convert pytorch checkpoints to TensorFlow """ import argparse import os from . import ( ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BART_PRETRAINED_MODEL_ARCHIVE_LIST, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, WEIGHTS_NAME, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, BartConfig, BertConfig, CamembertConfig, CTRLConfig, DistilBertConfig, DPRConfig, ElectraConfig, FlaubertConfig, GPT2Config, LxmertConfig, OpenAIGPTConfig, RobertaConfig, T5Config, TFAlbertForPreTraining, TFBartForConditionalGeneration, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFCamembertForMaskedLM, TFCTRLLMHeadModel, TFDistilBertForMaskedLM, TFDistilBertForQuestionAnswering, TFDPRContextEncoder, TFDPRQuestionEncoder, TFDPRReader, TFElectraForPreTraining, TFFlaubertWithLMHeadModel, TFGPT2LMHeadModel, TFLxmertForPreTraining, TFLxmertVisualFeatureEncoder, TFOpenAIGPTLMHeadModel, TFRobertaForMaskedLM, TFRobertaForSequenceClassification, TFT5ForConditionalGeneration, TFTransfoXLLMHeadModel, TFXLMRobertaForMaskedLM, TFXLMWithLMHeadModel, TFXLNetLMHeadModel, TransfoXLConfig, XLMConfig, XLMRobertaConfig, XLNetConfig, cached_path, is_torch_available, load_pytorch_checkpoint_in_tf2_model, ) from .file_utils import hf_bucket_url from .utils import logging if is_torch_available(): import numpy as np import torch from . 
import ( AlbertForPreTraining, BartForConditionalGeneration, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, CamembertForMaskedLM, CTRLLMHeadModel, DistilBertForMaskedLM, DistilBertForQuestionAnswering, DPRContextEncoder, DPRQuestionEncoder, DPRReader, ElectraForPreTraining, FlaubertWithLMHeadModel, GPT2LMHeadModel, LxmertForPreTraining, LxmertVisualFeatureEncoder, OpenAIGPTLMHeadModel, RobertaForMaskedLM, RobertaForSequenceClassification, T5ForConditionalGeneration, TransfoXLLMHeadModel, XLMRobertaForMaskedLM, XLMWithLMHeadModel, XLNetLMHeadModel, ) logging.set_verbosity_info() MODEL_CLASSES = { "bart": ( BartConfig, TFBartForConditionalGeneration, BartForConditionalGeneration, BART_PRETRAINED_MODEL_ARCHIVE_LIST, ), "bert": ( BertConfig, TFBertForPreTraining, BertForPreTraining, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-uncased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-large-cased-whole-word-masking-finetuned-squad": ( BertConfig, TFBertForQuestionAnswering, BertForQuestionAnswering, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "bert-base-cased-finetuned-mrpc": ( BertConfig, TFBertForSequenceClassification, BertForSequenceClassification, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "dpr": ( DPRConfig, TFDPRQuestionEncoder, TFDPRContextEncoder, TFDPRReader, DPRQuestionEncoder, DPRContextEncoder, DPRReader, DPR_CONTEXT_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_QUESTION_ENCODER_PRETRAINED_MODEL_ARCHIVE_LIST, DPR_READER_PRETRAINED_MODEL_ARCHIVE_LIST, ), "gpt2": ( GPT2Config, TFGPT2LMHeadModel, GPT2LMHeadModel, GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlnet": ( XLNetConfig, TFXLNetLMHeadModel, XLNetLMHeadModel, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm": ( XLMConfig, TFXLMWithLMHeadModel, XLMWithLMHeadModel, XLM_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "xlm-roberta": ( XLMRobertaConfig, TFXLMRobertaForMaskedLM, XLMRobertaForMaskedLM, XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "transfo-xl": ( TransfoXLConfig, TFTransfoXLLMHeadModel, TransfoXLLMHeadModel, TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "openai-gpt": ( OpenAIGPTConfig, TFOpenAIGPTLMHeadModel, OpenAIGPTLMHeadModel, OPENAI_GPT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "roberta": ( RobertaConfig, TFRobertaForMaskedLM, RobertaForMaskedLM, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "roberta-large-mnli": ( RobertaConfig, TFRobertaForSequenceClassification, RobertaForSequenceClassification, ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "camembert": ( CamembertConfig, TFCamembertForMaskedLM, CamembertForMaskedLM, CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "flaubert": ( FlaubertConfig, TFFlaubertWithLMHeadModel, FlaubertWithLMHeadModel, FLAUBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert": ( DistilBertConfig, TFDistilBertForMaskedLM, DistilBertForMaskedLM, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "distilbert-base-distilled-squad": ( DistilBertConfig, TFDistilBertForQuestionAnswering, DistilBertForQuestionAnswering, DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert": ( LxmertConfig, TFLxmertForPreTraining, LxmertForPreTraining, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "lxmert-visual-feature-encoder": ( LxmertConfig, TFLxmertVisualFeatureEncoder, LxmertVisualFeatureEncoder, LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "ctrl": ( CTRLConfig, TFCTRLLMHeadModel, CTRLLMHeadModel, CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "albert": ( AlbertConfig, TFAlbertForPreTraining, AlbertForPreTraining, 
ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "t5": ( T5Config, TFT5ForConditionalGeneration, T5ForConditionalGeneration, T5_PRETRAINED_CONFIG_ARCHIVE_MAP, ), "electra": ( ElectraConfig, TFElectraForPreTraining, ElectraForPreTraining, ELECTRA_PRETRAINED_CONFIG_ARCHIVE_MAP, ), } def convert_pt_checkpoint_to_tf( model_type, pytorch_checkpoint_path, config_file, tf_dump_path, compare_with_pt_model=False, use_cached_models=True ): if model_type not in MODEL_CLASSES: raise ValueError("Unrecognized model type, should be one of {}.".format(list(MODEL_CLASSES.keys()))) config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type] # Initialise TF model if config_file in aws_config_map: config_file = cached_path(aws_config_map[config_file], force_download=not use_cached_models) config = config_class.from_json_file(config_file) config.output_hidden_states = True config.output_attentions = True print("Building TensorFlow model from configuration: {}".format(str(config))) tf_model = model_class(config) # Resolve the PyTorch checkpoint path (shortcut names are downloaded first) if pytorch_checkpoint_path in aws_config_map.keys(): pytorch_checkpoint_url = hf_bucket_url(pytorch_checkpoint_path, filename=WEIGHTS_NAME) pytorch_checkpoint_path = cached_path(pytorch_checkpoint_url, force_download=not use_cached_models) # Load PyTorch checkpoint in tf2 model: tf_model = load_pytorch_checkpoint_in_tf2_model(tf_model, pytorch_checkpoint_path) if compare_with_pt_model: tfo = tf_model(tf_model.dummy_inputs, training=False) # build the network state_dict = torch.load(pytorch_checkpoint_path, map_location="cpu") pt_model = pt_model_class.from_pretrained( pretrained_model_name_or_path=None, config=config, state_dict=state_dict ) with torch.no_grad(): pto = pt_model(**pt_model.dummy_inputs) np_pt = pto[0].numpy() np_tf = tfo[0].numpy() diff = np.amax(np.abs(np_pt - np_tf)) print("Max absolute difference between models outputs {}".format(diff)) assert diff <= 2e-2, "Error, model absolute difference is >2e-2: {}".format(diff) # Save the TensorFlow model print("Save TensorFlow model to {}".format(tf_dump_path)) tf_model.save_weights(tf_dump_path, save_format="h5") def convert_all_pt_checkpoints_to_tf( args_model_type, tf_dump_path, model_shortcut_names_or_path=None, config_shortcut_names_or_path=None, compare_with_pt_model=False, use_cached_models=False, remove_cached_files=False, only_convert_finetuned_models=False, ): if args_model_type is None: model_types = list(MODEL_CLASSES.keys()) else: model_types = [args_model_type] for j, model_type in enumerate(model_types, start=1): print("=" * 100) print(" Converting model type {}/{}: {}".format(j, len(model_types), model_type)) print("=" * 100) if model_type not in MODEL_CLASSES: raise ValueError( "Unrecognized model type {}, should be one of {}.".format(model_type, list(MODEL_CLASSES.keys())) ) config_class, model_class, pt_model_class, aws_config_map = MODEL_CLASSES[model_type] # each entry is a 4-tuple, so unpacking a fifth `aws_model_maps` value here would fail at runtime if model_shortcut_names_or_path is None: model_shortcut_names_or_path = list(aws_config_map.keys()) if config_shortcut_names_or_path is None: config_shortcut_names_or_path = model_shortcut_names_or_path for i, (model_shortcut_name, config_shortcut_name) in enumerate( zip(model_shortcut_names_or_path, config_shortcut_names_or_path), start=1 ): print("-" * 100) if "-squad" in model_shortcut_name or "-mrpc" in model_shortcut_name or "-mnli" in model_shortcut_name: if not only_convert_finetuned_models: print(" Skipping finetuned checkpoint {}".format(model_shortcut_name)) continue model_type = model_shortcut_name
elif only_convert_finetuned_models: print(" Skipping non-finetuned checkpoint {}".format(model_shortcut_name)) continue print( " Converting checkpoint {}/{}: {} - model_type {}".format( i, len(aws_config_map), model_shortcut_name, model_type ) ) print("-" * 100) if config_shortcut_name in aws_config_map: config_file = cached_path(aws_config_map[config_shortcut_name], force_download=not use_cached_models) else: config_file = cached_path(config_shortcut_name, force_download=not use_cached_models) if model_shortcut_name in aws_config_map: # shortcut names resolve to hub checkpoints; anything else is treated as a local path model_file = cached_path( hf_bucket_url(model_shortcut_name, filename=WEIGHTS_NAME), force_download=not use_cached_models ) else: model_file = cached_path(model_shortcut_name, force_download=not use_cached_models) if os.path.isfile(model_shortcut_name): model_shortcut_name = "converted_model" convert_pt_checkpoint_to_tf( model_type=model_type, pytorch_checkpoint_path=model_file, config_file=config_file, tf_dump_path=os.path.join(tf_dump_path, model_shortcut_name + "-tf_model.h5"), compare_with_pt_model=compare_with_pt_model, ) if remove_cached_files: os.remove(config_file) os.remove(model_file)
) parser.add_argument( "--use_cached_models", action="store_true", help="Use cached models if possible instead of updating to latest checkpoint versions.", ) parser.add_argument( "--remove_cached_files", action="store_true", help="Remove pytorch models after conversion (save memory when converting in batches).", ) parser.add_argument("--only_convert_finetuned_models", action="store_true", help="Only convert finetuned models.") args = parser.parse_args() # if args.pytorch_checkpoint_path is not None: # convert_pt_checkpoint_to_tf(args.model_type.lower(), # args.pytorch_checkpoint_path, # args.config_file if args.config_file is not None else args.pytorch_checkpoint_path, # args.tf_dump_path, # compare_with_pt_model=args.compare_with_pt_model, # use_cached_models=args.use_cached_models) # else: convert_all_pt_checkpoints_to_tf( args.model_type.lower() if args.model_type is not None else None, args.tf_dump_path, model_shortcut_names_or_path=[args.pytorch_checkpoint_path] if args.pytorch_checkpoint_path is not None else None, config_shortcut_names_or_path=[args.config_file] if args.config_file is not None else None, compare_with_pt_model=args.compare_with_pt_model, use_cached_models=args.use_cached_models, remove_cached_files=args.remove_cached_files, only_convert_finetuned_models=args.only_convert_finetuned_models, )
AdaMix/src/transformers/convert_pytorch_checkpoint_to_tf2.py/0
{ "file_path": "AdaMix/src/transformers/convert_pytorch_checkpoint_to_tf2.py", "repo_id": "AdaMix", "token_count": 7574 }
49
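For a single checkpoint, `convert_pt_checkpoint_to_tf` can also be driven from Python instead of the CLI. A sketch follows, assuming the shortcut name resolves through the archive maps above; the checkpoint name and output path are illustrative placeholders, and both PyTorch and TensorFlow must be installed.

```python
# Sketch only: "bert-base-cased" and the output path are placeholders.
from transformers.convert_pytorch_checkpoint_to_tf2 import convert_pt_checkpoint_to_tf

convert_pt_checkpoint_to_tf(
    model_type="bert",
    pytorch_checkpoint_path="bert-base-cased",   # shortcut name or local PyTorch checkpoint
    config_file="bert-base-cased",               # resolved via BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
    tf_dump_path="./bert-base-cased-tf_model.h5",
    compare_with_pt_model=True,                  # checks max |PT output - TF output|
)
```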
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from abc import ABC from typing import Callable, Iterable, List import numpy as np import torch from .file_utils import add_start_docstrings LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs: Additional logits processor specific kwargs. Return: :obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`: The processed prediction scores. """ class LogitsProcessor(ABC): """Abstract base class for all logit processors that can be applied during generation.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: """Torch method for processing logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class LogitsWarper(ABC): """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: """Torch method for warping logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class LogitsProcessorList(list): """ This class can be used to create a list of :class:`~transformers.LogitsProcessor` or :class:`~transformers.LogitsWarper` to subsequently process a :obj:`scores` input tensor. This class inherits from list and adds a specific `__call__` method to apply each :class:`~transformers.LogitsProcessor` or :class:`~transformers.LogitsWarper` to the inputs. """ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor: for processor in self: function_args = inspect.signature(processor.__call__).parameters if len(function_args) > 2: assert all( arg in kwargs for arg in list(function_args.keys())[2:] ), f"Make sure that all the required parameters: {list(function_args.keys())} for {processor.__class__} are passed to the logits processor."
scores = processor(input_ids, scores, **kwargs) else: scores = processor(input_ids, scores) return scores class MinLengthLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` enforcing a min-length by setting EOS probability to 0. Args: min_length (:obj:`int`): The minimum length below which the score of :obj:`eos_token_id` is set to :obj:`-float("Inf")`. eos_token_id (:obj:`int`): The id of the `end-of-sequence` token. """ def __init__(self, min_length: int, eos_token_id: int): if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(eos_token_id, int) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len < self.min_length: scores[:, self.eos_token_id] = -float("inf") return scores class TemperatureLogitsWarper(LogitsWarper): r""" :class:`transformers.LogitsWarper` for temperature (exponential scaling output probability distribution). Args: temperature (:obj:`float`): The value used to modulate the logits distribution. """ def __init__(self, temperature: float): if not isinstance(temperature, float) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") self.temperature = temperature def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: scores = scores / self.temperature return scores class RepetitionPenaltyLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` enforcing an exponential penalty on repeated sequences. Args: penalty (:obj:`float`): The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. """ def __init__(self, penalty: float): if not isinstance(penalty, float) or not (penalty > 0): raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") self.penalty = penalty def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: score = torch.gather(scores, 1, input_ids) # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability score = torch.where(score < 0, score * self.penalty, score / self.penalty) scores.scatter_(1, input_ids, score) return scores class TopPLogitsWarper(LogitsWarper): """ :class:`transformers.LogitsWarper` that performs top-p filtering, i.e. restricting to the most probable tokens whose cumulative probability adds up to :obj:`top_p` or higher. Args: top_p (:obj:`float`): If set to < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or higher are kept for generation. filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1): Minimum number of tokens that cannot be filtered.
""" def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: sorted_logits, sorted_indices = torch.sort(scores, descending=True) cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > self.top_p if self.min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove[..., : self.min_tokens_to_keep - 1] = 0 # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores class TopKLogitsWarper(LogitsWarper): r""" :class:`transformers.LogitsWarper` that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (:obj:`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = top_k self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: top_k = min(max(self.top_k, self.min_tokens_to_keep), scores.size(-1)) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None] scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int): generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] return generated_ngrams def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - ngram_size ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist()) return banned_ngrams.get(ngram_idx, []) def _calc_banned_ngram_tokens( ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int ) -> List[Iterable[int]]: """Copied from fairseq for no_repeat_ngram in beam_search""" if cur_len + 1 < ngram_size: # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos) banned_tokens = [ _get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len) for hypo_idx in range(num_hypos) ] return banned_tokens class NoRepeatNGramLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces no repetition of n-grams. See `Fairseq <https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345>`__. Args: ngram_size (:obj:`int`): All ngrams of size :obj:`ngram_size` can only occur once. """ def __init__(self, ngram_size: int): if not isinstance(ngram_size, int) or ngram_size <= 0: raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") self.ngram_size = ngram_size def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: num_batch_hypotheses = scores.shape[0] cur_len = input_ids.shape[-1] banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len) for i, banned_tokens in enumerate(banned_batch_tokens): scores[i, banned_tokens] = -float("inf") return scores class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces no repetition of encoder input ids n-grams for the decoder ids. See `ParlAI <https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/torch_generator_agent.py#L1350>`__. Args: encoder_ngram_size (:obj:`int`): All ngrams of size :obj:`ngram_size` can only occur within the encoder input ids. 
encoder_input_ids (:obj:`torch.LongTensor`): The encoder_input_ids that should not be repeated within the decoder ids. """ def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor): if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0: raise ValueError( f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}" ) self.ngram_size = encoder_ngram_size if len(encoder_input_ids.shape) == 1: encoder_input_ids = encoder_input_ids.unsqueeze(0) self.batch_size = encoder_input_ids.shape[0] self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # B x num_beams num_hypos = scores.shape[0] num_beams = num_hypos // self.batch_size cur_len = input_ids.shape[-1] banned_batch_tokens = [ _get_generated_ngrams( self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len ) for hypo_idx in range(num_hypos) ] for i, banned_tokens in enumerate(banned_batch_tokens): scores[i, banned_tokens] = -float("inf") return scores class NoBadWordsLogitsProcessor(LogitsProcessor): """ :class:`transformers.LogitsProcessor` that enforces that specified sequences will never be sampled. Args: bad_words_ids (:obj:`List[List[int]]`): List of list of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer(bad_word, add_prefix_space=True).input_ids`. eos_token_id (:obj:`int`): The id of the `end-of-sequence` token. """ def __init__(self, bad_words_ids: Iterable[Iterable[int]], eos_token_id: int): if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0: raise ValueError(f"`bad_words_ids` has to be a non-empty list, but is {bad_words_ids}.") if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids): raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.") if any( any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids) for bad_word_ids in bad_words_ids ): raise ValueError( f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}."
) self.bad_words_ids = list(filter(lambda bad_token_seq: bad_token_seq != [eos_token_id], bad_words_ids)) for banned_token_seq in self.bad_words_ids: assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format( bad_words_ids ) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: banned_tokens = self._calc_banned_bad_words_ids(input_ids) scores = self._set_scores_to_inf_for_banned_tokens(scores, banned_tokens) return scores def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool: if len(tokens) == 0: # if the bad word sequence is a single token, always ban it return True elif len(tokens) > len(prev_tokens): # if bad word tokens are longer than the previous input_ids they can't be equal return False elif prev_tokens[-len(tokens) :].tolist() == tokens: # if tokens match return True else: return False def _calc_banned_bad_words_ids(self, prev_input_ids: Iterable[int]) -> Iterable[int]: banned_tokens = [] for prev_input_ids_slice in prev_input_ids: banned_tokens_slice = [] for banned_token_seq in self.bad_words_ids: if self._tokens_match(prev_input_ids_slice, banned_token_seq[:-1]) is False: # if tokens do not match continue continue banned_tokens_slice.append(banned_token_seq[-1]) banned_tokens.append(banned_tokens_slice) return banned_tokens def _set_scores_to_inf_for_banned_tokens(self, scores: torch.Tensor, banned_tokens: List[List[int]]) -> torch.Tensor: """ Sets the banned token positions in the scores to `-inf` and returns the updated scores. The banned positions are collected as coordinates in the format [[batch index, vocabulary position], ...] before being turned into a mask. Args: scores: logits distribution of shape (batch size, vocabulary size) banned_tokens: list of list of tokens to ban of length (batch_size) """ banned_mask_list = [] for idx, batch_banned_tokens in enumerate(banned_tokens): for token in batch_banned_tokens: banned_mask_list.append([idx, token]) if not banned_mask_list: return scores banned_mask = torch.LongTensor(banned_mask_list) indices = torch.ones(len(banned_mask)) # A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates: # [ 0 1 1 ] # [ 0 0 0 ] # [ 1 0 0 ] banned_mask = ( torch.sparse.LongTensor(banned_mask.t(), indices, scores.size()).to(scores.device).to_dense().bool() ) scores = scores.masked_fill(banned_mask, -float("inf")) return scores class PrefixConstrainedLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces constrained generation and is useful for prefix-conditioned constrained generation. See `Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__ for more information. Args: prefix_allowed_tokens_fn: (:obj:`Callable[[int, torch.Tensor], List[int]]`): This function constrains the beam search to allowed tokens only at each step. This function takes 2 arguments :obj:`inputs_ids` and the batch ID :obj:`batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens :obj:`inputs_ids` and the batch ID :obj:`batch_id`.
""" def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int): self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn self._num_beams = num_beams def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: mask = torch.full_like(scores, -math.inf) for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])): for beam_id, sent in enumerate(beam_sent): mask[batch_id * self._num_beams + beam_id, self._prefix_allowed_tokens_fn(batch_id, sent)] = 0 return scores + mask class HammingDiversityLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces diverse beam search. Note that this logits processor is only effective for :meth:`transformers.PretrainedModel.group_beam_search`. See `Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. Args: diversity_penalty (:obj:`float`): This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is enabled. num_beams (:obj:`int`): Number of beams used for group beam search. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. num_beam_groups (:obj:`int`): Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. """ def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int): if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0): raise ValueError("`diversity_penalty` should be a float strictly larger than 0.") self._diversity_penalty = diversity_penalty if not isinstance(num_beams, int) or num_beams < 2: raise ValueError("`num_beams` should be an integer strictly larger than 1.") self._num_beams = num_beams if not isinstance(num_beam_groups, int) or num_beam_groups < 2: raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.") if num_beam_groups > num_beams: raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.") self._num_sub_beams = num_beams // num_beam_groups def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor, current_tokens: torch.LongTensor, beam_group_idx: int, ) -> torch.FloatTensor: # hamming diversity: penalise using same token in current group which was used in previous groups at # the same time step batch_size = current_tokens.shape[0] // self._num_beams group_start_idx = beam_group_idx * self._num_sub_beams group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams) group_size = group_end_idx - group_start_idx vocab_size = scores.shape[-1] if group_start_idx == 0: return scores for batch_idx in range(batch_size): # predicted tokens of last time step of previous groups previous_group_tokens = current_tokens[ batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx ] token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device) scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency return scores class ForcedBOSTokenLogitsProcessor(LogitsProcessor): r""" :class:`~transformers.LogitsProcessor` that enforces the specified token as the first generated token. 
Args: bos_token_id (:obj:`int`): The id of the token to force as the first generated token. """ def __init__(self, bos_token_id: int): self.bos_token_id = bos_token_id def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len == 1: num_tokens = scores.shape[1] scores[:, [i for i in range(num_tokens) if i != self.bos_token_id]] = -float("inf") scores[:, self.bos_token_id] = 0 return scores class ForcedEOSTokenLogitsProcessor(LogitsProcessor): r""" :class:`~transformers.LogitsProcessor` that enforces the specified token as the last generated token when :obj:`max_length` is reached. Args: max_length (:obj:`int`): The maximum length of the sequence to be generated. eos_token_id (:obj:`int`): The id of the token to force as the last generated token when :obj:`max_length` is reached. """ def __init__(self, max_length: int, eos_token_id: int): self.max_length = max_length self.eos_token_id = eos_token_id def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len == self.max_length - 1: num_tokens = scores.shape[1] scores[:, [i for i in range(num_tokens) if i != self.eos_token_id]] = -float("inf") scores[:, self.eos_token_id] = 0 return scores
AdaMix/src/transformers/generation_logits_process.py/0
{ "file_path": "AdaMix/src/transformers/generation_logits_process.py", "repo_id": "AdaMix", "token_count": 10570 }
50
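The processors above are designed to be composed: `generate()` builds a `LogitsProcessorList` and applies it to the raw logits at every decoding step. Here is a self-contained sketch on dummy tensors; the ids, vocabulary size, and parameter values are arbitrary examples.

```python
# Dummy-tensor sketch of chaining logits processors/warpers; values are arbitrary.
import torch
from transformers.generation_logits_process import (
    LogitsProcessorList,
    MinLengthLogitsProcessor,
    TemperatureLogitsWarper,
    TopKLogitsWarper,
)

input_ids = torch.tensor([[0, 5, 7]])   # (batch_size=1, sequence_length=3)
scores = torch.randn(1, 100)            # (batch_size, vocab_size=100)

processors = LogitsProcessorList(
    [
        MinLengthLogitsProcessor(min_length=10, eos_token_id=2),  # blocks EOS before length 10
        TemperatureLogitsWarper(temperature=0.7),                 # sharpens the distribution
        TopKLogitsWarper(top_k=50),                               # keeps the 50 best logits
    ]
)

scores = processors(input_ids, scores)
next_token = torch.argmax(scores, dim=-1)  # greedy pick over the processed scores
```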
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert BART checkpoint.""" import argparse import os from pathlib import Path import fairseq import torch from packaging import version from torch import nn from transformers import ( BartConfig, BartForConditionalGeneration, BartForSequenceClassification, BartModel, BartTokenizer, ) from transformers.utils import logging FAIRSEQ_MODELS = ["bart.large", "bart.large.mnli", "bart.large.cnn", "bart_xsum/model.pt"] extra_arch = {"bart.large": BartModel, "bart.large.mnli": BartForSequenceClassification} if version.parse(fairseq.__version__) < version.parse("0.9.0"): raise Exception("requires fairseq >= 0.9.0") logging.set_verbosity_info() logger = logging.get_logger(__name__) SAMPLE_TEXT = " Hello world! cécé herlolip" mnli_rename_keys = [ ("model.classification_heads.mnli.dense.weight", "classification_head.dense.weight"), ("model.classification_heads.mnli.dense.bias", "classification_head.dense.bias"), ("model.classification_heads.mnli.out_proj.weight", "classification_head.out_proj.weight"), ("model.classification_heads.mnli.out_proj.bias", "classification_head.out_proj.bias"), ] def remove_ignore_keys_(state_dict): ignore_keys = [ "encoder.version", "decoder.version", "model.encoder.version", "model.decoder.version", "_float_tensor", ] for k in ignore_keys: state_dict.pop(k, None) def rename_key(dct, old, new): val = dct.pop(old) dct[new] = val def load_xsum_checkpoint(checkpoint_path): """Checkpoint path should end in model.pt""" sd = torch.load(checkpoint_path, map_location="cpu") hub_interface = torch.hub.load("pytorch/fairseq", "bart.large.cnn").eval() hub_interface.model.load_state_dict(sd["model"]) return hub_interface def make_linear_from_emb(emb): vocab_size, emb_size = emb.weight.shape lin_layer = nn.Linear(vocab_size, emb_size, bias=False) lin_layer.weight.data = emb.weight.data return lin_layer @torch.no_grad() def convert_bart_checkpoint(checkpoint_path, pytorch_dump_folder_path, hf_checkpoint_name=None): """ Copy/paste/tweak model's weights to our BART structure.
""" if not os.path.exists(checkpoint_path): bart = torch.hub.load("pytorch/fairseq", checkpoint_path).eval() else: bart = load_xsum_checkpoint(checkpoint_path) bart.model.upgrade_state_dict(bart.model.state_dict()) if hf_checkpoint_name is None: hf_checkpoint_name = checkpoint_path.replace(".", "-") config = BartConfig.from_pretrained(hf_checkpoint_name) tokens = bart.encode(SAMPLE_TEXT).unsqueeze(0) tokens2 = BartTokenizer.from_pretrained(hf_checkpoint_name).encode(SAMPLE_TEXT, return_tensors="pt").unsqueeze(0) assert torch.eq(tokens, tokens2).all() if checkpoint_path == "bart.large.mnli": state_dict = bart.state_dict() remove_ignore_keys_(state_dict) state_dict["model.shared.weight"] = state_dict["model.decoder.embed_tokens.weight"] for src, dest in mnli_rename_keys: rename_key(state_dict, src, dest) model = BartForSequenceClassification(config).eval() model.load_state_dict(state_dict) fairseq_output = bart.predict("mnli", tokens, return_logits=True) new_model_outputs = model(tokens)[0] # logits else: # no classification heads to worry about state_dict = bart.model.state_dict() remove_ignore_keys_(state_dict) state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"] fairseq_output = bart.extract_features(tokens) if hf_checkpoint_name == "facebook/bart-large": model = BartModel(config).eval() model.load_state_dict(state_dict) new_model_outputs = model(tokens).model[0] else: model = BartForConditionalGeneration(config).eval() # an existing summarization ckpt model.model.load_state_dict(state_dict) if hasattr(model, "lm_head"): model.lm_head = make_linear_from_emb(model.model.shared) new_model_outputs = model.model(tokens)[0] # Check results assert fairseq_output.shape == new_model_outputs.shape assert (fairseq_output == new_model_outputs).all().item() Path(pytorch_dump_folder_path).mkdir(exist_ok=True) model.save_pretrained(pytorch_dump_folder_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "fairseq_path", type=str, help="bart.large, bart.large.cnn or a path to a model.pt on local filesystem." ) parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.") parser.add_argument( "--hf_config", default=None, type=str, help="Which huggingface architecture to use: bart-large-xsum" ) args = parser.parse_args() convert_bart_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, hf_checkpoint_name=args.hf_config)
AdaMix/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/bart/convert_bart_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "AdaMix", "token_count": 2187 }
51
# coding=utf-8
# Copyright 2020 The Google AI Language Team Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch BERT model specific for generation. """


import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss

from ...file_utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from ...modeling_outputs import BaseModelOutputWithPastAndCrossAttentions, CausalLMOutputWithCrossAttentions
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from ..bert.modeling_bert import BertEncoder
from .configuration_bert_generation import BertGenerationConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "google/bert_for_seq_generation_L-24_bbc_encoder"
_CONFIG_FOR_DOC = "BertGenerationConfig"
_TOKENIZER_FOR_DOC = "BertGenerationTokenizer"


def load_tf_weights_in_bert_generation(
    model, tf_hub_path, model_class, is_encoder_named_decoder=False, is_encoder=False
):
    try:
        import numpy as np
        import tensorflow.compat.v1 as tf
        import tensorflow_hub as hub
        import tensorflow_text  # noqa: F401

        tf.disable_eager_execution()
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
) raise tf_model = hub.Module(tf_hub_path) init = tf.global_variables_initializer() with tf.Session() as sess: init.run() all_variables = tf_model.variable_map keep_track_variables = all_variables.copy() for key in list(all_variables.keys()): if "global" in key: logger.info(f"Skipping {key}...") continue if not is_encoder: model_pointer = getattr(model, model_class) else: model_pointer = model is_embedding = False logger.info(f"Trying to match {key}...") # remove start_string = "module/bert/" sub_layers = key.split("/")[2:] if is_encoder_named_decoder and sub_layers[0] == "encoder": logger.info(f"Skipping encoder layer {key} for decoder") continue if is_encoder and sub_layers[0] == "decoder": logger.info(f"Skipping decoder layer {key} for encoder") continue for i, sub_layer in enumerate(sub_layers): if sub_layer == "embeddings": is_embedding = True elif sub_layer == "LayerNorm": is_embedding = False if "layer" in sub_layer: model_pointer = model_pointer.layer[int(sub_layer.split("_")[-1])] elif sub_layer in ["kernel", "gamma"]: model_pointer = model_pointer.weight elif sub_layer == "beta": model_pointer = model_pointer.bias elif sub_layer == "encdec": model_pointer = model_pointer.crossattention.self elif sub_layer == "encdec_output": model_pointer = model_pointer.crossattention.output elif is_encoder_named_decoder and sub_layer == "decoder": model_pointer = model_pointer.encoder else: if sub_layer == "attention" and "encdec" in sub_layers[i + 1]: continue try: model_pointer = getattr(model_pointer, sub_layer) except AttributeError: logger.info(f"Skipping to initialize {key} at {sub_layer}...") raise AttributeError array = np.asarray(sess.run(all_variables[key])) if not is_embedding: logger.info("Transposing numpy weight of shape {} for {}".format(array.shape, key)) array = np.transpose(array) else: model_pointer = model_pointer.weight try: assert ( model_pointer.shape == array.shape ), f"Pointer shape {model_pointer.shape} and array shape {array.shape} mismatched" except AssertionError as e: e.args += (model_pointer.shape, array.shape) raise logger.info(f"Initialize PyTorch weight {key}") model_pointer.data = torch.from_numpy(array.astype(np.float32)) keep_track_variables.pop(key, None) logger.info("Weights not copied to PyTorch model: {}".format(", ".join(keep_track_variables.keys()))) return model class BertGenerationEmbeddings(nn.Module): """Construct the embeddings from word and position embeddings.""" def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = torch.nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) self.dropout = nn.Dropout(config.hidden_dropout_prob) # position_ids (1, len position emb) is contiguous in memory and exported when serialized self.register_buffer("position_ids", torch.arange(config.max_position_embeddings).expand((1, -1))) def forward(self, input_ids=None, position_ids=None, inputs_embeds=None, past_key_values_length=0): if input_ids is not None: input_shape = input_ids.size() else: input_shape = inputs_embeds.size()[:-1] seq_length = input_shape[1] if position_ids is None: position_ids = self.position_ids[:, past_key_values_length : seq_length + past_key_values_length] if inputs_embeds is None: inputs_embeds = 
self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)

        embeddings = inputs_embeds + position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class BertGenerationPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = BertGenerationConfig
    base_model_prefix = "bert"
    _keys_to_ignore_on_load_missing = [r"position_ids"]

    def _init_weights(self, module):
        """ Initialize the weights """
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


BERT_GENERATION_START_DOCSTRING = r"""
    This model inherits from :class:`~transformers.PreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
    embeddings, pruning heads, etc.)

    This model is also a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`__
    subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to
    general usage and behavior.

    Parameters:
        config (:class:`~transformers.BertGenerationConfig`): Model configuration class with all the parameters of the
            model. Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the
            model weights.
"""

BERT_GENERATION_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.BertGenerationTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.__call__` and :meth:`transformers.PreTrainedTokenizer.encode` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`({0})`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        position_ids (:obj:`torch.LongTensor` of shape :obj:`({0})`, `optional`):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
            config.max_position_embeddings - 1]``.

            `What are position IDs? <../glossary.html#position-ids>`_
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`({0}, hidden_size)`, `optional`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
            representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
            vectors than the model's internal embedding lookup matrix.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare BertGeneration model transformer outputting raw hidden-states without any specific head on top.",
    BERT_GENERATION_START_DOCSTRING,
)
class BertGenerationEncoder(BertGenerationPreTrainedModel):
    """
    The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
    cross-attention is added between the self-attention layers, following the architecture described in `Attention is
    all you need <https://arxiv.org/abs/1706.03762>`__ by Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit,
    Llion Jones, Aidan N. Gomez, Lukasz Kaiser and Illia Polosukhin.

    This model should be used when leveraging Bert or Roberta checkpoints for the
    :class:`~transformers.EncoderDecoderModel` class as described in `Leveraging Pre-trained Checkpoints for Sequence
    Generation Tasks <https://arxiv.org/abs/1907.12461>`__ by Sascha Rothe, Shashi Narayan, and Aliaksei Severyn.

    To behave as a decoder the model needs to be initialized with the :obj:`is_decoder` argument of the configuration
    set to :obj:`True`. To be used in a Seq2Seq model, the model needs to be initialized with both the
    :obj:`is_decoder` argument and :obj:`add_cross_attention` set to :obj:`True`; an :obj:`encoder_hidden_states` is
    then expected as an input to the forward pass.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.embeddings = BertGenerationEmbeddings(config)
        self.encoder = BertEncoder(config)

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        tokenizer_class=_TOKENIZER_FOR_DOC,
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPastAndCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`): Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding. If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`. use_cache (:obj:`bool`, `optional`): If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up decoding (see :obj:`past_key_values`). """ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if self.config.is_decoder: use_cache = use_cache if use_cache is not None else self.config.use_cache else: use_cache = False if input_ids is not None and inputs_embeds is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif input_ids is not None: input_shape = input_ids.size() batch_size, seq_length = input_shape elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size, seq_length = input_shape else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device # past_key_values_length past_key_values_length = past_key_values[0][0].shape[2] if past_key_values is not None else 0 if attention_mask is None: attention_mask = torch.ones(((batch_size, seq_length + past_key_values_length)), device=device) # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length] # ourselves in which case we just need to make it broadcastable to all heads. 
        extended_attention_mask = None
        if not use_cache:
            extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(
                attention_mask, input_shape, device
            )

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.is_decoder and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_extended_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_extended_attention_mask = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicate we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            past_key_values_length=past_key_values_length,
        )

        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_extended_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=sequence_output,
            past_key_values=encoder_outputs.past_key_values,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
            cross_attentions=encoder_outputs.cross_attentions,
        )


class BertGenerationOnlyLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        logits = self.decoder(hidden_states)
        return logits


@add_start_docstrings(
    """BertGeneration Model with a `language modeling` head on top for CLM fine-tuning. """,
    BERT_GENERATION_START_DOCSTRING,
)
class BertGenerationDecoder(BertGenerationPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        if not config.is_decoder:
            logger.warning("If you want to use `BertGenerationDecoder` as a standalone, add `is_decoder=True`.")

        self.bert = BertGenerationEncoder(config)
        self.lm_head = BertGenerationOnlyLMHead(config)

        self.init_weights()

    def get_output_embeddings(self):
        return self.lm_head.decoder

    def set_output_embeddings(self, new_embeddings):
        self.lm_head.decoder = new_embeddings

    @add_start_docstrings_to_model_forward(BERT_GENERATION_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        labels=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        encoder_hidden_states (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
            Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
            the model is configured as a decoder.
        encoder_attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
            the cross-attention if the model is configured as a decoder. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

        labels (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
            ``[-100, 0, ..., config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100``
            are ignored (masked), the loss is only computed for the tokens with labels in ``[0, ...,
            config.vocab_size]``
        past_key_values (:obj:`tuple(tuple(torch.FloatTensor))` of length :obj:`config.n_layers` with each tuple having 4 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
            Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up
            decoding.

            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`).
Returns: Example:: >>> from transformers import BertGenerationTokenizer, BertGenerationDecoder, BertGenerationConfig >>> import torch >>> tokenizer = BertGenerationTokenizer.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder') >>> config = BertGenerationConfig.from_pretrained("google/bert_for_seq_generation_L-24_bbc_encoder") >>> config.is_decoder = True >>> model = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder', config=config) >>> inputs = tokenizer("Hello, my dog is cute", return_token_type_ids=False, return_tensors="pt") >>> outputs = model(**inputs) >>> prediction_logits = outputs.logits """ return_dict = return_dict if return_dict is not None else self.config.use_return_dict if labels is not None: use_cache = False outputs = self.bert( input_ids, attention_mask=attention_mask, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, ) sequence_output = outputs[0] prediction_scores = self.lm_head(sequence_output) lm_loss = None if labels is not None: # we are doing next-token prediction; shift prediction scores and input ids by one shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous() labels = labels[:, 1:].contiguous() loss_fct = CrossEntropyLoss() lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)) if not return_dict: output = (prediction_scores,) + outputs[1:] return ((lm_loss,) + output) if lm_loss is not None else output return CausalLMOutputWithCrossAttentions( loss=lm_loss, logits=prediction_scores, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, cross_attentions=outputs.cross_attentions, ) def prepare_inputs_for_generation(self, input_ids, past=None, attention_mask=None, **model_kwargs): input_shape = input_ids.shape # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly if attention_mask is None: attention_mask = input_ids.new_ones(input_shape) # cut decoder_input_ids if past is used if past is not None: input_ids = input_ids[:, -1:] return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past} def _reorder_cache(self, past, beam_idx): reordered_past = () for layer_past in past: reordered_past += (tuple(past_state.index_select(0, beam_idx) for past_state in layer_past),) return reordered_past
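

# Usage sketch (editorial addition): the `BertGenerationEncoder` docstring above
# describes combining the encoder and decoder via the
# :class:`~transformers.EncoderDecoderModel` class. A minimal sketch of that
# pairing; the checkpoint name is real, everything else is illustrative.
def _example_encoder_decoder_pairing():
    from transformers import EncoderDecoderModel  # imported lazily to avoid a circular import

    checkpoint = "google/bert_for_seq_generation_L-24_bbc_encoder"
    encoder = BertGenerationEncoder.from_pretrained(checkpoint)
    # `is_decoder` and `add_cross_attention` turn the same weights into a decoder.
    decoder = BertGenerationDecoder.from_pretrained(checkpoint, is_decoder=True, add_cross_attention=True)
    return EncoderDecoderModel(encoder=encoder, decoder=decoder)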
AdaMix/src/transformers/models/bert_generation/modeling_bert_generation.py/0
{ "file_path": "AdaMix/src/transformers/models/bert_generation/modeling_bert_generation.py", "repo_id": "AdaMix", "token_count": 11445 }
52
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Blenderbot checkpoint."""

import argparse

import torch

from transformers import BartConfig, BartForConditionalGeneration
from transformers.utils import logging


logging.set_verbosity_info()
logger = logging.get_logger(__name__)

PATTERNS = [
    ["attention", "attn"],
    ["encoder_attention", "encoder_attn"],
    ["q_lin", "q_proj"],
    ["k_lin", "k_proj"],
    ["v_lin", "v_proj"],
    ["out_lin", "out_proj"],
    ["norm_embeddings", "layernorm_embedding"],
    ["position_embeddings", "embed_positions"],
    ["embeddings", "embed_tokens"],
    ["ffn.lin", "fc"],
]


def rename_state_dict_key(k):
    if k == "embeddings.weight":
        return "shared.weight"

    for parlai_name, hf_name in PATTERNS:
        k = k.replace(parlai_name, hf_name)

    if k.startswith("encoder"):
        k = k.replace(".attn", ".self_attn")
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "final_layer_norm")
    elif k.startswith("decoder"):
        k = k.replace("norm1", "self_attn_layer_norm")
        k = k.replace("norm2", "encoder_attn_layer_norm")
        k = k.replace("norm3", "final_layer_norm")
    return k


def rename_layernorm_keys(sd):
    keys = [
        "model.encoder.layernorm_embedding.weight",
        "model.encoder.layernorm_embedding.bias",
        "model.decoder.layernorm_embedding.weight",
        "model.decoder.layernorm_embedding.bias",
    ]
    for k in keys:
        v = sd.pop(k)
        new_k = k.replace("layernorm_embedding", "layer_norm")
        assert new_k not in sd
        sd[new_k] = v


IGNORE_KEYS = ["START"]


@torch.no_grad()
def convert_parlai_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_json_path):
    """
    Copy/paste/tweak model's weights to our Blenderbot structure.
    """
    model = torch.load(checkpoint_path, map_location="cpu")
    sd = model["model"]
    cfg = BartConfig.from_json_file(config_json_path)
    m = BartForConditionalGeneration(cfg)
    valid_keys = m.model.state_dict().keys()
    failures = []
    mapping = {}
    for k, v in sd.items():
        if k in IGNORE_KEYS:
            continue

        new_k = rename_state_dict_key(k)
        if new_k not in valid_keys:
            failures.append([k, new_k])
        else:
            mapping[new_k] = v
    if cfg.normalize_before:  # Blenderbot-3B checkpoints. Rename layernorm_embedding -> layer_norm
        rename_layernorm_keys(sd)
    m.model.load_state_dict(mapping, strict=True)
    m.half()
    m.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--src_path", type=str, help="like blenderbot-model.bin")
    parser.add_argument("--save_dir", default="hf_blenderbot", type=str, help="Where to save converted model.")
    parser.add_argument(
        "--hf_config_json", default="blenderbot-3b-config.json", type=str, help="Path to config to use"
    )
    args = parser.parse_args()
    convert_parlai_checkpoint(args.src_path, args.save_dir, args.hf_config_json)
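

# Illustration (editorial addition): a few concrete renamings produced by
# `rename_state_dict_key` under the PATTERNS table above. The keys are made-up
# examples of typical ParlAI state-dict entries, not keys read from a checkpoint.
def _example_key_renaming():
    assert rename_state_dict_key("embeddings.weight") == "shared.weight"
    assert rename_state_dict_key("encoder.attention.q_lin.weight") == "encoder.self_attn.q_proj.weight"
    assert rename_state_dict_key("decoder.norm3.bias") == "decoder.final_layer_norm.bias"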
AdaMix/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/blenderbot/convert_blenderbot_original_pytorch_checkpoint_to_pytorch.py", "repo_id": "AdaMix", "token_count": 1496 }
53
# coding=utf-8
# Copyright The HuggingFace team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ConvBERT model configuration """

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json",
    "YituTech/conv-bert-medium-small": "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json",
    "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json",
    # See all ConvBERT models at https://huggingface.co/models?filter=convbert
}


class ConvBertConfig(PretrainedConfig):
    r"""
    This is the configuration class to store the configuration of a :class:`~transformers.ConvBertModel`. It is used
    to instantiate a ConvBERT model according to the specified arguments, defining the model architecture.
    Instantiating a configuration with the defaults will yield a similar configuration to that of the ConvBERT
    `conv-bert-base <https://huggingface.co/YituTech/conv-bert-base>`__ architecture.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
    outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.

    Args:
        vocab_size (:obj:`int`, `optional`, defaults to 30522):
            Vocabulary size of the ConvBERT model. Defines the number of different tokens that can be represented by
            the :obj:`inputs_ids` passed when calling :class:`~transformers.ConvBertModel` or
            :class:`~transformers.TFConvBertModel`.
        hidden_size (:obj:`int`, `optional`, defaults to 768):
            Dimensionality of the encoder layers and the pooler layer.
        num_hidden_layers (:obj:`int`, `optional`, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        num_attention_heads (:obj:`int`, `optional`, defaults to 12):
            Number of attention heads for each attention layer in the Transformer encoder.
        intermediate_size (:obj:`int`, `optional`, defaults to 3072):
            Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
        hidden_act (:obj:`str` or :obj:`function`, `optional`, defaults to :obj:`"gelu"`):
            The non-linear activation function (function or string) in the encoder and pooler. If string,
            :obj:`"gelu"`, :obj:`"relu"`, :obj:`"selu"` and :obj:`"gelu_new"` are supported.
        hidden_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob (:obj:`float`, `optional`, defaults to 0.1):
            The dropout ratio for the attention probabilities.
        max_position_embeddings (:obj:`int`, `optional`, defaults to 512):
            The maximum sequence length that this model might ever be used with. Typically set this to something large
            just in case (e.g., 512 or 1024 or 2048).
        type_vocab_size (:obj:`int`, `optional`, defaults to 2):
            The vocabulary size of the :obj:`token_type_ids` passed when calling :class:`~transformers.ConvBertModel`
            or :class:`~transformers.TFConvBertModel`.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12):
            The epsilon used by the layer normalization layers.
        head_ratio (:obj:`int`, `optional`, defaults to 2):
            Ratio gamma to reduce the number of attention heads.
        num_groups (:obj:`int`, `optional`, defaults to 1):
            The number of groups for grouped linear layers in the ConvBERT model.
        conv_kernel_size (:obj:`int`, `optional`, defaults to 9):
            The size of the convolutional kernel.


    Example::

        >>> from transformers import ConvBertModel, ConvBertConfig

        >>> # Initializing a ConvBERT convbert-base-uncased style configuration
        >>> configuration = ConvBertConfig()

        >>> # Initializing a model from the convbert-base-uncased style configuration
        >>> model = ConvBertModel(configuration)

        >>> # Accessing the model configuration
        >>> configuration = model.config
    """

    model_type = "convbert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        is_encoder_decoder=False,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        embedding_size=768,
        head_ratio=2,
        conv_kernel_size=9,
        num_groups=1,
        **kwargs,
    ):
        super().__init__(
            pad_token_id=pad_token_id,
            is_encoder_decoder=is_encoder_decoder,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
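

# Illustration (editorial addition): with the defaults above, `head_ratio`
# halves the number of heads that perform full self-attention, which is the
# reduction the ConvBERT paper calls gamma. The arithmetic below only restates
# the documented ratio; it is a sketch, not a guarantee about model internals.
def _example_head_ratio():
    config = ConvBertConfig()  # num_attention_heads=12, head_ratio=2 by default
    reduced_heads = config.num_attention_heads // config.head_ratio
    assert reduced_heads == 6
    return config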
AdaMix/src/transformers/models/convbert/configuration_convbert.py/0
{ "file_path": "AdaMix/src/transformers/models/convbert/configuration_convbert.py", "repo_id": "AdaMix", "token_count": 2640 }
54
# coding=utf-8 # Copyright 2018 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert ELECTRA checkpoint.""" import argparse import torch from transformers import ElectraConfig, ElectraForMaskedLM, ElectraForPreTraining, load_tf_weights_in_electra from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path, discriminator_or_generator): # Initialise PyTorch model config = ElectraConfig.from_json_file(config_file) print("Building PyTorch model from configuration: {}".format(str(config))) if discriminator_or_generator == "discriminator": model = ElectraForPreTraining(config) elif discriminator_or_generator == "generator": model = ElectraForMaskedLM(config) else: raise ValueError("The discriminator_or_generator argument should be either 'discriminator' or 'generator'") # Load weights from tf checkpoint load_tf_weights_in_electra( model, config, tf_checkpoint_path, discriminator_or_generator=discriminator_or_generator ) # Save pytorch-model print("Save PyTorch model to {}".format(pytorch_dump_path)) torch.save(model.state_dict(), pytorch_dump_path) if __name__ == "__main__": parser = argparse.ArgumentParser() # Required parameters parser.add_argument( "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path." ) parser.add_argument( "--config_file", default=None, type=str, required=True, help="The config json file corresponding to the pre-trained model. \n" "This specifies the model architecture.", ) parser.add_argument( "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model." ) parser.add_argument( "--discriminator_or_generator", default=None, type=str, required=True, help="Whether to export the generator or the discriminator. Should be a string, either 'discriminator' or " "'generator'.", ) args = parser.parse_args() convert_tf_checkpoint_to_pytorch( args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path, args.discriminator_or_generator )
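

# Usage sketch (editorial addition): the script stores a plain ``state_dict``
# via ``torch.save``, so the dump can be reloaded by rebuilding the model from
# the same config file. Both default paths below are hypothetical placeholders.
def _example_reload_converted_electra(config_file="electra_config.json", pytorch_dump_path="electra_pytorch.bin"):
    config = ElectraConfig.from_json_file(config_file)
    model = ElectraForPreTraining(config)  # use ElectraForMaskedLM for a generator dump
    model.load_state_dict(torch.load(pytorch_dump_path))
    return model.eval()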
AdaMix/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/electra/convert_electra_original_tf_checkpoint_to_pytorch.py", "repo_id": "AdaMix", "token_count": 1005 }
55
# coding=utf-8 # Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ OpenAI GPT-2 configuration """ from ...configuration_utils import PretrainedConfig from ...utils import logging logger = logging.get_logger(__name__) GPT2_PRETRAINED_CONFIG_ARCHIVE_MAP = { "gpt2": "https://huggingface.co/gpt2/resolve/main/config.json", "gpt2-medium": "https://huggingface.co/gpt2-medium/resolve/main/config.json", "gpt2-large": "https://huggingface.co/gpt2-large/resolve/main/config.json", "gpt2-xl": "https://huggingface.co/gpt2-xl/resolve/main/config.json", "distilgpt2": "https://huggingface.co/distilgpt2/resolve/main/config.json", } class GPT2Config(PretrainedConfig): """ This is the configuration class to store the configuration of a :class:`~transformers.GPT2Model` or a :class:`~transformers.TFGPT2Model`. It is used to instantiate a GPT-2 model according to the specified arguments, defining the model architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of the GPT-2 `small <https://huggingface.co/gpt2>`__ architecture. Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information. Args: vocab_size (:obj:`int`, `optional`, defaults to 50257): Vocabulary size of the GPT-2 model. Defines the number of different tokens that can be represented by the :obj:`inputs_ids` passed when calling :class:`~transformers.GPT2Model` or :class:`~transformers.TFGPT2Model`. n_positions (:obj:`int`, `optional`, defaults to 1024): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). n_ctx (:obj:`int`, `optional`, defaults to 1024): Dimensionality of the causal mask (usually same as n_positions). n_embd (:obj:`int`, `optional`, defaults to 768): Dimensionality of the embeddings and hidden states. n_layer (:obj:`int`, `optional`, defaults to 12): Number of hidden layers in the Transformer encoder. n_head (:obj:`int`, `optional`, defaults to 12): Number of attention heads for each attention layer in the Transformer encoder. n_inner (:obj:`int`, `optional`, defaults to None): Dimensionality of the inner feed-forward layers. :obj:`None` will set it to 4 times n_embd activation_function (:obj:`str`, `optional`, defaults to :obj:`"gelu"`): Activation function, to be selected in the list :obj:`["relu", "silu", "gelu", "tanh", "gelu_new"]`. resid_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout probability for all fully connected layers in the embeddings, encoder, and pooler. embd_pdrop (:obj:`int`, `optional`, defaults to 0.1): The dropout ratio for the embeddings. attn_pdrop (:obj:`float`, `optional`, defaults to 0.1): The dropout ratio for the attention. 
        layer_norm_epsilon (:obj:`float`, `optional`, defaults to 1e-5):
            The epsilon to use in the layer normalization layers.
        initializer_range (:obj:`float`, `optional`, defaults to 0.02):
            The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
        summary_type (:obj:`string`, `optional`, defaults to :obj:`"cls_index"`):
            Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
            and :class:`~transformers.TFGPT2DoubleHeadsModel`.

            Has to be one of the following options:

                - :obj:`"last"`: Take the last token hidden state (like XLNet).
                - :obj:`"first"`: Take the first token hidden state (like BERT).
                - :obj:`"mean"`: Take the mean of all tokens hidden states.
                - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2).
                - :obj:`"attn"`: Not implemented now, use multi-head attention.
        summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
            and :class:`~transformers.TFGPT2DoubleHeadsModel`.

            Whether or not to add a projection after the vector extraction.
        summary_activation (:obj:`str`, `optional`):
            Argument used when doing sequence summary. Used for the multiple choice head in
            :class:`~transformers.GPT2DoubleHeadsModel`.

            Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation.
        summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
            and :class:`~transformers.TFGPT2DoubleHeadsModel`.

            Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes.
        summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1):
            Argument used when doing sequence summary, used in the models :class:`~transformers.GPT2DoubleHeadsModel`
            and :class:`~transformers.TFGPT2DoubleHeadsModel`.

            The dropout ratio to be used after the projection and activation.
        gradient_checkpointing (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to use gradient checkpointing to save memory at the expense of a slower backward pass.
        use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not the model should return the last key/values attentions (not used by all models).
Example:: >>> from transformers import GPT2Model, GPT2Config >>> # Initializing a GPT2 configuration >>> configuration = GPT2Config() >>> # Initializing a model from the configuration >>> model = GPT2Model(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "gpt2" keys_to_ignore_at_inference = ["past_key_values"] def __init__( self, vocab_size=50257, n_positions=1024, n_ctx=1024, n_embd=768, n_layer=12, n_head=12, n_inner=None, activation_function="gelu_new", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, gradient_checkpointing=False, use_cache=True, bos_token_id=50256, eos_token_id=50256, **kwargs ): super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) self.vocab_size = vocab_size self.n_ctx = n_ctx self.n_positions = n_positions self.n_embd = n_embd self.n_layer = n_layer self.n_head = n_head self.n_inner = n_inner self.activation_function = activation_function self.resid_pdrop = resid_pdrop self.embd_pdrop = embd_pdrop self.attn_pdrop = attn_pdrop self.layer_norm_epsilon = layer_norm_epsilon self.initializer_range = initializer_range self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_first_dropout = summary_first_dropout self.summary_proj_to_labels = summary_proj_to_labels self.gradient_checkpointing = gradient_checkpointing self.use_cache = use_cache self.bos_token_id = bos_token_id self.eos_token_id = eos_token_id @property def max_position_embeddings(self): return self.n_positions @property def hidden_size(self): return self.n_embd @property def num_attention_heads(self): return self.n_head @property def num_hidden_layers(self): return self.n_layer
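

# Illustration (editorial addition): the properties above alias the GPT-2
# specific attribute names to the generic configuration names used elsewhere in
# the library, so either spelling can be read from the same config object.
def _example_config_aliases():
    config = GPT2Config()
    assert config.hidden_size == config.n_embd == 768
    assert config.num_hidden_layers == config.n_layer == 12
    assert config.max_position_embeddings == config.n_positions == 1024
    return config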
AdaMix/src/transformers/models/gpt2/configuration_gpt2.py/0
{ "file_path": "AdaMix/src/transformers/models/gpt2/configuration_gpt2.py", "repo_id": "AdaMix", "token_count": 3605 }
56
# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convert Longformer checkpoint."""


import argparse

import pytorch_lightning as pl
import torch

from transformers import LongformerForQuestionAnswering, LongformerModel


class LightningModel(pl.LightningModule):
    def __init__(self, model):
        super().__init__()
        self.model = model
        self.num_labels = 2
        self.qa_outputs = torch.nn.Linear(self.model.config.hidden_size, self.num_labels)

    # implement only because lightning requires to do so
    def forward(self):
        pass


def convert_longformer_qa_checkpoint_to_pytorch(
    longformer_model: str, longformer_question_answering_ckpt_path: str, pytorch_dump_folder_path: str
):

    # load longformer model from model identifier
    longformer = LongformerModel.from_pretrained(longformer_model)
    lightning_model = LightningModel(longformer)

    ckpt = torch.load(longformer_question_answering_ckpt_path, map_location=torch.device("cpu"))
    lightning_model.load_state_dict(ckpt["state_dict"])

    # init longformer question answering model
    longformer_for_qa = LongformerForQuestionAnswering.from_pretrained(longformer_model)

    # transfer weights
    longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict())
    longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict())
    longformer_for_qa.eval()

    # save model
    longformer_for_qa.save_pretrained(pytorch_dump_folder_path)

    print("Conversion successful. Model saved under {}".format(pytorch_dump_folder_path))


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--longformer_model",
        default=None,
        type=str,
        required=True,
        help="Model identifier of the Longformer model. Should be either `longformer-base-4096` or `longformer-large-4096`.",
    )
    parser.add_argument(
        "--longformer_question_answering_ckpt_path",
        default=None,
        type=str,
        required=True,
        help="Path to the official PyTorch Lightning checkpoint.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_longformer_qa_checkpoint_to_pytorch(
        args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
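

# Usage sketch (editorial addition): after the conversion above, the
# question-answering model can be reloaded straight from the dump folder.
# "longformer_qa_converted" is a hypothetical output path.
def _example_reload_converted_qa_model():
    model = LongformerForQuestionAnswering.from_pretrained("longformer_qa_converted")
    return model.eval()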
AdaMix/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/longformer/convert_longformer_original_pytorch_lightning_to_pytorch.py", "repo_id": "AdaMix", "token_count": 1066 }
57
# coding=utf-8 # Copyright 2020 The Google AI Team, Stanford University and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from ..bert.tokenization_bert_fast import BertTokenizerFast from .tokenization_lxmert import LxmertTokenizer VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt", }, "tokenizer_file": { "unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json", }, } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "unc-nlp/lxmert-base-uncased": 512, } PRETRAINED_INIT_CONFIGURATION = { "unc-nlp/lxmert-base-uncased": {"do_lower_case": True}, } class LxmertTokenizerFast(BertTokenizerFast): r""" Construct a "fast" LXMERT tokenizer (backed by HuggingFace's `tokenizers` library). :class:`~transformers.LxmertTokenizerFast` is identical to :class:`~transformers.BertTokenizerFast` and runs end-to-end tokenization: punctuation splitting and wordpiece. Refer to superclass :class:`~transformers.BertTokenizerFast` for usage examples and documentation concerning parameters. """ vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION slow_tokenizer_class = LxmertTokenizer
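

# Usage sketch (editorial addition): the fast tokenizer is loaded and used like
# its BERT counterpart; the checkpoint name matches the maps defined above.
def _example_tokenize():
    tokenizer = LxmertTokenizerFast.from_pretrained("unc-nlp/lxmert-base-uncased")
    return tokenizer("What is the man holding?")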
AdaMix/src/transformers/models/lxmert/tokenization_lxmert_fast.py/0
{ "file_path": "AdaMix/src/transformers/models/lxmert/tokenization_lxmert_fast.py", "repo_id": "AdaMix", "token_count": 773 }
58
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import os
from pathlib import Path
from typing import List, Tuple

from transformers.models.marian.convert_marian_to_pytorch import (
    FRONT_MATTER_TEMPLATE,
    _parse_readme,
    convert_all_sentencepiece_models,
    get_system_metadata,
    remove_prefix,
    remove_suffix,
)


try:
    import pandas as pd
except ImportError:
    pass

DEFAULT_REPO = "Tatoeba-Challenge"
DEFAULT_MODEL_DIR = os.path.join(DEFAULT_REPO, "models")
LANG_CODE_URL = "https://datahub.io/core/language-codes/r/language-codes-3b2.csv"
ISO_URL = "https://cdn-datasets.huggingface.co/language_codes/iso-639-3.csv"
ISO_PATH = "lang_code_data/iso-639-3.csv"
LANG_CODE_PATH = "lang_code_data/language-codes-3b2.csv"


class TatoebaConverter:
    """
    Convert Tatoeba-Challenge models to huggingface format.

    Steps:

        1. convert numpy state dict to hf format (same code as OPUS-MT-Train conversion).
        2. rename opus model to huggingface format. This means replace each alpha3 code with an alpha2 code if a
           unique one exists. e.g. aav-eng -> aav-en, heb-eng -> he-en
        3. write a model card containing the original Tatoeba-Challenge/README.md and extra info about alpha3 group
           members.
    """

    def __init__(self, save_dir="marian_converted"):
        assert Path(DEFAULT_REPO).exists(), "need git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git"
        reg = self.make_tatoeba_registry()
        self.download_metadata()
        self.registry = reg
        reg_df = pd.DataFrame(reg, columns=["id", "prepro", "url_model", "url_test_set"])
        assert reg_df.id.value_counts().max() == 1
        reg_df = reg_df.set_index("id")
        reg_df["src"] = reg_df.reset_index().id.apply(lambda x: x.split("-")[0]).values
        reg_df["tgt"] = reg_df.reset_index().id.apply(lambda x: x.split("-")[1]).values

        released_cols = [
            "url_base",
            "pair",  # (ISO639-3/ISO639-5 codes),
            "short_pair",  # (reduced codes),
            "chrF2_score",
            "bleu",
            "brevity_penalty",
            "ref_len",
            "src_name",
            "tgt_name",
        ]

        released = pd.read_csv("Tatoeba-Challenge/models/released-models.txt", sep="\t", header=None).iloc[:-1]
        released.columns = released_cols
        released["fname"] = released["url_base"].apply(
            lambda x: remove_suffix(remove_prefix(x, "https://object.pouta.csc.fi/Tatoeba-Challenge/opus"), ".zip")
        )
        released["2m"] = released.fname.str.startswith("2m")
        released["date"] = pd.to_datetime(
            released["fname"].apply(lambda x: remove_prefix(remove_prefix(x, "2m-"), "-"))
        )
        released["base_ext"] = released.url_base.apply(lambda x: Path(x).name)
        reg_df["base_ext"] = reg_df.url_model.apply(lambda x: Path(x).name)

        metadata_new = reg_df.reset_index().merge(released.rename(columns={"pair": "id"}), on=["base_ext", "id"])

        metadata_renamer = {"src": "src_alpha3", "tgt": "tgt_alpha3", "id": "long_pair", "date": "train_date"}
        metadata_new = metadata_new.rename(columns=metadata_renamer)

        metadata_new["src_alpha2"] = metadata_new.short_pair.apply(lambda x: x.split("-")[0])
        metadata_new["tgt_alpha2"] = metadata_new.short_pair.apply(lambda x: x.split("-")[1])
        DROP_COLS_BOTH = ["url_base", "base_ext",
"fname"] metadata_new = metadata_new.drop(DROP_COLS_BOTH, 1) metadata_new["prefer_old"] = metadata_new.long_pair.isin([]) self.metadata = metadata_new assert self.metadata.short_pair.value_counts().max() == 1, "Multiple metadata entries for a short pair" self.metadata = self.metadata.set_index("short_pair") # wget.download(LANG_CODE_URL) mapper = pd.read_csv(LANG_CODE_PATH) mapper.columns = ["a3", "a2", "ref"] self.iso_table = pd.read_csv(ISO_PATH, sep="\t").rename(columns=lambda x: x.lower()) more_3_to_2 = self.iso_table.set_index("id").part1.dropna().to_dict() more_3_to_2.update(mapper.set_index("a3").a2.to_dict()) self.alpha3_to_alpha2 = more_3_to_2 self.model_card_dir = Path(save_dir) self.constituents = GROUP_MEMBERS def convert_models(self, tatoeba_ids, dry_run=False): entries_to_convert = [x for x in self.registry if x[0] in tatoeba_ids] converted_paths = convert_all_sentencepiece_models(entries_to_convert, dest_dir=self.model_card_dir) for path in converted_paths: long_pair = remove_prefix(path.name, "opus-mt-").split("-") # eg. heb-eng assert len(long_pair) == 2 new_p_src = self.get_two_letter_code(long_pair[0]) new_p_tgt = self.get_two_letter_code(long_pair[1]) hf_model_id = f"opus-mt-{new_p_src}-{new_p_tgt}" new_path = path.parent.joinpath(hf_model_id) # opus-mt-he-en os.rename(str(path), str(new_path)) self.write_model_card(hf_model_id, dry_run=dry_run) def get_two_letter_code(self, three_letter_code): return self.alpha3_to_alpha2.get(three_letter_code, three_letter_code) def expand_group_to_two_letter_codes(self, grp_name): return [self.get_two_letter_code(x) for x in self.constituents[grp_name]] def get_tags(self, code, ref_name): if len(code) == 2: assert "languages" not in ref_name, f"{code}: {ref_name}" return [code], False elif "languages" in ref_name or len(self.constituents.get(code, [])) > 1: group = self.expand_group_to_two_letter_codes(code) group.append(code) return group, True else: # zho-> zh print(f"Three letter monolingual code: {code}") return [code], False def resolve_lang_code(self, r) -> Tuple[List[str], str, str]: """R is a row in ported""" short_pair = r.short_pair src, tgt = short_pair.split("-") src_tags, src_multilingual = self.get_tags(src, r.src_name) assert isinstance(src_tags, list) tgt_tags, tgt_multilingual = self.get_tags(tgt, r.tgt_name) assert isinstance(tgt_tags, list) return dedup(src_tags + tgt_tags), src_multilingual, tgt_multilingual def write_model_card( self, hf_model_id: str, repo_root=DEFAULT_REPO, dry_run=False, ) -> str: """ Copy the most recent model's readme section from opus, and add metadata. 
        upload command: aws s3 sync model_card_dir s3://models.huggingface.co/bert/Helsinki-NLP/ --dryrun
        """
        short_pair = remove_prefix(hf_model_id, "opus-mt-")
        extra_metadata = self.metadata.loc[short_pair].drop("2m")
        extra_metadata["short_pair"] = short_pair
        lang_tags, src_multilingual, tgt_multilingual = self.resolve_lang_code(extra_metadata)
        opus_name = f"{extra_metadata.src_alpha3}-{extra_metadata.tgt_alpha3}"
        # opus_name: str = self.convert_hf_name_to_opus_name(hf_model_name)

        assert repo_root in ("OPUS-MT-train", "Tatoeba-Challenge")
        opus_readme_path = Path(repo_root).joinpath("models", opus_name, "README.md")
        assert opus_readme_path.exists(), f"Readme file {opus_readme_path} not found"

        opus_src, opus_tgt = [x.split("+") for x in opus_name.split("-")]

        readme_url = f"https://github.com/Helsinki-NLP/{repo_root}/tree/master/models/{opus_name}/README.md"

        s, t = ",".join(opus_src), ",".join(opus_tgt)

        metadata = {
            "hf_name": short_pair,
            "source_languages": s,
            "target_languages": t,
            "opus_readme_url": readme_url,
            "original_repo": repo_root,
            "tags": ["translation"],
            "languages": lang_tags,
        }
        lang_tags = l2front_matter(lang_tags)
        metadata["src_constituents"] = self.constituents[s]
        metadata["tgt_constituents"] = self.constituents[t]
        metadata["src_multilingual"] = src_multilingual
        metadata["tgt_multilingual"] = tgt_multilingual

        metadata.update(extra_metadata)
        metadata.update(get_system_metadata(repo_root))

        # combine with Tatoeba markdown

        extra_markdown = f"### {short_pair}\n\n* source group: {metadata['src_name']} \n* target group: {metadata['tgt_name']} \n* OPUS readme: [{opus_name}]({readme_url})\n"

        content = opus_readme_path.open().read()
        content = content.split("\n# ")[-1]  # Get the lowest level 1 header in the README -- the most recent model.
        splat = content.split("*")[2:]

        content = "*".join(splat)
        # BETTER FRONT MATTER LOGIC

        content = (
            FRONT_MATTER_TEMPLATE.format(lang_tags)
            + extra_markdown
            + "\n* "
            + content.replace("download", "download original weights")
        )

        items = "\n\n".join([f"- {k}: {v}" for k, v in metadata.items()])
        sec3 = "\n### System Info: \n" + items
        content += sec3
        if dry_run:
            return content, metadata

        sub_dir = self.model_card_dir / hf_model_id
        sub_dir.mkdir(exist_ok=True)
        dest = sub_dir / "README.md"
        dest.open("w").write(content)
        pd.Series(metadata).to_json(sub_dir / "metadata.json")
        return content, metadata

    def download_metadata(self):
        Path(LANG_CODE_PATH).parent.mkdir(exist_ok=True)
        import wget

        if not os.path.exists(ISO_PATH):
            wget.download(ISO_URL, ISO_PATH)
        if not os.path.exists(LANG_CODE_PATH):
            wget.download(LANG_CODE_URL, LANG_CODE_PATH)

    @staticmethod
    def make_tatoeba_registry(repo_path=DEFAULT_MODEL_DIR):
        if not (Path(repo_path) / "zho-eng" / "README.md").exists():
            raise ValueError(
                f"repo_path:{repo_path} does not exist: "
                "You must run: git clone git@github.com:Helsinki-NLP/Tatoeba-Challenge.git before calling."
            )
        results = {}
        for p in Path(repo_path).iterdir():
            if len(p.name) != 7:
                continue
            lns = list(open(p / "README.md").readlines())
            results[p.name] = _parse_readme(lns)
        return [(k, v["pre-processing"], v["download"], v["download"][:-4] + ".test.txt") for k, v in results.items()]


GROUP_MEMBERS = {
    # three letter code -> (group/language name, {constituents...}
    # if this language is on the target side the constituents can be used as target language codes.
    # if the language is on the source side they are supported natively without special codes.
"aav": ("Austro-Asiatic languages", {"hoc", "hoc_Latn", "kha", "khm", "khm_Latn", "mnw", "vie", "vie_Hani"}), "afa": ( "Afro-Asiatic languages", { "acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "hau_Latn", "heb", "kab", "mlt", "rif_Latn", "shy_Latn", "som", "thv", "tir", }, ), "afr": ("Afrikaans", {"afr"}), "alv": ( "Atlantic-Congo languages", { "ewe", "fuc", "fuv", "ibo", "kin", "lin", "lug", "nya", "run", "sag", "sna", "swh", "toi_Latn", "tso", "umb", "wol", "xho", "yor", "zul", }, ), "ara": ("Arabic", {"afb", "apc", "apc_Latn", "ara", "ara_Latn", "arq", "arq_Latn", "arz"}), "art": ( "Artificial languages", { "afh_Latn", "avk_Latn", "dws_Latn", "epo", "ido", "ido_Latn", "ile_Latn", "ina_Latn", "jbo", "jbo_Cyrl", "jbo_Latn", "ldn_Latn", "lfn_Cyrl", "lfn_Latn", "nov_Latn", "qya", "qya_Latn", "sjn_Latn", "tlh_Latn", "tzl", "tzl_Latn", "vol_Latn", }, ), "aze": ("Azerbaijani", {"aze_Latn"}), "bat": ("Baltic languages", {"lit", "lav", "prg_Latn", "ltg", "sgs"}), "bel": ("Belarusian", {"bel", "bel_Latn"}), "ben": ("Bengali", {"ben"}), "bnt": ( "Bantu languages", {"kin", "lin", "lug", "nya", "run", "sna", "swh", "toi_Latn", "tso", "umb", "xho", "zul"}, ), "bul": ("Bulgarian", {"bul", "bul_Latn"}), "cat": ("Catalan", {"cat"}), "cau": ("Caucasian languages", {"abk", "kat", "che", "ady"}), "ccs": ("South Caucasian languages", {"kat"}), "ceb": ("Cebuano", {"ceb"}), "cel": ("Celtic languages", {"gla", "gle", "bre", "cor", "glv", "cym"}), "ces": ("Czech", {"ces"}), "cpf": ("Creoles and pidgins, French‑based", {"gcf_Latn", "hat", "mfe"}), "cpp": ( "Creoles and pidgins, Portuguese-based", {"zsm_Latn", "ind", "pap", "min", "tmw_Latn", "max_Latn", "zlm_Latn"}, ), "cus": ("Cushitic languages", {"som"}), "dan": ("Danish", {"dan"}), "deu": ("German", {"deu"}), "dra": ("Dravidian languages", {"tam", "kan", "mal", "tel"}), "ell": ("Modern Greek (1453-)", {"ell"}), "eng": ("English", {"eng"}), "epo": ("Esperanto", {"epo"}), "est": ("Estonian", {"est"}), "euq": ("Basque (family)", {"eus"}), "eus": ("Basque", {"eus"}), "fin": ("Finnish", {"fin"}), "fiu": ( "Finno-Ugrian languages", { "est", "fin", "fkv_Latn", "hun", "izh", "kpv", "krl", "liv_Latn", "mdf", "mhr", "myv", "sma", "sme", "udm", "vep", "vro", }, ), "fra": ("French", {"fra"}), "gem": ( "Germanic languages", { "afr", "ang_Latn", "dan", "deu", "eng", "enm_Latn", "fao", "frr", "fry", "gos", "got_Goth", "gsw", "isl", "ksh", "ltz", "nds", "nld", "nno", "nob", "nob_Hebr", "non_Latn", "pdc", "sco", "stq", "swe", "swg", "yid", }, ), "gle": ("Irish", {"gle"}), "glg": ("Galician", {"glg"}), "gmq": ("North Germanic languages", {"dan", "nob", "nob_Hebr", "swe", "isl", "nno", "non_Latn", "fao"}), "gmw": ( "West Germanic languages", { "afr", "ang_Latn", "deu", "eng", "enm_Latn", "frr", "fry", "gos", "gsw", "ksh", "ltz", "nds", "nld", "pdc", "sco", "stq", "swg", "yid", }, ), "grk": ("Greek languages", {"grc_Grek", "ell"}), "hbs": ("Serbo-Croatian", {"hrv", "srp_Cyrl", "bos_Latn", "srp_Latn"}), "heb": ("Hebrew", {"heb"}), "hin": ("Hindi", {"hin"}), "hun": ("Hungarian", {"hun"}), "hye": ("Armenian", {"hye", "hye_Latn"}), "iir": ( "Indo-Iranian languages", { "asm", "awa", "ben", "bho", "gom", "guj", "hif_Latn", "hin", "jdt_Cyrl", "kur_Arab", "kur_Latn", "mai", "mar", "npi", "ori", "oss", "pan_Guru", "pes", "pes_Latn", "pes_Thaa", "pnb", "pus", "rom", "san_Deva", "sin", "snd_Arab", "tgk_Cyrl", "tly_Latn", "urd", "zza", }, ), "ilo": ("Iloko", {"ilo"}), "inc": ( "Indic languages", { "asm", "awa", "ben", "bho", "gom", "guj", "hif_Latn", "hin", "mai", "mar", "npi", 
"ori", "pan_Guru", "pnb", "rom", "san_Deva", "sin", "snd_Arab", "urd", }, ), "ine": ( "Indo-European languages", { "afr", "afr_Arab", "aln", "ang_Latn", "arg", "asm", "ast", "awa", "bel", "bel_Latn", "ben", "bho", "bjn", "bos_Latn", "bre", "bul", "bul_Latn", "cat", "ces", "cor", "cos", "csb_Latn", "cym", "dan", "deu", "dsb", "egl", "ell", "eng", "enm_Latn", "ext", "fao", "fra", "frm_Latn", "frr", "fry", "gcf_Latn", "gla", "gle", "glg", "glv", "gom", "gos", "got_Goth", "grc_Grek", "gsw", "guj", "hat", "hif_Latn", "hin", "hrv", "hsb", "hye", "hye_Latn", "ind", "isl", "ita", "jdt_Cyrl", "ksh", "kur_Arab", "kur_Latn", "lad", "lad_Latn", "lat_Grek", "lat_Latn", "lav", "lij", "lit", "lld_Latn", "lmo", "ltg", "ltz", "mai", "mar", "max_Latn", "mfe", "min", "mkd", "mwl", "nds", "nld", "nno", "nob", "nob_Hebr", "non_Latn", "npi", "oci", "ori", "orv_Cyrl", "oss", "pan_Guru", "pap", "pcd", "pdc", "pes", "pes_Latn", "pes_Thaa", "pms", "pnb", "pol", "por", "prg_Latn", "pus", "roh", "rom", "ron", "rue", "rus", "rus_Latn", "san_Deva", "scn", "sco", "sgs", "sin", "slv", "snd_Arab", "spa", "sqi", "srd", "srp_Cyrl", "srp_Latn", "stq", "swe", "swg", "tgk_Cyrl", "tly_Latn", "tmw_Latn", "ukr", "urd", "vec", "wln", "yid", "zlm_Latn", "zsm_Latn", "zza", }, ), "isl": ("Icelandic", {"isl"}), "ita": ("Italian", {"ita"}), "itc": ( "Italic languages", { "arg", "ast", "bjn", "cat", "cos", "egl", "ext", "fra", "frm_Latn", "gcf_Latn", "glg", "hat", "ind", "ita", "lad", "lad_Latn", "lat_Grek", "lat_Latn", "lij", "lld_Latn", "lmo", "max_Latn", "mfe", "min", "mwl", "oci", "pap", "pcd", "pms", "por", "roh", "ron", "scn", "spa", "srd", "tmw_Latn", "vec", "wln", "zlm_Latn", "zsm_Latn", }, ), "jpn": ("Japanese", {"jpn", "jpn_Bopo", "jpn_Hang", "jpn_Hani", "jpn_Hira", "jpn_Kana", "jpn_Latn", "jpn_Yiii"}), "jpx": ("Japanese (family)", {"jpn"}), "kat": ("Georgian", {"kat"}), "kor": ("Korean", {"kor_Hani", "kor_Hang", "kor_Latn", "kor"}), "lav": ("Latvian", {"lav"}), "lit": ("Lithuanian", {"lit"}), "mkd": ("Macedonian", {"mkd"}), "mkh": ("Mon-Khmer languages", {"vie_Hani", "mnw", "vie", "kha", "khm_Latn", "khm"}), "msa": ("Malay (macrolanguage)", {"zsm_Latn", "ind", "max_Latn", "zlm_Latn", "min"}), "mul": ( "Multiple languages", { "abk", "acm", "ady", "afb", "afh_Latn", "afr", "akl_Latn", "aln", "amh", "ang_Latn", "apc", "ara", "arg", "arq", "ary", "arz", "asm", "ast", "avk_Latn", "awa", "aze_Latn", "bak", "bam_Latn", "bel", "bel_Latn", "ben", "bho", "bod", "bos_Latn", "bre", "brx", "brx_Latn", "bul", "bul_Latn", "cat", "ceb", "ces", "cha", "che", "chr", "chv", "cjy_Hans", "cjy_Hant", "cmn", "cmn_Hans", "cmn_Hant", "cor", "cos", "crh", "crh_Latn", "csb_Latn", "cym", "dan", "deu", "dsb", "dtp", "dws_Latn", "egl", "ell", "enm_Latn", "epo", "est", "eus", "ewe", "ext", "fao", "fij", "fin", "fkv_Latn", "fra", "frm_Latn", "frr", "fry", "fuc", "fuv", "gan", "gcf_Latn", "gil", "gla", "gle", "glg", "glv", "gom", "gos", "got_Goth", "grc_Grek", "grn", "gsw", "guj", "hat", "hau_Latn", "haw", "heb", "hif_Latn", "hil", "hin", "hnj_Latn", "hoc", "hoc_Latn", "hrv", "hsb", "hun", "hye", "iba", "ibo", "ido", "ido_Latn", "ike_Latn", "ile_Latn", "ilo", "ina_Latn", "ind", "isl", "ita", "izh", "jav", "jav_Java", "jbo", "jbo_Cyrl", "jbo_Latn", "jdt_Cyrl", "jpn", "kab", "kal", "kan", "kat", "kaz_Cyrl", "kaz_Latn", "kek_Latn", "kha", "khm", "khm_Latn", "kin", "kir_Cyrl", "kjh", "kpv", "krl", "ksh", "kum", "kur_Arab", "kur_Latn", "lad", "lad_Latn", "lao", "lat_Latn", "lav", "ldn_Latn", "lfn_Cyrl", "lfn_Latn", "lij", "lin", "lit", "liv_Latn", "lkt", 
"lld_Latn", "lmo", "ltg", "ltz", "lug", "lzh", "lzh_Hans", "mad", "mah", "mai", "mal", "mar", "max_Latn", "mdf", "mfe", "mhr", "mic", "min", "mkd", "mlg", "mlt", "mnw", "moh", "mon", "mri", "mwl", "mww", "mya", "myv", "nan", "nau", "nav", "nds", "niu", "nld", "nno", "nob", "nob_Hebr", "nog", "non_Latn", "nov_Latn", "npi", "nya", "oci", "ori", "orv_Cyrl", "oss", "ota_Arab", "ota_Latn", "pag", "pan_Guru", "pap", "pau", "pdc", "pes", "pes_Latn", "pes_Thaa", "pms", "pnb", "pol", "por", "ppl_Latn", "prg_Latn", "pus", "quc", "qya", "qya_Latn", "rap", "rif_Latn", "roh", "rom", "ron", "rue", "run", "rus", "sag", "sah", "san_Deva", "scn", "sco", "sgs", "shs_Latn", "shy_Latn", "sin", "sjn_Latn", "slv", "sma", "sme", "smo", "sna", "snd_Arab", "som", "spa", "sqi", "srp_Cyrl", "srp_Latn", "stq", "sun", "swe", "swg", "swh", "tah", "tam", "tat", "tat_Arab", "tat_Latn", "tel", "tet", "tgk_Cyrl", "tha", "tir", "tlh_Latn", "tly_Latn", "tmw_Latn", "toi_Latn", "ton", "tpw_Latn", "tso", "tuk", "tuk_Latn", "tur", "tvl", "tyv", "tzl", "tzl_Latn", "udm", "uig_Arab", "uig_Cyrl", "ukr", "umb", "urd", "uzb_Cyrl", "uzb_Latn", "vec", "vie", "vie_Hani", "vol_Latn", "vro", "war", "wln", "wol", "wuu", "xal", "xho", "yid", "yor", "yue", "yue_Hans", "yue_Hant", "zho", "zho_Hans", "zho_Hant", "zlm_Latn", "zsm_Latn", "zul", "zza", }, ), "nic": ( "Niger-Kordofanian languages", { "bam_Latn", "ewe", "fuc", "fuv", "ibo", "kin", "lin", "lug", "nya", "run", "sag", "sna", "swh", "toi_Latn", "tso", "umb", "wol", "xho", "yor", "zul", }, ), "nld": ("Dutch", {"nld"}), "nor": ("Norwegian", {"nob", "nno"}), "phi": ("Philippine languages", {"ilo", "akl_Latn", "war", "hil", "pag", "ceb"}), "pol": ("Polish", {"pol"}), "por": ("Portuguese", {"por"}), "pqe": ( "Eastern Malayo-Polynesian languages", {"fij", "gil", "haw", "mah", "mri", "nau", "niu", "rap", "smo", "tah", "ton", "tvl"}, ), "roa": ( "Romance languages", { "arg", "ast", "cat", "cos", "egl", "ext", "fra", "frm_Latn", "gcf_Latn", "glg", "hat", "ind", "ita", "lad", "lad_Latn", "lij", "lld_Latn", "lmo", "max_Latn", "mfe", "min", "mwl", "oci", "pap", "pms", "por", "roh", "ron", "scn", "spa", "tmw_Latn", "vec", "wln", "zlm_Latn", "zsm_Latn", }, ), "ron": ("Romanian", {"ron"}), "run": ("Rundi", {"run"}), "rus": ("Russian", {"rus"}), "sal": ("Salishan languages", {"shs_Latn"}), "sem": ("Semitic languages", {"acm", "afb", "amh", "apc", "ara", "arq", "ary", "arz", "heb", "mlt", "tir"}), "sla": ( "Slavic languages", { "bel", "bel_Latn", "bos_Latn", "bul", "bul_Latn", "ces", "csb_Latn", "dsb", "hrv", "hsb", "mkd", "orv_Cyrl", "pol", "rue", "rus", "slv", "srp_Cyrl", "srp_Latn", "ukr", }, ), "slv": ("Slovenian", {"slv"}), "spa": ("Spanish", {"spa"}), "swe": ("Swedish", {"swe"}), "taw": ("Tai", {"lao", "tha"}), "tgl": ("Tagalog", {"tgl_Latn"}), "tha": ("Thai", {"tha"}), "trk": ( "Turkic languages", { "aze_Latn", "bak", "chv", "crh", "crh_Latn", "kaz_Cyrl", "kaz_Latn", "kir_Cyrl", "kjh", "kum", "ota_Arab", "ota_Latn", "sah", "tat", "tat_Arab", "tat_Latn", "tuk", "tuk_Latn", "tur", "tyv", "uig_Arab", "uig_Cyrl", "uzb_Cyrl", "uzb_Latn", }, ), "tur": ("Turkish", {"tur"}), "ukr": ("Ukrainian", {"ukr"}), "urd": ("Urdu", {"urd"}), "urj": ( "Uralic languages", { "est", "fin", "fkv_Latn", "hun", "izh", "kpv", "krl", "liv_Latn", "mdf", "mhr", "myv", "sma", "sme", "udm", "vep", "vro", }, ), "vie": ("Vietnamese", {"vie", "vie_Hani"}), "war": ("Waray (Philippines)", {"war"}), "zho": ( "Chinese", { "cjy_Hans", "cjy_Hant", "cmn", "cmn_Bopo", "cmn_Hang", "cmn_Hani", "cmn_Hans", "cmn_Hant", "cmn_Hira", 
"cmn_Kana", "cmn_Latn", "cmn_Yiii", "gan", "hak_Hani", "lzh", "lzh_Bopo", "lzh_Hang", "lzh_Hani", "lzh_Hans", "lzh_Hira", "lzh_Kana", "lzh_Yiii", "nan", "nan_Hani", "wuu", "wuu_Bopo", "wuu_Hani", "wuu_Latn", "yue", "yue_Bopo", "yue_Hang", "yue_Hani", "yue_Hans", "yue_Hant", "yue_Hira", "yue_Kana", "zho", "zho_Hans", "zho_Hant", }, ), "zle": ("East Slavic languages", {"bel", "orv_Cyrl", "bel_Latn", "rus", "ukr", "rue"}), "zls": ("South Slavic languages", {"bos_Latn", "bul", "bul_Latn", "hrv", "mkd", "slv", "srp_Cyrl", "srp_Latn"}), "zlw": ("West Slavic languages", {"csb_Latn", "dsb", "hsb", "pol", "ces"}), } def l2front_matter(langs): return "".join(f"- {l}\n" for l in langs) def dedup(lst): """Preservers order""" new_lst = [] for item in lst: if not item: continue elif item in new_lst: continue else: new_lst.append(item) return new_lst if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( "-m", "--models", action="append", help="<Required> Set flag", required=True, nargs="+", dest="models" ) parser.add_argument("-save_dir", "--save_dir", default="marian_converted", help="where to save converted models") args = parser.parse_args() resolver = TatoebaConverter(save_dir=args.save_dir) resolver.convert_models(args.models[0])
AdaMix/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/marian/convert_marian_tatoeba_to_pytorch.py", "repo_id": "AdaMix", "token_count": 21552 }
59
# coding=utf-8 # Copyright 2021 The Fairseq Authors and The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 MBart model. """ import random from typing import Dict, Optional, Tuple, Union import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( add_code_sample_docstrings, add_end_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_outputs import ( TFBaseModelOutput, TFBaseModelOutputWithPast, TFSeq2SeqLMOutput, TFSeq2SeqModelOutput, ) # Public API from ...modeling_tf_utils import ( DUMMY_INPUTS, TFCausalLanguageModelingLoss, TFPreTrainedModel, TFSharedEmbeddings, TFWrappedEmbeddings, input_processing, keras_serializable, shape_list, ) from ...utils import logging from .configuration_mbart import MBartConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "facebook/mbart-large-cc25" _CONFIG_FOR_DOC = "MBartConfig" _TOKENIZER_FOR_DOC = "MBartTokenizer" LARGE_NEGATIVE = -1e8 def shift_tokens_right(input_ids: tf.Tensor, pad_token_id: int): """ Shift input ids one token to the right, and wrap the last non pad token (the <LID> token) Note that MBart does not have a single `decoder_start_token_id` in contrast to other Bart-like models. """ assert pad_token_id is not None, "self.model.config.pad_token_id has to be defined." # replace possible -100 values in labels by `pad_token_id` input_ids = tf.where(input_ids == -100, tf.fill(shape_list(input_ids), pad_token_id), input_ids) language_id_index = ( tf.reduce_sum(tf.cast(tf.math.not_equal(input_ids, pad_token_id), dtype=input_ids.dtype), axis=-1) - 1 ) language_id_index = tf.stack([tf.range(shape_list(input_ids)[0]), language_id_index], axis=-1) languages_ids = tf.gather_nd(input_ids, language_id_index) shifted_input_ids = tf.concat([tf.expand_dims(languages_ids, axis=-1), input_ids[:, :-1]], axis=-1) return shifted_input_ids # Copied from transformers.models.bart.modeling_tf_bart._make_causal_mask def _make_causal_mask(input_ids_shape: tf.TensorShape, past_key_values_length: int = 0): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = tf.ones((tgt_len, tgt_len)) * LARGE_NEGATIVE mask_cond = tf.range(shape_list(mask)[-1]) mask = tf.where(mask_cond < tf.reshape(mask_cond + 1, (shape_list(mask)[-1], 1)), 0.0, mask) if past_key_values_length > 0: mask = tf.concat([tf.zeros((tgt_len, past_key_values_length)), mask], axis=-1) return tf.tile(mask[None, None, :, :], (bsz, 1, 1, 1)) # Copied from transformers.models.bart.modeling_tf_bart._expand_mask def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None, past_key_values_length: int = 0): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. 
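    For example (an illustrative sketch of the additive-mask convention used throughout this file): a
    keep/pad mask ``[[1, 1, 0]]`` becomes ``0.0`` at the two kept positions and ``LARGE_NEGATIVE`` at
    the padded one, broadcast over the target length.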
""" src_len = shape_list(mask)[1] tgt_len = tgt_len if tgt_len is not None else src_len one_cst = tf.constant(1.0) mask = tf.cast(mask, dtype=one_cst.dtype) expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1)) return (one_cst - expanded_mask) * LARGE_NEGATIVE # Copied from transformers.models.bart.modeling_tf_bart.TFBartLearnedPositionalEmbedding with Bart->MBart class TFMBartLearnedPositionalEmbedding(TFSharedEmbeddings): """ This module learns positional embeddings up to a fixed maximum size. """ def __init__(self, num_embeddings: int, embedding_dim: int, **kwargs): # MBart is set up so that if padding_idx is specified then offset the embedding ids by 2 # and adjust num_embeddings appropriately. Other models dont have this hack self.offset = 2 super().__init__(num_embeddings + self.offset, embedding_dim, **kwargs) def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0): """Input is expected to be of size [bsz x seqlen].""" bsz, seq_len = input_shape[:2] positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range") return super().call(positions + self.offset) # Copied from transformers.models.bart.modeling_tf_bart.TFBartAttention with Bart->MBart class TFMBartAttention(tf.keras.layers.Layer): """Multi-headed attention from "Attention Is All You Need""" def __init__( self, embed_dim: int, num_heads: int, dropout: float = 0.0, is_decoder: bool = False, bias: bool = True, **kwargs, ): super().__init__(**kwargs) self.embed_dim = embed_dim self.num_heads = num_heads self.dropout = tf.keras.layers.Dropout(dropout) self.head_dim = embed_dim // num_heads assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads" self.scaling = self.head_dim ** -0.5 self.is_decoder = is_decoder self.k_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="k_proj") self.q_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="q_proj") self.v_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="v_proj") self.out_proj = tf.keras.layers.Dense(embed_dim, use_bias=bias, name="out_proj") def _shape(self, tensor: tf.Tensor, seq_len: int, bsz: int): return tf.transpose(tf.reshape(tensor, (bsz, seq_len, self.num_heads, self.head_dim)), (0, 2, 1, 3)) def call( self, hidden_states: tf.Tensor, key_value_states: Optional[tf.Tensor] = None, past_key_value: Optional[Tuple[Tuple[tf.Tensor]]] = None, attention_mask: Optional[tf.Tensor] = None, layer_head_mask: Optional[tf.Tensor] = None, training=False, ) -> Tuple[tf.Tensor, Optional[tf.Tensor]]: """Input shape: Batch x Time x Channel""" # if key_value_states are provided this layer is used as a cross-attention layer # for the decoder is_cross_attention = key_value_states is not None bsz, tgt_len, embed_dim = shape_list(hidden_states) # get query proj query_states = self.q_proj(hidden_states) * self.scaling # get key, value proj if is_cross_attention and past_key_value is not None: # reuse k,v, cross_attentions key_states = past_key_value[0] value_states = past_key_value[1] elif is_cross_attention: # cross_attentions key_states = self._shape(self.k_proj(key_value_states), -1, bsz) value_states = self._shape(self.v_proj(key_value_states), -1, bsz) elif past_key_value is not None: # reuse k, v, self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) key_states = tf.concat([past_key_value[0], key_states], axis=2) value_states = tf.concat([past_key_value[1], value_states], 
axis=2) else: # self_attention key_states = self._shape(self.k_proj(hidden_states), -1, bsz) value_states = self._shape(self.v_proj(hidden_states), -1, bsz) if self.is_decoder: # if cross_attention save Tuple(tf.Tensor, tf.Tensor) of all cross attention key/value_states. # Further calls to cross_attention layer can then reuse all cross-attention # key/value_states (first "if" case) # if uni-directional self-attention (decoder) save Tuple(tf.Tensor, tf.Tensor) of # all previous decoder key/value_states. Further calls to uni-directional self-attention # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case) # if encoder bi-directional self-attention `past_key_value` is always `None` past_key_value = (key_states, value_states) proj_shape = (bsz * self.num_heads, -1, self.head_dim) query_states = tf.reshape(self._shape(query_states, tgt_len, bsz), proj_shape) key_states = tf.reshape(key_states, proj_shape) value_states = tf.reshape(value_states, proj_shape) src_len = shape_list(key_states)[1] attn_weights = tf.matmul(query_states, key_states, transpose_b=True) # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(attn_weights), [bsz * self.num_heads, tgt_len, src_len], message=f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {shape_list(attn_weights)}", ) if attention_mask is not None: # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(attention_mask), [bsz, 1, tgt_len, src_len], message=f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {shape_list(attention_mask)}", ) attention_mask = tf.cast(attention_mask, dtype=attn_weights.dtype) attn_weights = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) + attention_mask attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_weights = tf.nn.softmax(attn_weights, axis=-1) if layer_head_mask is not None: # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(layer_head_mask), [self.num_heads], message=f"Head mask for a single layer should be of size {(self.num_heads)}, but is {shape_list(layer_head_mask)}", ) attn_weights = tf.reshape(layer_head_mask, (1, -1, 1, 1)) * tf.reshape( attn_weights, (bsz, self.num_heads, tgt_len, src_len) ) attn_weights = tf.reshape(attn_weights, (bsz * self.num_heads, tgt_len, src_len)) attn_probs = self.dropout(attn_weights, training=training) attn_output = tf.matmul(attn_probs, value_states) # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. 
if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(attn_output), [bsz * self.num_heads, tgt_len, self.head_dim], message=f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {shape_list(attn_output)}", ) attn_output = tf.transpose( tf.reshape(attn_output, (bsz, self.num_heads, tgt_len, self.head_dim)), (0, 2, 1, 3) ) attn_output = tf.reshape(attn_output, (bsz, tgt_len, embed_dim)) attn_output = self.out_proj(attn_output) attn_weights: tf.Tensor = tf.reshape(attn_weights, (bsz, self.num_heads, tgt_len, src_len)) return attn_output, attn_weights, past_key_value class TFMBartEncoderLayer(tf.keras.layers.Layer): def __init__(self, config: MBartConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFMBartAttention( self.embed_dim, config.encoder_attention_heads, dropout=config.attention_dropout, name="self_attn" ) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.fc1 = tf.keras.layers.Dense(config.encoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call(self, hidden_states: tf.Tensor, attention_mask: tf.Tensor, layer_head_mask: tf.Tensor, training=False): """ Args: hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size `(encoder_attention_heads,)` """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) hidden_states, self_attn_weights, _ = self.self_attn( hidden_states=hidden_states, attention_mask=attention_mask, layer_head_mask=layer_head_mask ) # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. 
if tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(hidden_states), shape_list(residual), message=f"Self attn modified the shape of query {shape_list(residual)} to {shape_list(hidden_states)}", ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return hidden_states, self_attn_weights class TFMBartDecoderLayer(tf.keras.layers.Layer): def __init__(self, config: MBartConfig, **kwargs): super().__init__(**kwargs) self.embed_dim = config.d_model self.self_attn = TFMBartAttention( embed_dim=self.embed_dim, num_heads=config.decoder_attention_heads, dropout=config.attention_dropout, name="self_attn", is_decoder=True, ) self.dropout = tf.keras.layers.Dropout(config.dropout) self.activation_fn = get_tf_activation(config.activation_function) self.activation_dropout = tf.keras.layers.Dropout(config.activation_dropout) self.self_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="self_attn_layer_norm") self.encoder_attn = TFMBartAttention( self.embed_dim, config.decoder_attention_heads, dropout=config.attention_dropout, name="encoder_attn", is_decoder=True, ) self.encoder_attn_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="encoder_attn_layer_norm") self.fc1 = tf.keras.layers.Dense(config.decoder_ffn_dim, name="fc1") self.fc2 = tf.keras.layers.Dense(self.embed_dim, name="fc2") self.final_layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="final_layer_norm") def call( self, hidden_states, attention_mask: Optional[tf.Tensor] = None, encoder_hidden_states: Optional[tf.Tensor] = None, encoder_attention_mask: Optional[tf.Tensor] = None, layer_head_mask: Optional[tf.Tensor] = None, encoder_layer_head_mask: Optional[tf.Tensor] = None, past_key_value: Optional[Tuple[tf.Tensor]] = None, training=False, ) -> Tuple[tf.Tensor, tf.Tensor, Tuple[Tuple[tf.Tensor]]]: """ Args: hidden_states (:obj:`tf.Tensor`): input to the layer of shape `(seq_len, batch, embed_dim)` attention_mask (:obj:`tf.Tensor`): attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. encoder_hidden_states (:obj:`tf.Tensor`): cross attention input to the layer of shape `(seq_len, batch, embed_dim)` encoder_attention_mask (:obj:`tf.Tensor`): encoder attention mask of size `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values. 
layer_head_mask (:obj:`tf.Tensor`): mask for attention heads in a given layer of size `(decoder_attention_heads,)` encoder_layer_head_mask (:obj:`tf.Tensor`): mask for encoder attention heads in a given layer of size `(encoder_attention_heads,)` past_key_value (:obj:`Tuple(tf.Tensor)`): cached past key and value projection states """ residual = hidden_states hidden_states = self.self_attn_layer_norm(hidden_states) # Self Attention # decoder uni-directional self-attention cached key/values tuple is at positions 1,2 self_attn_past_key_value = past_key_value[:2] if past_key_value is not None else None # add present self-attn cache to positions 1,2 of present_key_value tuple hidden_states, self_attn_weights, present_key_value = self.self_attn( hidden_states=hidden_states, past_key_value=self_attn_past_key_value, attention_mask=attention_mask, layer_head_mask=layer_head_mask, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # Cross-Attention Block cross_attn_present_key_value = None if encoder_hidden_states is not None: residual = hidden_states hidden_states = self.encoder_attn_layer_norm(hidden_states) # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple cross_attn_past_key_value = past_key_value[-2:] if past_key_value is not None else None hidden_states, _, cross_attn_present_key_value = self.encoder_attn( hidden_states=hidden_states, key_value_states=encoder_hidden_states, attention_mask=encoder_attention_mask, layer_head_mask=encoder_layer_head_mask, past_key_value=cross_attn_past_key_value, ) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states # add cross-attn to positions 3,4 of present_key_value tuple present_key_value = present_key_value + cross_attn_present_key_value # Fully Connected residual = hidden_states hidden_states = self.final_layer_norm(hidden_states) hidden_states = self.activation_fn(self.fc1(hidden_states)) hidden_states = self.activation_dropout(hidden_states, training=training) hidden_states = self.fc2(hidden_states) hidden_states = self.dropout(hidden_states, training=training) hidden_states = residual + hidden_states return ( hidden_states, self_attn_weights, present_key_value, ) class TFMBartPreTrainedModel(TFPreTrainedModel): config_class = MBartConfig base_model_prefix = "model" @property def dummy_inputs(self): pad_token = 1 input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32) decoder_input_ids = tf.cast(tf.convert_to_tensor(DUMMY_INPUTS), tf.int32) dummy_inputs = { "decoder_input_ids": decoder_input_ids, "attention_mask": tf.math.not_equal(input_ids, pad_token), "input_ids": input_ids, } return dummy_inputs @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"), "decoder_input_ids": tf.TensorSpec((None, None), tf.int32, name="decoder_input_ids"), "decoder_attention_mask": tf.TensorSpec((None, None), tf.int32, name="decoder_attention_mask"), } ] ) # Copied from transformers.models.bart.modeling_tf_bart.TFBartPretrainedModel.serving def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) MBART_START_DOCSTRING = r""" This model inherits from :class:`~transformers.TFPreTrainedModel`. 
    Check the superclass documentation for the generic methods the library implements for all its models (such as
    downloading or saving, resizing the input embeddings, pruning heads etc.)

    This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
    it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
    and behavior.

    .. note::

        TF 2.0 models accept two formats as inputs:

        - having all inputs as keyword arguments (like PyTorch models), or
        - having all inputs as a list, tuple or dict in the first positional argument.

        This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all
        the tensors in the first argument of the model call function: :obj:`model(inputs)`.

        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:

        - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
          :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
          :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Args:
        config (:class:`~transformers.MBartConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.TFPreTrainedModel.from_pretrained` method to load the
            model weights.
"""

MBART_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`tf.Tensor` of shape :obj:`({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
            Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            `What are attention masks? <../glossary.html#attention-mask>`__
        decoder_input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            Indices of decoder input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
            :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for
            details.

            `What are input IDs? <../glossary.html#input-ids>`__

            MBart uses a specific language id token as the starting token for :obj:`decoder_input_ids` generation that
            varies according to source and target language, *e.g.* 25004 for `en_XX`, and 25003 for `de_DE`. If
            :obj:`past_key_values` is used, optionally only the last :obj:`decoder_input_ids` have to be input (see
            :obj:`past_key_values`).

            For translation and summarization training, :obj:`decoder_input_ids` should be provided. If no
            :obj:`decoder_input_ids` is provided, the model will create this tensor by shifting the :obj:`input_ids` to
            the right for denoising pre-training following the paper.
        decoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, target_sequence_length)`, `optional`):
            will be made by default and ignore pad tokens. It is not recommended to set this for most use cases.
        head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
            Mask to nullify selected heads of the attention modules in the encoder. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        decoder_head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
            Mask to nullify selected heads of the attention modules in the decoder. Mask values selected in ``[0, 1]``:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        encoder_outputs (:obj:`tf.FloatTensor`, `optional`):
            Sequence of hidden states at the output of the last layer of the encoder, of shape :obj:`(batch_size,
            sequence_length, hidden_size)`. Used in the cross-attention of the decoder.
        past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers`)
            contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
            If :obj:`past_key_values` are used, the user can optionally input only the last :obj:`decoder_input_ids`
            (those that don't have their past key value states given to this model) of shape :obj:`(batch_size, 1)`
            instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size, sequence_length)`.
        use_cache (:obj:`bool`, `optional`, defaults to :obj:`True`):
            If set to :obj:`True`, :obj:`past_key_values` key value states are returned and can be used to speed up
            decoding (see :obj:`past_key_values`). Set to :obj:`False` during training, :obj:`True` during generation.
        output_attentions (:obj:`bool`, `optional`):
            Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (:obj:`bool`, `optional`):
            Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (:obj:`bool`, `optional`):
            Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
            argument can be used in eager mode, in graph mode the value will always be set to True.
        training (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""

MBART_GENERATION_EXAMPLE = r"""
    Summarization example::

        >>> from transformers import MBartTokenizer, TFMBartForConditionalGeneration, MBartConfig

        >>> model = TFMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25')
        >>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')

        >>> ARTICLE_TO_SUMMARIZE = "Meine Freunde sind cool, aber sie essen zu viel Kuchen."
        >>> inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors='tf')

        >>> # Generate Summary
        >>> summary_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=5, early_stopping=True)
        >>> print([tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids])

    Mask filling example::

        >>> from transformers import MBartTokenizer, TFMBartForConditionalGeneration
        >>> tokenizer = MBartTokenizer.from_pretrained('facebook/mbart-large-cc25')

        >>> # de_DE is the language symbol id <LID> for German
        >>> TXT = "</s> Meine Freunde sind <mask> nett aber sie essen zu viel Kuchen. </s> de_DE"
        >>> model = TFMBartForConditionalGeneration.from_pretrained('facebook/mbart-large-cc25')
        >>> input_ids = tokenizer([TXT], add_special_tokens=False, return_tensors='tf')['input_ids']
        >>> logits = model(input_ids).logits
        >>> probs = tf.nn.softmax(logits[0])
        >>> # probs[5] is associated with the mask token
"""


@keras_serializable
class TFMBartEncoder(tf.keras.layers.Layer):
    config_class = MBartConfig
    """
    Transformer encoder consisting of *config.encoder_layers* self attention layers. Each layer is a
    :class:`TFMBartEncoderLayer`.

    Args:
        config: MBartConfig
    """

    def __init__(self, config: MBartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs):
        super().__init__(**kwargs)
        self.config = config
        self.dropout = tf.keras.layers.Dropout(config.dropout)
        self.layerdrop = config.encoder_layerdrop
        self.padding_idx = config.pad_token_id
        self.max_source_positions = config.max_position_embeddings
        self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0
        self.embed_tokens = embed_tokens
        self.embed_positions = TFMBartLearnedPositionalEmbedding(
            config.max_position_embeddings,
            config.d_model,
            name="embed_positions",
        )
        self.layers = [TFMBartEncoderLayer(config, name=f"layers.{i}") for i in range(config.encoder_layers)]
        self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding")
        self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm")

    def get_embed_tokens(self):
        return self.embed_tokens

    def set_embed_tokens(self, embed_tokens):
        self.embed_tokens = embed_tokens

    def call(
        self,
        input_ids=None,
        inputs_embeds=None,
        attention_mask=None,
        head_mask=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        """
        Args:
            input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
                provide it.

                Indices can be obtained using :class:`~transformers.MBartTokenizer`. See
                :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__`
                for details.

                `What are input IDs? <../glossary.html#input-ids>`__
            attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
                Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif inputs["input_ids"] is not None: input_shape = shape_list(inputs["input_ids"]) elif inputs["inputs_embeds"] is not None: input_shape = shape_list(inputs["inputs_embeds"])[:-1] else: raise ValueError("You have to specify either input_ids or inputs_embeds") if inputs["inputs_embeds"] is None: inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale embed_pos = self.embed_positions(input_shape) hidden_states = inputs["inputs_embeds"] + embed_pos hidden_states = self.layernorm_embedding(hidden_states) hidden_states = self.dropout(hidden_states, training=inputs["training"]) # check attention mask and invert if inputs["attention_mask"] is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] attention_mask = _expand_mask(inputs["attention_mask"]) else: attention_mask = None encoder_states = () if inputs["output_hidden_states"] else None all_attentions = () if inputs["output_attentions"] else None # check if head_mask has a correct number of layers specified if desired # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. 
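        # (Note: when this eager-only check is skipped under graph/XLA execution, a head_mask
        # with the wrong number of layers would presumably only fail later, inside the
        # per-layer attention computation.)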
if inputs["head_mask"] is not None and tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(inputs["head_mask"])[0], len(self.layers), message=f"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs['head_mask'])[0]}.", ) # encoder layers for idx, encoder_layer in enumerate(self.layers): if inputs["output_hidden_states"]: encoder_states = encoder_states + (hidden_states,) # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) dropout_probability = random.uniform(0, 1) if inputs["training"] and (dropout_probability < self.layerdrop): # skip the layer continue hidden_states, attn = encoder_layer( hidden_states, attention_mask, inputs["head_mask"][idx] if inputs["head_mask"] is not None else None, ) if inputs["output_attentions"]: all_attentions += (attn,) hidden_states = self.layer_norm(hidden_states) if inputs["output_hidden_states"]: encoder_states = encoder_states + (hidden_states,) if not inputs["return_dict"]: return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None) return TFBaseModelOutput( last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions ) @keras_serializable class TFMBartDecoder(tf.keras.layers.Layer): config_class = MBartConfig """ Transformer decoder consisting of *config.decoder_layers* layers. Each layer is a :class:`TFMBartDecoderLayer` Args: config: MBartConfig embed_tokens: output embedding """ def __init__(self, config: MBartConfig, embed_tokens: Optional[TFSharedEmbeddings] = None, **kwargs): super().__init__(**kwargs) self.config = config self.padding_idx = config.pad_token_id self.embed_tokens = embed_tokens self.layerdrop = config.decoder_layerdrop self.embed_positions = TFMBartLearnedPositionalEmbedding( config.max_position_embeddings, config.d_model, name="embed_positions", ) self.embed_scale = tf.math.sqrt(float(config.d_model)) if config.scale_embedding else 1.0 self.layers = [TFMBartDecoderLayer(config, name=f"layers.{i}") for i in range(config.decoder_layers)] self.layernorm_embedding = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layernorm_embedding") self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=1e-5, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) def get_embed_tokens(self): return self.embed_tokens def set_embed_tokens(self, embed_tokens): self.embed_tokens = embed_tokens def call( self, input_ids=None, inputs_embeds=None, attention_mask=None, encoder_hidden_states=None, encoder_attention_mask=None, head_mask=None, encoder_head_mask=None, past_key_values=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): r""" Args: input_ids (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide it. Indices can be obtained using :class:`~transformers.MBartTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? 
                `What are attention masks? <../glossary.html#attention-mask>`__
            encoder_hidden_states (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length, hidden_size)`, `optional`):
                Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention
                of the decoder.
            encoder_attention_mask (:obj:`tf.Tensor` of shape :obj:`(batch_size, encoder_sequence_length)`, `optional`):
                Mask to avoid performing cross-attention on padding tokens indices of encoder input_ids. Mask values
                selected in ``[0, 1]``:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                `What are attention masks? <../glossary.html#attention-mask>`__
            head_mask (:obj:`tf.Tensor` of shape :obj:`(decoder_layers, decoder_attention_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            encoder_head_mask (:obj:`tf.Tensor` of shape :obj:`(encoder_layers, encoder_attention_heads)`, `optional`):
                Mask to nullify selected heads of the attention modules in encoder to avoid performing cross-attention
                on hidden heads. Mask values selected in ``[0, 1]``:

                - 1 indicates the head is **not masked**,
                - 0 indicates the head is **masked**.

            past_key_values (:obj:`Tuple[Tuple[tf.Tensor]]` of length :obj:`config.n_layers` with each tuple having 2
                tuples each of which has 2 tensors of shape :obj:`(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
                Contains precomputed key and value hidden-states of the attention blocks. Can be used to speed up
                decoding. If :obj:`past_key_values` are used, the user can optionally input only the last
                :obj:`decoder_input_ids` (those that don't have their past key value states given to this model) of
                shape :obj:`(batch_size, 1)` instead of all :obj:`decoder_input_ids` of shape :obj:`(batch_size,
                sequence_length)`.
            inputs_embeds (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`):
                Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded
                representation. This is useful if you want more control over how to convert :obj:`input_ids` indices
                into associated vectors than the model's internal embedding lookup matrix.
            output_attentions (:obj:`bool`, `optional`):
                Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under
                returned tensors for more detail. This argument can be used only in eager mode, in graph mode the
                value in the config will be used instead.
            output_hidden_states (:obj:`bool`, `optional`):
                Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors
                for more detail. This argument can be used only in eager mode, in graph mode the value in the config
                will be used instead.
            return_dict (:obj:`bool`, `optional`):
                Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple.
                This argument can be used in eager mode, in graph mode the value will always be set to True.
            training (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether or not to use the model in training mode (some modules like dropout modules have different
                behaviors between training and evaluation).
""" inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, head_mask=head_mask, encoder_head_mask=encoder_head_mask, inputs_embeds=inputs_embeds, past_key_values=past_key_values, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None: raise ValueError("You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time") elif inputs["input_ids"] is not None: input_shape = shape_list(inputs["input_ids"]) elif inputs["inputs_embeds"] is not None: input_shape = shape_list(inputs["inputs_embeds"])[:-1] else: raise ValueError("You have to specify either decoder_input_ids or decoder_inputs_embeds") past_key_values_length = ( shape_list(inputs["past_key_values"][0][0])[2] if inputs["past_key_values"] is not None else 0 ) # embed positions positions = self.embed_positions(input_shape, past_key_values_length) if inputs["inputs_embeds"] is None: inputs["inputs_embeds"] = self.embed_tokens(inputs["input_ids"]) * self.embed_scale hidden_states = inputs["inputs_embeds"] # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask(input_shape, past_key_values_length=past_key_values_length) else: combined_attention_mask = _expand_mask( tf.ones((input_shape[0], input_shape[1] + past_key_values_length)), tgt_len=input_shape[-1] ) if inputs["attention_mask"] is not None: combined_attention_mask = combined_attention_mask + _expand_mask( inputs["attention_mask"], tgt_len=input_shape[-1] ) if inputs["encoder_hidden_states"] is not None and inputs["encoder_attention_mask"] is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] inputs["encoder_attention_mask"] = _expand_mask(inputs["encoder_attention_mask"], tgt_len=input_shape[-1]) hidden_states = self.layernorm_embedding(hidden_states + positions) hidden_states = self.dropout(hidden_states, training=inputs["training"]) # decoder layers all_hidden_states = () all_self_attns = () present_key_values = () # check if head_mask has a correct number of layers specified if desired # The tf.debugging asserts are not compliant with XLA then they # have to be disabled in other modes than eager. 
if inputs["head_mask"] is not None and tf.executing_eagerly(): tf.debugging.assert_equal( shape_list(inputs["head_mask"])[0], len(self.layers), message=f"The head_mask should be specified for {len(self.layers)} layers, but it is for {shape_list(inputs['head_mask'])[0]}.", ) for idx, decoder_layer in enumerate(self.layers): # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description) if inputs["output_hidden_states"]: all_hidden_states += (hidden_states,) dropout_probability = random.uniform(0, 1) if inputs["training"] and (dropout_probability < self.layerdrop): continue past_key_value = inputs["past_key_values"][idx] if inputs["past_key_values"] is not None else None hidden_states, layer_self_attn, present_key_value = decoder_layer( hidden_states, attention_mask=combined_attention_mask, encoder_hidden_states=inputs["encoder_hidden_states"], encoder_attention_mask=inputs["encoder_attention_mask"], layer_head_mask=inputs["head_mask"][idx] if inputs["head_mask"] is not None else None, encoder_layer_head_mask=inputs["encoder_head_mask"][idx] if inputs["encoder_head_mask"] is not None else None, past_key_value=past_key_value, ) if inputs["use_cache"]: present_key_values += (present_key_value,) if inputs["output_attentions"]: all_self_attns += (layer_self_attn,) hidden_states = self.layer_norm(hidden_states) if inputs["output_hidden_states"]: all_hidden_states += (hidden_states,) else: all_hidden_states = None all_self_attns = list(all_self_attns) if inputs["output_attentions"] else None present_key_values = (encoder_hidden_states, present_key_values) if inputs["use_cache"] else None if not inputs["return_dict"]: return hidden_states, present_key_values, all_hidden_states, all_self_attns else: return TFBaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=present_key_values, hidden_states=all_hidden_states, attentions=all_self_attns, ) @keras_serializable class TFMBartMainLayer(tf.keras.layers.Layer): config_class = MBartConfig def __init__(self, config: MBartConfig, **kwargs): super().__init__(**kwargs) self.config = config self.shared = TFSharedEmbeddings(config.vocab_size, config.d_model, config.pad_token_id, name="model.shared") with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name: pass # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope. embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name) embed_tokens.vocab_size = self.shared.vocab_size embed_tokens.hidden_size = self.shared.hidden_size self.encoder = TFMBartEncoder(config, embed_tokens, name="encoder") self.decoder = TFMBartDecoder(config, embed_tokens, name="decoder") def get_input_embeddings(self): return self.shared def set_input_embeddings(self, new_embeddings): self.shared.weight = new_embeddings self.shared.vocab_size = self.shared.weight.shape[0] # retrieve correct absolute scope for embed token wrapper with tf.compat.v1.variable_scope("model.shared") as shared_abs_scope_name: pass # Wraps layer to avoid problems with weight restoring and ensuring we're in the correct TF scope. 
embed_tokens = TFWrappedEmbeddings(self.shared, abs_scope_name=shared_abs_scope_name) self.encoder.set_embed_tokens(embed_tokens) self.decoder.set_embed_tokens(embed_tokens) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs ): inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) if inputs["decoder_input_ids"] is None and inputs["decoder_inputs_embeds"] is None: inputs["use_cache"] = False inputs["output_hidden_states"] = ( inputs["output_hidden_states"] if inputs["output_hidden_states"] is not None else self.config.output_hidden_states ) if inputs["decoder_input_ids"] is None and inputs["input_ids"] is not None: inputs["decoder_input_ids"] = shift_tokens_right(inputs["input_ids"], self.config.pad_token_id) if inputs["encoder_outputs"] is None: inputs["encoder_outputs"] = self.encoder( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) # If the user passed a tuple for encoder_outputs, we wrap it in a TFBaseModelOutput when return_dict=True elif inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], TFBaseModelOutput): inputs["encoder_outputs"] = TFBaseModelOutput( last_hidden_state=inputs["encoder_outputs"][0], hidden_states=inputs["encoder_outputs"][1] if len(inputs["encoder_outputs"]) > 1 else None, attentions=inputs["encoder_outputs"][2] if len(inputs["encoder_outputs"]) > 2 else None, ) # If the user passed a TFBaseModelOutput for encoder_outputs, we wrap it in a tuple when return_dict=False elif not inputs["return_dict"] and not isinstance(inputs["encoder_outputs"], tuple): inputs["encoder_outputs"] = inputs["encoder_outputs"].to_tuple() decoder_outputs = self.decoder( inputs["decoder_input_ids"], attention_mask=inputs["decoder_attention_mask"], encoder_hidden_states=inputs["encoder_outputs"][0], encoder_attention_mask=inputs["attention_mask"], head_mask=inputs["decoder_head_mask"], encoder_head_mask=inputs["head_mask"], past_key_values=inputs["past_key_values"], inputs_embeds=inputs["decoder_inputs_embeds"], use_cache=inputs["use_cache"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) if not inputs["return_dict"]: return decoder_outputs + inputs["encoder_outputs"] return TFSeq2SeqModelOutput( last_hidden_state=decoder_outputs.last_hidden_state, past_key_values=decoder_outputs.past_key_values, decoder_hidden_states=decoder_outputs.hidden_states, 
decoder_attentions=decoder_outputs.attentions, encoder_last_hidden_state=inputs["encoder_outputs"].last_hidden_state, encoder_hidden_states=inputs["encoder_outputs"].hidden_states, encoder_attentions=inputs["encoder_outputs"].attentions, ) @add_start_docstrings( "The bare MBART Model outputting raw hidden-states without any specific head on top.", MBART_START_DOCSTRING, ) class TFMBartModel(TFMBartPreTrainedModel): def __init__(self, config: MBartConfig, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.model = TFMBartMainLayer(config, name="model") def get_encoder(self): return self.model.encoder def get_decoder(self): return self.model.decoder @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSeq2SeqModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, decoder_input_ids=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, encoder_outputs: Optional[Union[Tuple, TFBaseModelOutput]] = None, past_key_values=None, inputs_embeds=None, decoder_inputs_embeds=None, use_cache=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs ): inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) outputs = self.model( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], decoder_input_ids=inputs["decoder_input_ids"], decoder_attention_mask=inputs["decoder_attention_mask"], head_mask=inputs["head_mask"], decoder_head_mask=inputs["decoder_head_mask"], encoder_outputs=inputs["encoder_outputs"], past_key_values=inputs["past_key_values"], inputs_embeds=inputs["inputs_embeds"], decoder_inputs_embeds=inputs["decoder_inputs_embeds"], use_cache=inputs["use_cache"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) return outputs # Copied from transformers.models.bart.modeling_tf_bart.TFBartModel.serving_output def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqModelOutput( last_hidden_state=output.last_hidden_state, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) @add_start_docstrings( "The MBART Model with a language modeling head. 
Can be used for summarization.",
    MBART_START_DOCSTRING,
)
class TFMBartForConditionalGeneration(TFMBartPreTrainedModel, TFCausalLanguageModelingLoss):
    _keys_to_ignore_on_load_unexpected = [
        r"model.encoder.embed_tokens.weight",
        r"model.decoder.embed_tokens.weight",
    ]

    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
        self.model = TFMBartMainLayer(config, name="model")
        self.use_cache = config.use_cache
        # final_logits_bias is registered as a buffer in pytorch, so it is not trainable here either, for the sake of
        # consistency.
        self.final_logits_bias = self.add_weight(
            name="final_logits_bias", shape=[1, config.vocab_size], initializer="zeros", trainable=False
        )

    def get_decoder(self):
        return self.model.decoder

    def get_encoder(self):
        return self.model.encoder

    def get_output_embeddings(self):
        return self.get_input_embeddings()

    def set_output_embeddings(self, value):
        self.set_input_embeddings(value)

    def get_bias(self):
        return {"final_logits_bias": self.final_logits_bias}

    def set_bias(self, value):
        self.final_logits_bias = value["final_logits_bias"]

    @add_start_docstrings_to_model_forward(MBART_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFSeq2SeqLMOutput, config_class=_CONFIG_FOR_DOC)
    @add_end_docstrings(MBART_GENERATION_EXAMPLE)
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        encoder_outputs: Optional[TFBaseModelOutput] = None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        labels=None,
        training=False,
        **kwargs,
    ):
        """
        labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should either be in ``[0, ...,
            config.vocab_size]`` or -100 (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are
            ignored (masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
Returns: """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, head_mask=head_mask, decoder_head_mask=decoder_head_mask, encoder_outputs=encoder_outputs, past_key_values=past_key_values, inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) if inputs["labels"] is not None: inputs["labels"] = tf.where( inputs["labels"] == self.config.pad_token_id, tf.fill(shape_list(inputs["labels"]), -100), inputs["labels"], ) inputs["use_cache"] = False if inputs["decoder_input_ids"] is None: inputs["decoder_input_ids"] = shift_tokens_right(inputs["labels"], self.config.pad_token_id) outputs = self.model( inputs["input_ids"], attention_mask=inputs["attention_mask"], decoder_input_ids=inputs["decoder_input_ids"], encoder_outputs=inputs["encoder_outputs"], decoder_attention_mask=inputs["decoder_attention_mask"], head_mask=inputs["head_mask"], decoder_head_mask=inputs["decoder_head_mask"], past_key_values=inputs["past_key_values"], inputs_embeds=inputs["inputs_embeds"], decoder_inputs_embeds=inputs["decoder_inputs_embeds"], use_cache=inputs["use_cache"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) lm_logits = self.model.shared(outputs[0], mode="linear") lm_logits = lm_logits + self.final_logits_bias masked_lm_loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], lm_logits) if not inputs["return_dict"]: output = (lm_logits,) + outputs[1:] return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output return TFSeq2SeqLMOutput( loss=masked_lm_loss, logits=lm_logits, past_key_values=outputs.past_key_values, # index 1 of d outputs decoder_hidden_states=outputs.decoder_hidden_states, # index 2 of d outputs decoder_attentions=outputs.decoder_attentions, # index 3 of d outputs encoder_last_hidden_state=outputs.encoder_last_hidden_state, # index 0 of encoder outputs encoder_hidden_states=outputs.encoder_hidden_states, # 1 of e out encoder_attentions=outputs.encoder_attentions, # 2 of e out ) # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.serving_output def serving_output(self, output): pkv = tf.tuple(output.past_key_values)[1] if self.config.use_cache else None dec_hs = tf.convert_to_tensor(output.decoder_hidden_states) if self.config.output_hidden_states else None dec_attns = tf.convert_to_tensor(output.decoder_attentions) if self.config.output_attentions else None enc_hs = tf.convert_to_tensor(output.encoder_hidden_states) if self.config.output_hidden_states else None enc_attns = tf.convert_to_tensor(output.encoder_attentions) if self.config.output_attentions else None return TFSeq2SeqLMOutput( logits=output.logits, past_key_values=pkv, decoder_hidden_states=dec_hs, decoder_attentions=dec_attns, encoder_last_hidden_state=output.encoder_last_hidden_state, encoder_hidden_states=enc_hs, encoder_attentions=enc_attns, ) # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration.prepare_inputs_for_generation def prepare_inputs_for_generation( self, decoder_input_ids, past, attention_mask, head_mask=None, use_cache=None, **kwargs, ) -> Dict: assert 
past is not None and len(past) in {1, 2}, f"past has to be an iterable of length 1,2 got {past}" if len(past) == 1: assert isinstance(past[0], tf.Tensor), f"`past[0]` has to be of type `tf.Tensor`, but is {type(past[0])}" encoder_outputs = TFBaseModelOutput(last_hidden_state=past[0]) past_key_values = None else: assert ( len(past) == 2 ), "`past` has to be of length 2 with the encoder_outputs at the first position and past_key_values at the second position." encoder_outputs, past_key_values = past if isinstance(encoder_outputs, tuple): assert isinstance( encoder_outputs[0], tf.Tensor ), f"`encoder_outputs[0]` has to be of type `tf.Tensor`, but is {type(encoder_outputs[0])}" encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs[0]) elif isinstance(encoder_outputs, tf.Tensor): encoder_outputs = TFBaseModelOutput(last_hidden_state=encoder_outputs) assert ( past_key_values ), f"decoder cached states must be truthy. got {past_key_values} from the 2nd element of past" decoder_input_ids = decoder_input_ids[:, -1:] assert isinstance( encoder_outputs, TFBaseModelOutput ), f"encoder_outputs should be a TFBaseModelOutput, Instead got {type(encoder_outputs)}." return { "input_ids": None, # encoder_outputs is defined. input_ids not needed "encoder_outputs": encoder_outputs, "past_key_values": past_key_values, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "head_mask": head_mask, "use_cache": use_cache, # change this to avoid caching (presumably for debugging) } @staticmethod # Copied from transformers.models.bart.modeling_tf_bart.TFBartForConditionalGeneration._reorder_cache def _reorder_cache(past, beam_idx): if len(past) == 1: return past past_key_values = past[1] reordered_past = () for layer_past_key_values in past_key_values: reordered_past += ( tuple(tf.gather(layer_past_key_value, beam_idx) for layer_past_key_value in layer_past_key_values[:2]) + layer_past_key_values[2:], ) return (past[0], reordered_past)
AdaMix/src/transformers/models/mbart/modeling_tf_mbart.py/0
{ "file_path": "AdaMix/src/transformers/models/mbart/modeling_tf_mbart.py", "repo_id": "AdaMix", "token_count": 30914 }
60
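A minimal usage sketch for the conditional-generation head above. It assumes network access to the facebook/mbart-large-cc25 checkpoint (add `from_pt=True` if only PyTorch weights are published for it); the input sentence and generation settings are illustrative only and are not part of the original file.

```python
from transformers import MBartTokenizer, TFMBartForConditionalGeneration

# Weights are downloaded on first use.
tokenizer = MBartTokenizer.from_pretrained("facebook/mbart-large-cc25")
model = TFMBartForConditionalGeneration.from_pretrained("facebook/mbart-large-cc25")

# Encode a sentence; generate() drives the decoder autoregressively, reusing
# past_key_values through prepare_inputs_for_generation() defined above.
batch = tokenizer("UN Chief Says There Is No Plan to Stop War", return_tensors="tf")
generated = model.generate(batch["input_ids"], attention_mask=batch["attention_mask"], num_beams=4, max_length=32)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```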
# coding=utf-8
# Copyright 2018 The OpenAI Team Authors and HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 OpenAI GPT model."""

from dataclasses import dataclass
from typing import Optional, Tuple

import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...file_utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    replace_return_docstrings,
)
from ...modeling_tf_outputs import TFBaseModelOutput, TFCausalLMOutput, TFSequenceClassifierOutput
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFConv1D,
    TFPreTrainedModel,
    TFSequenceClassificationLoss,
    TFSequenceSummary,
    TFSharedEmbeddings,
    get_initializer,
    input_processing,
    keras_serializable,
    shape_list,
)
from ...utils import logging
from .configuration_openai import OpenAIGPTConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "openai-gpt"
_CONFIG_FOR_DOC = "OpenAIGPTConfig"
_TOKENIZER_FOR_DOC = "OpenAIGPTTokenizer"

TF_OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "openai-gpt",
    # See all OpenAI GPT models at https://huggingface.co/models?filter=openai-gpt
]


class TFAttention(tf.keras.layers.Layer):
    def __init__(self, nx, n_ctx, config, scale=False, **kwargs):
        super().__init__(**kwargs)

        n_state = nx  # in Attention: n_state=768 (nx=n_embd)
        # [switch nx => n_state from Block to Attention to keep identical to TF implem]
        assert (
            n_state % config.n_head == 0
        ), f"Hidden dimension {n_state} not divisible by number of heads {config.n_head}"
        self.n_ctx = n_ctx
        self.n_head = config.n_head
        self.split_size = n_state
        self.scale = scale
        self.output_attentions = config.output_attentions

        self.c_attn = TFConv1D(n_state * 3, nx, initializer_range=config.initializer_range, name="c_attn")
        self.c_proj = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_proj")
        self.attn_dropout = tf.keras.layers.Dropout(config.attn_pdrop)
        self.resid_dropout = tf.keras.layers.Dropout(config.resid_pdrop)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        pass

    @staticmethod
    def causal_attention_mask(nd, ns):
        """
        1's in the lower triangle, counting from the lower right corner. Same as
        tf.matrix_band_part(tf.ones([nd, ns]), -1, ns-nd), but doesn't produce garbage on TPUs.
        """
        i = tf.range(nd)[:, None]
        j = tf.range(ns)
        m = i >= j - ns + nd
        return m

    def _attn(self, q, k, v, attention_mask, head_mask, output_attentions, training=False):
        # q, k, v have shape [batch, heads, sequence, features]
        w = tf.matmul(q, k, transpose_b=True)
        if self.scale:
            dk = tf.cast(shape_list(k)[-1], dtype=w.dtype)  # scale attention_scores
            w = w / tf.math.sqrt(dk)

        # w has shape [batch, heads, dst_sequence, src_sequence], where information flows from src to dst.
_, _, nd, ns = shape_list(w) b = tf.cast(self.causal_attention_mask(nd, ns), dtype=w.dtype) b = tf.reshape(b, [1, 1, nd, ns]) w = w * b - 1e4 * (1 - b) if attention_mask is not None: # Apply the attention mask attention_mask = tf.cast(attention_mask, dtype=w.dtype) w = w + attention_mask w = tf.nn.softmax(w, axis=-1) w = self.attn_dropout(w, training=training) # Mask heads if we want to if head_mask is not None: w = w * head_mask outputs = [tf.matmul(w, v)] if output_attentions: outputs.append(w) return outputs def merge_heads(self, x): x = tf.transpose(x, [0, 2, 1, 3]) x_shape = shape_list(x) new_x_shape = x_shape[:-2] + [x_shape[-2] * x_shape[-1]] return tf.reshape(x, new_x_shape) def split_heads(self, x): x_shape = shape_list(x) new_x_shape = x_shape[:-1] + [self.n_head, x_shape[-1] // self.n_head] x = tf.reshape(x, new_x_shape) return tf.transpose(x, (0, 2, 1, 3)) # (batch, head, seq_length, head_features) def call(self, x, attention_mask, head_mask, output_attentions, training=False): x = self.c_attn(x) query, key, value = tf.split(x, 3, axis=2) query = self.split_heads(query) key = self.split_heads(key) value = self.split_heads(value) attn_outputs = self._attn(query, key, value, attention_mask, head_mask, output_attentions, training=training) a = attn_outputs[0] a = self.merge_heads(a) a = self.c_proj(a) a = self.resid_dropout(a, training=training) outputs = [a] + attn_outputs[1:] return outputs # a, (attentions) class TFMLP(tf.keras.layers.Layer): def __init__(self, n_state, config, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.c_fc = TFConv1D(n_state, nx, initializer_range=config.initializer_range, name="c_fc") self.c_proj = TFConv1D(nx, n_state, initializer_range=config.initializer_range, name="c_proj") self.act = get_tf_activation("gelu") self.dropout = tf.keras.layers.Dropout(config.resid_pdrop) def call(self, x, training=False): h = self.act(self.c_fc(x)) h2 = self.c_proj(h) h2 = self.dropout(h2, training=training) return h2 class TFBlock(tf.keras.layers.Layer): def __init__(self, n_ctx, config, scale=False, **kwargs): super().__init__(**kwargs) nx = config.n_embd self.attn = TFAttention(nx, n_ctx, config, scale, name="attn") self.ln_1 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_1") self.mlp = TFMLP(4 * nx, config, name="mlp") self.ln_2 = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_epsilon, name="ln_2") def call(self, x, attention_mask, head_mask, output_attentions, training=False): output_attn = self.attn(x, attention_mask, head_mask, output_attentions, training=training) a = output_attn[0] # output_attn: a, (attentions) n = self.ln_1(x + a) m = self.mlp(n, training=training) h = self.ln_2(n + m) outputs = [h] + output_attn[1:] return outputs # x, (attentions) @keras_serializable class TFOpenAIGPTMainLayer(tf.keras.layers.Layer): config_class = OpenAIGPTConfig def __init__(self, config, *inputs, **kwargs): super().__init__(*inputs, **kwargs) self.config = config self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.return_dict = config.use_return_dict self.num_hidden_layers = config.n_layer self.vocab_size = config.vocab_size self.n_embd = config.n_embd self.n_positions = config.n_positions self.initializer_range = config.initializer_range self.tokens_embed = TFSharedEmbeddings( config.vocab_size, config.n_embd, initializer_range=config.initializer_range, name="tokens_embed" ) self.drop = tf.keras.layers.Dropout(config.embd_pdrop) self.h = 
[TFBlock(config.n_ctx, config, scale=True, name="h_._{}".format(i)) for i in range(config.n_layer)]

    def build(self, input_shape):
        with tf.name_scope("positions_embed"):
            self.positions_embed = self.add_weight(
                name="embeddings",
                shape=[self.n_positions, self.n_embd],
                initializer=get_initializer(self.initializer_range),
            )

        super().build(input_shape)

    def get_input_embeddings(self):
        return self.tokens_embed

    def set_input_embeddings(self, value):
        self.tokens_embed.weight = value
        self.tokens_embed.vocab_size = shape_list(value)[0]

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        """
        raise NotImplementedError

    def call(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
        **kwargs,
    ):
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )

        if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif inputs["input_ids"] is not None:
            input_shape = shape_list(inputs["input_ids"])
            inputs["input_ids"] = tf.reshape(inputs["input_ids"], [-1, input_shape[-1]])
        elif inputs["inputs_embeds"] is not None:
            input_shape = shape_list(inputs["inputs_embeds"])[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs["position_ids"] is None:
            inputs["position_ids"] = tf.expand_dims(tf.range(input_shape[-1]), axis=0)

        if inputs["attention_mask"] is not None:
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is simpler than the triangular masking of causal attention
            # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
            inputs["attention_mask"] = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and -10000.0 for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
one_cst = tf.constant(1.0)
            inputs["attention_mask"] = tf.cast(inputs["attention_mask"], dtype=one_cst.dtype)
            inputs["attention_mask"] = tf.multiply(
                tf.subtract(one_cst, inputs["attention_mask"]), tf.constant(-10000.0)
            )
        else:
            inputs["attention_mask"] = None

        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        if inputs["head_mask"] is not None:
            raise NotImplementedError
        else:
            inputs["head_mask"] = [None] * self.num_hidden_layers
            # head_mask = tf.constant([0] * self.num_hidden_layers)

        inputs["position_ids"] = tf.reshape(inputs["position_ids"], [-1, shape_list(inputs["position_ids"])[-1]])

        if inputs["inputs_embeds"] is None:
            inputs["inputs_embeds"] = self.tokens_embed(inputs["input_ids"], mode="embedding")
        position_embeds = tf.gather(self.positions_embed, inputs["position_ids"])
        if inputs["token_type_ids"] is not None:
            inputs["token_type_ids"] = tf.reshape(
                inputs["token_type_ids"], [-1, shape_list(inputs["token_type_ids"])[-1]]
            )
            token_type_embeds = self.tokens_embed(inputs["token_type_ids"], mode="embedding")
        else:
            token_type_embeds = 0
        hidden_states = inputs["inputs_embeds"] + position_embeds + token_type_embeds
        hidden_states = self.drop(hidden_states, training=inputs["training"])

        output_shape = input_shape + [shape_list(hidden_states)[-1]]

        all_attentions = () if inputs["output_attentions"] else None
        all_hidden_states = () if inputs["output_hidden_states"] else None
        for i, block in enumerate(self.h):
            if inputs["output_hidden_states"]:
                all_hidden_states = all_hidden_states + (tf.reshape(hidden_states, output_shape),)

            outputs = block(
                hidden_states,
                inputs["attention_mask"],
                inputs["head_mask"][i],
                inputs["output_attentions"],
                training=inputs["training"],
            )
            hidden_states = outputs[0]
            if inputs["output_attentions"]:
                all_attentions = all_attentions + (outputs[1],)

        hidden_states = tf.reshape(hidden_states, output_shape)
        # Add last hidden state
        if inputs["output_hidden_states"]:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if inputs["output_attentions"]:
            # let the number of heads free (-1) so we can extract attention even after head pruning
            attention_output_shape = input_shape[:-1] + [-1] + shape_list(all_attentions[0])[-2:]
            all_attentions = tuple(tf.reshape(t, attention_output_shape) for t in all_attentions)

        if not inputs["return_dict"]:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutput(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_attentions,
        )


class TFOpenAIGPTPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = OpenAIGPTConfig
    base_model_prefix = "transformer"

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
            }
        ]
    )
    def serving(self, inputs):
        output = self.call(inputs)

        return self.serving_output(output)


@dataclass
class TFOpenAIGPTDoubleHeadsModelOutput(ModelOutput):
    """
    Output type of :class:`~transformers.TFOpenAIGPTDoubleHeadsModel`: language modeling logits together with the
    multiple choice classification logits.
Args:
        logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        mc_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`):
            Prediction scores of the multiple choice classification head (scores for each choice before SoftMax).
        hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
            Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
            shape :obj:`(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
            Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    logits: tf.Tensor = None
    mc_logits: tf.Tensor = None
    hidden_states: Optional[Tuple[tf.Tensor]] = None
    attentions: Optional[Tuple[tf.Tensor]] = None


OPENAI_GPT_START_DOCSTRING = r"""

    This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
    generic methods the library implements for all its models (such as downloading or saving, resizing the input
    embeddings, pruning heads etc.)

    This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
    it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
    and behavior.

    .. note::

        TF 2.0 models accept two formats as input:

        - having all inputs as keyword arguments (like PyTorch models), or
        - having all inputs as a list, tuple or dict in the first positional argument.

        This second option is useful when using the :meth:`tf.keras.Model.fit` method which currently requires having
        all the tensors in the first argument of the model call function: :obj:`model(inputs)`.

        If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:

        - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
        - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
          :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
        - a dictionary with one or several input Tensors associated to the input names given in the docstring:
          :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Parameters:
        config (:class:`~transformers.OpenAIGPTConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the
            model weights.
"""

OPENAI_GPT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using :class:`~transformers.OpenAIGPTTokenizer`.
See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ token_type_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ position_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length)`, `optional`): Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0, config.max_position_embeddings - 1]``. `What are position IDs? <../glossary.html#position-ids>`__ head_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). 
""" @add_start_docstrings( "The bare OpenAI GPT transformer model outputting raw hidden-states without any specific head on top.", OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTModel(TFOpenAIGPTPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFBaseModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], position_ids=inputs["position_ids"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) return outputs # Copied from transformers.models.distilbert.modeling_tf_distilbert.TFDistilBertModel.serving_output def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns) @add_start_docstrings( """ OpenAI GPT Model transformer with a language modeling head on top (linear layer with weights tied to the input embeddings). """, OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTLMHeadModel(TFOpenAIGPTPreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") def get_output_embeddings(self): return self.get_input_embeddings() def set_output_embeddings(self, value): self.set_input_embeddings(value) @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFCausalLMOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss. Indices should be in ``[0, ..., config.vocab_size - 1]``. 
""" inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) transformer_outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], position_ids=inputs["position_ids"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) hidden_states = transformer_outputs[0] logits = self.transformer.tokens_embed(hidden_states, mode="linear") loss = None if inputs["labels"] is not None: # shift labels to the left and cut last logit token logits = logits[:, :-1] labels = inputs["labels"][:, 1:] loss = self.compute_loss(labels, logits) if not inputs["return_dict"]: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFCausalLMOutput( loss=loss, logits=logits, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) # Copied from transformers.models.bert.modeling_tf_bert.TFBertLMHeadModel.serving_output def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput: hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns) @add_start_docstrings( """ OpenAI GPT Model transformer with a language modeling and a multiple-choice classification head on top e.g. for RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the input embeddings, the classification head takes as input the input of a specified classification token index in the input sequence). """, OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTDoubleHeadsModel(TFOpenAIGPTPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) config.num_labels = 1 self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") self.multiple_choice_head = TFSequenceSummary( config, initializer_range=config.initializer_range, name="multiple_choice_head" ) @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @replace_return_docstrings(output_type=TFOpenAIGPTDoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, mc_token_ids=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): r""" mc_token_ids (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_choices)`, `optional`, default to index of the last token of the input): Index of the classification token in each input sequence. Selected in the range ``[0, input_ids.size(-1) - 1]``. 
Returns:

        Examples::

            >>> import tensorflow as tf
            >>> from transformers import OpenAIGPTTokenizer, TFOpenAIGPTDoubleHeadsModel

            >>> tokenizer = OpenAIGPTTokenizer.from_pretrained('openai-gpt')
            >>> model = TFOpenAIGPTDoubleHeadsModel.from_pretrained('openai-gpt')

            >>> # Add a [CLS] to the vocabulary (we should train it also!)
            >>> tokenizer.add_special_tokens({'cls_token': '[CLS]'})
            >>> model.resize_token_embeddings(len(tokenizer))  # Update the model embeddings with the new vocabulary size
            >>> print(tokenizer.cls_token_id, len(tokenizer))  # The new token is the last token of the vocabulary

            >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"]
            >>> encoding = tokenizer(choices, return_tensors="tf")
            >>> inputs = {k: tf.expand_dims(v, 0) for k, v in encoding.items()}
            >>> inputs["mc_token_ids"] = tf.constant([inputs["input_ids"].shape[-1] - 1, inputs["input_ids"].shape[-1] - 1])[None, :]  # Batch size 1
            >>> outputs = model(inputs)
            >>> lm_prediction_scores, mc_prediction_scores = outputs[:2]
        """
        inputs = input_processing(
            func=self.call,
            config=self.config,
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            mc_token_ids=mc_token_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
            kwargs_call=kwargs,
        )

        if inputs["input_ids"] is not None:
            input_shapes = shape_list(inputs["input_ids"])
        else:
            input_shapes = shape_list(inputs["inputs_embeds"])[:-1]

        seq_length = input_shapes[-1]
        flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
        flat_attention_mask = (
            tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
        )
        flat_token_type_ids = (
            tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
        )
        flat_position_ids = (
            tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
        )
        transformer_outputs = self.transformer(
            flat_input_ids,
            flat_attention_mask,
            flat_token_type_ids,
            flat_position_ids,
            inputs["head_mask"],
            inputs["inputs_embeds"],
            inputs["output_attentions"],
            inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
            training=inputs["training"],
        )
        hidden_states = transformer_outputs[0]
        hidden_states = tf.reshape(hidden_states, input_shapes + shape_list(hidden_states)[-1:])
        lm_logits = self.transformer.tokens_embed(hidden_states, mode="linear")
        mc_logits = self.multiple_choice_head(hidden_states, inputs["mc_token_ids"], training=inputs["training"])
        mc_logits = tf.squeeze(mc_logits, axis=-1)

        if not inputs["return_dict"]:
            return (lm_logits, mc_logits) + transformer_outputs[1:]

        return TFOpenAIGPTDoubleHeadsModelOutput(
            logits=lm_logits,
            mc_logits=mc_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    @tf.function(
        input_signature=[
            {
                "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
                "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
                "mc_token_ids": tf.TensorSpec((None, None), tf.int32, name="mc_token_ids"),
            }
        ]
    )
    def serving(self, inputs):
        output = self.call(inputs)

        return self.serving_output(output)

    def serving_output(self, output):
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if
self.config.output_attentions else None return TFOpenAIGPTDoubleHeadsModelOutput( logits=output.logits, mc_logits=output.mc_logits, hidden_states=hs, attentions=attns ) @add_start_docstrings( """ The OpenAI GPT Model transformer with a sequence classification head on top (linear layer). :class:`~transformers.TFOpenAIGPTForSequenceClassification` uses the last token in order to do the classification, as other causal models (e.g. GPT-2) do. Since it does classification on the last token, it requires to know the position of the last token. If a :obj:`pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If no :obj:`pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the padding tokens when :obj:`inputs_embeds` are passed instead of :obj:`input_ids`, it does the same (take the last value in each row of the batch). """, OPENAI_GPT_START_DOCSTRING, ) class TFOpenAIGPTForSequenceClassification(TFOpenAIGPTPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.score = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="score", use_bias=False, ) self.transformer = TFOpenAIGPTMainLayer(config, name="transformer") @add_start_docstrings_to_model_forward(OPENAI_GPT_INPUTS_DOCSTRING) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFSequenceClassifierOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, token_type_ids=None, position_ids=None, head_mask=None, inputs_embeds=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss. Indices should be in ``[0, ..., config.vocab_size - 1]``. """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, token_type_ids=token_type_ids, position_ids=position_ids, head_mask=head_mask, inputs_embeds=inputs_embeds, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) transformer_outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], token_type_ids=inputs["token_type_ids"], position_ids=inputs["position_ids"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) hidden_states = transformer_outputs[0] logits = self.score(hidden_states) in_logits = None if self.config.pad_token_id is None: sequence_lengths = -1 else: if inputs["input_ids"] is not None: sequence_lengths = ( tf.reduce_sum( tf.cast( tf.math.not_equal(inputs["input_ids"], self.config.pad_token_id), dtype=inputs["input_ids"].dtype, ), -1, keepdims=False, ) - 1 ) in_logits = tf.gather(logits, sequence_lengths, batch_dims=1, axis=1) else: sequence_lengths = -1 logger.warning( f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. 
Results may be "
                    f"unexpected if using padding tokens in conjunction with `inputs_embeds`."
                )

        loss = None

        if inputs["labels"] is not None:
            if inputs["input_ids"] is not None:
                batch_size, sequence_length = shape_list(inputs["input_ids"])[:2]
            else:
                batch_size, sequence_length = shape_list(inputs["inputs_embeds"])[:2]
            assert (
                self.config.pad_token_id is not None or batch_size == 1
            ), "Cannot handle batch sizes > 1 if no padding token is defined."

            if not tf.is_tensor(sequence_lengths):
                in_logits = logits[0:batch_size, sequence_lengths]

            loss = self.compute_loss(
                tf.reshape(inputs["labels"], [-1, 1]), tf.reshape(in_logits, [-1, self.num_labels])
            )

        pooled_logits = in_logits if in_logits is not None else logits

        if not inputs["return_dict"]:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=pooled_logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
    def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
        hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
        attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None

        return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
AdaMix/src/transformers/models/openai/modeling_tf_openai.py/0
{ "file_path": "AdaMix/src/transformers/models/openai/modeling_tf_openai.py", "repo_id": "AdaMix", "token_count": 18349 }
61
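A short sketch of the LM head above used for next-token prediction, assuming the openai-gpt checkpoint is reachable. The greedy argmax over the final-position logits is just for illustration; model.generate() would normally handle decoding.

```python
import tensorflow as tf

from transformers import OpenAIGPTTokenizer, TFOpenAIGPTLMHeadModel

tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt")
model = TFOpenAIGPTLMHeadModel.from_pretrained("openai-gpt")

inputs = tokenizer("hello, my dog is", return_tensors="tf")
outputs = model(inputs)  # dict input is unpacked by input_processing()

# logits: (batch, sequence, vocab); the last position scores the next token.
next_id = int(tf.argmax(outputs.logits[0, -1, :]))
print(tokenizer.decode([next_id]))
```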
# coding=utf-8 # Copyright 2020 The Trax Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tokenization class for model Reformer.""" import os from shutil import copyfile from typing import Dict, Optional, Tuple import sentencepiece as spm from ...tokenization_utils import PreTrainedTokenizer from ...utils import logging logger = logging.get_logger(__name__) SPIECE_UNDERLINE = "▁" VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"} PRETRAINED_VOCAB_FILES_MAP = { "vocab_file": { "google/reformer-crime-and-punishment": "https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model" } } PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = { "google/reformer-crime-and-punishment": 524288, } class ReformerTokenizer(PreTrainedTokenizer): """ Construct a Reformer tokenizer. Based on `SentencePiece <https://github.com/google/sentencepiece>`__ . This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods. Users should refer to this superclass for more information regarding those methods. Args: vocab_file (:obj:`str`): `SentencePiece <https://github.com/google/sentencepiece>`__ file (generally has a `.spm` extension) that contains the vocabulary necessary to instantiate a tokenizer. eos_token (:obj:`str`, `optional`, defaults to :obj:`"</s>"`): The end of sequence token. .. note:: When building a sequence using special tokens, this is not the token that is used for the end of sequence. The token used is the :obj:`sep_token`. unk_token (:obj:`str`, `optional`, defaults to :obj:`"<unk>"`): The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this token instead. pad_token (:obj:`str`, `optional`, defaults to :obj:`"<pad>"`): The token used for padding, for example when batching sequences of different lengths. additional_special_tokens (:obj:`List[str]`, `optional`): Additional special tokens used by the tokenizer. 
""" vocab_files_names = VOCAB_FILES_NAMES pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES model_input_names = ["input_ids", "attention_mask"] def __init__(self, vocab_file, eos_token="</s>", unk_token="<unk>", additional_special_tokens=[], **kwargs): super().__init__( eos_token=eos_token, unk_token=unk_token, additional_special_tokens=additional_special_tokens, **kwargs, ) self.vocab_file = vocab_file self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(vocab_file) @property def vocab_size(self): return self.sp_model.get_piece_size() def get_vocab(self) -> Dict[str, int]: vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)} vocab.update(self.added_tokens_encoder) return vocab def __getstate__(self): state = self.__dict__.copy() state["sp_model"] = None return state def __setstate__(self, d): self.__dict__ = d self.sp_model = spm.SentencePieceProcessor() self.sp_model.Load(self.vocab_file) def _tokenize(self, text, sample=False): """Take as input a string and return a list of strings (tokens) for words/sub-words""" if not sample: pieces = self.sp_model.EncodeAsPieces(text) else: pieces = self.sp_model.SampleEncodeAsPieces(text, 64, 0.1) return pieces def _convert_token_to_id(self, token): """ Converts a token (str) in an id using the vocab. """ return self.sp_model.piece_to_id(token) def _convert_id_to_token(self, index): """Converts an index (integer) in a token (str) using the vocab.""" if index < self.sp_model.get_piece_size(): token = self.sp_model.IdToPiece(index) return token def convert_tokens_to_string(self, tokens): """ Converts a sequence of tokens (string) in a single string. """ out_string = self.sp_model.decode_pieces(tokens) return out_string def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]: if not os.path.isdir(save_directory): logger.error("Vocabulary path ({}) should be a directory".format(save_directory)) return out_vocab_file = os.path.join( save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] ) if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file): copyfile(self.vocab_file, out_vocab_file) return (out_vocab_file,)
AdaMix/src/transformers/models/reformer/tokenization_reformer.py/0
{ "file_path": "AdaMix/src/transformers/models/reformer/tokenization_reformer.py", "repo_id": "AdaMix", "token_count": 2205 }
62
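A round-trip sketch for the SentencePiece-backed tokenizer above, assuming the google/reformer-crime-and-punishment checkpoint. convert_tokens_to_string delegates to sp_model.decode_pieces, so ordinary in-vocabulary text should survive the trip unchanged.

```python
from transformers import ReformerTokenizer

tokenizer = ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")

text = "He was a man of about thirty."
pieces = tokenizer.tokenize(text)              # SentencePiece pieces, e.g. '▁He', '▁was', ...
ids = tokenizer.convert_tokens_to_ids(pieces)  # maps through sp_model.piece_to_id
print(pieces)
print(ids)
print(tokenizer.convert_tokens_to_string(pieces))  # reconstructs the original text
```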
# coding=utf-8 # Copyright 2020 The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Convert TAPAS checkpoint.""" import argparse from transformers import ( TapasConfig, TapasForMaskedLM, TapasForQuestionAnswering, TapasForSequenceClassification, TapasModel, TapasTokenizer, load_tf_weights_in_tapas, ) from transformers.utils import logging logging.set_verbosity_info() def convert_tf_checkpoint_to_pytorch( task, reset_position_index_per_cell, tf_checkpoint_path, tapas_config_file, pytorch_dump_path ): # Initialise PyTorch model. # If you want to convert a checkpoint that uses absolute position embeddings, make sure to set reset_position_index_per_cell of # TapasConfig to False. # initialize configuration from json file config = TapasConfig.from_json_file(tapas_config_file) # set absolute/relative position embeddings parameter config.reset_position_index_per_cell = reset_position_index_per_cell # set remaining parameters of TapasConfig as well as the model based on the task if task == "SQA": model = TapasForQuestionAnswering(config=config) elif task == "WTQ": # run_task_main.py hparams config.num_aggregation_labels = 4 config.use_answer_as_supervision = True # hparam_utils.py hparams config.answer_loss_cutoff = 0.664694 config.cell_selection_preference = 0.207951 config.huber_loss_delta = 0.121194 config.init_cell_selection_weights_to_zero = True config.select_one_column = True config.allow_empty_column_selection = False config.temperature = 0.0352513 model = TapasForQuestionAnswering(config=config) elif task == "WIKISQL_SUPERVISED": # run_task_main.py hparams config.num_aggregation_labels = 4 config.use_answer_as_supervision = False # hparam_utils.py hparams config.answer_loss_cutoff = 36.4519 config.cell_selection_preference = 0.903421 config.huber_loss_delta = 222.088 config.init_cell_selection_weights_to_zero = True config.select_one_column = True config.allow_empty_column_selection = True config.temperature = 0.763141 model = TapasForQuestionAnswering(config=config) elif task == "TABFACT": model = TapasForSequenceClassification(config=config) elif task == "MLM": model = TapasForMaskedLM(config=config) elif task == "INTERMEDIATE_PRETRAINING": model = TapasModel(config=config) print("Building PyTorch model from configuration: {}".format(str(config))) # Load weights from tf checkpoint load_tf_weights_in_tapas(model, config, tf_checkpoint_path) # Save pytorch-model (weights and configuration) print("Save PyTorch model to {}".format(pytorch_dump_path)) model.save_pretrained(pytorch_dump_path[:-17]) # Save tokenizer files dir_name = r"C:\Users\niels.rogge\Documents\Python projecten\tensorflow\Tensorflow models\SQA\Base\tapas_sqa_inter_masklm_base_reset" tokenizer = TapasTokenizer(vocab_file=dir_name + r"\vocab.txt", model_max_length=512) print("Save tokenizer files to {}".format(pytorch_dump_path)) tokenizer.save_pretrained(pytorch_dump_path[:-17]) print("Used relative position embeddings:", model.config.reset_position_index_per_cell) if __name__ == "__main__": parser = 
argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--task", default="SQA", type=str, help="Model task for which to convert a checkpoint. Defaults to SQA."
    )
    parser.add_argument(
        "--reset_position_index_per_cell",
        default=False,
        action="store_true",
        help="Whether to use relative position embeddings or not. Defaults to False.",
    )
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint."
    )
    parser.add_argument(
        "--tapas_config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained TAPAS model. \n"
        "This specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(
        args.task,
        args.reset_position_index_per_cell,
        args.tf_checkpoint_path,
        args.tapas_config_file,
        args.pytorch_dump_path,
    )
AdaMix/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py/0
{ "file_path": "AdaMix/src/transformers/models/tapas/convert_tapas_original_tf_checkpoint_to_pytorch.py", "repo_id": "AdaMix", "token_count": 1942 }
63
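The script above is meant to be run via argparse, but the conversion function can also be called directly; every path below is a placeholder you must replace. Two quirks of the script to be aware of: pytorch_dump_path must end with the 17-character name "pytorch_model.bin" (the [:-17] slice strips it to recover the output directory), and the tokenizer step reads from a hardcoded local directory, so it will fail outside the original author's machine unless that block is adapted.

```python
from transformers.models.tapas.convert_tapas_original_tf_checkpoint_to_pytorch import (
    convert_tf_checkpoint_to_pytorch,
)

# Placeholder paths - substitute a real TF TAPAS checkpoint and its JSON config.
convert_tf_checkpoint_to_pytorch(
    task="WTQ",                          # picks the WTQ hyperparameter set defined above
    reset_position_index_per_cell=True,  # use relative position embeddings
    tf_checkpoint_path="/path/to/tapas_wtq/model.ckpt",
    tapas_config_file="/path/to/tapas_wtq/tapas_config.json",
    pytorch_dump_path="/path/to/output/pytorch_model.bin",  # [:-17] strips "pytorch_model.bin"
)
```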
# coding=utf-8
# Copyright 2019-present, Facebook, Inc and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" XLM configuration """

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

XLM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-mlm-en-2048": "https://huggingface.co/xlm-mlm-en-2048/resolve/main/config.json",
    "xlm-mlm-ende-1024": "https://huggingface.co/xlm-mlm-ende-1024/resolve/main/config.json",
    "xlm-mlm-enfr-1024": "https://huggingface.co/xlm-mlm-enfr-1024/resolve/main/config.json",
    "xlm-mlm-enro-1024": "https://huggingface.co/xlm-mlm-enro-1024/resolve/main/config.json",
    "xlm-mlm-tlm-xnli15-1024": "https://huggingface.co/xlm-mlm-tlm-xnli15-1024/resolve/main/config.json",
    "xlm-mlm-xnli15-1024": "https://huggingface.co/xlm-mlm-xnli15-1024/resolve/main/config.json",
    "xlm-clm-enfr-1024": "https://huggingface.co/xlm-clm-enfr-1024/resolve/main/config.json",
    "xlm-clm-ende-1024": "https://huggingface.co/xlm-clm-ende-1024/resolve/main/config.json",
    "xlm-mlm-17-1280": "https://huggingface.co/xlm-mlm-17-1280/resolve/main/config.json",
    "xlm-mlm-100-1280": "https://huggingface.co/xlm-mlm-100-1280/resolve/main/config.json",
}


class XLMConfig(PretrainedConfig):
    """
    This is the configuration class to store the configuration of a :class:`~transformers.XLMModel` or a
    :class:`~transformers.TFXLMModel`. It is used to instantiate an XLM model according to the specified arguments,
    defining the model architecture. Instantiating a configuration with the defaults will yield a similar
    configuration to that of the `xlm-mlm-en-2048 <https://huggingface.co/xlm-mlm-en-2048>`__ architecture.

    Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used to control the model
    outputs. Read the documentation from :class:`~transformers.PretrainedConfig` for more information.

    Args:
        vocab_size (:obj:`int`, `optional`, defaults to 30145):
            Vocabulary size of the XLM model. Defines the number of different tokens that can be represented by the
            :obj:`inputs_ids` passed when calling :class:`~transformers.XLMModel` or :class:`~transformers.TFXLMModel`.
        emb_dim (:obj:`int`, `optional`, defaults to 2048):
            Dimensionality of the encoder layers and the pooler layer.
        n_layers (:obj:`int`, `optional`, defaults to 12):
            Number of hidden layers in the Transformer encoder.
        n_heads (:obj:`int`, `optional`, defaults to 16):
            Number of attention heads for each attention layer in the Transformer encoder.
        dropout (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
        attention_dropout (:obj:`float`, `optional`, defaults to 0.1):
            The dropout probability for the attention mechanism.
        gelu_activation (:obj:`bool`, `optional`, defaults to :obj:`True`):
            Whether or not to use `gelu` for the activations instead of `relu`.
sinusoidal_embeddings (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use sinusoidal positional embeddings instead of absolute positional embeddings. causal (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not the model should behave in a causal manner. Causal models use a triangular attention mask in order to only attend to the left-side context instead of a bidirectional context. asm (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use an adaptive log softmax projection layer instead of a linear layer for the prediction layer. n_langs (:obj:`int`, `optional`, defaults to 1): The number of languages the model handles. Set to 1 for monolingual models. use_lang_emb (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether to use language embeddings. Some models use additional language embeddings, see `the multilingual models page <http://huggingface.co/transformers/multilingual.html#xlm-language-embeddings>`__ for information on how to use them. max_position_embeddings (:obj:`int`, `optional`, defaults to 512): The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). embed_init_std (:obj:`float`, `optional`, defaults to 2048^-0.5): The standard deviation of the truncated_normal_initializer for initializing the embedding matrices. init_std (:obj:`float`, `optional`, defaults to 0.02): The standard deviation of the truncated_normal_initializer for initializing all weight matrices except the embedding matrices. layer_norm_eps (:obj:`float`, `optional`, defaults to 1e-12): The epsilon used by the layer normalization layers. bos_index (:obj:`int`, `optional`, defaults to 0): The index of the beginning of sentence token in the vocabulary. eos_index (:obj:`int`, `optional`, defaults to 1): The index of the end of sentence token in the vocabulary. pad_index (:obj:`int`, `optional`, defaults to 2): The index of the padding token in the vocabulary. unk_index (:obj:`int`, `optional`, defaults to 3): The index of the unknown token in the vocabulary. mask_index (:obj:`int`, `optional`, defaults to 5): The index of the masking token in the vocabulary. is_encoder (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the initialized model should be a transformer encoder or decoder as seen in Vaswani et al. summary_type (:obj:`string`, `optional`, defaults to "first"): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Has to be one of the following options: - :obj:`"last"`: Take the last token hidden state (like XLNet). - :obj:`"first"`: Take the first token hidden state (like BERT). - :obj:`"mean"`: Take the mean of all tokens hidden states. - :obj:`"cls_index"`: Supply a Tensor of classification token position (like GPT/GPT-2). - :obj:`"attn"`: Not implemented now, use multi-head attention. summary_use_proj (:obj:`bool`, `optional`, defaults to :obj:`True`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Whether or not to add a projection after the vector extraction. summary_activation (:obj:`str`, `optional`): Argument used when doing sequence summary. Used in the sequence classification and multiple choice models. Pass :obj:`"tanh"` for a tanh activation to the output, any other value will result in no activation. 
summary_proj_to_labels (:obj:`bool`, `optional`, defaults to :obj:`True`): Used in the sequence classification and multiple choice models. Whether the projection outputs should have :obj:`config.num_labels` or :obj:`config.hidden_size` classes. summary_first_dropout (:obj:`float`, `optional`, defaults to 0.1): Used in the sequence classification and multiple choice models. The dropout ratio to be used after the projection and activation. start_n_top (:obj:`int`, `optional`, defaults to 5): Used in the SQuAD evaluation script. end_n_top (:obj:`int`, `optional`, defaults to 5): Used in the SQuAD evaluation script. mask_token_id (:obj:`int`, `optional`, defaults to 0): Model agnostic parameter to identify masked tokens when generating text in an MLM context. lang_id (:obj:`int`, `optional`, defaults to 0): The ID of the language used by the model. This parameter is used when generating text in a given language. Examples:: >>> from transformers import XLMConfig, XLMModel >>> # Initializing an XLM configuration >>> configuration = XLMConfig() >>> # Initializing a model from the configuration >>> model = XLMModel(configuration) >>> # Accessing the model configuration >>> configuration = model.config """ model_type = "xlm" def __init__( self, vocab_size=30145, emb_dim=2048, n_layers=12, n_heads=16, dropout=0.1, attention_dropout=0.1, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=1, use_lang_emb=True, max_position_embeddings=512, embed_init_std=2048 ** -0.5, layer_norm_eps=1e-12, init_std=0.02, bos_index=0, eos_index=1, pad_index=2, unk_index=3, mask_index=5, is_encoder=True, summary_type="first", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, start_n_top=5, end_n_top=5, mask_token_id=0, lang_id=0, pad_token_id=2, bos_token_id=0, **kwargs ): """Constructs XLMConfig.""" super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, **kwargs) self.vocab_size = vocab_size self.emb_dim = emb_dim self.n_layers = n_layers self.n_heads = n_heads self.dropout = dropout self.attention_dropout = attention_dropout self.gelu_activation = gelu_activation self.sinusoidal_embeddings = sinusoidal_embeddings self.causal = causal self.asm = asm self.n_langs = n_langs self.use_lang_emb = use_lang_emb self.layer_norm_eps = layer_norm_eps self.bos_index = bos_index self.eos_index = eos_index self.pad_index = pad_index self.unk_index = unk_index self.mask_index = mask_index self.is_encoder = is_encoder self.max_position_embeddings = max_position_embeddings self.embed_init_std = embed_init_std self.init_std = init_std self.summary_type = summary_type self.summary_use_proj = summary_use_proj self.summary_activation = summary_activation self.summary_proj_to_labels = summary_proj_to_labels self.summary_first_dropout = summary_first_dropout self.start_n_top = start_n_top self.end_n_top = end_n_top self.mask_token_id = mask_token_id self.lang_id = lang_id if "n_words" in kwargs: self.n_words = kwargs["n_words"] @property def n_words(self): # For backward compatibility return self.vocab_size @n_words.setter def n_words(self, value): # For backward compatibility self.vocab_size = value @property def hidden_size(self): return self.emb_dim @property def num_attention_heads(self): return self.n_heads @property def num_hidden_layers(self): return self.n_layers
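# A minimal usage sketch (illustrative only, using nothing beyond this
# module's own definitions): it exercises the backward-compatibility aliases
# declared above, where `hidden_size`, `num_attention_heads`,
# `num_hidden_layers` and the legacy `n_words` property all proxy the
# XLM-native attribute names.
if __name__ == "__main__":
    config = XLMConfig(emb_dim=1024, n_layers=6, n_heads=8)
    assert config.hidden_size == 1024           # alias for emb_dim
    assert config.num_hidden_layers == 6        # alias for n_layers
    assert config.num_attention_heads == 8      # alias for n_heads
    assert config.n_words == config.vocab_size  # legacy alias for vocab_size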
AdaMix/src/transformers/models/xlm/configuration_xlm.py/0
{ "file_path": "AdaMix/src/transformers/models/xlm/configuration_xlm.py", "repo_id": "AdaMix", "token_count": 4782 }
64
# coding=utf-8 # Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team. # Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TF 2.0 XLNet model. """ import warnings from dataclasses import dataclass from typing import List, Optional, Tuple import tensorflow as tf from ...activations_tf import get_tf_activation from ...file_utils import ( MULTIPLE_CHOICE_DUMMY_INPUTS, ModelOutput, add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings, ) from ...modeling_tf_utils import ( TFCausalLanguageModelingLoss, TFMultipleChoiceLoss, TFPreTrainedModel, TFQuestionAnsweringLoss, TFSequenceClassificationLoss, TFSequenceSummary, TFSharedEmbeddings, TFTokenClassificationLoss, get_initializer, input_processing, keras_serializable, shape_list, ) from ...utils import logging from .configuration_xlnet import XLNetConfig logger = logging.get_logger(__name__) _CHECKPOINT_FOR_DOC = "xlnet-base-cased" _CONFIG_FOR_DOC = "XLNetConfig" _TOKENIZER_FOR_DOC = "XLNetTokenizer" TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST = [ "xlnet-base-cased", "xlnet-large-cased", # See all XLNet models at https://huggingface.co/models?filter=xlnet ] class TFXLNetRelativeAttention(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) if config.d_model % config.n_head != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.d_model, config.n_head) ) self.n_head = config.n_head self.d_head = config.d_head self.d_model = config.d_model self.scale = 1 / (config.d_head ** 0.5) self.initializer_range = config.initializer_range self.output_attentions = config.output_attentions self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.dropout = tf.keras.layers.Dropout(config.dropout) def build(self, input_shape): initializer = get_initializer(self.initializer_range) self.q = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="q" ) self.k = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="k" ) self.v = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="v" ) self.o = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="o" ) self.r = self.add_weight( shape=(self.d_model, self.n_head, self.d_head), initializer=initializer, trainable=True, name="r" ) self.r_r_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_r_bias" ) self.r_s_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_s_bias" ) self.r_w_bias = self.add_weight( shape=(self.n_head, self.d_head), initializer="zeros", trainable=True, name="r_w_bias" ) 
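        # Note on the weights above: r_w_bias and r_r_bias are the learned content
        # and position biases of Transformer-XL style relative attention (the "u"
        # and "v" vectors of the Transformer-XL paper), r_s_bias is their segment
        # analogue, and seg_embed (created next) holds the two relative-segment
        # embeddings used for the segment-based attention score.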
self.seg_embed = self.add_weight( shape=(2, self.n_head, self.d_head), initializer=initializer, trainable=True, name="seg_embed" ) super().build(input_shape) def prune_heads(self, heads): raise NotImplementedError def rel_shift(self, x, klen=-1): """perform relative shift to form the relative attention score.""" x_size = shape_list(x) x = tf.reshape(x, (x_size[1], x_size[0], x_size[2], x_size[3])) x = x[1:, ...] x = tf.reshape(x, (x_size[0], x_size[1] - 1, x_size[2], x_size[3])) x = x[:, 0:klen, :, :] # x = torch.index_select(x, 1, torch.arange(klen, device=x.device, dtype=torch.long)) return x def rel_attn_core( self, q_head, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask, head_mask, output_attentions, training=False ): """Core relative positional attention operations.""" # content based attention score ac = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_w_bias, k_head_h) # position based attention score bd = tf.einsum("ibnd,jbnd->ijbn", q_head + self.r_r_bias, k_head_r) bd = self.rel_shift(bd, klen=shape_list(ac)[1]) # segment based attention score if seg_mat is None: ef = 0 else: ef = tf.einsum("ibnd,snd->ibns", q_head + self.r_s_bias, self.seg_embed) ef = tf.einsum("ijbs,ibns->ijbn", seg_mat, ef) # merge attention scores and perform masking attn_score = (ac + bd + ef) * self.scale if attn_mask is not None: # attn_score = attn_score * (1 - attn_mask) - 1e30 * attn_mask if attn_mask.dtype == tf.float16 or attn_mask.dtype == tf.bfloat16: attn_score = attn_score - 65500 * attn_mask else: attn_score = attn_score - 1e30 * attn_mask # attention probability attn_prob = tf.nn.softmax(attn_score, axis=1) attn_prob = self.dropout(attn_prob, training=training) # Mask heads if we want to if head_mask is not None: attn_prob = attn_prob * head_mask # attention output attn_vec = tf.einsum("ijbn,jbnd->ibnd", attn_prob, v_head_h) if output_attentions: return attn_vec, attn_prob return attn_vec def post_attention(self, h, attn_vec, residual=True, training=False): """Post-attention processing.""" # post-attention projection (back to `d_model`) attn_out = tf.einsum("ibnd,hnd->ibh", attn_vec, self.o) attn_out = self.dropout(attn_out, training=training) if residual: attn_out = attn_out + h output = self.layer_norm(attn_out) return output def call( self, h, g, attn_mask_h, attn_mask_g, r, seg_mat, mems, target_mapping, head_mask, output_attentions, training=False, ): if g is not None: # Two-stream attention with relative positional encoding. 
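            # In XLNet's two-stream attention the content stream (h) encodes both
            # the context and the token itself, while the query stream (g) sees the
            # token's position but not its content, so a permutation-LM target can
            # be predicted without leaking its own identity.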
# content based attention score if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h # content-based key head k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k) # content-based value head v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v) # position-based key head k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r) # h-stream # content-stream query head q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q) # core attention ops attn_vec_h = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_h, attn_prob_h = attn_vec_h # post processing output_h = self.post_attention(h, attn_vec_h, training=training) # g-stream # query-stream query head q_head_g = tf.einsum("ibh,hnd->ibnd", g, self.q) # core attention ops if target_mapping is not None: q_head_g = tf.einsum("mbnd,mlb->lbnd", q_head_g, target_mapping) attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g attn_vec_g = tf.einsum("lbnd,mlb->mbnd", attn_vec_g, target_mapping) else: attn_vec_g = self.rel_attn_core( q_head_g, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_g, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec_g, attn_prob_g = attn_vec_g # post processing output_g = self.post_attention(g, attn_vec_g, training=training) if output_attentions: attn_prob = attn_prob_h, attn_prob_g else: # Multi-head attention with relative positional encoding if mems is not None and len(shape_list(mems)) > 1: cat = tf.concat([mems, h], axis=0) else: cat = h # content heads q_head_h = tf.einsum("ibh,hnd->ibnd", h, self.q) k_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.k) v_head_h = tf.einsum("ibh,hnd->ibnd", cat, self.v) # positional heads k_head_r = tf.einsum("ibh,hnd->ibnd", r, self.r) # core attention ops attn_vec = self.rel_attn_core( q_head_h, k_head_h, v_head_h, k_head_r, seg_mat, attn_mask_h, head_mask, output_attentions, training=training, ) if output_attentions: attn_vec, attn_prob = attn_vec # post processing output_h = self.post_attention(h, attn_vec, training=training) output_g = None outputs = (output_h, output_g) if output_attentions: outputs = outputs + (attn_prob,) return outputs class TFXLNetFeedForward(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.layer_norm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm") self.layer_1 = tf.keras.layers.Dense( config.d_inner, kernel_initializer=get_initializer(config.initializer_range), name="layer_1" ) self.layer_2 = tf.keras.layers.Dense( config.d_model, kernel_initializer=get_initializer(config.initializer_range), name="layer_2" ) self.dropout = tf.keras.layers.Dropout(config.dropout) if isinstance(config.ff_activation, str): self.activation_function = get_tf_activation(config.ff_activation) else: self.activation_function = config.ff_activation def call(self, inp, training=False): output = inp output = self.layer_1(output) output = self.activation_function(output) output = self.dropout(output, training=training) output = self.layer_2(output) output = self.dropout(output, training=training) output = self.layer_norm(output + inp) return output class TFXLNetLayer(tf.keras.layers.Layer): def __init__(self, config, **kwargs): super().__init__(**kwargs) self.rel_attn = 
TFXLNetRelativeAttention(config, name="rel_attn") self.ff = TFXLNetFeedForward(config, name="ff") self.dropout = tf.keras.layers.Dropout(config.dropout) def call( self, output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, mems, target_mapping, head_mask, output_attentions, training=False, ): outputs = self.rel_attn( output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, mems, target_mapping, head_mask, output_attentions, training=training, ) output_h, output_g = outputs[:2] if output_g is not None: output_g = self.ff(output_g, training=training) output_h = self.ff(output_h, training=training) outputs = (output_h, output_g) + outputs[2:] # Add the attentions again if there are any return outputs class TFXLNetLMHead(tf.keras.layers.Layer): def __init__(self, config, input_embeddings, **kwargs): super().__init__(**kwargs) self.vocab_size = config.vocab_size # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. self.input_embeddings = input_embeddings def build(self, input_shape): self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias") super().build(input_shape) def get_output_embeddings(self): return self.input_embeddings def set_output_embeddings(self, value): self.input_embeddings.weight = value self.input_embeddings.vocab_size = shape_list(value)[0] def get_bias(self): return {"bias": self.bias} def set_bias(self, value): self.bias = value["bias"] self.vocab_size = shape_list(value["bias"])[0] def call(self, hidden_states): hidden_states = self.input_embeddings(hidden_states, mode="linear") hidden_states = hidden_states + self.bias return hidden_states @keras_serializable class TFXLNetMainLayer(tf.keras.layers.Layer): config_class = XLNetConfig def __init__(self, config, **kwargs): super().__init__(**kwargs) self.config = config self.output_hidden_states = config.output_hidden_states self.output_attentions = config.output_attentions self.return_dict = config.return_dict self.mem_len = config.mem_len self.reuse_len = config.reuse_len self.d_model = config.d_model self.same_length = config.same_length self.attn_type = config.attn_type self.bi_data = config.bi_data self.clamp_len = config.clamp_len self.n_layer = config.n_layer self.use_bfloat16 = config.use_bfloat16 self.initializer_range = config.initializer_range self.word_embedding = TFSharedEmbeddings( config.vocab_size, config.d_model, initializer_range=config.initializer_range, name="word_embedding" ) self.layer = [TFXLNetLayer(config, name="layer_._{}".format(i)) for i in range(config.n_layer)] self.dropout = tf.keras.layers.Dropout(config.dropout) self.use_mems_eval = config.use_mems_eval self.use_mems_train = config.use_mems_train def get_input_embeddings(self): return self.word_embedding def set_input_embeddings(self, value): self.word_embedding.weight = value self.word_embedding.vocab_size = shape_list(value)[0] def build(self, input_shape): initializer = get_initializer(self.initializer_range) self.mask_emb = self.add_weight( shape=(1, 1, self.d_model), initializer=initializer, trainable=True, name="mask_emb" ) def _prune_heads(self, heads_to_prune): raise NotImplementedError def create_mask(self, qlen, mlen): """ Creates causal attention mask. Float mask where 1.0 indicates masked, 0.0 indicates not-masked. 
Args: qlen: Length of the current query segment (the number of tokens being processed). mlen: Length of the cached memory (the number of tokens carried over from previous segments). :: same_length=False: same_length=True: <mlen > < qlen > <mlen > < qlen > ^ [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 1 1 1 1] [0 0 0 0 0 0 1 1 1] [1 0 0 0 0 0 1 1 1] qlen [0 0 0 0 0 0 0 1 1] [1 1 0 0 0 0 0 1 1] [0 0 0 0 0 0 0 0 1] [1 1 1 0 0 0 0 0 1] v [0 0 0 0 0 0 0 0 0] [1 1 1 1 0 0 0 0 0] """ attn_mask = tf.ones([qlen, qlen]) mask_u = tf.linalg.band_part(attn_mask, 0, -1) mask_dia = tf.linalg.band_part(attn_mask, 0, 0) attn_mask_pad = tf.zeros([qlen, mlen]) ret = tf.concat([attn_mask_pad, mask_u - mask_dia], 1) if self.same_length: mask_l = tf.linalg.band_part(attn_mask, -1, 0) ret = tf.concat([ret[:, :qlen] + mask_l - mask_dia, ret[:, qlen:]], 1) return ret def cache_mem(self, curr_out, prev_mem): # cache hidden states into memory. if self.reuse_len is not None and self.reuse_len > 0: curr_out = curr_out[: self.reuse_len] if self.mem_len is None or self.mem_len == 0: # If :obj:`use_mems` is active but no `mem_len` is defined, the model behaves like GPT-2 at inference time # and returns all of the past and current hidden states. cutoff = 0 else: # If :obj:`use_mems` is active and `mem_len` is defined, the model returns the last `mem_len` hidden # states. This is the preferred setting for training and long-form generation. cutoff = -self.mem_len if prev_mem is None: new_mem = curr_out[cutoff:] else: new_mem = tf.concat([prev_mem, curr_out], 0)[cutoff:] return tf.stop_gradient(new_mem) @staticmethod def positional_embedding(pos_seq, inv_freq, bsz=None): sinusoid_inp = tf.einsum("i,d->id", pos_seq, inv_freq) pos_emb = tf.concat([tf.sin(sinusoid_inp), tf.cos(sinusoid_inp)], axis=-1) pos_emb = pos_emb[:, None, :] if bsz is not None: pos_emb = tf.tile(pos_emb, [1, bsz, 1]) return pos_emb def relative_positional_encoding(self, qlen, klen, bsz=None): """create relative positional encoding.""" freq_seq = tf.range(0, self.d_model, 2.0) inv_freq = 1 / (10000 ** (freq_seq / self.d_model)) if self.attn_type == "bi": # beg, end = klen - 1, -qlen beg, end = klen, -qlen elif self.attn_type == "uni": # beg, end = klen - 1, -1 beg, end = klen, -1 else: raise ValueError("Unknown `attn_type` {}.".format(self.attn_type)) if self.bi_data: fwd_pos_seq = tf.range(beg, end, -1.0) bwd_pos_seq = tf.range(-beg, -end, 1.0) if self.clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len) bwd_pos_seq = tf.clip_by_value(bwd_pos_seq, -self.clamp_len, self.clamp_len) if bsz is not None: assert bsz % 2 == 0, f"With bi_data, the batch size {bsz} should be divisible by 2" fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz // 2) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq, bsz // 2) else: fwd_pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq) bwd_pos_emb = self.positional_embedding(bwd_pos_seq, inv_freq) pos_emb = tf.concat([fwd_pos_emb, bwd_pos_emb], axis=1) else: fwd_pos_seq = tf.range(beg, end, -1.0) if self.clamp_len > 0: fwd_pos_seq = tf.clip_by_value(fwd_pos_seq, -self.clamp_len, self.clamp_len) pos_emb = self.positional_embedding(fwd_pos_seq, inv_freq, bsz) return pos_emb def call( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): inputs = input_processing( func=self.call, config=self.config, 
input_ids=input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) if inputs["use_mems"] is None: # honor an explicitly passed `use_mems`; otherwise fall back to the config default for the current mode inputs["use_mems"] = self.use_mems_train if training else self.use_mems_eval # the original code for XLNet uses shapes [len, bsz] with the batch dimension at the end # but we want a unified interface in the library with the batch size on the first dimension # so we move here the first dimension (batch) to the end if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None: raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time") elif inputs["input_ids"] is not None: inputs["input_ids"] = tf.transpose(inputs["input_ids"], perm=(1, 0)) qlen, bsz = shape_list(inputs["input_ids"])[:2] elif inputs["inputs_embeds"] is not None: inputs["inputs_embeds"] = tf.transpose(inputs["inputs_embeds"], perm=(1, 0, 2)) qlen, bsz = shape_list(inputs["inputs_embeds"])[:2] else: raise ValueError("You have to specify either input_ids or inputs_embeds") inputs["token_type_ids"] = ( tf.transpose(inputs["token_type_ids"], perm=(1, 0)) if inputs["token_type_ids"] is not None else None ) inputs["input_mask"] = ( tf.transpose(inputs["input_mask"], perm=(1, 0)) if inputs["input_mask"] is not None else None ) inputs["attention_mask"] = ( tf.transpose(inputs["attention_mask"], perm=(1, 0)) if inputs["attention_mask"] is not None else None ) inputs["perm_mask"] = ( tf.transpose(inputs["perm_mask"], perm=(1, 2, 0)) if inputs["perm_mask"] is not None else None ) inputs["target_mapping"] = ( tf.transpose(inputs["target_mapping"], perm=(1, 2, 0)) if inputs["target_mapping"] is not None else None ) mlen = shape_list(inputs["mems"][0])[0] if inputs["mems"] is not None and inputs["mems"][0] is not None else 0 klen = mlen + qlen # Attention mask # causal attention mask if self.attn_type == "uni": attn_mask = self.create_mask(qlen, mlen) attn_mask = attn_mask[:, :, None, None] elif self.attn_type == "bi": attn_mask = None else: raise ValueError("Unsupported attention type: {}".format(self.attn_type)) # data mask: input mask & perm mask assert inputs["input_mask"] is None or inputs["attention_mask"] is None, ( "You can only use one of input_mask (uses 1 for padding) " "or attention_mask (uses 0 for padding, added for compatibility with BERT). Please choose one." 
) if inputs["input_mask"] is None and inputs["attention_mask"] is not None: one_cst = tf.constant(1.0) inputs["input_mask"] = 1.0 - tf.cast(inputs["attention_mask"], dtype=one_cst.dtype) if inputs["input_mask"] is not None and inputs["perm_mask"] is not None: data_mask = inputs["input_mask"][None] + inputs["perm_mask"] elif inputs["input_mask"] is not None and inputs["perm_mask"] is None: data_mask = inputs["input_mask"][None] elif inputs["input_mask"] is None and inputs["perm_mask"] is not None: data_mask = inputs["perm_mask"] else: data_mask = None if data_mask is not None: # all mems can be attended to if mlen > 0: mems_mask = tf.zeros([shape_list(data_mask)[0], mlen, bsz]) data_mask = tf.concat([mems_mask, data_mask], axis=1) if attn_mask is None: attn_mask = data_mask[:, :, :, None] else: attn_mask += data_mask[:, :, :, None] if attn_mask is not None: attn_mask = tf.cast(attn_mask > 0, dtype=attn_mask.dtype) if attn_mask is not None: non_tgt_mask = -tf.eye(qlen) if mlen > 0: non_tgt_mask = tf.concat([tf.zeros([qlen, mlen]), non_tgt_mask], axis=-1) non_tgt_mask = tf.cast((attn_mask + non_tgt_mask[:, :, None, None]) > 0, dtype=non_tgt_mask.dtype) else: non_tgt_mask = None # Word embeddings and prepare h & g hidden states if inputs["inputs_embeds"] is not None: word_emb_k = inputs["inputs_embeds"] else: word_emb_k = self.word_embedding(inputs["input_ids"]) output_h = self.dropout(word_emb_k, training=inputs["training"]) if inputs["target_mapping"] is not None: word_emb_q = tf.tile(self.mask_emb, [shape_list(inputs["target_mapping"])[0], bsz, 1]) # else: # We removed the inp_q input which was same as target mapping # inp_q_ext = inp_q[:, :, None] # word_emb_q = inp_q_ext * self.mask_emb + (1 - inp_q_ext) * word_emb_k output_g = self.dropout(word_emb_q, training=inputs["training"]) else: output_g = None # Segment embedding if inputs["token_type_ids"] is not None: # Convert `token_type_ids` to one-hot `seg_mat` if mlen > 0: mem_pad = tf.zeros([mlen, bsz], dtype=inputs["token_type_ids"].dtype) cat_ids = tf.concat([mem_pad, inputs["token_type_ids"]], 0) else: cat_ids = inputs["token_type_ids"] # `1` indicates not in the same segment [qlen x klen x bsz] seg_mat = tf.cast( tf.logical_not(tf.equal(inputs["token_type_ids"][:, None], cat_ids[None, :])), dtype=inputs["token_type_ids"].dtype, ) seg_mat = tf.one_hot(seg_mat, 2) else: seg_mat = None # Positional encoding pos_emb = self.relative_positional_encoding(qlen, klen, bsz=bsz) pos_emb = self.dropout(pos_emb, training=inputs["training"]) # Prepare head mask if needed # 1.0 in head_mask indicate we keep the head # attention_probs has shape bsz x n_heads x N x N # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads] (a head_mask for each layer) # and head_mask is converted to shape [num_hidden_layers x qlen x klen x bsz x n_head] if inputs["head_mask"] is not None: raise NotImplementedError else: inputs["head_mask"] = [None] * self.n_layer new_mems = () if inputs["mems"] is None: inputs["mems"] = [None] * len(self.layer) attentions = [] if inputs["output_attentions"] else None hidden_states = [] if inputs["output_hidden_states"] else None for i, layer_module in enumerate(self.layer): # cache new mems if inputs["use_mems"]: new_mems = new_mems + (self.cache_mem(output_h, inputs["mems"][i]),) if inputs["output_hidden_states"]: hidden_states.append((output_h, output_g) if output_g is not None else output_h) outputs = layer_module( output_h, output_g, non_tgt_mask, attn_mask, pos_emb, seg_mat, inputs["mems"][i], 
inputs["target_mapping"], inputs["head_mask"][i], inputs["output_attentions"], training=inputs["training"], ) output_h, output_g = outputs[:2] if inputs["output_attentions"]: attentions.append(outputs[2]) # Add last hidden state if inputs["output_hidden_states"]: hidden_states.append((output_h, output_g) if output_g is not None else output_h) output = self.dropout(output_g if output_g is not None else output_h, training=inputs["training"]) # Prepare outputs, we transpose back here to shape [bsz, len, hidden_dim] (cf. beginning of forward() method) output = tf.transpose(output, perm=(1, 0, 2)) if not inputs["use_mems"]: new_mems = None if inputs["output_hidden_states"]: if output_g is not None: hidden_states = tuple(tf.transpose(h, perm=(1, 0, 2)) for hs in hidden_states for h in hs) else: hidden_states = tuple(tf.transpose(hs, perm=(1, 0, 2)) for hs in hidden_states) if inputs["output_attentions"]: attentions = tuple(tf.transpose(t, perm=(2, 3, 0, 1)) for t in attentions) if not inputs["return_dict"]: return tuple(v for v in [output, new_mems, hidden_states, attentions] if v is not None) return TFXLNetModelOutput( last_hidden_state=output, mems=new_mems, hidden_states=hidden_states, attentions=attentions ) class TFXLNetPreTrainedModel(TFPreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = XLNetConfig base_model_prefix = "transformer" @dataclass class TFXLNetModelOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetModel`. Args: last_hidden_state (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_predict, hidden_size)`): Sequence of hidden-states at the last layer of the model. ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ last_hidden_state: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetLMHeadModelOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetLMHeadModel`. Args: loss (:obj:`tf.Tensor` of shape `(1,)`, `optional`, returned when ``labels`` is provided) Language modeling loss (for next-token prediction). 
logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_predict, config.vocab_size)`): Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). ``num_predict`` corresponds to ``target_mapping.shape[1]``. If ``target_mapping`` is ``None``, then ``num_predict`` corresponds to ``sequence_length``. mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForSequenceClassificationOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForSequenceClassification`. Args: loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Classification (or regression if config.num_labels==1) loss. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, config.num_labels)`): Classification (or regression if config.num_labels==1) scores (before SoftMax). mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForTokenClassificationOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForTokenClassification`. 
Args: loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when ``labels`` is provided) : Classification loss. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.num_labels)`): Classification scores (before SoftMax). mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForMultipleChoiceOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForMultipleChoice`. Args: loss (:obj:`tf.Tensor` of shape `(1,)`, `optional`, returned when :obj:`labels` is provided): Classification loss. logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, num_choices)`): `num_choices` is the second dimension of the input tensors. (see `input_ids` above). Classification scores (before SoftMax). mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None @dataclass class TFXLNetForQuestionAnsweringSimpleOutput(ModelOutput): """ Output type of :class:`~transformers.TFXLNetForQuestionAnsweringSimple`. 
Args: loss (:obj:`tf.Tensor` of shape :obj:`(1,)`, `optional`, returned when :obj:`labels` is provided): Total span extraction loss is the sum of a Cross-Entropy for the start and end positions. start_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`): Span-start scores (before SoftMax). end_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length,)`): Span-end scores (before SoftMax). mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states. Can be used (see :obj:`mems` input) to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. Hidden-states of the model at the output of each layer plus the initial embedding outputs. attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``): Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads. """ loss: Optional[tf.Tensor] = None start_logits: tf.Tensor = None end_logits: tf.Tensor = None mems: Optional[List[tf.Tensor]] = None hidden_states: Optional[Tuple[tf.Tensor]] = None attentions: Optional[Tuple[tf.Tensor]] = None XLNET_START_DOCSTRING = r""" This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the generic methods the library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads etc.) This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and behavior. .. note:: TF 2.0 models accept two formats as inputs: - having all inputs as keyword arguments (like PyTorch models), or - having all inputs as a list, tuple or dict in the first positional argument. This second option is useful when using the :meth:`tf.keras.Model.fit` method which currently requires having all the tensors in the first argument of the model call function: :obj:`model(inputs)`. If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the first positional argument: - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)` - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring: :obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])` - a dictionary with one or several input Tensors associated to the input names given in the docstring: :obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})` Parameters: config (:class:`~transformers.XLNetConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. 
Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ XLNET_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: - 1 for tokens that are **not masked**, - 0 for tokens that are **masked**. `What are attention masks? <../glossary.html#attention-mask>`__ mems (:obj:`List[tf.Tensor]` of length :obj:`config.n_layers`): Contains pre-computed hidden-states (see :obj:`mems` output below). Can be used to speed up sequential decoding. The token ids which have their past given to this model should not be passed as :obj:`input_ids` as they have already been computed. :obj:`use_mems` has to be set to :obj:`True` to make use of :obj:`mems`. perm_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, sequence_length, sequence_length)`, `optional`): Mask to indicate the attention pattern for each input token with values selected in ``[0, 1]``: - if ``perm_mask[k, i, j] = 0``, i attends to j in batch k; - if ``perm_mask[k, i, j] = 1``, i does not attend to j in batch k. If not set, each token attends to all the others (full bidirectional attention). Only used during pretraining (to define factorization order) or for sequential decoding (generation). target_mapping (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`(batch_size, num_predict, sequence_length)`, `optional`): Mask to indicate the output tokens to use. If ``target_mapping[k, i, j] = 1``, the i-th prediction in batch k is on the j-th token. token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`): Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0, 1]``: - 0 corresponds to a `sentence A` token, - 1 corresponds to a `sentence B` token. `What are token type IDs? <../glossary.html#token-type-ids>`__ input_mask (:obj:`tf.Tensor` or :obj:`Numpy array` of shape :obj:`({0})`, `optional`): Mask to avoid performing attention on padding token indices. Negative of :obj:`attention_mask`, i.e. with 0 for real tokens and 1 for padding which is kept for compatibility with the original code base. Mask values selected in ``[0, 1]``: - 1 for tokens that are **masked**, - 0 for tokens that are **not masked**. You can only use one of :obj:`input_mask` and :obj:`attention_mask`. head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: - 1 indicates the head is **not masked**, - 0 indicates the head is **masked**. inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert :obj:`input_ids` indices into associated vectors than the model's internal embedding lookup matrix. 
output_attentions (:obj:`bool`, `optional`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. output_hidden_states (:obj:`bool`, `optional`): Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the config will be used instead. return_dict (:obj:`bool`, `optional`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This argument can be used in eager mode, in graph mode the value will always be set to True. training (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use the model in training mode (some modules like dropout modules have different behaviors between training and evaluation). """ @add_start_docstrings( "The bare XLNet Model transformer outputting raw hidden-states without any specific head on top.", XLNET_START_DOCSTRING, ) class TFXLNetModel(TFXLNetPreTrainedModel): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetModelOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, training=False, **kwargs, ): inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, training=training, kwargs_call=kwargs, ) outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], mems=inputs["mems"], perm_mask=inputs["perm_mask"], target_mapping=inputs["target_mapping"], token_type_ids=inputs["token_type_ids"], input_mask=inputs["input_mask"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], use_mems=inputs["use_mems"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) return outputs def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None mems = tf.convert_to_tensor(output.mems) if output.mems is not None else None return TFXLNetModelOutput( last_hidden_state=output.last_hidden_state, mems=mems, hidden_states=hs, attentions=attns ) @add_start_docstrings( """ XLNet Model with a language modeling head on top (linear layer with weights tied to the input embeddings). 
""", XLNET_START_DOCSTRING, ) class TFXLNetLMHeadModel(TFXLNetPreTrainedModel, TFCausalLanguageModelingLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") self.lm_loss = TFXLNetLMHead(config, self.transformer.word_embedding, name="lm_loss") def get_lm_head(self): return self.lm_loss def get_prefix_bias_name(self): warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning) return self.name + "/" + self.lm_loss.name def prepare_inputs_for_generation(self, inputs, past, use_mems=None, **kwargs): # Add dummy token at the end (no attention on this one) # At every pass, the attention values for the new token and the two last generated tokens # are computed, the rest is reloaded from the `past` cache. A purely auto-regressive model would have # offset = 1; offset = 2 seems to have slightly better computation. offset = 2 effective_batch_size = inputs.shape[0] dummy_token = tf.zeros((effective_batch_size, 1), dtype=inputs.dtype) if past: inputs = tf.concat([inputs[:, -offset:], dummy_token], axis=1) else: inputs = tf.concat([inputs, dummy_token], axis=1) # Build permutation mask so that previous tokens don't see last token sequence_length = inputs.shape[1] perm_mask = tf.zeros((effective_batch_size, sequence_length, sequence_length - 1)) perm_mask_seq_end = tf.ones((effective_batch_size, sequence_length, 1)) perm_mask = tf.concat([perm_mask, perm_mask_seq_end], axis=-1) # We'll only predict the last token target_mapping = tf.zeros((effective_batch_size, 1, sequence_length - 1)) target_mapping_seq_end = tf.ones((effective_batch_size, 1, 1)) target_mapping = tf.concat([target_mapping, target_mapping_seq_end], axis=-1) inputs = { "input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "use_mems": kwargs.get("use_mems"), } # if past is defined in model kwargs then use it for faster decoding if past: inputs["mems"] = tuple(layer_past[:-offset, :, :] for layer_past in past) return inputs @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @replace_return_docstrings(output_type=TFXLNetLMHeadModelOutput, config_class=_CONFIG_FOR_DOC) def call( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the cross entropy classification loss. Indices should be in ``[0, ..., config.vocab_size - 1]``. Return: Examples:: >>> import tensorflow as tf >>> import numpy as np >>> from transformers import XLNetTokenizer, TFXLNetLMHeadModel >>> tokenizer = XLNetTokenizer.from_pretrained('xlnet-large-cased') >>> model = TFXLNetLMHeadModel.from_pretrained('xlnet-large-cased') >>> # We show how to setup inputs to predict a next token using a bi-directional context. 
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is very <mask>", add_special_tokens=True))[None, :] # We will predict the masked token >>> perm_mask = np.zeros((1, input_ids.shape[1], input_ids.shape[1])) >>> perm_mask[:, :, -1] = 1.0 # Previous tokens don't see last token >>> target_mapping = np.zeros((1, 1, input_ids.shape[1])) # Shape [1, 1, seq_length] => let's predict one token >>> target_mapping[0, 0, -1] = 1.0 # Our first (and only) prediction will be the last token of the sequence (the masked token) >>> outputs = model(input_ids, perm_mask=tf.constant(perm_mask, dtype=tf.float32), target_mapping=tf.constant(target_mapping, dtype=tf.float32)) >>> next_token_logits = outputs[0] # Output has shape [target_mapping.size(0), target_mapping.size(1), config.vocab_size] """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) transformer_outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], mems=inputs["mems"], perm_mask=inputs["perm_mask"], target_mapping=inputs["target_mapping"], token_type_ids=inputs["token_type_ids"], input_mask=inputs["input_mask"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], use_mems=inputs["use_mems"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) hidden_state = transformer_outputs[0] logits = self.lm_loss(hidden_state, training=inputs["training"]) loss = None if inputs["labels"] is not None: # shift labels to the left and cut last logit token logits = logits[:, :-1] labels = inputs["labels"][:, 1:] loss = self.compute_loss(labels, logits) if not inputs["return_dict"]: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetLMHeadModelOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None mems = tf.convert_to_tensor(output.mems) if output.mems is not None else None return TFXLNetLMHeadModelOutput(logits=output.logits, mems=mems, hidden_states=hs, attentions=attns) @add_start_docstrings( """ XLNet Model with a sequence classification/regression head on top (a linear layer on top of the pooled output) e.g. for GLUE tasks. 
""", XLNET_START_DOCSTRING, ) class TFXLNetForSequenceClassification(TFXLNetPreTrainedModel, TFSequenceClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLNetMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary( config, initializer_range=config.initializer_range, name="sequence_summary" ) self.logits_proj = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj" ) @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForSequenceClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the sequence classification/regression loss. Indices should be in ``[0, ..., config.num_labels - 1]``. If ``config.num_labels == 1`` a regression loss is computed (Mean-Square loss), If ``config.num_labels > 1`` a classification loss is computed (Cross-Entropy). """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) transformer_outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], mems=inputs["mems"], perm_mask=inputs["perm_mask"], target_mapping=inputs["target_mapping"], token_type_ids=inputs["token_type_ids"], input_mask=inputs["input_mask"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], use_mems=inputs["use_mems"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=return_dict, training=inputs["training"], ) output = transformer_outputs[0] output = self.sequence_summary(output) logits = self.logits_proj(output) loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits) if not inputs["return_dict"]: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForSequenceClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None mems = tf.convert_to_tensor(output.mems) if output.mems is not None else None return TFXLNetForSequenceClassificationOutput( logits=output.logits, mems=mems, hidden_states=hs, attentions=attns ) @add_start_docstrings( """ XLNET Model with a multiple choice classification head on top (a 
linear layer on top of the pooled output and a softmax) e.g. for RocStories/SWAG tasks. """, XLNET_START_DOCSTRING, ) class TFXLNetForMultipleChoice(TFXLNetPreTrainedModel, TFMultipleChoiceLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") self.sequence_summary = TFSequenceSummary( config, initializer_range=config.initializer_range, name="sequence_summary" ) self.logits_proj = tf.keras.layers.Dense( 1, kernel_initializer=get_initializer(config.initializer_range), name="logits_proj" ) @property def dummy_inputs(self): """ Dummy inputs to build the network. Returns: tf.Tensor with dummy inputs """ return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)} @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForMultipleChoiceOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, token_type_ids=None, input_mask=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for computing the multiple choice classification loss. Indices should be in ``[0, ..., num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See :obj:`input_ids` above) """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) if inputs["input_ids"] is not None: num_choices = shape_list(inputs["input_ids"])[1] seq_length = shape_list(inputs["input_ids"])[2] else: num_choices = shape_list(inputs["inputs_embeds"])[1] seq_length = shape_list(inputs["inputs_embeds"])[2] flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None flat_attention_mask = ( tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None ) flat_token_type_ids = ( tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None ) flat_input_mask = ( tf.reshape(inputs["input_mask"], (-1, seq_length)) if inputs["input_mask"] is not None else None ) flat_inputs_embeds = ( tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3])) if inputs["inputs_embeds"] is not None else None ) transformer_outputs = self.transformer( flat_input_ids, flat_attention_mask, inputs["mems"], inputs["perm_mask"], inputs["target_mapping"], flat_token_type_ids, flat_input_mask, inputs["head_mask"], flat_inputs_embeds, inputs["use_mems"], inputs["output_attentions"], inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) output = transformer_outputs[0] logits = self.sequence_summary(output) logits = self.logits_proj(logits) reshaped_logits = tf.reshape(logits, (-1, 
num_choices)) loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits) if not inputs["return_dict"]: output = (reshaped_logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForMultipleChoiceOutput( loss=loss, logits=reshaped_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) @tf.function( input_signature=[ { "input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"), "attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"), "token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"), } ] ) def serving(self, inputs): output = self.call(inputs) return self.serving_output(output) def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None mems = tf.convert_to_tensor(output.mems) if output.mems is not None else None return TFXLNetForMultipleChoiceOutput(logits=output.logits, mems=mems, hidden_states=hs, attentions=attns) @add_start_docstrings( """ XLNet Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for Named-Entity-Recognition (NER) tasks. """, XLNET_START_DOCSTRING, ) class TFXLNetForTokenClassification(TFXLNetPreTrainedModel, TFTokenClassificationLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.num_labels = config.num_labels self.transformer = TFXLNetMainLayer(config, name="transformer") self.classifier = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier" ) @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForTokenClassificationOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, labels=None, training=False, **kwargs, ): r""" labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels - 1]``. 
""" inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, labels=labels, training=training, kwargs_call=kwargs, ) transformer_outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], mems=inputs["mems"], perm_mask=inputs["perm_mask"], target_mapping=inputs["target_mapping"], token_type_ids=inputs["token_type_ids"], input_mask=inputs["input_mask"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], use_mems=inputs["use_mems"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) output = transformer_outputs[0] logits = self.classifier(output) loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits) if not inputs["return_dict"]: output = (logits,) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForTokenClassificationOutput( loss=loss, logits=logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None mems = tf.convert_to_tensor(output.mems) if output.mems is not None else None return TFXLNetForTokenClassificationOutput(logits=output.logits, mems=mems, hidden_states=hs, attentions=attns) @add_start_docstrings( """ XLNet Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear layers on top of the hidden-states output to compute `span start logits` and `span end logits`). """, XLNET_START_DOCSTRING, ) class TFXLNetForQuestionAnsweringSimple(TFXLNetPreTrainedModel, TFQuestionAnsweringLoss): def __init__(self, config, *inputs, **kwargs): super().__init__(config, *inputs, **kwargs) self.transformer = TFXLNetMainLayer(config, name="transformer") self.qa_outputs = tf.keras.layers.Dense( config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs" ) @add_start_docstrings_to_model_forward(XLNET_INPUTS_DOCSTRING.format("batch_size, sequence_length")) @add_code_sample_docstrings( tokenizer_class=_TOKENIZER_FOR_DOC, checkpoint=_CHECKPOINT_FOR_DOC, output_type=TFXLNetForQuestionAnsweringSimpleOutput, config_class=_CONFIG_FOR_DOC, ) def call( self, input_ids=None, attention_mask=None, mems=None, perm_mask=None, target_mapping=None, token_type_ids=None, input_mask=None, head_mask=None, inputs_embeds=None, use_mems=None, output_attentions=None, output_hidden_states=None, return_dict=None, start_positions=None, end_positions=None, training=False, **kwargs, ): r""" start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the start of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. 
end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`): Labels for position (index) of the end of the labelled span for computing the token classification loss. Positions are clamped to the length of the sequence (:obj:`sequence_length`). Position outside of the sequence are not taken into account for computing the loss. """ inputs = input_processing( func=self.call, config=self.config, input_ids=input_ids, attention_mask=attention_mask, mems=mems, perm_mask=perm_mask, target_mapping=target_mapping, token_type_ids=token_type_ids, input_mask=input_mask, head_mask=head_mask, inputs_embeds=inputs_embeds, use_mems=use_mems, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, start_positions=start_positions, end_positions=end_positions, training=training, kwargs_call=kwargs, ) transformer_outputs = self.transformer( input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"], mems=inputs["mems"], perm_mask=inputs["perm_mask"], target_mapping=inputs["target_mapping"], token_type_ids=inputs["token_type_ids"], input_mask=inputs["input_mask"], head_mask=inputs["head_mask"], inputs_embeds=inputs["inputs_embeds"], use_mems=inputs["use_mems"], output_attentions=inputs["output_attentions"], output_hidden_states=inputs["output_hidden_states"], return_dict=inputs["return_dict"], training=inputs["training"], ) sequence_output = transformer_outputs[0] logits = self.qa_outputs(sequence_output) start_logits, end_logits = tf.split(logits, 2, axis=-1) start_logits = tf.squeeze(start_logits, axis=-1) end_logits = tf.squeeze(end_logits, axis=-1) loss = None if inputs["start_positions"] is not None and inputs["end_positions"] is not None: labels = {"start_position": inputs["start_positions"]} labels["end_position"] = inputs["end_positions"] loss = self.compute_loss(labels, (start_logits, end_logits)) if not inputs["return_dict"]: output = (start_logits, end_logits) + transformer_outputs[1:] return ((loss,) + output) if loss is not None else output return TFXLNetForQuestionAnsweringSimpleOutput( loss=loss, start_logits=start_logits, end_logits=end_logits, mems=transformer_outputs.mems, hidden_states=transformer_outputs.hidden_states, attentions=transformer_outputs.attentions, ) def serving_output(self, output): hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None mems = tf.convert_to_tensor(output.mems) if output.mems is not None else None return TFXLNetForQuestionAnsweringSimpleOutput( start_logits=output.start_logits, end_logits=output.end_logits, mems=mems, hidden_states=hs, attentions=attns, )
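# A minimal, hedged usage sketch for the span-classification head defined above.
# The "xlnet-base-cased" checkpoint and the question/context strings are
# illustrative assumptions, not values taken from this module.
import tensorflow as tf
from transformers import TFXLNetForQuestionAnsweringSimple, XLNetTokenizer

tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
model = TFXLNetForQuestionAnsweringSimple.from_pretrained("xlnet-base-cased")

inputs = tokenizer("Who proposed XLNet?", "XLNet was proposed by Yang et al. in 2019.", return_tensors="tf")
outputs = model(inputs, return_dict=True)

# Decode the span between the most likely start and end positions.
start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
print(answer)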
AdaMix/src/transformers/models/xlnet/modeling_tf_xlnet.py/0
{ "file_path": "AdaMix/src/transformers/models/xlnet/modeling_tf_xlnet.py", "repo_id": "AdaMix", "token_count": 36908 }
65
from typing import TYPE_CHECKING, List, Optional, Union import numpy as np from ..file_utils import add_end_docstrings, is_tf_available, is_torch_available from ..modelcard import ModelCard from ..models.bert.tokenization_bert import BasicTokenizer from ..tokenization_utils import PreTrainedTokenizer from .base import PIPELINE_INIT_ARGS, ArgumentHandler, Pipeline if TYPE_CHECKING: from ..modeling_tf_utils import TFPreTrainedModel from ..modeling_utils import PreTrainedModel if is_tf_available(): from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING if is_torch_available(): import torch from ..models.auto.modeling_auto import MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING class TokenClassificationArgumentHandler(ArgumentHandler): """ Handles arguments for token classification. """ def __call__(self, inputs: Union[str, List[str]], **kwargs): if inputs is not None and isinstance(inputs, (list, tuple)) and len(inputs) > 0: inputs = list(inputs) batch_size = len(inputs) elif isinstance(inputs, str): inputs = [inputs] batch_size = 1 else: raise ValueError("At least one input is required.") offset_mapping = kwargs.get("offset_mapping") if offset_mapping: if isinstance(offset_mapping, list) and isinstance(offset_mapping[0], tuple): offset_mapping = [offset_mapping] if len(offset_mapping) != batch_size: raise ValueError("offset_mapping should have the same batch size as the input") return inputs, offset_mapping @add_end_docstrings( PIPELINE_INIT_ARGS, r""" ignore_labels (:obj:`List[str]`, defaults to :obj:`["O"]`): A list of labels to ignore. grouped_entities (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to group the tokens corresponding to the same entity together in the predictions or not. """, ) class TokenClassificationPipeline(Pipeline): """ Named Entity Recognition pipeline using any :obj:`ModelForTokenClassification`. See the `named entity recognition examples <../task_summary.html#named-entity-recognition>`__ for more information. This token recognition pipeline can currently be loaded from :func:`~transformers.pipeline` using the following task identifier: :obj:`"ner"` (for predicting the classes of tokens in a sequence: person, organisation, location or miscellaneous). The models that this pipeline can use are models that have been fine-tuned on a token classification task. See the up-to-date list of available models on `huggingface.co/models <https://huggingface.co/models?filter=token-classification>`__. """ default_input_names = "sequences" def __init__( self, model: Union["PreTrainedModel", "TFPreTrainedModel"], tokenizer: PreTrainedTokenizer, modelcard: Optional[ModelCard] = None, framework: Optional[str] = None, args_parser: ArgumentHandler = TokenClassificationArgumentHandler(), device: int = -1, binary_output: bool = False, ignore_labels=["O"], task: str = "", grouped_entities: bool = False, ignore_subwords: bool = False, ): super().__init__( model=model, tokenizer=tokenizer, modelcard=modelcard, framework=framework, device=device, binary_output=binary_output, task=task, ) self.check_model_type( TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING if self.framework == "tf" else MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING ) self._basic_tokenizer = BasicTokenizer(do_lower_case=False) self._args_parser = args_parser self.ignore_labels = ignore_labels self.grouped_entities = grouped_entities self.ignore_subwords = ignore_subwords if self.ignore_subwords and not self.tokenizer.is_fast: raise ValueError( "Slow tokenizers cannot ignore subwords. 
Please set the `ignore_subwords` option to `False` or use a fast tokenizer." ) def __call__(self, inputs: Union[str, List[str]], **kwargs): """ Classify each token of the text(s) given as inputs. Args: inputs (:obj:`str` or :obj:`List[str]`): One or several texts (or one list of texts) for token classification. Return: A list or a list of lists of :obj:`dict`: Each result comes as a list of dictionaries (one for each token in the corresponding input, or each entity if this pipeline was instantiated with :obj:`grouped_entities=True`) with the following keys: - **word** (:obj:`str`) -- The token/word classified. - **score** (:obj:`float`) -- The corresponding probability for :obj:`entity`. - **entity** (:obj:`str`) -- The entity predicted for that token/word (it is named `entity_group` when `grouped_entities` is set to True). - **index** (:obj:`int`, only present when ``self.grouped_entities=False``) -- The index of the corresponding token in the sentence. - **start** (:obj:`int`, `optional`) -- The index of the start of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer. - **end** (:obj:`int`, `optional`) -- The index of the end of the corresponding entity in the sentence. Only exists if the offsets are available within the tokenizer. """ _inputs, offset_mappings = self._args_parser(inputs, **kwargs) answers = [] for i, sentence in enumerate(_inputs): # Manage correct placement of the tensors with self.device_placement(): tokens = self.tokenizer( sentence, return_attention_mask=False, return_tensors=self.framework, truncation=True, return_special_tokens_mask=True, return_offsets_mapping=self.tokenizer.is_fast, ) if self.tokenizer.is_fast: offset_mapping = tokens.pop("offset_mapping").cpu().numpy()[0] elif offset_mappings: offset_mapping = offset_mappings[i] else: offset_mapping = None special_tokens_mask = tokens.pop("special_tokens_mask").cpu().numpy()[0] # Forward pass if self.framework == "tf": entities = self.model(tokens.data)[0][0].numpy() input_ids = tokens["input_ids"].numpy()[0] else: with torch.no_grad(): tokens = self.ensure_tensor_on_device(**tokens) entities = self.model(**tokens)[0][0].cpu().numpy() input_ids = tokens["input_ids"].cpu().numpy()[0] score = np.exp(entities) / np.exp(entities).sum(-1, keepdims=True) labels_idx = score.argmax(axis=-1) entities = [] # Keep predictions whose label is not in `self.ignore_labels` and which are not special tokens filtered_labels_idx = [ (idx, label_idx) for idx, label_idx in enumerate(labels_idx) if (self.model.config.id2label[label_idx] not in self.ignore_labels) and not special_tokens_mask[idx] ] for idx, label_idx in filtered_labels_idx: if offset_mapping is not None: start_ind, end_ind = offset_mapping[idx] word_ref = sentence[start_ind:end_ind] word = self.tokenizer.convert_ids_to_tokens([int(input_ids[idx])])[0] is_subword = len(word_ref) != len(word) if int(input_ids[idx]) == self.tokenizer.unk_token_id: word = word_ref is_subword = False else: word = self.tokenizer.convert_ids_to_tokens(int(input_ids[idx])) start_ind = None end_ind = None entity = { "word": word, "score": score[idx][label_idx].item(), "entity": self.model.config.id2label[label_idx], "index": idx, "start": start_ind, "end": end_ind, } if self.grouped_entities and self.ignore_subwords: entity["is_subword"] = is_subword entities += [entity] if self.grouped_entities: answers += [self.group_entities(entities)] # Append ungrouped entities else: answers += [entities] if len(answers) == 1: return answers[0] return answers def
group_sub_entities(self, entities: List[dict]) -> dict: """ Group together the adjacent tokens with the same entity predicted. Args: entities (:obj:`dict`): The entities predicted by the pipeline. """ # Take the entity type of the first token in the group entity = entities[0]["entity"].split("-")[-1] score = np.nanmean([entity["score"] for entity in entities]) tokens = [entity["word"] for entity in entities] entity_group = { "entity_group": entity, "score": score, "word": self.tokenizer.convert_tokens_to_string(tokens), "start": entities[0]["start"], "end": entities[-1]["end"], } return entity_group def group_entities(self, entities: List[dict]) -> List[dict]: """ Find and group together the adjacent tokens with the same entity predicted. Args: entities (:obj:`dict`): The entities predicted by the pipeline. """ entity_groups = [] entity_group_disagg = [] if entities: last_idx = entities[-1]["index"] for entity in entities: is_last_idx = entity["index"] == last_idx is_subword = self.ignore_subwords and entity["is_subword"] if not entity_group_disagg: entity_group_disagg += [entity] if is_last_idx: entity_groups += [self.group_sub_entities(entity_group_disagg)] continue # If the current entity is similar and adjacent to the previous entity, append it to the disaggregated entity group # The split is meant to account for the "B-" and "I-" prefixes # Shouldn't merge if both entities are B-type if ( ( entity["entity"].split("-")[-1] == entity_group_disagg[-1]["entity"].split("-")[-1] and entity["entity"].split("-")[0] != "B" ) and entity["index"] == entity_group_disagg[-1]["index"] + 1 ) or is_subword: # Modify subword type to be previous_type if is_subword: entity["entity"] = entity_group_disagg[-1]["entity"].split("-")[-1] entity["score"] = np.nan # set ignored scores to nan and use np.nanmean entity_group_disagg += [entity] # Group the entities at the last entity if is_last_idx: entity_groups += [self.group_sub_entities(entity_group_disagg)] # If the current entity is different from the previous entity, aggregate the disaggregated entity group else: entity_groups += [self.group_sub_entities(entity_group_disagg)] entity_group_disagg = [entity] # If it's the last entity, add it to the entity groups if is_last_idx: entity_groups += [self.group_sub_entities(entity_group_disagg)] return entity_groups NerPipeline = TokenClassificationPipeline
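# A short, hedged usage sketch of the pipeline defined above. The sentence and the
# printed values are illustrative; the default "ner" checkpoint is whatever
# pipeline() resolves to.
from transformers import pipeline

# grouped_entities=True routes predictions through group_entities() above, merging
# adjacent tokens of the same entity (and, with ignore_subwords=True, their word pieces).
ner = pipeline("ner", grouped_entities=True, ignore_subwords=True)
print(ner("Hugging Face is based in New York City."))
# Roughly: [{'entity_group': 'ORG', 'score': 0.99, 'word': 'Hugging Face', ...},
#           {'entity_group': 'LOC', 'score': 0.99, 'word': 'New York City', ...}]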
AdaMix/src/transformers/pipelines/token_classification.py/0
{ "file_path": "AdaMix/src/transformers/pipelines/token_classification.py", "repo_id": "AdaMix", "token_count": 5725 }
66
# Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from dataclasses import dataclass, field from .file_utils import add_start_docstrings from .training_args import TrainingArguments logger = logging.getLogger(__name__) @dataclass @add_start_docstrings(TrainingArguments.__doc__) class Seq2SeqTrainingArguments(TrainingArguments): """ sortish_sampler (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to use a `sortish sampler` or not. Only possible if the underlying datasets are `Seq2SeqDataset` for now but will become generally available in the near future. It sorts the inputs according to lengths in order to minimize the padding size, with a bit of randomness for the training set. predict_with_generate (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to use generate to calculate generative metrics (ROUGE, BLEU). """ sortish_sampler: bool = field(default=False, metadata={"help": "Whether to use SortishSampler or not."}) predict_with_generate: bool = field( default=False, metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
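# A minimal sketch of how the two flags above combine with the inherited
# TrainingArguments fields; the output directory is a made-up path.
from transformers import Seq2SeqTrainingArguments

training_args = Seq2SeqTrainingArguments(
    output_dir="./seq2seq-checkpoints",  # hypothetical path
    per_device_train_batch_size=8,
    num_train_epochs=3,
    predict_with_generate=True,  # evaluate with model.generate() so ROUGE/BLEU can be computed
    sortish_sampler=True,  # length-sorted batches with some randomness (Seq2SeqDataset only, for now)
)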
AdaMix/src/transformers/training_args_seq2seq.py/0
{ "file_path": "AdaMix/src/transformers/training_args_seq2seq.py", "repo_id": "AdaMix", "token_count": 533 }
67
# coding=utf-8 # Copyright 2020 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging utilities. """ import logging import os import sys import threading from logging import CRITICAL # NOQA from logging import DEBUG # NOQA from logging import ERROR # NOQA from logging import FATAL # NOQA from logging import INFO # NOQA from logging import NOTSET # NOQA from logging import WARN # NOQA from logging import WARNING # NOQA from typing import Optional _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING def _get_default_logging_level(): """ If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to ``_default_log_level`` """ env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _default_handler = logging.StreamHandler() # Set sys.stderr as stream. _default_handler.flush = sys.stderr.flush # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_logger(name: Optional[str] = None) -> logging.Logger: """ Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module. """ if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: """ Return the current level for the 🤗 Transformers's root logger as an int. Returns: :obj:`int`: The logging level. .. 
note:: 🤗 Transformers has the following logging levels: - 50: ``transformers.logging.CRITICAL`` or ``transformers.logging.FATAL`` - 40: ``transformers.logging.ERROR`` - 30: ``transformers.logging.WARNING`` or ``transformers.logging.WARN`` - 20: ``transformers.logging.INFO`` - 10: ``transformers.logging.DEBUG`` """ _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """ Set the verbosity level for the 🤗 Transformers's root logger. Args: verbosity (:obj:`int`): Logging level, e.g., one of: - ``transformers.logging.CRITICAL`` or ``transformers.logging.FATAL`` - ``transformers.logging.ERROR`` - ``transformers.logging.WARNING`` or ``transformers.logging.WARN`` - ``transformers.logging.INFO`` - ``transformers.logging.DEBUG`` """ _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): """Set the verbosity to the :obj:`INFO` level.""" return set_verbosity(INFO) def set_verbosity_warning(): """Set the verbosity to the :obj:`WARNING` level.""" return set_verbosity(WARNING) def set_verbosity_debug(): """Set the verbosity to the :obj:`DEBUG` level.""" return set_verbosity(DEBUG) def set_verbosity_error(): """Set the verbosity to the :obj:`ERROR` level.""" return set_verbosity(ERROR) def disable_default_handler() -> None: """Disable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: """Enable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def add_handler(handler: logging.Handler) -> None: """Adds a handler to the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert handler is not None _get_library_root_logger().addHandler(handler) def remove_handler(handler: logging.Handler) -> None: """Removes the given handler from the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert handler is not None and handler in _get_library_root_logger().handlers _get_library_root_logger().removeHandler(handler) def disable_propagation() -> None: """ Disable propagation of the library log outputs. Note that log propagation is disabled by default. """ _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: """ Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to prevent double logging if the root logger has been configured. """ _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: """ Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows: :: [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: """ Resets the formatting for HuggingFace Transformers's loggers. All handlers currently bound to the root logger are affected by this method.
""" handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None)
AdaMix/src/transformers/utils/logging.py/0
{ "file_path": "AdaMix/src/transformers/utils/logging.py", "repo_id": "AdaMix", "token_count": 2705 }
68
# coding=utf-8 # Copyright 2021 The HuggingFace Inc. team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. {% if cookiecutter.is_encoder_decoder_model == "False" %} import unittest from transformers import is_tf_available, {{cookiecutter.camelcase_modelname}}Config from transformers.testing_utils import require_tf, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import ( TF{{cookiecutter.camelcase_modelname}}ForCausalLM, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification, TF{{cookiecutter.camelcase_modelname}}Model, ) class TF{{cookiecutter.camelcase_modelname}}ModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = {{cookiecutter.camelcase_modelname}}Config( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TF{{cookiecutter.camelcase_modelname}}Model(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) def create_and_check_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TF{{cookiecutter.camelcase_modelname}}ForCausalLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TF{{cookiecutter.camelcase_modelname}}ForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TF{{cookiecutter.camelcase_modelname}}ForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_for_question_answering( self, 
config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( ( TF{{cookiecutter.camelcase_modelname}}Model, TF{{cookiecutter.camelcase_modelname}}ForCausalLM, TF{{cookiecutter.camelcase_modelname}}ForMaskedLM, TF{{cookiecutter.camelcase_modelname}}ForQuestionAnswering, TF{{cookiecutter.camelcase_modelname}}ForSequenceClassification, TF{{cookiecutter.camelcase_modelname}}ForTokenClassification, TF{{cookiecutter.camelcase_modelname}}ForMultipleChoice, ) if is_tf_available() else () ) test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TF{{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lm_head(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_for_token_classification(*config_and_inputs) @slow def test_model_from_pretrained(self): model = TF{{cookiecutter.camelcase_modelname}}Model.from_pretrained("{{cookiecutter.checkpoint_identifier}}") self.assertIsNotNone(model) @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TF{{cookiecutter.camelcase_modelname}}ForMaskedLM.from_pretrained("{{cookiecutter.checkpoint_identifier}}") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] # TODO Replace vocab size vocab_size = 32000 expected_shape = [1, 6, vocab_size] self.assertEqual(output.shape, expected_shape) print(output[:, 
:3, :3]) # TODO Replace values below with what was printed above. expected_slice = tf.constant( [ [ [-0.05243197, -0.04498899, 0.05512108], [-0.07444685, -0.01064632, 0.04352357], [-0.05020351, 0.05530146, 0.00700043], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) {% else %} import unittest from transformers import ( is_tf_available, {{cookiecutter.camelcase_modelname}}Config, {{cookiecutter.camelcase_modelname}}Tokenizer, ) from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers import ( TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, TF{{cookiecutter.camelcase_modelname}}Model, ) @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelTester: config_cls = {{cookiecutter.camelcase_modelname}}Config config_updates = {} hidden_act = "gelu" def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.eos_token_id = eos_token_id self.pad_token_id = pad_token_id self.bos_token_id = bos_token_id def prepare_config_and_inputs_for_common(self): input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size) eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1) input_ids = tf.concat([input_ids, eos_tensor], axis=1) decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = self.config_cls( vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, ) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(config, input_ids, decoder_input_ids) return config, inputs_dict def check_decoder_model_past_large_inputs(self, config, inputs_dict): model = TF{{cookiecutter.camelcase_modelname}}Model(config=config).get_decoder() input_ids = inputs_dict["input_ids"] input_ids = input_ids[:1, :] attention_mask = inputs_dict["attention_mask"][:1, :] self.batch_size = 1 # first forward pass outputs = model(input_ids, attention_mask=attention_mask, use_cache=True) output, past_key_values = outputs.to_tuple() past_key_values = past_key_values[1] # create 
hypothetical next token and extend to next_input_ids next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size) next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8) # append to next input_ids and next attention mask next_input_ids = tf.concat([input_ids, next_tokens], axis=-1) next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1) output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0] output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0] self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1]) # select random slice random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1])) output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx] output_from_past_slice = output_from_past[:, :, random_slice_idx] # test that outputs are equal for slice tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3) def prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict( config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, ): if attention_mask is None: attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8) if decoder_attention_mask is None: decoder_attention_mask = tf.concat([tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8), tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8)], axis=-1) return { "input_ids": input_ids, "decoder_input_ids": decoder_input_ids, "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, } @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration, TF{{cookiecutter.camelcase_modelname}}Model) if is_tf_available() else () all_generative_model_classes = (TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration,) if is_tf_available() else () is_encoder_decoder = True test_pruning = False test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TF{{cookiecutter.camelcase_modelname}}ModelTester(self) self.config_tester = ConfigTester(self, config_class={{cookiecutter.camelcase_modelname}}Config) def test_config(self): self.config_tester.run_common_tests() def test_decoder_model_past_large_inputs(self): config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common() self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() for model_class in self.all_model_classes: model = model_class(config) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in self.all_generative_model_classes: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_resize_token_embeddings(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def _get_word_embedding_weight(model, embedding_layer): if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: # Here we build the word embedding weights if they do not exist. # Then we retry to get the attribute once built.
model(model.dummy_inputs) if hasattr(embedding_layer, "weight"): return embedding_layer.weight else: return None for model_class in self.all_model_classes: for size in [config.vocab_size - 10, config.vocab_size + 10, None]: # build the embeddings model = model_class(config=config) old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) old_final_logits_bias = model.get_bias() # reshape the embeddings model.resize_token_embeddings(size) new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings()) new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings()) new_final_logits_bias = model.get_bias() # check that the resized embeddings size matches the desired size. assert_size = size if size is not None else config.vocab_size self.assertEqual(new_input_embeddings.shape[0], assert_size) # check that weights remain the same after resizing models_equal = True for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_output_embeddings is not None and new_output_embeddings is not None: self.assertEqual(new_output_embeddings.shape[0], assert_size) models_equal = True for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) if old_final_logits_bias is not None and new_final_logits_bias is not None: old_final_logits_bias = old_final_logits_bias["final_logits_bias"] new_final_logits_bias = new_final_logits_bias["final_logits_bias"] self.assertEqual(new_final_logits_bias.shape[0], 1) self.assertEqual(new_final_logits_bias.shape[1], assert_size) models_equal = True for old, new in zip(old_final_logits_bias.value(), new_final_logits_bias.value()): for p1, p2 in zip(old, new): if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0: models_equal = False self.assertTrue(models_equal) def _assert_tensors_equal(a, b, atol=1e-12, prefix=""): """If tensors are not close, or if a and b aren't both tensors, raise a nice assertion error.""" if a is None and b is None: return True try: tf.debugging.assert_near(a, b, atol=atol) return True except Exception: msg = "{} != {}".format(a, b) if prefix: msg = prefix + ": " + msg raise AssertionError(msg) def _long_tensor(tok_lst): return tf.constant(tok_lst, dtype=tf.int32) TOLERANCE = 1e-4 @slow @require_sentencepiece @require_tokenizers @require_tf class TF{{cookiecutter.camelcase_modelname}}ModelIntegrationTest(unittest.TestCase): def test_inference_no_head(self): model = TF{{cookiecutter.camelcase_modelname}}Model.from_pretrained('{{cookiecutter.checkpoint_identifier}}') # change to intended input here input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = tf.constant( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE) def test_inference_with_head(self):
model = TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') # change to intended input here input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) decoder_input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]]) inputs_dict = prepare_{{cookiecutter.lowercase_modelname}}_inputs_dict(model.config, input_ids, decoder_input_ids) output = model(**inputs_dict)[0] expected_shape = (1, 11, 1024) self.assertEqual(output.shape, expected_shape) # change to expected output here expected_slice = tf.constant( [[0.7144, 0.8143, -1.2813], [0.7144, 0.8143, -1.2813], [-0.0467, 2.5911, -2.1845]], ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=TOLERANCE) def test_seq_to_seq_generation(self): hf = TF{{cookiecutter.camelcase_modelname}}ForConditionalGeneration.from_pretrained('{{cookiecutter.checkpoint_identifier}}') tok = {{cookiecutter.camelcase_modelname}}Tokenizer.from_pretrained('{{cookiecutter.checkpoint_identifier}}') batch_input = [ # string 1, # string 2, # string 3, # string 4, ] # The batch below tests that we don't add any hypotheses outside of the top n_beams dct = tok.batch_encode_plus( batch_input, max_length=512, padding="max_length", truncation=True, return_tensors="tf", ) hypotheses_batch = hf.generate( input_ids=dct["input_ids"], attention_mask=dct["attention_mask"], num_beams=2, ) EXPECTED = [ # here expected 1, # here expected 2, # here expected 3, # here expected 4, ] generated = tok.batch_decode( hypotheses_batch.numpy().tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True ) assert generated == EXPECTED {%- endif %}
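# For context, a hedged sketch of how the {{cookiecutter.*}} placeholders in this
# template are typically filled. The model names below are made up, and the real
# template defines more context keys than the ones shown here.
from cookiecutter.main import cookiecutter

cookiecutter(
    "templates/adding_a_new_model",  # the template directory containing this file
    no_input=True,
    extra_context={  # hypothetical values covering only the placeholders used above
        "modelname": "BrandNewBert",
        "camelcase_modelname": "BrandNewBert",
        "lowercase_modelname": "brand_new_bert",
        "checkpoint_identifier": "brand-new-bert-base-cased",
        "is_encoder_decoder_model": "False",
    },
)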
AdaMix/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py/0
{ "file_path": "AdaMix/templates/adding_a_new_model/cookiecutter-template-{{cookiecutter.modelname}}/test_modeling_tf_{{cookiecutter.lowercase_modelname}}.py", "repo_id": "AdaMix", "token_count": 12792 }
69
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import doctest import logging import os import unittest from pathlib import Path from typing import List, Union import transformers from transformers.testing_utils import require_tf, require_torch, slow logger = logging.getLogger() @unittest.skip("Temporarily disable the doc tests.") @require_torch @require_tf @slow class TestCodeExamples(unittest.TestCase): def analyze_directory( self, directory: Path, identifier: Union[str, None] = None, ignore_files: Union[List[str], None] = None, n_identifier: Union[str, List[str], None] = None, only_modules: bool = True, ): """ Runs through the specific directory, looking for the files identified with `identifier`. Executes the doctests in those files Args: directory (:obj:`Path`): Directory containing the files identifier (:obj:`str`): Will parse files containing this ignore_files (:obj:`List[str]`): List of files to skip n_identifier (:obj:`str` or :obj:`List[str]`): Will not parse files containing this/these identifiers. only_modules (:obj:`bool`): Whether to only analyze modules """ files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))] if identifier is not None: files = [file for file in files if identifier in file] if n_identifier is not None: if isinstance(n_identifier, List): for n_ in n_identifier: files = [file for file in files if n_ not in file] else: files = [file for file in files if n_identifier not in file] ignore_files = ignore_files or [] ignore_files.append("__init__.py") files = [file for file in files if file not in ignore_files] for file in files: # Open all files print("Testing", file) if only_modules: module_identifier = file.split(".")[0] try: module_identifier = getattr(transformers, module_identifier) suite = doctest.DocTestSuite(module_identifier) result = unittest.TextTestRunner().run(suite) self.assertIs(len(result.failures), 0) except AttributeError: logger.info(f"{module_identifier} is not a module.") else: result = doctest.testfile(str(".." 
/ directory / file), optionflags=doctest.ELLIPSIS) self.assertIs(result.failed, 0) def test_modeling_examples(self): transformers_directory = Path("src/transformers") files = "modeling" ignore_files = [ "modeling_ctrl.py", "modeling_tf_ctrl.py", ] self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files) def test_tokenization_examples(self): transformers_directory = Path("src/transformers") files = "tokenization" self.analyze_directory(transformers_directory, identifier=files) def test_configuration_examples(self): transformers_directory = Path("src/transformers") files = "configuration" self.analyze_directory(transformers_directory, identifier=files) def test_remaining_examples(self): transformers_directory = Path("src/transformers") n_identifiers = ["configuration", "modeling", "tokenization"] self.analyze_directory(transformers_directory, n_identifier=n_identifiers) def test_doc_sources(self): doc_source_directory = Path("docs/source") ignore_files = ["favicon.ico"] self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
AdaMix/tests/test_doc_samples.py/0
{ "file_path": "AdaMix/tests/test_doc_samples.py", "repo_id": "AdaMix", "token_count": 1766 }
70
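For reference, the two doctest entry points that analyze_directory relies on can be sketched in isolation as follows; some_module and the .rst path are hypothetical stand-ins:

import doctest
import unittest

import some_module  # hypothetical module whose docstrings contain >>> examples

suite = doctest.DocTestSuite(some_module)  # collect docstring examples into a unittest suite
result = unittest.TextTestRunner().run(suite)
assert len(result.failures) == 0

# For a documentation file rather than a module:
file_result = doctest.testfile("docs/source/quicktour.rst", optionflags=doctest.ELLIPSIS)  # hypothetical path
assert file_result.failed == 0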
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import ( DUMMY_UNKWOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, require_scatter, require_torch, slow, ) if is_torch_available(): from transformers import ( AutoConfig, AutoModel, AutoModelForCausalLM, AutoModelForMaskedLM, AutoModelForPreTraining, AutoModelForQuestionAnswering, AutoModelForSeq2SeqLM, AutoModelForSequenceClassification, AutoModelForTableQuestionAnswering, AutoModelForTokenClassification, AutoModelWithLMHead, BertConfig, BertForMaskedLM, BertForPreTraining, BertForQuestionAnswering, BertForSequenceClassification, BertForTokenClassification, BertModel, GPT2Config, GPT2LMHeadModel, RobertaForMaskedLM, T5Config, T5ForConditionalGeneration, TapasConfig, TapasForQuestionAnswering, ) from transformers.models.auto.modeling_auto import ( MODEL_FOR_CAUSAL_LM_MAPPING, MODEL_FOR_MASKED_LM_MAPPING, MODEL_FOR_PRETRAINING_MAPPING, MODEL_FOR_QUESTION_ANSWERING_MAPPING, MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, MODEL_MAPPING, MODEL_WITH_LM_HEAD_MAPPING, ) from transformers.models.bert.modeling_bert import BERT_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.gpt2.modeling_gpt2 import GPT2_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.t5.modeling_t5 import T5_PRETRAINED_MODEL_ARCHIVE_LIST from transformers.models.tapas.modeling_tapas import TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST @require_torch class AutoModelTest(unittest.TestCase): @slow def test_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModel.from_pretrained(model_name) model, loading_info = AutoModel.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertModel) for value in loading_info.values(): self.assertEqual(len(value), 0) @slow def test_model_for_pretraining_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForPreTraining.from_pretrained(model_name) model, loading_info = AutoModelForPreTraining.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForPreTraining) # Only one value should not be initialized and in the missing keys. 
missing_keys = loading_info.pop("missing_keys") self.assertListEqual(["cls.predictions.decoder.bias"], missing_keys) for key, value in loading_info.items(): self.assertEqual(len(value), 0) @slow def test_lmhead_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelWithLMHead.from_pretrained(model_name) model, loading_info = AutoModelWithLMHead.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_causal_lm(self): for model_name in GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, GPT2Config) model = AutoModelForCausalLM.from_pretrained(model_name) model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, GPT2LMHeadModel) @slow def test_model_for_masked_lm(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForMaskedLM.from_pretrained(model_name) model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForMaskedLM) @slow def test_model_for_encoder_decoder_lm(self): for model_name in T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, T5Config) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, T5ForConditionalGeneration) @slow def test_sequence_classification_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForSequenceClassification.from_pretrained(model_name) model, loading_info = AutoModelForSequenceClassification.from_pretrained( model_name, output_loading_info=True ) self.assertIsNotNone(model) self.assertIsInstance(model, BertForSequenceClassification) @slow def test_question_answering_model_from_pretrained(self): for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, BertConfig) model = AutoModelForQuestionAnswering.from_pretrained(model_name) model, loading_info = AutoModelForQuestionAnswering.from_pretrained(model_name, output_loading_info=True) self.assertIsNotNone(model) self.assertIsInstance(model, BertForQuestionAnswering) @slow @require_scatter def test_table_question_answering_model_from_pretrained(self): for model_name in TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST[5:6]: config = AutoConfig.from_pretrained(model_name) self.assertIsNotNone(config) self.assertIsInstance(config, TapasConfig) model = AutoModelForTableQuestionAnswering.from_pretrained(model_name) model, loading_info = AutoModelForTableQuestionAnswering.from_pretrained( model_name, output_loading_info=True ) self.assertIsNotNone(model) self.assertIsInstance(model, TapasForQuestionAnswering) @slow 
    def test_token_classification_model_from_pretrained(self):
        for model_name in BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name)
            self.assertIsNotNone(config)
            self.assertIsInstance(config, BertConfig)

            model = AutoModelForTokenClassification.from_pretrained(model_name)
            model, loading_info = AutoModelForTokenClassification.from_pretrained(model_name, output_loading_info=True)
            self.assertIsNotNone(model)
            self.assertIsInstance(model, BertForTokenClassification)

    def test_from_pretrained_identifier(self):
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER)
        self.assertIsInstance(model, BertForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_from_identifier_from_model_type(self):
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKWOWN_IDENTIFIER)
        self.assertIsInstance(model, RobertaForMaskedLM)
        self.assertEqual(model.num_parameters(), 14410)
        self.assertEqual(model.num_parameters(only_trainable=True), 14410)

    def test_parents_and_children_in_mappings(self):
        # Test that the children are placed before the parents in the mappings, as the `isinstance` check
        # would otherwise be triggered by the parents and return the wrong model class when using auto models

        mappings = (
            MODEL_MAPPING,
            MODEL_FOR_PRETRAINING_MAPPING,
            MODEL_FOR_QUESTION_ANSWERING_MAPPING,
            MODEL_FOR_TABLE_QUESTION_ANSWERING_MAPPING,
            MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
            MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
            MODEL_WITH_LM_HEAD_MAPPING,
            MODEL_FOR_CAUSAL_LM_MAPPING,
            MODEL_FOR_MASKED_LM_MAPPING,
            MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
        )

        for mapping in mappings:
            mapping = tuple(mapping.items())
            for index, (child_config, child_model) in enumerate(mapping[1:]):
                for parent_config, parent_model in mapping[: index + 1]:
                    assert not issubclass(
                        child_config, parent_config
                    ), f"{child_config.__name__} is child of {parent_config.__name__}"

                    assert not issubclass(
                        child_model, parent_model
                    ), f"{child_model.__name__} is child of {parent_model.__name__}"
AdaMix/tests/test_modeling_auto.py/0
{ "file_path": "AdaMix/tests/test_modeling_auto.py", "repo_id": "AdaMix", "token_count": 4729 }
71
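The ordering constraint that test_parents_and_children_in_mappings enforces can be illustrated with a toy mapping (classes invented here, not from the file above): an isinstance scan over an ordered mapping matches a parent config before its child whenever the parent comes first.

class ParentConfig:
    pass

class ChildConfig(ParentConfig):
    pass

def pick_model(config, ordered_mapping):
    # scan the mapping in order and return the first isinstance match,
    # mirroring how the auto classes resolve a config to a model class
    for config_cls, model_name in ordered_mapping:
        if isinstance(config, config_cls):
            return model_name

bad_order = [(ParentConfig, "ParentModel"), (ChildConfig, "ChildModel")]
good_order = [(ChildConfig, "ChildModel"), (ParentConfig, "ParentModel")]
assert pick_model(ChildConfig(), bad_order) == "ParentModel"  # wrong model selected
assert pick_model(ChildConfig(), good_order) == "ChildModel"  # correct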
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest from transformers import is_torch_available from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device from .test_configuration_common import ConfigTester from .test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask if is_torch_available(): import torch from transformers import ( MODEL_FOR_PRETRAINING_MAPPING, MobileBertConfig, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, MobileBertModel, ) class MobileBertModelTester: def __init__( self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=64, embedding_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ): self.parent = parent self.batch_size = batch_size self.seq_length = seq_length self.is_training = is_training self.use_input_mask = use_input_mask self.use_token_type_ids = use_token_type_ids self.use_labels = use_labels self.vocab_size = vocab_size self.hidden_size = hidden_size self.embedding_size = embedding_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.type_sequence_label_size = type_sequence_label_size self.initializer_range = initializer_range self.num_labels = num_labels self.num_choices = num_choices self.scope = scope def prepare_config_and_inputs(self): input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_mask = None if self.use_input_mask: input_mask = random_attention_mask([self.batch_size, self.seq_length]) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) sequence_labels = None token_labels = None choice_labels = None if self.use_labels: sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size) token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels) choice_labels = ids_tensor([self.batch_size], self.num_choices) config = MobileBertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, 
embedding_size=self.embedding_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, is_decoder=False, initializer_range=self.initializer_range, ) return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels def create_and_check_mobilebert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertModel(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids) result = model(input_ids, token_type_ids=token_type_ids) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_mobilebert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForMaskedLM(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_mobilebert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForNextSentencePrediction(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_mobilebert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForPreTraining(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels, next_sentence_label=sequence_labels, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_mobilebert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = MobileBertForQuestionAnswering(config=config) model.to(torch_device) model.eval() result = model( input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, start_positions=sequence_labels, end_positions=sequence_labels, ) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def create_and_check_mobilebert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MobileBertForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_mobilebert_for_token_classification( 
self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = MobileBertForTokenClassification(config=config) model.to(torch_device) model.eval() result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_mobilebert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = MobileBertForMultipleChoice(config=config) model.to(torch_device) model.eval() multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous() result = model( multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, ) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_torch class MobileBertModelTest(ModelTesterMixin, unittest.TestCase): all_model_classes = ( ( MobileBertModel, MobileBertForMaskedLM, MobileBertForMultipleChoice, MobileBertForNextSentencePrediction, MobileBertForPreTraining, MobileBertForQuestionAnswering, MobileBertForSequenceClassification, MobileBertForTokenClassification, ) if is_torch_available() else () ) # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in MODEL_FOR_PRETRAINING_MAPPING.values(): inputs_dict["labels"] = torch.zeros( (self.model_tester.batch_size, self.model_tester.seq_length), dtype=torch.long, device=torch_device ) inputs_dict["next_sentence_label"] = torch.zeros( self.model_tester.batch_size, dtype=torch.long, device=torch_device ) return inputs_dict def setUp(self): self.model_tester = MobileBertModelTester(self) self.config_tester = ConfigTester(self, config_class=MobileBertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_mobilebert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_masked_lm(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_mobilebert_for_token_classification(*config_and_inputs) def _long_tensor(tok_lst): return torch.tensor( tok_lst, dtype=torch.long, device=torch_device, ) TOLERANCE = 1e-3 @require_torch @require_sentencepiece @require_tokenizers class MobileBertModelIntegrationTests(unittest.TestCase): @slow def test_inference_no_head(self): model = MobileBertModel.from_pretrained("google/mobilebert-uncased").to(torch_device) input_ids = _long_tensor([[101, 7110, 1005, 1056, 2023, 11333, 17413, 1029, 102]]) with torch.no_grad(): output = model(input_ids)[0] expected_shape = torch.Size((1, 9, 512)) self.assertEqual(output.shape, expected_shape) expected_slice = torch.tensor( [ [ [-2.4736526e07, 8.2691656e04, 1.6521838e05], [-5.7541704e-01, 3.9056022e00, 4.4011507e00], [2.6047359e00, 1.5677652e00, -1.7324188e-01], ] ], device=torch_device, ) # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a # ~1 difference, it's therefore not a good idea to measure using addition. # Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the # result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE lower_bound = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE) upper_bound = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE) self.assertTrue(lower_bound and upper_bound)
AdaMix/tests/test_modeling_mobilebert.py/0
{ "file_path": "AdaMix/tests/test_modeling_mobilebert.py", "repo_id": "AdaMix", "token_count": 6717 }
72
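A small numeric sketch (values made up) of the relative bound used in test_inference_no_head: with activations spanning roughly 1e0 to 1e8, an absolute atol is meaningless for the largest entries, so the test bounds the ratio of expected to observed values instead.

import torch

TOLERANCE = 1e-3
expected = torch.tensor([-2.47e7, 3.9056, -0.1732])    # made-up stand-ins for model outputs
observed = torch.tensor([-2.4702e7, 3.9058, -0.17322])
ratio = expected / observed
# every ratio must sit inside [1 - TOLERANCE, 1 + TOLERANCE]
assert torch.all(ratio >= 1 - TOLERANCE) and torch.all(ratio <= 1 + TOLERANCE)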
# coding=utf-8
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


import unittest

from transformers import BertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow

from .test_configuration_common import ConfigTester
from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor


if is_tf_available():
    import tensorflow as tf

    from transformers import TF_MODEL_FOR_PRETRAINING_MAPPING
    from transformers.models.bert.modeling_tf_bert import (
        TFBertForMaskedLM,
        TFBertForMultipleChoice,
        TFBertForNextSentencePrediction,
        TFBertForPreTraining,
        TFBertForQuestionAnswering,
        TFBertForSequenceClassification,
        TFBertForTokenClassification,
        TFBertLMHeadModel,
        TFBertModel,
    )


class TFBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        # use the constructor arguments instead of re-hardcoding their default values
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = BertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels,
choice_labels def create_and_check_bert_model( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertModel(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} sequence_output, pooled_output = model(inputs) inputs = [input_ids, input_mask] result = model(inputs) result = model(input_ids) self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size)) def create_and_check_bert_lm_head( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.is_decoder = True model = TFBertLMHeadModel(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } prediction_scores = model(inputs)["logits"] self.parent.assertListEqual( list(prediction_scores.numpy().shape), [self.batch_size, self.seq_length, self.vocab_size] ) def create_and_check_bert_for_masked_lm( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForMaskedLM(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) def create_and_check_bert_for_next_sequence_prediction( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForNextSentencePrediction(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, 2)) def create_and_check_bert_for_pretraining( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForPreTraining(config=config) inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids} result = model(inputs) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertEqual(result.seq_relationship_logits.shape, (self.batch_size, 2)) def create_and_check_bert_for_sequence_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForSequenceClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def create_and_check_bert_for_multiple_choice( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_choices = self.num_choices model = TFBertForMultipleChoice(config=config) multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1)) multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1)) multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1)) inputs = { "input_ids": multiple_choice_inputs_ids, "attention_mask": multiple_choice_input_mask, "token_type_ids": multiple_choice_token_type_ids, } result = model(inputs) 
self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices)) def create_and_check_bert_for_token_classification( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): config.num_labels = self.num_labels model = TFBertForTokenClassification(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels)) def create_and_check_bert_for_question_answering( self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels ): model = TFBertForQuestionAnswering(config=config) inputs = { "input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids, } result = model(inputs) self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length)) self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels, ) = config_and_inputs inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask} return config, inputs_dict @require_tf class TFBertModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = ( ( TFBertModel, TFBertForMaskedLM, TFBertLMHeadModel, TFBertForNextSentencePrediction, TFBertForPreTraining, TFBertForQuestionAnswering, TFBertForSequenceClassification, TFBertForTokenClassification, TFBertForMultipleChoice, ) if is_tf_available() else () ) test_head_masking = False test_onnx = True onnx_min_opset = 10 # special case for ForPreTraining model def _prepare_for_class(self, inputs_dict, model_class, return_labels=False): inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels) if return_labels: if model_class in TF_MODEL_FOR_PRETRAINING_MAPPING.values(): inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32) return inputs_dict def setUp(self): self.model_tester = TFBertModelTester(self) self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_bert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_model(*config_and_inputs) def test_for_masked_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_masked_lm(*config_and_inputs) def test_for_causal_lm(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_lm_head(*config_and_inputs) def test_for_multiple_choice(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_multiple_choice(*config_and_inputs) def test_for_next_sequence_prediction(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_next_sequence_prediction(*config_and_inputs) def test_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_pretraining(*config_and_inputs) def test_for_question_answering(self): config_and_inputs = 
self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_question_answering(*config_and_inputs) def test_for_sequence_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_sequence_classification(*config_and_inputs) def test_for_token_classification(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_bert_for_token_classification(*config_and_inputs) def test_model_from_pretrained(self): model = TFBertModel.from_pretrained("jplu/tiny-tf-bert-random") self.assertIsNotNone(model) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() list_lm_models = [TFBertForMaskedLM, TFBertForPreTraining, TFBertLMHeadModel] for model_class in self.all_model_classes: model = model_class(config) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in list_lm_models: x = model.get_output_embeddings() assert isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_custom_load_tf_weights(self): model, output_loading_info = TFBertForTokenClassification.from_pretrained( "jplu/tiny-tf-bert-random", output_loading_info=True ) self.assertEqual(sorted(output_loading_info["unexpected_keys"]), []) for layer in output_loading_info["missing_keys"]: self.assertTrue(layer.split("_")[0] in ["dropout", "classifier"]) @require_tf class TFBertModelIntegrationTest(unittest.TestCase): @slow def test_inference_masked_lm(self): model = TFBertForPreTraining.from_pretrained("lysandre/tiny-bert-random") input_ids = tf.constant([[0, 1, 2, 3, 4, 5]]) output = model(input_ids)[0] expected_shape = [1, 6, 32000] self.assertEqual(output.shape, expected_shape) print(output[:, :3, :3]) expected_slice = tf.constant( [ [ [-0.05243197, -0.04498899, 0.05512108], [-0.07444685, -0.01064632, 0.04352357], [-0.05020351, 0.05530146, 0.00700043], ] ] ) tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
AdaMix/tests/test_modeling_tf_bert.py/0
{ "file_path": "AdaMix/tests/test_modeling_tf_bert.py", "repo_id": "AdaMix", "token_count": 7044 }
73
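The expand_dims + tile pattern from create_and_check_bert_for_multiple_choice, shown on toy data: each (batch, seq_len) tensor is replicated once per answer choice.

import tensorflow as tf

num_choices = 4
input_ids = tf.constant([[5, 6, 7]])  # toy batch of shape (1, 3)
# insert a choices axis, then tile along it: (1, 3) -> (1, 1, 3) -> (1, 4, 3)
mc_input_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, num_choices, 1))
assert mc_input_ids.shape == (1, num_choices, 3)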
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import tempfile import unittest from transformers import LxmertConfig, is_tf_available from transformers.testing_utils import require_tf, slow from .test_configuration_common import ConfigTester from .test_modeling_tf_common import TFModelTesterMixin, ids_tensor if is_tf_available(): import tensorflow as tf from transformers.models.lxmert.modeling_tf_lxmert import TFLxmertForPreTraining, TFLxmertModel class TFLxmertModelTester(object): def __init__( self, parent, vocab_size=300, hidden_size=28, num_attention_heads=2, num_labels=2, intermediate_size=64, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, num_qa_labels=30, num_object_labels=16, num_attr_labels=4, num_visual_features=10, l_layers=2, x_layers=1, r_layers=1, visual_feat_dim=128, visual_pos_dim=4, visual_loss_normalizer=6.67, seq_length=20, batch_size=8, is_training=True, task_matched=True, task_mask_lm=True, task_obj_predict=True, task_qa=True, visual_obj_loss=True, visual_attr_loss=True, visual_feat_loss=True, use_token_type_ids=True, use_lang_mask=True, output_attentions=False, output_hidden_states=False, scope=None, ): self.parent = parent self.vocab_size = vocab_size self.hidden_size = hidden_size self.num_attention_heads = num_attention_heads self.num_labels = num_labels self.intermediate_size = intermediate_size self.hidden_act = hidden_act self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.initializer_range = initializer_range self.layer_norm_eps = layer_norm_eps self.pad_token_id = pad_token_id self.num_qa_labels = num_qa_labels self.num_object_labels = num_object_labels self.num_attr_labels = num_attr_labels self.l_layers = l_layers self.x_layers = x_layers self.r_layers = r_layers self.visual_feat_dim = visual_feat_dim self.visual_pos_dim = visual_pos_dim self.visual_loss_normalizer = visual_loss_normalizer self.seq_length = seq_length self.batch_size = batch_size self.is_training = is_training self.use_lang_mask = use_lang_mask self.task_matched = task_matched self.task_mask_lm = task_mask_lm self.task_obj_predict = task_obj_predict self.task_qa = task_qa self.visual_obj_loss = visual_obj_loss self.visual_attr_loss = visual_attr_loss self.visual_feat_loss = visual_feat_loss self.num_visual_features = num_visual_features self.use_token_type_ids = use_token_type_ids self.output_attentions = output_attentions self.output_hidden_states = output_hidden_states self.scope = scope self.num_hidden_layers = {"vision": r_layers, "cross_encoder": x_layers, "language": l_layers} def prepare_config_and_inputs(self): output_attentions = self.output_attentions input_ids = ids_tensor([self.batch_size, self.seq_length], 
vocab_size=self.vocab_size) visual_feats = tf.random.uniform((self.batch_size, self.num_visual_features, self.visual_feat_dim)) bounding_boxes = tf.random.uniform((self.batch_size, self.num_visual_features, 4)) input_mask = None if self.use_lang_mask: input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2) token_type_ids = None if self.use_token_type_ids: token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size) obj_labels = None if self.task_obj_predict: obj_labels = {} if self.visual_attr_loss and self.task_obj_predict: obj_labels["attr"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_attr_labels), ) if self.visual_feat_loss and self.task_obj_predict: obj_labels["feat"] = ( ids_tensor( [self.batch_size, self.num_visual_features, self.visual_feat_dim], self.num_visual_features ), ids_tensor([self.batch_size, self.num_visual_features], self.num_visual_features), ) if self.visual_obj_loss and self.task_obj_predict: obj_labels["obj"] = ( ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ids_tensor([self.batch_size, self.num_visual_features], self.num_object_labels), ) ans = None if self.task_qa: ans = ids_tensor([self.batch_size], self.num_qa_labels) masked_lm_labels = None if self.task_mask_lm: masked_lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) matched_label = None if self.task_matched: matched_label = ids_tensor([self.batch_size], self.num_labels) config = LxmertConfig( vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_attention_heads=self.num_attention_heads, num_labels=self.num_labels, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, layer_norm_eps=self.layer_norm_eps, pad_token_id=self.pad_token_id, num_qa_labels=self.num_qa_labels, num_object_labels=self.num_object_labels, num_attr_labels=self.num_attr_labels, l_layers=self.l_layers, x_layers=self.x_layers, r_layers=self.r_layers, visual_feat_dim=self.visual_feat_dim, visual_pos_dim=self.visual_pos_dim, visual_loss_normalizer=self.visual_loss_normalizer, task_matched=self.task_matched, task_mask_lm=self.task_mask_lm, task_obj_predict=self.task_obj_predict, task_qa=self.task_qa, visual_obj_loss=self.visual_obj_loss, visual_attr_loss=self.visual_attr_loss, visual_feat_loss=self.visual_feat_loss, output_attentions=self.output_attentions, output_hidden_states=self.output_hidden_states, ) return ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) def create_and_check_lxmert_model( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = TFLxmertModel(config=config) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, output_attentions=not output_attentions, ) result = model(input_ids, visual_feats, bounding_boxes, return_dict=False) result = 
model(input_ids, visual_feats, bounding_boxes, return_dict=True) self.parent.assertEqual(result.language_output.shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual( result.vision_output.shape, (self.batch_size, self.num_visual_features, self.hidden_size) ) self.parent.assertEqual(result.pooled_output.shape, (self.batch_size, self.hidden_size)) def prepare_config_and_inputs_for_common(self, return_obj_labels=False): config_and_inputs = self.prepare_config_and_inputs() ( config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ) = config_and_inputs inputs_dict = { "input_ids": input_ids, "visual_feats": visual_feats, "visual_pos": bounding_boxes, "token_type_ids": token_type_ids, "attention_mask": input_mask, } if return_obj_labels: inputs_dict["obj_labels"] = obj_labels return config, inputs_dict def create_and_check_lxmert_for_pretraining( self, config, input_ids, visual_feats, bounding_boxes, token_type_ids, input_mask, obj_labels, masked_lm_labels, matched_label, ans, output_attentions, ): model = TFLxmertForPreTraining(config=config) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=output_attentions, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, output_attentions=not output_attentions, return_dict=False, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, obj_labels=obj_labels, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, matched_label=matched_label, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, ans=ans, ) result = model( input_ids, visual_feats, bounding_boxes, token_type_ids=token_type_ids, attention_mask=input_mask, masked_lm_labels=masked_lm_labels, obj_labels=obj_labels, matched_label=matched_label, ans=ans, output_attentions=not output_attentions, ) self.parent.assertEqual(result.prediction_logits.shape, (self.batch_size, self.seq_length, self.vocab_size)) @require_tf class TFLxmertModelTest(TFModelTesterMixin, unittest.TestCase): all_model_classes = (TFLxmertModel, TFLxmertForPreTraining) if is_tf_available() else () test_head_masking = False test_onnx = False def setUp(self): self.model_tester = TFLxmertModelTester(self) self.config_tester = ConfigTester(self, config_class=LxmertConfig, hidden_size=37) def test_config(self): self.config_tester.run_common_tests() def test_lxmert_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_model(*config_and_inputs) def test_lxmert_for_pretraining(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_lxmert_for_pretraining(*config_and_inputs) @slow def test_model_from_pretrained(self): for model_name in ["unc-nlp/lxmert-base-uncased"]: model = TFLxmertModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_attention_outputs(self): config, inputs_dict = 
self.model_tester.prepare_config_and_inputs_for_common() encoder_seq_length = ( self.model_tester.encoder_seq_length if hasattr(self.model_tester, "encoder_seq_length") else self.model_tester.seq_length ) encoder_key_length = ( self.model_tester.key_length if hasattr(self.model_tester, "key_length") else encoder_seq_length ) for model_class in self.all_model_classes: inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = False model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(model.config.output_hidden_states, False) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) out_len = len(outputs) # Check attention is always last and order is fine inputs_dict["output_attentions"] = True inputs_dict["output_hidden_states"] = True model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) # 2 hidden states were added self.assertEqual(out_len + 2, len(outputs)) language_attentions, vision_attentions, cross_encoder_attentions = (outputs[-3], outputs[-2], outputs[-1]) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape) def test_hidden_states_output(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() def check_hidden_states_output(config, inputs_dict, model_class): model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) language_hidden_states, vision_hidden_states = outputs[-2], outputs[-1] self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1) self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1) seq_length = self.model_tester.seq_length num_visual_features = self.model_tester.num_visual_features self.assertListEqual( list(language_hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], 
) self.assertListEqual( list(vision_hidden_states[0].shape[-2:]), [num_visual_features, self.model_tester.hidden_size], ) for model_class in self.all_model_classes: inputs_dict["output_hidden_states"] = True check_hidden_states_output(config, inputs_dict, model_class) del inputs_dict["output_hidden_states"] config.output_hidden_states = True check_hidden_states_output(config, inputs_dict, model_class) def test_pt_tf_model_equivalence(self): from transformers import is_torch_available if not is_torch_available(): return import torch import transformers for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common( return_obj_labels="PreTraining" in model_class.__name__ ) pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning pt_model_class = getattr(transformers, pt_model_class_name) config.output_hidden_states = True config.task_obj_predict = False tf_model = model_class(config) pt_model = pt_model_class(config) # Check we can load pt model in tf and vice-versa with model => model functions tf_model = transformers.load_pytorch_model_in_tf2_model( tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class) ) pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model) # Check predictions on first output (logits/hidden-states) are close enought given low-level computational differences pt_model.eval() # Delete obj labels as we want to compute the hidden states and not the loss if "obj_labels" in inputs_dict: del inputs_dict["obj_labels"] def torch_type(key): if key in ("visual_feats", "visual_pos"): return torch.float32 else: return torch.long def recursive_numpy_convert(iterable): return_dict = {} for key, value in iterable.items(): if isinstance(value, dict): return_dict[key] = recursive_numpy_convert(value) else: if isinstance(value, (list, tuple)): return_dict[key] = ( torch.from_numpy(iter_value.numpy()).to(torch_type(key)) for iter_value in value ) else: return_dict[key] = torch.from_numpy(value.numpy()).to(torch_type(key)) return return_dict pt_inputs_dict = recursive_numpy_convert(self._prepare_for_class(inputs_dict, model_class)) # need to rename encoder-decoder "inputs" for PyTorch if "inputs" in pt_inputs_dict and self.is_encoder_decoder: pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs") with torch.no_grad(): pto = pt_model(**pt_inputs_dict) tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False) tf_hidden_states = tfo[0].numpy() pt_hidden_states = pto[0].numpy() import numpy as np tf_nans = np.copy(np.isnan(tf_hidden_states)) pt_nans = np.copy(np.isnan(pt_hidden_states)) pt_hidden_states[tf_nans] = 0 tf_hidden_states[tf_nans] = 0 pt_hidden_states[pt_nans] = 0 tf_hidden_states[pt_nans] = 0 max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states)) # Debug info (remove when fixed) if max_diff >= 2e-2: print("===") print(model_class) print(config) print(inputs_dict) print(pt_inputs_dict) self.assertLessEqual(max_diff, 6e-2) # Check we can load pt model in tf and vice-versa with checkpoint => model functions with tempfile.TemporaryDirectory() as tmpdirname: import os pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin") torch.save(pt_model.state_dict(), pt_checkpoint_path) tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path) tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5") tf_model.save_weights(tf_checkpoint_path) pt_model = 
transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path) # Check predictions on first output (logits/hidden-states) are close enought given low-level computational differences pt_model.eval() pt_inputs_dict = dict( (name, torch.from_numpy(key.numpy()).to(torch.long)) for name, key in self._prepare_for_class(inputs_dict, model_class).items() ) for key, value in pt_inputs_dict.items(): if key in ("visual_feats", "visual_pos"): pt_inputs_dict[key] = value.to(torch.float32) else: pt_inputs_dict[key] = value.to(torch.long) with torch.no_grad(): pto = pt_model(**pt_inputs_dict) tfo = tf_model(self._prepare_for_class(inputs_dict, model_class)) tfo = tfo[0].numpy() pto = pto[0].numpy() tf_nans = np.copy(np.isnan(tfo)) pt_nans = np.copy(np.isnan(pto)) pto[tf_nans] = 0 tfo[tf_nans] = 0 pto[pt_nans] = 0 tfo[pt_nans] = 0 max_diff = np.amax(np.abs(tfo - pto)) self.assertLessEqual(max_diff, 6e-2) def test_save_load(self): for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common( return_obj_labels="PreTraining" in model_class.__name__ ) model = model_class(config) outputs = model(self._prepare_for_class(inputs_dict, model_class)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) after_outputs = model(self._prepare_for_class(inputs_dict, model_class)) self.assert_outputs_same(after_outputs, outputs) def test_compile_tf_model(self): optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0) loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True) metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy") for model_class in self.all_model_classes: config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common( return_obj_labels="PreTraining" in model_class.__name__ ) input_ids = tf.keras.Input( batch_shape=(self.model_tester.batch_size, self.model_tester.seq_length), name="input_ids", dtype="int32", ) visual_feats = tf.keras.Input( batch_shape=( self.model_tester.batch_size, self.model_tester.num_visual_features, self.model_tester.visual_feat_dim, ), name="visual_feats", dtype="int32", ) visual_pos = tf.keras.Input( batch_shape=(self.model_tester.batch_size, self.model_tester.num_visual_features, 4), name="visual_pos", dtype="int32", ) # Prepare our model model = model_class(config) # Let's load it from the disk to be sure we can use pretrained weights with tempfile.TemporaryDirectory() as tmpdirname: outputs = model(self._prepare_for_class(inputs_dict, model_class)) # build the model model.save_pretrained(tmpdirname) model = model_class.from_pretrained(tmpdirname) outputs_dict = model(input_ids, visual_feats, visual_pos) hidden_states = outputs_dict[0] # Add a dense layer on top to test integration with other keras modules outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states) # Compile extended model extended_model = tf.keras.Model(inputs=[input_ids, visual_feats, visual_pos], outputs=[outputs]) extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric]) def test_model_common_attributes(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() list_lm_models = [TFLxmertForPreTraining] for model_class in self.all_model_classes: model = model_class(config) assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer) if model_class in list_lm_models: x = model.get_output_embeddings() assert 
isinstance(x, tf.keras.layers.Layer) name = model.get_bias() assert isinstance(name, dict) for k, v in name.items(): assert isinstance(v, tf.Variable) else: x = model.get_output_embeddings() assert x is None name = model.get_bias() assert name is None def test_saved_model_creation(self): # This test is too long (>30sec) and makes fail the CI pass @slow def test_saved_model_creation_extended(self): config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common() config.output_hidden_states = True config.output_attentions = True if hasattr(config, "use_cache"): config.use_cache = True encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length) encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length) for model_class in self.all_model_classes: class_inputs_dict = self._prepare_for_class(inputs_dict, model_class) model = model_class(config) num_out = len(model(class_inputs_dict)) with tempfile.TemporaryDirectory() as tmpdirname: model.save_pretrained(tmpdirname, saved_model=True) saved_model_dir = os.path.join(tmpdirname, "saved_model", "1") model = tf.keras.models.load_model(saved_model_dir) outputs = model(class_inputs_dict) language_hidden_states = outputs["language_hidden_states"] vision_hidden_states = outputs["vision_hidden_states"] language_attentions = outputs["language_attentions"] vision_attentions = outputs["vision_attentions"] cross_encoder_attentions = outputs["cross_encoder_attentions"] self.assertEqual(len(outputs), num_out) self.assertEqual(len(language_hidden_states), self.model_tester.num_hidden_layers["language"] + 1) self.assertEqual(len(vision_hidden_states), self.model_tester.num_hidden_layers["vision"] + 1) seq_length = self.model_tester.seq_length num_visual_features = self.model_tester.num_visual_features self.assertListEqual( list(language_hidden_states[0].shape[-2:]), [seq_length, self.model_tester.hidden_size], ) self.assertListEqual( list(vision_hidden_states[0].shape[-2:]), [num_visual_features, self.model_tester.hidden_size], ) self.assertEqual(len(language_attentions), self.model_tester.num_hidden_layers["language"]) self.assertEqual(len(vision_attentions), self.model_tester.num_hidden_layers["vision"]) self.assertEqual(len(cross_encoder_attentions), self.model_tester.num_hidden_layers["cross_encoder"]) attentions = [language_attentions, vision_attentions, cross_encoder_attentions] attention_shapes = [ [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length], [ self.model_tester.num_attention_heads, self.model_tester.num_visual_features, self.model_tester.num_visual_features, ], [self.model_tester.num_attention_heads, encoder_key_length, self.model_tester.num_visual_features], ] for attention, attention_shape in zip(attentions, attention_shapes): self.assertListEqual(list(attention[0].shape[-3:]), attention_shape)
AdaMix/tests/test_modeling_tf_lxmert.py/0
{ "file_path": "AdaMix/tests/test_modeling_tf_lxmert.py", "repo_id": "AdaMix", "token_count": 15687 }
74
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import copy import random import unittest from transformers import is_torch_available from transformers.testing_utils import require_torch, require_torch_multi_gpu, slow, torch_device from .test_configuration_common import ConfigTester from .test_generation_utils import GenerationTesterMixin from .test_modeling_common import ModelTesterMixin, ids_tensor if is_torch_available(): import torch from transformers import TransfoXLConfig, TransfoXLForSequenceClassification, TransfoXLLMHeadModel, TransfoXLModel from transformers.models.transfo_xl.modeling_transfo_xl import TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST class TransfoXLModelTester: def __init__( self, parent, ): self.parent = parent self.batch_size = 14 self.seq_length = 7 self.mem_len = 30 self.key_length = self.seq_length + self.mem_len self.clamp_len = 15 self.is_training = False self.use_labels = True self.vocab_size = 99 self.cutoffs = [10, 50, 80] self.hidden_size = 32 self.d_embed = 32 self.num_attention_heads = 4 self.d_head = 8 self.d_inner = 128 self.div_val = 2 self.num_hidden_layers = 5 self.scope = None self.seed = 1 self.eos_token_id = 0 self.num_labels = 3 self.pad_token_id = self.vocab_size - 1 def prepare_config_and_inputs(self): input_ids_1 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) input_ids_2 = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) lm_labels = None if self.use_labels: lm_labels = ids_tensor([self.batch_size, self.seq_length], self.vocab_size) config = TransfoXLConfig( vocab_size=self.vocab_size, mem_len=self.mem_len, clamp_len=self.clamp_len, cutoffs=self.cutoffs, d_model=self.hidden_size, d_embed=self.d_embed, n_head=self.num_attention_heads, d_head=self.d_head, d_inner=self.d_inner, div_val=self.div_val, n_layer=self.num_hidden_layers, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, ) return (config, input_ids_1, input_ids_2, lm_labels) def set_seed(self): random.seed(self.seed) torch.manual_seed(self.seed) def create_transfo_xl_model(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLModel(config) model.to(torch_device) model.eval() outputs1 = model(input_ids_1) outputs2 = model(input_ids_2, outputs1["mems"]) outputs = { "hidden_states_1": outputs1["last_hidden_state"], "mems_1": outputs1["mems"], "hidden_states_2": outputs2["last_hidden_state"], "mems_2": outputs2["mems"], } return outputs def check_transfo_xl_model_output(self, result): self.parent.assertEqual(result["hidden_states_1"].shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertEqual(result["hidden_states_2"].shape, (self.batch_size, self.seq_length, self.hidden_size)) self.parent.assertListEqual( [mem.shape for mem in result["mems_1"]], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertListEqual( [mem.shape for mem in result["mems_2"]], [(self.mem_len, self.batch_size, self.hidden_size)] * 
self.num_hidden_layers, ) def create_transfo_xl_lm_head(self, config, input_ids_1, input_ids_2, lm_labels): model = TransfoXLLMHeadModel(config) model.to(torch_device) model.eval() lm_logits_1 = model(input_ids_1)["prediction_scores"] outputs1 = model(input_ids_1, labels=lm_labels) lm_logits_2 = model(input_ids_2, mems=outputs1["mems"])["prediction_scores"] outputs2 = model(input_ids_2, labels=lm_labels, mems=outputs1["mems"]) outputs = { "loss_1": outputs1["losses"], "mems_1": outputs1["mems"], "lm_logits_1": lm_logits_1, "loss_2": outputs2["losses"], "mems_2": outputs2["mems"], "lm_logits_2": lm_logits_2, } return outputs def check_transfo_xl_lm_head_output(self, result): self.parent.assertEqual(result["loss_1"].shape, (self.batch_size, self.seq_length - 1)) self.parent.assertEqual(result["lm_logits_1"].shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in result["mems_1"]], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) self.parent.assertEqual(result["loss_2"].shape, (self.batch_size, self.seq_length - 1)) self.parent.assertEqual(result["lm_logits_2"].shape, (self.batch_size, self.seq_length, self.vocab_size)) self.parent.assertListEqual( [mem.shape for mem in result["mems_2"]], [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers, ) def create_and_check_transfo_xl_for_sequence_classification(self, config, input_ids_1, input_ids_2, lm_labels): config.num_labels = self.num_labels model = TransfoXLForSequenceClassification(config) model.to(torch_device) model.eval() result = model(input_ids_1) self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels)) def prepare_config_and_inputs_for_common(self): config_and_inputs = self.prepare_config_and_inputs() (config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs inputs_dict = {"input_ids": input_ids_1} return config, inputs_dict @require_torch class TransfoXLModelTest(ModelTesterMixin, GenerationTesterMixin, unittest.TestCase): all_model_classes = ( (TransfoXLModel, TransfoXLLMHeadModel, TransfoXLForSequenceClassification) if is_torch_available() else () ) all_generative_model_classes = (TransfoXLLMHeadModel,) if is_torch_available() else () test_pruning = False test_torchscript = False test_resize_embeddings = True def check_cutoffs_and_n_token( self, copied_cutoffs, layer, model_embed, model, model_class, resized_value, vocab_size ): # Check that the cutoffs were modified accordingly for i in range(len(copied_cutoffs)): if i < layer: self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i]) if model_class == TransfoXLLMHeadModel: self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i]) if i < len(model.config.cutoffs): self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i]) else: self.assertEqual(model_embed.cutoffs[i], copied_cutoffs[i] + resized_value) if model_class == TransfoXLLMHeadModel: self.assertEqual(model.crit.cutoffs[i], copied_cutoffs[i] + resized_value) if i < len(model.config.cutoffs): self.assertEqual(model.config.cutoffs[i], copied_cutoffs[i] + resized_value) self.assertEqual(model_embed.n_token, vocab_size + resized_value) if model_class == TransfoXLLMHeadModel: self.assertEqual(model.crit.n_token, vocab_size + resized_value) def setUp(self): self.model_tester = TransfoXLModelTester(self) self.config_tester = ConfigTester(self, config_class=TransfoXLConfig, d_embed=37) def test_config(self): self.config_tester.run_common_tests() def test_transfo_xl_model(self): 
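        # Run the forward pass twice, feeding the first call's returned "mems" into the
        # second call, so the shape checks below exercise Transformer-XL's
        # segment-level recurrence rather than a single isolated segment.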
self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() output_result = self.model_tester.create_transfo_xl_model(*config_and_inputs) self.model_tester.check_transfo_xl_model_output(output_result) def test_transfo_xl_lm_head(self): self.model_tester.set_seed() config_and_inputs = self.model_tester.prepare_config_and_inputs() output_result = self.model_tester.create_transfo_xl_lm_head(*config_and_inputs) self.model_tester.check_transfo_xl_lm_head_output(output_result) def test_transfo_xl_sequence_classification_model(self): config_and_inputs = self.model_tester.prepare_config_and_inputs() self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*config_and_inputs) def test_retain_grad_hidden_states_attentions(self): # xlnet cannot keep gradients in attentions or hidden states return @require_torch_multi_gpu def test_multi_gpu_data_parallel_forward(self): # Opt-out of this test. pass @slow def test_model_from_pretrained(self): for model_name in TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]: model = TransfoXLModel.from_pretrained(model_name) self.assertIsNotNone(model) def test_resize_tokens_embeddings(self): (original_config, inputs_dict) = self.model_tester.prepare_config_and_inputs_for_common() if not self.test_resize_embeddings: return for model_class in self.all_model_classes: config = copy.deepcopy(original_config) model = model_class(config) model.to(torch_device) if self.model_tester.is_training is False: model.eval() model_vocab_size = config.vocab_size # Retrieve the embeddings and clone theme model_embed = model.resize_token_embeddings(model_vocab_size) cloned_embeddings = [emb.weight.clone() for emb in model_embed.emb_layers] # Retrieve the cutoffs and copy them copied_cutoffs = copy.copy(model_embed.cutoffs) test_layers = [x for x in range(config.div_val)] for layer in test_layers: # Check that resizing the token embeddings with a larger vocab size increases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size + 10, layer) self.assertEqual(model.config.vocab_size, model_vocab_size + 10) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] + 10) # Check that the cutoffs were modified accordingly self.check_cutoffs_and_n_token( copied_cutoffs, layer, model_embed, model, model_class, 10, model_vocab_size ) # Check that the model can still do a forward pass successfully (every parameter should be resized) model(**inputs_dict) # Check that resizing the token embeddings with a smaller vocab size decreases the model's vocab size model_embed = model.resize_token_embeddings(model_vocab_size - 5, layer) self.assertEqual(model.config.vocab_size, model_vocab_size - 5) # Check that it actually resizes the embeddings matrix self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0] - 5) # Check that the cutoffs were modified accordingly self.check_cutoffs_and_n_token( copied_cutoffs, layer, model_embed, model, model_class, -5, model_vocab_size ) # Check that the model can still do a forward pass successfully (every parameter should be resized) # Input ids should be clamped to the maximum size of the vocabulary inputs_dict["input_ids"].clamp_(max=model_vocab_size - 5 - 1) model(**inputs_dict) # Check that adding and removing tokens has not modified the first part of the embedding matrix. 
models_equal = True for p1, p2 in zip(cloned_embeddings[layer], model_embed.emb_layers[layer].weight): if p1.data.ne(p2.data).sum() > 0: models_equal = False self.assertTrue(models_equal) # Reset model embeddings to original size model.resize_token_embeddings(model_vocab_size, layer) self.assertEqual(model_vocab_size, model.config.vocab_size) self.assertEqual(model_embed.emb_layers[layer].weight.shape[0], cloned_embeddings[layer].shape[0]) def test_resize_embeddings_untied(self): # transfo-xl requires special resize for lm-head return def _check_attentions_for_generate( self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(attentions, tuple) self.assertListEqual( [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions) ) self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups) for idx, iter_attentions in enumerate(attentions): tgt_len = min_length if idx == 0 else (min_length - 2) src_len = (min_length + config.mem_len) if idx == 0 else (min_length + config.mem_len - 2) expected_shape = ( batch_size * num_beam_groups, config.num_attention_heads, tgt_len, src_len, ) # check attn size self.assertListEqual( [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions) ) def _check_hidden_states_for_generate( self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1 ): self.assertIsInstance(hidden_states, tuple) self.assertListEqual( [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states), ) self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups) for idx, iter_hidden_states in enumerate(hidden_states): seq_len = min_length if idx == 0 else min_length - 2 expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size) # check hidden size self.assertListEqual( [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states), ) @require_torch class TransfoXLModelLanguageGenerationTest(unittest.TestCase): @slow def test_lm_generate_transfo_xl_wt103(self): model = TransfoXLLMHeadModel.from_pretrained("transfo-xl-wt103") model.to(torch_device) input_ids = torch.tensor( [ [ 33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0, ] ], dtype=torch.long, device=torch_device, ) # In 1991 , the remains of Russian Tsar Nicholas II and his family # ( except for Alexei and Maria ) are discovered . # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the # remainder of the story . 1883 Western Siberia , # a young Grigori Rasputin is asked by his father and a group of men to perform magic . # Rasputin has a vision and denounces one of the men as a horse thief . 
Although his # father initially slaps him for making such an accusation , Rasputin watches as the # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous , # with people , even a bishop , begging for his blessing . <eod> </s> <eos> expected_output_ids = [ 33, 1297, 2, 1, 1009, 4, 1109, 11739, 4762, 358, 5, 25, 245, 22, 1706, 17, 20098, 5, 3215, 21, 37, 1110, 3, 13, 1041, 4, 24, 603, 490, 2, 71477, 20098, 104447, 2, 20961, 1, 2604, 4, 1, 329, 3, 6224, 831, 16002, 2, 8, 603, 78967, 29546, 23, 803, 20, 25, 416, 5, 8, 232, 4, 277, 6, 1855, 4601, 3, 29546, 54, 8, 3609, 5, 57211, 49, 4, 1, 277, 18, 8, 1755, 15691, 3, 341, 25, 416, 693, 42573, 71, 17, 401, 94, 31, 17919, 2, 29546, 7873, 18, 1, 435, 23, 11011, 755, 5, 5167, 3, 7983, 98, 84, 2, 29546, 3267, 8, 3609, 4, 1, 4865, 1075, 2, 6087, 71, 6, 346, 8, 5854, 3, 29546, 824, 1400, 1868, 2, 19, 160, 2, 311, 8, 5496, 2, 20920, 17, 25, 15097, 3, 24, 24, 0, 33, 1, 142, 1298, 188, 2, 29546, 113, 8, 3654, 4, 1, 1109, 7136, 833, 3, 13, 1645, 4, 29546, 11, 104, 7, 1, 1109, 532, 7129, 2, 10, 83507, 2, 1162, 1123, 2, 6, 7245, 10, 2, 5, 11, 104, 7, 1, 1109, 532, 7129, 2, 10, 24, 24, 10, 22, 10, 13, 770, 5863, 4, 7245, 10, ] # In 1991, the remains of Russian Tsar Nicholas II and his family ( except for # Alexei and Maria ) are discovered. The voice of young son, Tsarevich Alexei # Nikolaevich, narrates the remainder of the story. 1883 Western Siberia, a young # Grigori Rasputin is asked by his father and a group of men to perform magic. # Rasputin has a vision and denounces one of the men as a horse thief. Although # his father initially slaps him for making such an accusation, Rasputin watches # as the man is chased outside and beaten. Twenty years later, Rasputin sees a # vision of the Virgin Mary, prompting him to become a priest. Rasputin quickly # becomes famous, with people, even a bishop, begging for his blessing. In the # early 20th century, Rasputin became a symbol of the Russian Orthodox Church. # The image of Rasputin was used in the Russian national anthem, " Nearer, My God, # to Heaven ", and was used in the Russian national anthem, " " ( " The Great Spirit # of Heaven " output_ids = model.generate(input_ids, max_length=200, do_sample=False) self.assertListEqual(output_ids[0].tolist(), expected_output_ids)
AdaMix/tests/test_modeling_transfo_xl.py/0
{ "file_path": "AdaMix/tests/test_modeling_transfo_xl.py", "repo_id": "AdaMix", "token_count": 13999 }
75
# coding=utf-8 # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import BertTokenizerFast from transformers.models.bert.tokenization_bert import ( VOCAB_FILES_NAMES, BasicTokenizer, BertTokenizer, WordpieceTokenizer, _is_control, _is_punctuation, _is_whitespace, ) from transformers.testing_utils import require_tokenizers, slow from .test_tokenization_common import TokenizerTesterMixin, filter_non_english @require_tokenizers class BertTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = BertTokenizer rust_tokenizer_class = BertTokenizerFast test_rust_tokenizer = True space_between_special_tokens = True from_pretrained_filter = filter_non_english def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00E9d,running" output_text = "unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11]) def test_rust_and_python_full_tokenizers(self): if not self.test_rust_tokenizer: return tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() sequence = "UNwant\u00E9d,running" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) # With lower casing tokenizer = self.get_tokenizer(do_lower_case=True) rust_tokenizer = self.get_rust_tokenizer(do_lower_case=True) sequence = "UNwant\u00E9d,running" tokens = tokenizer.tokenize(sequence) rust_tokens = rust_tokenizer.tokenize(sequence) self.assertListEqual(tokens, rust_tokens) ids = tokenizer.encode(sequence, add_special_tokens=False) rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False) self.assertListEqual(ids, rust_ids) rust_tokenizer = self.get_rust_tokenizer() ids = tokenizer.encode(sequence) rust_ids = rust_tokenizer.encode(sequence) self.assertListEqual(ids, rust_ids) def test_chinese(self): tokenizer = BasicTokenizer() self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"]) def test_basic_tokenizer_lower(self): 
tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"]) def test_basic_tokenizer_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_lower_strip_accents_default(self): tokenizer = BasicTokenizer(do_lower_case=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"] ) self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"]) def test_basic_tokenizer_no_lower(self): tokenizer = BasicTokenizer(do_lower_case=False) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_false(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_no_lower_strip_accents_true(self): tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True) self.assertListEqual( tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"] ) def test_basic_tokenizer_respects_never_split_tokens(self): tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"]) self.assertListEqual( tokenizer.tokenize(" \tHeLLo!how \n Are yoU? 
[UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"] ) def test_wordpiece_tokenizer(self): vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"] vocab = {} for (i, token) in enumerate(vocab_tokens): vocab[token] = i tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]") self.assertListEqual(tokenizer.tokenize(""), []) self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"]) self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"]) def test_is_whitespace(self): self.assertTrue(_is_whitespace(" ")) self.assertTrue(_is_whitespace("\t")) self.assertTrue(_is_whitespace("\r")) self.assertTrue(_is_whitespace("\n")) self.assertTrue(_is_whitespace("\u00A0")) self.assertFalse(_is_whitespace("A")) self.assertFalse(_is_whitespace("-")) def test_is_control(self): self.assertTrue(_is_control("\u0005")) self.assertFalse(_is_control("A")) self.assertFalse(_is_control(" ")) self.assertFalse(_is_control("\t")) self.assertFalse(_is_control("\r")) def test_is_punctuation(self): self.assertTrue(_is_punctuation("-")) self.assertTrue(_is_punctuation("$")) self.assertTrue(_is_punctuation("`")) self.assertTrue(_is_punctuation(".")) self.assertFalse(_is_punctuation("A")) self.assertFalse(_is_punctuation(" ")) def test_clean_text(self): tokenizer = self.get_tokenizer() rust_tokenizer = self.get_rust_tokenizer() # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340 self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]) self.assertListEqual( [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]] ) @slow def test_sequence_builders(self): tokenizer = self.tokenizer_class.from_pretrained("bert-base-uncased") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [101] + text + [102] assert encoded_pair == [101] + text + [102] + text_2 + [102] def test_offsets_with_special_characters(self): for tokenizer, pretrained_name, kwargs in self.tokenizers_list: with self.subTest("{} ({})".format(tokenizer.__class__.__name__, pretrained_name)): tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs) sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence." 
tokens = tokenizer_r.encode_plus( sentence, return_attention_mask=False, return_token_type_ids=False, return_offsets_mapping=True, add_special_tokens=True, ) do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False expected_results = ( [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "A"), ((1, 2), ","), ((3, 5), "na"), ((5, 6), "##ï"), ((6, 8), "##ve"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "Allen"), ((21, 23), "##NL"), ((23, 24), "##P"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] if not do_lower_case else [ ((0, 0), tokenizer_r.cls_token), ((0, 1), "a"), ((1, 2), ","), ((3, 8), "naive"), ((9, 15), tokenizer_r.mask_token), ((16, 21), "allen"), ((21, 23), "##nl"), ((23, 24), "##p"), ((25, 33), "sentence"), ((33, 34), "."), ((0, 0), tokenizer_r.sep_token), ] ) self.assertEqual( [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"]) ) self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
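
# ----------------------------------------------------------------------------
# Illustrative sketch (not part of the original test suite): the greedy
# longest-match-first WordPiece algorithm the tests above exercise, written
# against the toy vocabulary used in `test_wordpiece_tokenizer`. The helper
# name `_toy_wordpiece` is hypothetical, not a transformers API.
def _toy_wordpiece(word, vocab, unk_token="[UNK]"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        piece = None
        # Shrink the candidate substring until it is found in the vocab;
        # non-initial pieces are looked up with the "##" continuation prefix.
        while start < end:
            candidate = ("##" if start > 0 else "") + word[start:end]
            if candidate in vocab:
                piece = candidate
                break
            end -= 1
        if piece is None:
            # No prefix matched: the entire word maps to the unknown token,
            # mirroring WordpieceTokenizer's behavior for "unwantedX".
            return [unk_token]
        tokens.append(piece)
        start = end
    return tokens

# _toy_wordpiece("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]
# _toy_wordpiece("unwantedx", {"un", "##want", "##ed"}) == ["[UNK]"]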
AdaMix/tests/test_tokenization_bert.py/0
{ "file_path": "AdaMix/tests/test_tokenization_bert.py", "repo_id": "AdaMix", "token_count": 5611 }
76
# coding=utf-8 # Copyright 2018 The Microsoft Research Asia LayoutLM Team Authors, The Hugging Face Team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import unittest from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES from transformers.testing_utils import require_tokenizers from .test_tokenization_common import TokenizerTesterMixin @require_tokenizers class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase): tokenizer_class = LayoutLMTokenizer rust_tokenizer_class = LayoutLMTokenizerFast test_rust_tokenizer = True space_between_special_tokens = True def setUp(self): super().setUp() vocab_tokens = [ "[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest", ] self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"]) with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer: vocab_writer.write("".join([x + "\n" for x in vocab_tokens])) def get_tokenizer(self, **kwargs): return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs) def get_input_output_texts(self, tokenizer): input_text = "UNwant\u00E9d,running" output_text = "unwanted, running" return input_text, output_text def test_full_tokenizer(self): tokenizer = self.tokenizer_class(self.vocab_file) tokens = tokenizer.tokenize("UNwant\u00E9d,running") self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"]) self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9]) def test_special_tokens_as_you_expect(self): """If you are training a seq2seq model that expects a decoder_prefix token make sure it is prepended to decoder_input_ids """ pass
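
# LayoutLMTokenizer is a thin subclass of BERT's WordPiece tokenizer, so only the
# vocabulary round-trip and token-to-id mapping need direct coverage here; the
# shared behavior is exercised via the common TokenizerTesterMixin.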
AdaMix/tests/test_tokenization_layoutlm.py/0
{ "file_path": "AdaMix/tests/test_tokenization_layoutlm.py", "repo_id": "AdaMix", "token_count": 1057 }
77
# coding=utf-8 # Copyright 2020 The SqueezeBert authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from transformers import SqueezeBertTokenizer, SqueezeBertTokenizerFast from transformers.testing_utils import require_tokenizers, slow from .test_tokenization_bert import BertTokenizationTest @require_tokenizers class SqueezeBertTokenizationTest(BertTokenizationTest): tokenizer_class = SqueezeBertTokenizer rust_tokenizer_class = SqueezeBertTokenizerFast test_rust_tokenizer = True def get_rust_tokenizer(self, **kwargs): return SqueezeBertTokenizerFast.from_pretrained(self.tmpdirname, **kwargs) @slow def test_sequence_builders(self): tokenizer = SqueezeBertTokenizer.from_pretrained("squeezebert/squeezebert-mnli-headless") text = tokenizer.encode("sequence builders", add_special_tokens=False) text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False) encoded_sentence = tokenizer.build_inputs_with_special_tokens(text) encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2) assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [ tokenizer.sep_token_id ]
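
# SqueezeBERT reuses BERT's WordPiece tokenization, so this class inherits the
# full BertTokenizationTest suite unchanged and only pins the pretrained
# checkpoint exercised by the slow sequence-builders test.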
AdaMix/tests/test_tokenization_squeezebert.py/0
{ "file_path": "AdaMix/tests/test_tokenization_squeezebert.py", "repo_id": "AdaMix", "token_count": 642 }
78
# Copyright 2020 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import re
import shutil
import sys
import tempfile
import unittest


git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
sys.path.append(os.path.join(git_repo_path, "utils"))

import check_copies  # noqa: E402


# This is the reference code that will be used in the tests.
# If BertLMPredictionHead is changed in modeling_bert.py, this code needs to be manually updated.
REFERENCE_CODE = """    def __init__(self, config):
        super().__init__()
        self.transform = BertPredictionHeadTransform(config)

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        self.bias = nn.Parameter(torch.zeros(config.vocab_size))

        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def forward(self, hidden_states):
        hidden_states = self.transform(hidden_states)
        hidden_states = self.decoder(hidden_states)
        return hidden_states
"""


class CopyCheckTester(unittest.TestCase):
    def setUp(self):
        self.transformer_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(self.transformer_dir, "models/bert/"))
        check_copies.TRANSFORMER_PATH = self.transformer_dir
        shutil.copy(
            os.path.join(git_repo_path, "src/transformers/models/bert/modeling_bert.py"),
            os.path.join(self.transformer_dir, "models/bert/modeling_bert.py"),
        )

    def tearDown(self):
        check_copies.TRANSFORMER_PATH = "src/transformers"
        shutil.rmtree(self.transformer_dir)

    def check_copy_consistency(self, comment, class_name, class_code, overwrite_result=None):
        code = comment + f"\nclass {class_name}(nn.Module):\n" + class_code
        if overwrite_result is not None:
            expected = comment + f"\nclass {class_name}(nn.Module):\n" + overwrite_result
        fname = os.path.join(self.transformer_dir, "new_code.py")
        with open(fname, "w") as f:
            f.write(code)
        if overwrite_result is None:
            self.assertTrue(len(check_copies.is_copy_consistent(fname)) == 0)
        else:
            check_copies.is_copy_consistent(fname, overwrite=True)
            with open(fname, "r") as f:
                # Compare the overwritten file against the expected (renamed) code.
                self.assertEqual(f.read(), expected)

    def test_find_code_in_transformers(self):
        code = check_copies.find_code_in_transformers("models.bert.modeling_bert.BertLMPredictionHead")
        self.assertEqual(code, REFERENCE_CODE)

    def test_is_copy_consistent(self):
        # Base copy consistency
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE + "\n",
        )

        # With no empty line at the end
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead",
            "BertLMPredictionHead",
            REFERENCE_CODE,
        )

        # Copy consistency with rename
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            re.sub("Bert", "TestModel", REFERENCE_CODE),
        )

        # Copy consistency with a really long name
        long_class_name = "TestModelWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReasonIReallyDontUnderstand"
        self.check_copy_consistency(
            f"# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->{long_class_name}",
            f"{long_class_name}LMPredictionHead",
            re.sub("Bert", long_class_name, REFERENCE_CODE),
        )

        # Copy consistency with overwrite
        self.check_copy_consistency(
            "# Copied from transformers.models.bert.modeling_bert.BertLMPredictionHead with Bert->TestModel",
            "TestModelLMPredictionHead",
            REFERENCE_CODE,
            overwrite_result=re.sub("Bert", "TestModel", REFERENCE_CODE),
        )
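
# ----------------------------------------------------------------------------
# Minimal sketch (hypothetical helper, not the real utils/check_copies.py
# implementation) of what a "# Copied from X with A->B" check boils down to:
# fetch the reference source, apply the rename, and compare with the local copy.
def _naive_copy_check(reference_code: str, local_code: str, rename=None) -> bool:
    expected = reference_code
    if rename is not None:
        old, new = rename
        # Apply the same textual rename the "with A->B" suffix requests.
        expected = re.sub(old, new, expected)
    return expected.strip() == local_code.strip()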
AdaMix/tests/test_utils_check_copies.py/0
{ "file_path": "AdaMix/tests/test_utils_check_copies.py", "repo_id": "AdaMix", "token_count": 1989 }
79
{ "opsets": { "1": [ "Abs", "Add", "AddV2", "ArgMax", "ArgMin", "AvgPool", "AvgPool3D", "BatchMatMul", "BatchMatMulV2", "BatchToSpaceND", "BiasAdd", "BiasAddV1", "Cast", "Ceil", "CheckNumerics", "ComplexAbs", "Concat", "ConcatV2", "Const", "ConstV2", "Conv1D", "Conv2D", "Conv2DBackpropInput", "Conv3D", "Conv3DBackpropInputV2", "DepthToSpace", "DepthwiseConv2d", "DepthwiseConv2dNative", "Div", "Dropout", "Elu", "Equal", "Erf", "Exp", "ExpandDims", "Flatten", "Floor", "Gather", "GatherNd", "GatherV2", "Greater", "Identity", "IdentityN", "If", "LRN", "LSTMBlockCell", "LeakyRelu", "Less", "Log", "LogSoftmax", "LogicalAnd", "LogicalNot", "LogicalOr", "LookupTableSizeV2", "MatMul", "Max", "MaxPool", "MaxPool3D", "MaxPoolV2", "Maximum", "Mean", "Min", "Minimum", "MirrorPad", "Mul", "Neg", "NoOp", "NotEqual", "OneHot", "Pack", "Pad", "PadV2", "Placeholder", "PlaceholderV2", "PlaceholderWithDefault", "Pow", "Prod", "RFFT", "RandomNormal", "RandomNormalLike", "RandomUniform", "RandomUniformLike", "RealDiv", "Reciprocal", "Relu", "Relu6", "Reshape", "Rsqrt", "Selu", "Shape", "Sigmoid", "Sign", "Size", "Slice", "Softmax", "Softplus", "Softsign", "SpaceToBatchND", "SpaceToDepth", "Split", "SplitV", "Sqrt", "Square", "SquaredDifference", "Squeeze", "StatelessIf", "StopGradient", "StridedSlice", "StringJoin", "Sub", "Sum", "Tanh", "Tile", "TopKV2", "Transpose", "TruncateDiv", "Unpack", "ZerosLike" ], "2": [], "3": [], "4": [], "5": [], "6": [ "AddN", "All", "Any", "FloorDiv", "FusedBatchNorm", "FusedBatchNormV2", "FusedBatchNormV3" ], "7": [ "Acos", "Asin", "Atan", "Cos", "Fill", "FloorMod", "GreaterEqual", "LessEqual", "Loop", "MatrixBandPart", "Multinomial", "Range", "ResizeBilinear", "ResizeNearestNeighbor", "Scan", "Select", "SelectV2", "Sin", "SoftmaxCrossEntropyWithLogits", "SparseSoftmaxCrossEntropyWithLogits", "StatelessWhile", "Tan", "TensorListFromTensor", "TensorListGetItem", "TensorListLength", "TensorListReserve", "TensorListResize", "TensorListSetItem", "TensorListStack", "While" ], "8": [ "BroadcastTo", "ClipByValue", "FIFOQueueV2", "HashTableV2", "IteratorGetNext", "IteratorV2", "LookupTableFindV2", "MaxPoolWithArgmax", "QueueDequeueManyV2", "QueueDequeueUpToV2", "QueueDequeueV2", "ReverseSequence" ], "9": [ "SegmentMax", "SegmentMean", "SegmentMin", "SegmentProd", "SegmentSum", "Sinh", "SparseSegmentMean", "SparseSegmentMeanWithNumSegments", "SparseSegmentSqrtN", "SparseSegmentSqrtNWithNumSegments", "SparseSegmentSum", "SparseSegmentSumWithNumSegments", "UnsortedSegmentMax", "UnsortedSegmentMin", "UnsortedSegmentProd", "UnsortedSegmentSum", "Where" ], "10": [ "CropAndResize", "CudnnRNN", "DynamicStitch", "FakeQuantWithMinMaxArgs", "IsFinite", "IsInf", "NonMaxSuppressionV2", "NonMaxSuppressionV3", "NonMaxSuppressionV4", "NonMaxSuppressionV5", "ParallelDynamicStitch", "ReverseV2", "Roll" ], "11": [ "Bincount", "Cumsum", "InvertPermutation", "LeftShift", "MatrixDeterminant", "MatrixDiagPart", "MatrixDiagPartV2", "MatrixDiagPartV3", "RaggedRange", "RightShift", "Round", "ScatterNd", "SparseFillEmptyRows", "SparseReshape", "SparseToDense", "TensorScatterUpdate", "Unique" ], "12": [ "Einsum", "MatrixDiag", "MatrixDiagV2", "MatrixDiagV3", "MatrixSetDiagV3", "SquaredDistance" ], "13": [] } }
AdaMix/utils/tf_ops/onnx.json/0
{ "file_path": "AdaMix/utils/tf_ops/onnx.json", "repo_id": "AdaMix", "token_count": 4081 }
80
#!/bin/bash wget -c https://github.com/microsoft/AirSim-Drone-Racing-Lab/releases/download/v1.0-linux/ADRL.zip unzip ADRL.zip; rm ADRL.zip; mkdir -p /home/$USER/Documents/AirSim; wget --directory-prefix=/home/$USER/Documents/AirSim -c https://github.com/microsoft/AirSim-Drone-Racing-Lab/releases/download/v1.0-linux/settings.json;
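# Assumed layout after running this script: the binaries unpack into ./ADRL next
# to this script, and settings.json lands in ~/Documents/AirSim, the directory
# AirSim reads its configuration from by default.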
AirSim-Drone-Racing-Lab/download_linux_binaries.sh/0
{ "file_path": "AirSim-Drone-Racing-Lab/download_linux_binaries.sh", "repo_id": "AirSim-Drone-Racing-Lab", "token_count": 129 }
81
from __future__ import division import time import numpy as np import vel_regressor import cv2 import math import tensorflow as tf import os, sys import airsimdroneracingvae import airsimdroneracingvae.types import airsimdroneracingvae.utils # import utils curr_dir = os.path.dirname(os.path.abspath(__file__)) import_path = os.path.join(curr_dir, '..') sys.path.insert(0, import_path) import racing_utils ########################################### # DEFINE DEPLOYMENT META PARAMETERS # policy options: bc_con, bc_unc, bc_img, bc_reg, bc_full policy_type = 'bc_con' gate_noise = 1.0 ########################################### def process_image(client, img_res): image_response = client.simGetImages([airsimdroneracingvae.ImageRequest('0', airsimdroneracingvae.ImageType.Scene, False, False)])[0] img_1d = np.fromstring(image_response.image_data_uint8, dtype=np.uint8) # get numpy array img_bgr = img_1d.reshape(image_response.height, image_response.width, 3) # reshape array to 4 channel image array H X W X 3 img_resized = cv2.resize(img_bgr, (img_res, img_res)).astype(np.float32) img_batch_1 = np.array([img_resized]) cam_pos = image_response.camera_position cam_orientation = image_response.camera_orientation return img_batch_1, cam_pos, cam_orientation def move_drone(client, vel_cmd): # good multipliers originally: 0.4 for vel, 0.8 for yaw # good multipliers new policies: 0.8 for vel, 0.8 for yaw vel_cmd[0:2] = vel_cmd[0:2] *1.0 # usually base speed is 3/ms vel_cmd[3] = vel_cmd[3] * 1.0 # yaw rate is given in deg/s!! not rad/s yaw_mode = airsimdroneracingvae.YawMode(is_rate=True, yaw_or_rate=vel_cmd[3]*180.0/np.pi) client.moveByVelocityAsync(vel_cmd[0], vel_cmd[1], vel_cmd[2], duration=0.1, yaw_mode=yaw_mode) print(os.path.abspath(airsimdroneracingvae.__file__)) os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1' os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true' # my_devices = tf.config.experimental.list_physical_devices(device_type='CPU') # tf.config.experimental.set_visible_devices(devices=my_devices, device_type='CPU') # tf.debugging.set_log_device_placement(True) if __name__ == "__main__": # set airsim client client = airsimdroneracingvae.MultirotorClient() client.confirmConnection() # client.simLoadLevel('Soccer_Field_Easy') client.simLoadLevel('Soccer_Field_Easy') time.sleep(2) # should match the names in settings.json drone_name = "drone_0" client.enableApiControl(True, vehicle_name=drone_name) time.sleep(0.01) client.armDisarm(True, vehicle_name=drone_name) time.sleep(0.01) client.setTrajectoryTrackerGains(airsimdroneracingvae.TrajectoryTrackerGains().to_list(), vehicle_name=drone_name) time.sleep(0.01) # destroy all previous gates in map racing_utils.trajectory_utils.AllGatesDestroyer(client) # spawn red gates in appropriate locations # gate_poses = racing_utils.trajectory_utils.RedGateSpawner(client, num_gates=1, noise_amp=0) offset = [0, 0, -0] gate_poses = racing_utils.trajectory_utils.RedGateSpawnerCircle(client, num_gates=8, radius=8, radius_noise=gate_noise, height_range=[0, -gate_noise], track_offset=offset) # wait till takeoff complete vel_max = 5.0 acc_max = 2.0 time.sleep(1.0) # takeoff_position = airsimdroneracingvae.Vector3r(25, 7, -1.5) # takeoff_orientation = airsimdroneracingvae.Vector3r(.2, -0.9, 0) # # takeoff_position = airsimdroneracingvae.Vector3r(25, -7, -1.5) # takeoff_orientation = airsimdroneracingvae.Vector3r(-.2, 0.9, 0) takeoff_position = airsimdroneracingvae.Vector3r(5.5, -4, -1.5+offset[2]) takeoff_orientation = airsimdroneracingvae.Vector3r(0.4, 0.9, 0) # takeoff_position = 
airsimdroneracingvae.Vector3r(0, 0, -2) # takeoff_position = airsimdroneracingvae.Vector3r(0, 0, 10) # takeoff_orientation = airsimdroneracingvae.Vector3r(1, 0, 0) # client.plot_tf([takeoff_pose], duration=20.0, vehicle_name=drone_name) # client.moveOnSplineAsync([airsimdroneracingvae.Vector3r(0, 0, -3)], vel_max=15.0, acc_max=5.0, vehicle_name=drone_name, viz_traj=True).join() client.moveOnSplineVelConstraintsAsync([takeoff_position], [takeoff_orientation], vel_max=vel_max, acc_max=acc_max, vehicle_name=drone_name, viz_traj=False).join() # client.moveOnSplineVelConstraintsAsync([airsimdroneracingvae.Vector3r(1, 0, 8)], [airsimdroneracingvae.Vector3r(1, 0, 0)], vel_max=vel_max, acc_max=acc_max, vehicle_name=drone_name, viz_traj=True) time.sleep(1.0) img_res = 64 if policy_type == 'bc_con': training_mode = 'latent' latent_space_constraints = True bc_weights_path = '/home/rb/all_files/model_outputs/bc_con/bc_model_150.ckpt' feature_weights_path = '/home/rb/all_files/model_outputs/cmvae_con/cmvae_model_40.ckpt' elif policy_type == 'bc_unc': training_mode = 'latent' latent_space_constraints = False bc_weights_path = '/home/rb/all_files/model_outputs/bc_unc/bc_model_150.ckpt' feature_weights_path = '/home/rb/all_files/model_outputs/cmvae_unc/cmvae_model_45.ckpt' elif policy_type == 'bc_img': training_mode = 'latent' latent_space_constraints = True bc_weights_path = '/home/rb/all_files/model_outputs/bc_img/bc_model_100.ckpt' feature_weights_path = '/home/rb/all_files/model_outputs/cmvae_img/cmvae_model_45.ckpt' elif policy_type == 'bc_reg': training_mode = 'reg' latent_space_constraints = True bc_weights_path = '/home/rb/all_files/model_outputs/bc_reg/bc_model_80.ckpt' feature_weights_path = '/home/rb/all_files/model_outputs/reg/reg_model_25.ckpt' elif policy_type == 'bc_full': training_mode = 'full' latent_space_constraints = True bc_weights_path = '/home/rb/all_files/model_outputs/bc_full/bc_model_120.ckpt' feature_weights_path = None vel_regressor = vel_regressor.VelRegressor(regressor_type=training_mode, bc_weights_path=bc_weights_path, feature_weights_path=feature_weights_path, latent_space_constraints=latent_space_constraints) count = 0 max_count = 50 times_net = np.zeros((max_count,)) times_loop = np.zeros((max_count,)) while True: start_time = time.time() img_batch_1, cam_pos, cam_orientation = process_image(client, img_res) elapsed_time_net = time.time() - start_time times_net[count] = elapsed_time_net p_o_b = airsimdroneracingvae.types.Pose(cam_pos, cam_orientation) vel_cmd = vel_regressor.predict_velocities(img_batch_1, p_o_b) # print(vel_cmd) # print('Before sending vel cmd') move_drone(client, vel_cmd) # print('After sending vel cmd') # time.sleep(0.05) elapsed_time_loop = time.time() - start_time times_loop[count] = elapsed_time_loop count = count + 1 if count == max_count: count = 0 avg_time = np.mean(times_net) avg_freq = 1.0/avg_time print('Avg network time over {} iterations: {} ms | {} Hz'.format(max_count, avg_time*1000, avg_freq)) avg_time = np.mean(times_loop) avg_freq = 1.0 / avg_time print('Avg loop time over {} iterations: {} ms | {} Hz'.format(max_count, avg_time * 1000, avg_freq))
AirSim-Drone-Racing-VAE-Imitation/imitation_learning/bc_navigation.py/0
{ "file_path": "AirSim-Drone-Racing-VAE-Imitation/imitation_learning/bc_navigation.py", "repo_id": "AirSim-Drone-Racing-VAE-Imitation", "token_count": 3194 }
82
from argparse import ArgumentParser import airsimneurips as airsim import time import utils import threading import numpy as np import cv2 from baseline_racer import BaselineRacer class BaselineRacerImageBenchmarker(BaselineRacer): def __init__(self, img_benchmark_type = 'simGetImage', drone_name = "drone_1", viz_traj = False, viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0], viz_image_cv2 = False): super().__init__(drone_name=drone_name, viz_traj=viz_traj, viz_image_cv2=viz_image_cv2) self.image_benchmark_num_images = 0 self.image_benchmark_total_time = 0.0 self.image_callback_thread = None if img_benchmark_type == "simGetImage": self.image_callback_thread = threading.Thread(target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImage, 0.05)) if img_benchmark_type == "simGetImages": self.image_callback_thread = threading.Thread(target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImages, 0.05)) self.is_image_thread_active = False def start_img_benchmark_thread(self): if not self.is_image_thread_active: self.is_image_thread_active = True self.image_callback_thread.start() print("Started img image_callback thread") def stop_img_benchmark_thread(self): if self.is_image_thread_active: self.is_image_thread_active = False self.image_callback_thread.join() print("Stopped image callback thread.") def repeat_timer_img(self, task, period): while self.is_image_thread_active: task() time.sleep(period) def print_benchmark_results(self): avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images)) print(self.level_name + ": {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images)) def image_callback_benchmark_simGetImage(self): self.image_benchmark_num_images += 1 iter_start_time = time.time() response = self.airsim_client_images.simGetImage("fpv_cam", airsim.ImageType.Scene) img_rgb = cv2.imdecode(airsim.string_to_uint8_array(response), cv2.IMREAD_UNCHANGED) self.image_benchmark_total_time += time.time() - iter_start_time avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images)) print(self.level_name + ": {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images)) # uncomment following lines to viz image # if self.viz_image_cv2: # cv2.imshow("img_rgb", img_rgb_1d_new) # cv2.waitKey(1) def image_callback_benchmark_simGetImages(self): self.image_benchmark_num_images += 1 iter_start_time = time.time() request = [airsim.ImageRequest("fpv_cam", airsim.ImageType.Scene, False, False)] response = self.airsim_client_images.simGetImages(request) img_rgb_1d = np.fromstring(response[0].image_data_uint8, dtype=np.uint8) img_rgb = img_rgb_1d.reshape(response[0].height, response[0].width, 3) self.image_benchmark_total_time += time.time() - iter_start_time avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images)) print(self.level_name + ": {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images)) # uncomment following lines to viz image # if self.viz_image_cv2: # cv2.imshow("img_rgb", img_rgb_1d_new) # cv2.waitKey(1) def main(args): # ensure you have generated the neurips planning settings file by running python generate_settings_file.py baseline_racer = BaselineRacerImageBenchmarker(img_benchmark_type=args.img_benchmark_type, \ drone_name="drone_1", \ viz_traj=args.viz_traj, \ viz_traj_color_rgba=[1.0, 1.0, 0.0, 1.0], \ viz_image_cv2=args.viz_image_cv2) baseline_racer.load_level(args.level_name) if args.level_name == 
"Qualifier_Tier_1": args.race_tier = 1 if args.level_name == "Qualifier_Tier_2": args.race_tier = 2 if args.level_name == "Qualifier_Tier_3": args.race_tier = 3 baseline_racer.start_race(args.race_tier) baseline_racer.initialize_drone() baseline_racer.takeoff_with_moveOnSpline() baseline_racer.get_ground_truth_gate_poses() baseline_racer.start_img_benchmark_thread() baseline_racer.fly_through_all_gates_at_once_with_moveOnSpline().join() baseline_racer.stop_img_benchmark_thread() baseline_racer.print_benchmark_results() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument('--level_name', type=str, choices=["Soccer_Field_Easy", "Soccer_Field_Medium", "ZhangJiaJie_Medium", "Building99_Hard", "Qualifier_Tier_1", "Qualifier_Tier_2", "Qualifier_Tier_3"], default="ZhangJiaJie_Medium") parser.add_argument('--enable_viz_traj', dest='viz_traj', action='store_true', default=False) parser.add_argument('--img_benchmark_type', type=str, choices=["simGetImage", "simGetImages"], default="simGetImages") parser.add_argument('--enable_viz_image_cv2', dest='viz_image_cv2', action='store_true', default=False) parser.add_argument('--race_tier', type=int, choices=[1,2,3], default=1) args = parser.parse_args() main(args)
AirSim-NeurIPS2019-Drone-Racing/baselines/baseline_racer_image_benchmarker.py/0
{ "file_path": "AirSim-NeurIPS2019-Drone-Racing/baselines/baseline_racer_image_benchmarker.py", "repo_id": "AirSim-NeurIPS2019-Drone-Racing", "token_count": 2341 }
83
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
from logging import INFO, LogRecord, getLogger
from typing import Dict, Optional

from opentelemetry.sdk._logs import LoggingHandler
from opentelemetry.util.types import Attributes

# Marker attribute that tells the Azure Monitor exporter to emit the record
# as a custom event rather than an ordinary trace/log entry.
_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE = (
    "APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE"
)

# Dedicated logger so tracked events do not propagate into the application's
# own logging output.
_event_logger = getLogger(__name__)
_event_logger.propagate = False


class _AzureMonitorOpenTelemetryEventHandler(LoggingHandler):
    @staticmethod
    def _get_attributes(record: LogRecord) -> Attributes:
        attributes = LoggingHandler._get_attributes(record)
        attributes[_APPLICATION_INSIGHTS_EVENT_MARKER_ATTRIBUTE] = True
        return attributes


class _AzureMonitorEventsExtension:
    _initialized = False

    @staticmethod
    def _initialize():
        # Lazily attach the event handler the first time an event is tracked.
        if not _AzureMonitorEventsExtension._initialized:
            _event_logger.addHandler(_AzureMonitorOpenTelemetryEventHandler())
            _event_logger.setLevel(INFO)
            _AzureMonitorEventsExtension._initialized = True


def track_event(name: str, custom_dimensions: Optional[Dict[str, str]] = None):
    _AzureMonitorEventsExtension._initialize()
    _event_logger.info(name, extra=custom_dimensions)
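
# Example usage (illustrative; assumes the Azure Monitor OpenTelemetry distro has
# already been configured in the process so the attached handler exports events):
#
#     from azure.monitor.events.extension import track_event
#
#     track_event("WidgetCreated", {"widget_color": "red", "widget_count": "3"})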
ApplicationInsights-Python/azure-monitor-events-extension/azure/monitor/events/extension/_events.py/0
{ "file_path": "ApplicationInsights-Python/azure-monitor-events-extension/azure/monitor/events/extension/_events.py", "repo_id": "ApplicationInsights-Python", "token_count": 438 }
84
#!/bin/bash
set -e

# Create a "docker" group, reusing the host's GID when one is supplied so the
# mounted Docker socket remains accessible from inside the container.
if [ -z "$DOCKER_GROUP_ID" ]; then
    groupadd docker
else
    groupadd -g "$DOCKER_GROUP_ID" docker
fi

# Add the given user ($1) to the group and refresh group membership.
usermod -aG docker "$1" && newgrp docker
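
# Hypothetical invocation from a devcontainer setup script, deriving the host's
# docker GID from the mounted socket:
#   DOCKER_GROUP_ID="$(stat -c '%g' /var/run/docker.sock)" ./docker-client.sh vscode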
AzureTRE/.devcontainer/scripts/docker-client.sh/0
{ "file_path": "AzureTRE/.devcontainer/scripts/docker-client.sh", "repo_id": "AzureTRE", "token_count": 77 }
85
{ "scriptFile": "__init__.py", "bindings": [ { "name": "msg", "type": "serviceBusTrigger", "direction": "in", "queueName": "%AIRLOCK_STATUS_CHANGED_QUEUE_NAME%", "connection": "SB_CONNECTION_STRING" }, { "type": "eventGrid", "name": "stepResultEvent", "topicEndpointUri": "EVENT_GRID_STEP_RESULT_TOPIC_URI_SETTING", "topicKeySetting": "EVENT_GRID_STEP_RESULT_TOPIC_KEY_SETTING", "direction": "out" }, { "type": "eventGrid", "name": "dataDeletionEvent", "topicEndpointUri": "EVENT_GRID_DATA_DELETION_TOPIC_URI_SETTING", "topicKeySetting": "EVENT_GRID_DATA_DELETION_TOPIC_KEY_SETTING", "direction": "out" } ] }
AzureTRE/airlock_processor/StatusChangedQueueTrigger/function.json/0
{ "file_path": "AzureTRE/airlock_processor/StatusChangedQueueTrigger/function.json", "repo_id": "AzureTRE", "token_count": 352 }
86
FROM python:3.8-slim-bullseye as base
COPY requirements.txt /.
RUN pip3 install --no-cache-dir -r requirements.txt

# Test stage: install dev dependencies and run the suite as part of the build.
FROM base as test
COPY requirements-dev.txt /.
RUN pip3 install --no-cache-dir -r requirements-dev.txt
COPY . /api
WORKDIR /api
RUN ./run_tests_and_exit_succesfully.sh

# Empty stage used only to export the test results out of the build.
FROM scratch as test-results
COPY --from=test /test-results/* /.

# Runtime stage: run the API under gunicorn as an unprivileged user.
FROM base as runtime
COPY . /api
WORKDIR /api
RUN groupadd -r api && useradd -r -s /bin/false -g api api_user
USER api_user
CMD ["gunicorn", "main:app", "--bind", "0.0.0.0:8000", "-k", "uvicorn.workers.UvicornWorker", "--timeout", "60", "--workers", "5"]
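
# Example (assumed) invocations, not part of the original file:
#   docker build --target runtime -t tre-api .            # production image
#   DOCKER_BUILDKIT=1 docker build --target test-results --output ./test-results .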
AzureTRE/api_app/Dockerfile/0
{ "file_path": "AzureTRE/api_app/Dockerfile", "repo_id": "AzureTRE", "token_count": 240 }
87
from collections import defaultdict
from typing import Any, DefaultDict, Dict, Optional

from fastapi import APIRouter, Request, Depends, HTTPException, status
from fastapi.openapi.docs import get_swagger_ui_html, get_swagger_ui_oauth2_redirect_html
from fastapi.openapi.utils import get_openapi

from api.helpers import get_repository
from db.repositories.workspaces import WorkspaceRepository
from api.routes import health, ping, workspaces, workspace_templates, workspace_service_templates, user_resource_templates, \
    shared_services, shared_service_templates, migrations, costs, airlock, operations, metadata
from core import config
from resources import strings

core_tags_metadata = [
    {"name": "health", "description": "Verify that the TRE is up and running"},
    {"name": "workspace templates", "description": "**TRE admin** registers and can access templates"},
    {"name": "workspace service templates", "description": "**TRE admin** registers templates and can access templates"},
    {"name": "user resource templates", "description": "**TRE admin** registers templates and can access templates"},
    {"name": "workspaces", "description": "**TRE admin** administers workspaces, **TRE Users** can view their own workspaces"},
]

workspace_tags_metadata = [
    {"name": "workspaces", "description": "**Workspace Owners and Researchers** can view their own workspaces"},
    {"name": "workspace services", "description": "**Workspace Owners** administer workspace services, **Workspace Owners and Researchers** can view services in the workspaces they belong to"},
    {"name": "user resources", "description": "**Researchers** administer and can view their own user resources, **Workspace Owners** can view/update/delete all user resources in their workspaces"},
    {"name": "shared services", "description": "**TRE administrators** administer shared services"},
]

# Root
router = APIRouter()
router.include_router(health.router, tags=["health"])
router.include_router(ping.router, tags=["health"])
router.include_router(metadata.router, tags=["metadata"])

# Core API
core_router = APIRouter(prefix=config.API_PREFIX)
core_router.include_router(health.router, tags=["health"])
core_router.include_router(ping.router, tags=["health"])
core_router.include_router(metadata.router, tags=["metadata"])
core_router.include_router(workspace_templates.workspace_templates_admin_router, tags=["workspace templates"])
core_router.include_router(workspace_service_templates.workspace_service_templates_core_router, tags=["workspace service templates"])
core_router.include_router(user_resource_templates.user_resource_templates_core_router, tags=["user resource templates"])
core_router.include_router(shared_service_templates.shared_service_templates_core_router, tags=["shared service templates"])
core_router.include_router(shared_services.shared_services_router, tags=["shared services"])
core_router.include_router(operations.operations_router, tags=["operations"])
core_router.include_router(workspaces.workspaces_core_router, tags=["workspaces"])
core_router.include_router(workspaces.workspaces_shared_router, tags=["workspaces"])
core_router.include_router(migrations.migrations_core_router, tags=["migrations"])
core_router.include_router(costs.costs_core_router, tags=["costs"])
core_router.include_router(costs.costs_workspace_router, tags=["costs"])

core_swagger_router = APIRouter()
swagger_disabled_router = APIRouter()

# Cache of generated OpenAPI specs, keyed by "core" or a workspace id.
openapi_definitions: DefaultDict[str, Optional[Dict[str, Any]]] = defaultdict(lambda: None)


@core_swagger_router.get("/openapi.json",
include_in_schema=False, name="core_openapi") async def core_openapi(request: Request): global openapi_definitions if openapi_definitions["core"] is None: openapi_definitions["core"] = get_openapi( title=f"{config.PROJECT_NAME}", description=config.API_DESCRIPTION, version=config.VERSION, routes=core_router.routes, tags=core_tags_metadata ) return openapi_definitions["core"] @core_swagger_router.get('/docs/oauth2-redirect', include_in_schema=False) async def swagger_ui_redirect(): return get_swagger_ui_oauth2_redirect_html() @core_swagger_router.get("/docs", include_in_schema=False, name="core_swagger") async def get_swagger(request: Request): swagger_ui_html = get_swagger_ui_html( openapi_url="openapi.json", title=request.app.title + " - Swagger UI", oauth2_redirect_url="/api/docs/oauth2-redirect", init_oauth={ "usePkceWithAuthorizationCodeGrant": True, "clientId": config.SWAGGER_UI_CLIENT_ID, "scopes": ["openid", "offline_access", config.API_ROOT_SCOPE] } ) return swagger_ui_html @swagger_disabled_router.get("/docs", include_in_schema=False, name="swagger_disabled") async def get_disabled_swagger(): raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=strings.SWAGGER_DISABLED) # Workspace API workspace_router = APIRouter(prefix=config.API_PREFIX) workspace_router.include_router(workspaces.workspaces_shared_router, tags=["workspaces"]) workspace_router.include_router(workspaces.workspace_services_workspace_router, tags=["workspace services"]) workspace_router.include_router(workspaces.user_resources_workspace_router, tags=["user resources"]) workspace_router.include_router(costs.costs_workspace_router, tags=["workspace costs"]) workspace_router.include_router(airlock.airlock_workspace_router, tags=["airlock"]) workspace_swagger_router = APIRouter() workspace_swagger_disabled_router = APIRouter() def get_scope(workspace) -> str: # Cope with the fact that scope id can have api:// at the front. return f"api://{workspace.properties['scope_id'].replace('api://','')}/user_impersonation" @workspace_swagger_router.get("/workspaces/{workspace_id}/openapi.json", include_in_schema=False, name="openapi_definitions") async def get_openapi_json(workspace_id: str, request: Request, workspace_repo=Depends(get_repository(WorkspaceRepository))): global openapi_definitions if openapi_definitions[workspace_id] is None: openapi_definitions[workspace_id] = get_openapi( title=f"{config.PROJECT_NAME} - Workspace {workspace_id}", description=config.API_DESCRIPTION, version=config.VERSION, routes=workspace_router.routes, tags=workspace_tags_metadata ) workspace = await workspace_repo.get_workspace_by_id(workspace_id) scope = {get_scope(workspace): "List and Get TRE Workspaces"} openapi_definitions[workspace_id]['components']['securitySchemes']['oauth2']['flows']['authorizationCode']['scopes'] = scope # Add an example into every workspace_id path parameter so users don't have to cut and paste them in. 
for route in openapi_definitions[workspace_id]['paths'].values(): for verb in route.values(): # We now have a list of parameters for each route for parameter in verb['parameters']: if (parameter['name'] == 'workspace_id'): parameter['schema']['example'] = workspace_id return openapi_definitions[workspace_id] @workspace_swagger_router.get("/workspaces/{workspace_id}/docs", include_in_schema=False, name="workspace_swagger") async def get_workspace_swagger(workspace_id, request: Request, workspace_repo=Depends(get_repository(WorkspaceRepository))): workspace = await workspace_repo.get_workspace_by_id(workspace_id) scope = get_scope(workspace) swagger_ui_html = get_swagger_ui_html( openapi_url="openapi.json", title=request.app.title + " - Swagger UI", oauth2_redirect_url="/api/docs/oauth2-redirect", init_oauth={ "usePkceWithAuthorizationCodeGrant": True, "clientId": config.SWAGGER_UI_CLIENT_ID, "scopes": ["openid", "offline_access", scope] } ) return swagger_ui_html @workspace_swagger_disabled_router.get("/workspaces/{workspace_id}/docs", include_in_schema=False, name="workspace_swagger_disabled") async def get_disabled_workspace_swagger(): raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=strings.SWAGGER_DISABLED) if config.ENABLE_SWAGGER: core_router.include_router(core_swagger_router) workspace_router.include_router(workspace_swagger_router) else: core_router.include_router(swagger_disabled_router) workspace_router.include_router(workspace_swagger_disabled_router) router.include_router(core_router) router.include_router(workspace_router)
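# --- Illustrative note (editor's addition, not part of the original module) ---
# The openapi_definitions cache above uses defaultdict(lambda: None) so the first
# lookup for any key returns None and triggers a one-time build of the spec;
# subsequent requests reuse the stored definition. A minimal, self-contained
# sketch of that lazy-cache pattern (the spec values are hypothetical):
if __name__ == "__main__":
    from collections import defaultdict as _defaultdict

    _cache = _defaultdict(lambda: None)

    def _get_spec(key: str) -> dict:
        if _cache[key] is None:  # first access stores None, then we build once
            _cache[key] = {"title": f"spec for {key}"}
        return _cache[key]

    assert _get_spec("core") is _get_spec("core")  # built once, then reused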
AzureTRE/api_app/api/routes/api.py/0
{ "file_path": "AzureTRE/api_app/api/routes/api.py", "repo_id": "AzureTRE", "token_count": 3153 }
88
from contextlib import asynccontextmanager from core.config import MANAGED_IDENTITY_CLIENT_ID, AAD_AUTHORITY_URL from azure.core.credentials import TokenCredential from urllib.parse import urlparse from azure.identity import ( DefaultAzureCredential, ManagedIdentityCredential, ChainedTokenCredential, ) from azure.identity.aio import ( DefaultAzureCredential as DefaultAzureCredentialASync, ManagedIdentityCredential as ManagedIdentityCredentialASync, ChainedTokenCredential as ChainedTokenCredentialASync, ) def get_credential() -> TokenCredential: if MANAGED_IDENTITY_CLIENT_ID: return ChainedTokenCredential( ManagedIdentityCredential(client_id=MANAGED_IDENTITY_CLIENT_ID) ) else: return DefaultAzureCredential(authority=urlparse(AAD_AUTHORITY_URL).netloc, exclude_shared_token_cache_credential=True, exclude_workload_identity_credential=True, exclude_developer_cli_credential=True, exclude_managed_identity_credential=True, exclude_powershell_credential=True ) async def get_credential_async(): return ( ChainedTokenCredentialASync( ManagedIdentityCredentialASync(client_id=MANAGED_IDENTITY_CLIENT_ID) ) if MANAGED_IDENTITY_CLIENT_ID else DefaultAzureCredentialASync(authority=urlparse(AAD_AUTHORITY_URL).netloc, exclude_shared_token_cache_credential=True, exclude_workload_identity_credential=True, exclude_developer_cli_credential=True, exclude_managed_identity_credential=True, exclude_powershell_credential=True ) ) @asynccontextmanager async def get_credential_async_context() -> TokenCredential: """ Context manager which yields the default credentials. """ credential = await get_credential_async() yield credential await credential.close()
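# --- Illustrative usage (editor's addition, not part of the original module) ---
# A minimal sketch of consuming the async context manager above to acquire an
# ARM access token; the management-plane scope below is a standard value, and
# the demo only runs when the module is executed directly.
if __name__ == "__main__":
    import asyncio

    async def _demo() -> None:
        async with get_credential_async_context() as credential:
            token = await credential.get_token("https://management.azure.com/.default")
            print(f"token expires on (POSIX): {token.expires_on}")

    asyncio.run(_demo())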
AzureTRE/api_app/core/credentials.py/0
{ "file_path": "AzureTRE/api_app/core/credentials.py", "repo_id": "AzureTRE", "token_count": 1152 }
89
import uuid from typing import List, Tuple from pydantic import parse_obj_as from db.repositories.resources_history import ResourceHistoryRepository from models.domain.resource_template import ResourceTemplate from models.domain.authentication import User from db.errors import EntityDoesNotExist from db.repositories.resource_templates import ResourceTemplateRepository from db.repositories.resources import ResourceRepository, IS_NOT_DELETED_CLAUSE from models.domain.resource import ResourceType from models.domain.user_resource import UserResource from models.schemas.resource import ResourcePatch from models.schemas.user_resource import UserResourceInCreate class UserResourceRepository(ResourceRepository): @classmethod async def create(cls): cls = UserResourceRepository() await super().create() return cls @staticmethod def user_resources_query(workspace_id: str, service_id: str): return f'SELECT * FROM c WHERE c.resourceType = "{ResourceType.UserResource}" AND c.parentWorkspaceServiceId = "{service_id}" AND c.workspaceId = "{workspace_id}"' @staticmethod def active_user_resources_query(workspace_id: str, service_id: str): return f'SELECT * FROM c WHERE {IS_NOT_DELETED_CLAUSE} AND c.resourceType = "{ResourceType.UserResource}" AND c.parentWorkspaceServiceId = "{service_id}" AND c.workspaceId = "{workspace_id}"' async def create_user_resource_item(self, user_resource_input: UserResourceInCreate, workspace_id: str, parent_workspace_service_id: str, parent_template_name: str, user_id: str, user_roles: List[str]) -> Tuple[UserResource, ResourceTemplate]: full_user_resource_id = str(uuid.uuid4()) template = await self.validate_input_against_template(user_resource_input.templateName, user_resource_input, ResourceType.UserResource, user_roles, parent_template_name) # we don't want something in the input to overwrite the system parameters, so dict.update can't work. 
resource_spec_parameters = {**user_resource_input.properties, **self.get_user_resource_spec_params()} user_resource = UserResource( id=full_user_resource_id, workspaceId=workspace_id, ownerId=user_id, parentWorkspaceServiceId=parent_workspace_service_id, templateName=user_resource_input.templateName, templateVersion=template.version, properties=resource_spec_parameters, resourcePath=f'/workspaces/{workspace_id}/workspace-services/{parent_workspace_service_id}/user-resources/{full_user_resource_id}', etag='' ) return user_resource, template async def get_user_resources_for_workspace_service(self, workspace_id: str, service_id: str) -> List[UserResource]: """ returns a list of "non-deleted" user resources linked to this workspace service """ query = self.active_user_resources_query(workspace_id, service_id) user_resources = await self.query(query=query) return parse_obj_as(List[UserResource], user_resources) async def get_user_resource_by_id(self, workspace_id: str, service_id: str, resource_id: str) -> UserResource: query = self.user_resources_query(workspace_id, service_id) + f' AND c.id = "{resource_id}"' user_resources = await self.query(query=query) if not user_resources: raise EntityDoesNotExist return parse_obj_as(UserResource, user_resources[0]) def get_user_resource_spec_params(self): return self.get_resource_base_spec_params() async def patch_user_resource(self, user_resource: UserResource, user_resource_patch: ResourcePatch, etag: str, resource_template_repo: ResourceTemplateRepository, resource_history_repo: ResourceHistoryRepository, parent_template_name: str, user: User, force_version_update: bool) -> Tuple[UserResource, ResourceTemplate]: # get user resource template user_resource_template = await resource_template_repo.get_template_by_name_and_version(user_resource.templateName, user_resource.templateVersion, ResourceType.UserResource, parent_service_name=parent_template_name) return await self.patch_resource(user_resource, user_resource_patch, user_resource_template, etag, resource_template_repo, resource_history_repo, user, force_version_update)
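# --- Illustrative note (editor's addition, not part of the original module) ---
# create_user_resource_item merges user input with system parameters via dict
# unpacking; the mapping unpacked last wins, so system parameters cannot be
# overridden by user-supplied properties. A standalone demonstration with
# hypothetical values:
if __name__ == "__main__":
    user_input = {"display_name": "my vm", "owner_id": "spoofed-id"}
    system_params = {"owner_id": "real-user-id"}
    merged = {**user_input, **system_params}
    assert merged["owner_id"] == "real-user-id"  # system parameter wins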
AzureTRE/api_app/db/repositories/user_resources.py/0
{ "file_path": "AzureTRE/api_app/db/repositories/user_resources.py", "repo_id": "AzureTRE", "token_count": 1493 }
90
from enum import Enum
from typing import List, Optional

from pydantic import Field
from pydantic.types import UUID4

from models.domain.azuretremodel import AzureTREModel
from models.domain.resource import Output, ResourceType
from resources import strings


class Status(str, Enum):
    """
    Operation status
    """
    AwaitingDeployment = strings.RESOURCE_STATUS_AWAITING_DEPLOYMENT
    Deploying = strings.RESOURCE_STATUS_DEPLOYING
    Deployed = strings.RESOURCE_STATUS_DEPLOYED
    DeploymentFailed = strings.RESOURCE_STATUS_DEPLOYMENT_FAILED

    AwaitingUpdate = strings.RESOURCE_STATUS_AWAITING_UPDATE
    Updating = strings.RESOURCE_STATUS_UPDATING
    Updated = strings.RESOURCE_STATUS_UPDATED
    UpdatingFailed = strings.RESOURCE_STATUS_UPDATING_FAILED

    AwaitingDeletion = strings.RESOURCE_STATUS_AWAITING_DELETION
    Deleting = strings.RESOURCE_STATUS_DELETING
    Deleted = strings.RESOURCE_STATUS_DELETED
    DeletingFailed = strings.RESOURCE_STATUS_DELETING_FAILED

    AwaitingAction = strings.RESOURCE_STATUS_AWAITING_ACTION
    InvokingAction = strings.RESOURCE_ACTION_STATUS_INVOKING
    ActionSucceeded = strings.RESOURCE_ACTION_STATUS_SUCCEEDED
    ActionFailed = strings.RESOURCE_ACTION_STATUS_FAILED

    PipelineRunning = strings.RESOURCE_ACTION_STATUS_PIPELINE_RUNNING  # set whilst a resource in a pipeline is running, as each step will have its own status


class OperationStep(AzureTREModel):
    """
    Model to define a step in an operation. Each step references either a secondary resource or the primary resource (stepId=main)
    The steps are built up front as the operation is created from the initial user request. As each step completes, the next one is processed.
    """
    id: str = Field(title="Id", description="Unique id identifying the step")
    templateStepId: str = Field(title="templateStepId", description="Unique id identifying the step")
    stepTitle: Optional[str] = Field(title="stepTitle", description="Human readable title of what the step is for")
    resourceId: Optional[str] = Field(title="resourceId", description="Id of the resource to update")
    resourceTemplateName: Optional[str] = Field("", title="resourceTemplateName", description="Name of the template for the resource under change")
    resourceType: Optional[ResourceType] = Field(title="resourceType", description="Type of resource under change")
    resourceAction: Optional[str] = Field(title="resourceAction", description="Action - install / upgrade / uninstall etc")
    status: Optional[Status] = Field(None, title="Operation step status")
    message: Optional[str] = Field("", title="Additional operation step status information")
    updatedWhen: Optional[float] = Field(None, title="POSIX Timestamp for when the operation step was updated")
    # Example: if a step is responsible for updating the firewall and originated from the Guacamole workspace service, the id here will be the Guacamole resource id
    sourceTemplateResourceId: Optional[str] = Field(title="sourceTemplateResourceId", description="Id of the parent of the resource to update")

    def is_success(self) -> bool:
        return self.status in (
            Status.ActionSucceeded,
            Status.Deployed,
            Status.Deleted,
            Status.Updated
        )

    def is_failure(self) -> bool:
        return self.status in (
            Status.ActionFailed,
            Status.DeletingFailed,
            Status.DeploymentFailed,
            Status.UpdatingFailed
        )

    def is_action(self) -> bool:
        return self.status in (
            Status.ActionSucceeded,
            Status.ActionFailed,
            Status.InvokingAction
        )


class Operation(AzureTREModel):
    """
    Operation model
    """
    id: str = Field(title="Id", description="GUID identifying the operation")
    resourceId: str = Field(title="resourceId",
                            description="GUID identifying the resource")
    resourcePath: str = Field(title="resourcePath", description="Path of the resource undergoing change, i.e. '/workspaces/guid/workspace-services/guid/'")
    resourceVersion: int = Field(0, title="resourceVersion", description="Version of the resource this operation relates to")
    status: Status = Field(None, title="Operation status")
    action: str = Field(title="action", description="Name of the action being performed on the resource, i.e. install, uninstall, start")
    message: str = Field("", title="Additional operation status information")
    createdWhen: float = Field(0.0, title="POSIX Timestamp for when the operation was submitted")
    updatedWhen: float = Field(0.0, title="POSIX Timestamp for when the operation was updated")
    user: dict = {}
    steps: Optional[List[OperationStep]] = Field(None, title="Operation Steps")


class DeploymentStatusUpdateMessage(AzureTREModel):
    """
    Model for service bus message flowing back to API to update status in DB
    """
    operationId: UUID4 = Field(title="", description="")
    stepId: str = Field(title="", description="")
    id: UUID4 = Field(title="", description="")
    status: Status = Field(title="", description="")
    message: str = Field(title="", description="")
    outputs: List[Output] = Field(title="", description="", default=[])
AzureTRE/api_app/models/domain/operation.py/0
{ "file_path": "AzureTRE/api_app/models/domain/operation.py", "repo_id": "AzureTRE", "token_count": 1691 }
91
from typing import List from pydantic import BaseModel class Migration(BaseModel): """ Migration """ issueNumber: str status: str class MigrationOutList(BaseModel): migrations: List[Migration]
AzureTRE/api_app/models/schemas/migrations.py/0
{ "file_path": "AzureTRE/api_app/models/schemas/migrations.py", "repo_id": "AzureTRE", "token_count": 74 }
92
import base64
from collections import defaultdict
from enum import Enum
from typing import List, Optional

import jwt
import requests

from fastapi import Request, HTTPException, status
from msal import ConfidentialClientApplication

from services.access_service import AccessService, AuthConfigValidationError
from core import config
from db.errors import EntityDoesNotExist
from models.domain.authentication import User, RoleAssignment
from models.domain.workspace import Workspace, WorkspaceRole
from resources import strings
from db.repositories.workspaces import WorkspaceRepository
from services.logging import logger
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization

MICROSOFT_GRAPH_URL = config.MICROSOFT_GRAPH_URL.strip("/")


class PrincipalType(Enum):
    User = "User"
    Group = "Group"
    ServicePrincipal = "ServicePrincipal"


class AzureADAuthorization(AccessService):
    _jwt_keys: dict = {}

    require_one_of_roles = None

    aad_instance = config.AAD_AUTHORITY_URL

    TRE_CORE_ROLES = ['TREAdmin', 'TREUser']
    WORKSPACE_ROLES_DICT = {'WorkspaceOwner': 'app_role_id_workspace_owner', 'WorkspaceResearcher': 'app_role_id_workspace_researcher', 'AirlockManager': 'app_role_id_workspace_airlock_manager'}

    def __init__(self, auto_error: bool = True, require_one_of_roles: Optional[list] = None):
        super(AzureADAuthorization, self).__init__(
            authorizationUrl=f"{self.aad_instance}/{config.AAD_TENANT_ID}/oauth2/v2.0/authorize",
            tokenUrl=f"{self.aad_instance}/{config.AAD_TENANT_ID}/oauth2/v2.0/token",
            refreshUrl=f"{self.aad_instance}/{config.AAD_TENANT_ID}/oauth2/v2.0/token",
            scheme_name="oauth2",
            auto_error=auto_error
        )
        self.require_one_of_roles = require_one_of_roles

    async def __call__(self, request: Request) -> User:

        token: str = await super(AzureADAuthorization, self).__call__(request)

        decoded_token = None

        # Try workspace app registration if appropriate
        if 'workspace_id' in request.path_params and any(role in self.require_one_of_roles for role in self.WORKSPACE_ROLES_DICT.keys()):
            # as we have a workspace_id, try decoding the token against the workspace app registration
            logger.debug("Workspace ID was provided. Getting Workspace API app registration")
            try:
                # get the app reg id - which might be blank if the workspace hasn't fully created yet.
                # if it's blank, don't use workspace auth, use core auth - and a TRE Admin can still get it
                app_reg_id = await self._fetch_ws_app_reg_id_from_ws_id(request)
                if app_reg_id != "":
                    decoded_token = self._decode_token(token, app_reg_id)
            except HTTPException as h:
                raise h
            except Exception as e:
                logger.debug(e)
                logger.debug("Failed to decode using workspace_id, trying with TRE API app registration")
                pass

        # Try TRE API app registration if appropriate
        if decoded_token is None and any(role in self.require_one_of_roles for role in self.TRE_CORE_ROLES):
            try:
                decoded_token = self._decode_token(token, config.API_AUDIENCE)
            except jwt.exceptions.InvalidSignatureError:
                logger.debug("Failed to decode using TRE API app registration (Invalid Signature)")
                raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.INVALID_SIGNATURE)
            except jwt.exceptions.ExpiredSignatureError:
                logger.debug("Failed to decode using TRE API app registration (Expired Signature)")
                raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.EXPIRED_SIGNATURE)
            except jwt.exceptions.InvalidTokenError:
                # any other token validation exception, we want to catch all of these...
logger.debug("Failed to decode using TRE API app registration (Invalid token)") raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.INVALID_TOKEN) except Exception as e: # Unexpected token decoding/validation exception. making sure we are not crashing (with 500) logger.debug(e) pass # Failed to decode token using either app registration if decoded_token is None: raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.AUTH_UNABLE_TO_VALIDATE_TOKEN) try: user = self._get_user_from_token(decoded_token) except Exception as e: logger.debug(e) raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.ACCESS_UNABLE_TO_GET_ROLE_ASSIGNMENTS_FOR_USER, headers={"WWW-Authenticate": "Bearer"}) try: if not any(role in self.require_one_of_roles for role in user.roles): raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f'{strings.ACCESS_USER_DOES_NOT_HAVE_REQUIRED_ROLE}: {self.require_one_of_roles}', headers={"WWW-Authenticate": "Bearer"}) except Exception as e: logger.debug(e) raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail=f'{strings.ACCESS_USER_DOES_NOT_HAVE_REQUIRED_ROLE}: {self.require_one_of_roles}', headers={"WWW-Authenticate": "Bearer"}) return user @staticmethod async def _fetch_ws_app_reg_id_from_ws_id(request: Request) -> str: workspace_id = None if "workspace_id" not in request.path_params: logger.error("Neither a workspace ID nor a default app registration id were provided") raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.AUTH_COULD_NOT_VALIDATE_CREDENTIALS) try: workspace_id = request.path_params['workspace_id'] ws_repo = await WorkspaceRepository.create() workspace = await ws_repo.get_workspace_by_id(workspace_id) ws_app_reg_id = "" if "client_id" in workspace.properties: ws_app_reg_id = workspace.properties['client_id'] return ws_app_reg_id except EntityDoesNotExist: logger.exception(strings.WORKSPACE_DOES_NOT_EXIST) raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=strings.WORKSPACE_DOES_NOT_EXIST) except Exception: logger.exception(f"Failed to get workspace app registration ID for workspace {workspace_id}") raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail=strings.AUTH_COULD_NOT_VALIDATE_CREDENTIALS) @staticmethod def _get_user_from_token(decoded_token: dict) -> User: user_id = decoded_token['oid'] return User(id=user_id, name=decoded_token.get('name', ''), email=decoded_token.get('email', ''), roles=decoded_token.get('roles', [])) def _decode_token(self, token: str, ws_app_reg_id: str) -> dict: key_id = self._get_key_id(token) key = self._get_token_key(key_id) logger.debug("workspace app registration id: %s", ws_app_reg_id) return jwt.decode(token, key, options={"verify_signature": True}, algorithms=['RS256'], audience=ws_app_reg_id) @staticmethod def _get_key_id(token: str) -> str: headers = jwt.get_unverified_header(token) return headers['kid'] if headers and 'kid' in headers else None @staticmethod def _ensure_b64padding(key: str) -> str: """ The base64 encoded keys are not always correctly padded, so pad with the right number of = """ key = key.encode('utf-8') missing_padding = len(key) % 4 for _ in range(missing_padding): key = key + b'=' return key def _get_token_key(self, key_id: str) -> str: """ Rather tha use PyJWKClient.get_signing_key_from_jwt every time, we'll get all the keys from AAD and cache them. 
""" if key_id not in AzureADAuthorization._jwt_keys: response = requests.get(f"{self.aad_instance}/{config.AAD_TENANT_ID}/v2.0/.well-known/openid-configuration") aad_metadata = response.json() if response.ok else None jwks_uri = aad_metadata['jwks_uri'] if aad_metadata and 'jwks_uri' in aad_metadata else None if jwks_uri: response = requests.get(jwks_uri) keys = response.json() if response.ok else None if keys and 'keys' in keys: for key in keys['keys']: n = int.from_bytes(base64.urlsafe_b64decode(self._ensure_b64padding(key['n'])), "big") e = int.from_bytes(base64.urlsafe_b64decode(self._ensure_b64padding(key['e'])), "big") pub_key = rsa.RSAPublicNumbers(e, n).public_key(default_backend()) # Cache the PEM formatted public key. AzureADAuthorization._jwt_keys[key['kid']] = pub_key.public_bytes( encoding=serialization.Encoding.PEM, format=serialization.PublicFormat.PKCS1 ) return AzureADAuthorization._jwt_keys[key_id] # The below functions are needed to list which workspaces a specific user has access to i.e. GET /workspaces. # The below functions require Directory.ReadAll permissions on AzureAD. # If there is no need to list all workspaces for a specific user, then Directory.ReadAll permissions are not required. @staticmethod def _get_msgraph_token() -> str: scopes = [f"{MICROSOFT_GRAPH_URL}/.default"] app = ConfidentialClientApplication(client_id=config.API_CLIENT_ID, client_credential=config.API_CLIENT_SECRET, authority=f"{config.AAD_AUTHORITY_URL}/{config.AAD_TENANT_ID}") try: result = app.acquire_token_silent(scopes=scopes, account=None) except Exception: result = None if not result: logger.debug('No suitable token exists in cache, getting a new one from AAD') result = app.acquire_token_for_client(scopes=scopes) if "access_token" not in result: raise Exception(f"API app registration access token cannot be retrieved. 
{result.get('error')}: {result.get('error_description')}")
        return result["access_token"]

    @staticmethod
    def _get_auth_header(msgraph_token: str) -> dict:
        return {'Authorization': 'Bearer ' + msgraph_token}

    @staticmethod
    def _get_service_principal_endpoint(client_id) -> str:
        return f"{MICROSOFT_GRAPH_URL}/v1.0/serviceprincipals?$filter=appid eq '{client_id}'"

    @staticmethod
    def _get_service_principal_assigned_roles_endpoint(client_id) -> str:
        return f"{MICROSOFT_GRAPH_URL}/v1.0/serviceprincipals/{client_id}/appRoleAssignedTo?$select=appRoleId,principalId,principalType"

    @staticmethod
    def _get_batch_endpoint() -> str:
        return f"{MICROSOFT_GRAPH_URL}/v1.0/$batch"

    @staticmethod
    def _get_users_endpoint(user_object_id) -> str:
        return "/users/" + user_object_id + "?$select=mail,id"

    @staticmethod
    def _get_group_members_endpoint(group_object_id) -> str:
        return "/groups/" + group_object_id + "/transitiveMembers?$select=mail,id"

    def _get_app_sp_graph_data(self, client_id: str) -> dict:
        msgraph_token = self._get_msgraph_token()
        sp_endpoint = self._get_service_principal_endpoint(client_id)
        graph_data = requests.get(sp_endpoint, headers=self._get_auth_header(msgraph_token)).json()
        return graph_data

    def _get_user_role_assignments(self, client_id, msgraph_token):
        sp_roles_endpoint = self._get_service_principal_assigned_roles_endpoint(client_id)
        return requests.get(sp_roles_endpoint, headers=self._get_auth_header(msgraph_token)).json()

    def _get_user_emails(self, roles_graph_data, msgraph_token):
        batch_endpoint = self._get_batch_endpoint()
        batch_request_body = self._get_batch_users_by_role_assignments_body(roles_graph_data)
        headers = self._get_auth_header(msgraph_token)
        headers["Content-type"] = "application/json"

        max_number_request = 20
        requests_from_batch = batch_request_body["requests"]
        # We split the original batch request body into sub-lists with at most max_number_request elements each
        batch_request_body_list = [requests_from_batch[i:i + max_number_request] for i in range(0, len(requests_from_batch), max_number_request)]

        users_graph_data = {"responses": []}
        # For each sub-list we call the batch endpoint to retrieve user/group information
        for request_body_element in batch_request_body_list:
            batch_request_body_tmp = {"requests": request_body_element}
            users_graph_data_tmp = requests.post(batch_endpoint, json=batch_request_body_tmp, headers=headers).json()
            users_graph_data["responses"] = users_graph_data["responses"] + users_graph_data_tmp["responses"]

        return users_graph_data

    def _get_user_emails_from_response(self, users_graph_data):
        user_emails = {}
        for user_data in users_graph_data["responses"]:
            # Handle user endpoint response
            if "users" in user_data["body"]["@odata.context"] and user_data["body"]["mail"] is not None:
                user_emails[user_data["body"]["id"]] = [user_data["body"]["mail"]]
            # Handle group endpoint response
            if "directoryObjects" in user_data["body"]["@odata.context"]:
                group_members_emails = []
                for group_member in user_data["body"]["value"]:
                    if group_member["mail"] is not None and group_member["mail"] not in group_members_emails:
                        group_members_emails.append(group_member["mail"])
                user_emails[user_data["id"]] = group_members_emails
        return user_emails

    def get_workspace_role_assignment_details(self, workspace: Workspace):
        msgraph_token = self._get_msgraph_token()
        app_role_ids = {role_name: workspace.properties[role_id] for role_name, role_id in self.WORKSPACE_ROLES_DICT.items()}
        inverted_app_role_ids = {role_id: role_name for role_name, role_id in app_role_ids.items()}
        sp_id = 
workspace.properties["sp_id"] roles_graph_data = self._get_user_role_assignments(sp_id, msgraph_token) users_graph_data = self._get_user_emails(roles_graph_data, msgraph_token) user_emails = self._get_user_emails_from_response(users_graph_data) workspace_role_assignments_details = defaultdict(list) for role_assignment in roles_graph_data["value"]: principal_id = role_assignment["principalId"] principal_type = role_assignment["principalType"] if principal_type != "ServicePrincipal" and principal_id in user_emails: app_role_id = role_assignment["appRoleId"] app_role_name = inverted_app_role_ids.get(app_role_id) if app_role_name: workspace_role_assignments_details[app_role_name].extend(user_emails[principal_id]) return workspace_role_assignments_details def _get_batch_users_by_role_assignments_body(self, roles_graph_data): request_body = {"requests": []} met_principal_ids = set() for role_assignment in roles_graph_data['value']: if role_assignment["principalId"] not in met_principal_ids: batch_url = "" if role_assignment["principalType"] == "User": batch_url = self._get_users_endpoint(role_assignment["principalId"]) elif role_assignment["principalType"] == "Group": batch_url = self._get_group_members_endpoint(role_assignment["principalId"]) else: continue request_body["requests"].append( {"method": "GET", "url": batch_url, "id": role_assignment["principalId"]}) met_principal_ids.add(role_assignment["principalId"]) return request_body # This method is called when you create a workspace and you already have an AAD App Registration # to link it to. You pass in the client_id and go and get the extra information you need from AAD # If the auth_type is `Automatic`, then these values will be written by Terraform. def _get_app_auth_info(self, client_id: str) -> dict: graph_data = self._get_app_sp_graph_data(client_id) if 'value' not in graph_data or len(graph_data['value']) == 0: logger.debug(graph_data) raise AuthConfigValidationError(f"{strings.ACCESS_UNABLE_TO_GET_INFO_FOR_APP} {client_id}") app_info = graph_data['value'][0] authInfo = {'sp_id': app_info['id'], 'scope_id': app_info['servicePrincipalNames'][0]} # Convert the roles into ids (We could have more roles defined in the app than we need.) 
        for appRole in app_info['appRoles']:
            if appRole['value'] in self.WORKSPACE_ROLES_DICT.keys():
                authInfo[self.WORKSPACE_ROLES_DICT[appRole['value']]] = appRole['id']

        return authInfo

    def _ms_graph_query(self, url: str, http_method: str, json=None) -> dict:
        msgraph_token = self._get_msgraph_token()
        auth_headers = self._get_auth_header(msgraph_token)
        graph_data = {}
        while url:
            logger.debug(f"Making request to: {url}")
            if json:
                response = requests.request(method=http_method, url=url, json=json, headers=auth_headers)
            else:
                response = requests.request(method=http_method, url=url, headers=auth_headers)

            if response.status_code == 200:
                json_response = response.json()
                graph_data = merge_dict(graph_data, json_response)
                # Follow the paging link if Graph returned one; otherwise stop
                url = json_response.get('@odata.nextLink', "")
            else:
                # log the failing URL before clearing it so the error is traceable
                logger.error(f"MS Graph query to: {url} failed with status code {response.status_code}")
                logger.error(f"Full response: {response}")
                url = ""

        return graph_data

    def _get_role_assignment_graph_data_for_user(self, user_id: str) -> dict:
        user_endpoint = f"{MICROSOFT_GRAPH_URL}/v1.0/users/{user_id}/appRoleAssignments"
        graph_data = self._ms_graph_query(user_endpoint, "GET")
        return graph_data

    def _get_role_assignment_graph_data_for_service_principal(self, principal_id: str) -> dict:
        svc_principal_endpoint = f"{MICROSOFT_GRAPH_URL}/v1.0/servicePrincipals/{principal_id}/appRoleAssignments"
        graph_data = self._ms_graph_query(svc_principal_endpoint, "GET")
        return graph_data

    def _get_identity_type(self, id: str) -> str:
        objects_endpoint = f"{MICROSOFT_GRAPH_URL}/v1.0/directoryObjects/getByIds"
        request_body = {"ids": [id], "types": ["user", "servicePrincipal"]}
        graph_data = self._ms_graph_query(objects_endpoint, "POST", json=request_body)

        logger.debug(graph_data)

        if "value" not in graph_data or len(graph_data["value"]) != 1:
            logger.debug(graph_data)
            raise AuthConfigValidationError(f"{strings.ACCESS_UNABLE_TO_GET_ACCOUNT_TYPE} {id}")

        object_info = graph_data["value"][0]
        if "@odata.type" not in object_info:
            logger.debug(object_info)
            raise AuthConfigValidationError(f"{strings.ACCESS_UNABLE_TO_GET_ACCOUNT_TYPE} {id}")

        return object_info["@odata.type"]

    def extract_workspace_auth_information(self, data: dict) -> dict:
        if ("auth_type" not in data) or (data["auth_type"] != "Automatic" and "client_id" not in data):
            raise AuthConfigValidationError(strings.ACCESS_PLEASE_SUPPLY_CLIENT_ID)

        auth_info = {}
        # The user may want us to create the AAD workspace app and therefore they
        # don't know the client_id yet.
if data["auth_type"] != "Automatic": auth_info = self._get_app_auth_info(data["client_id"]) # Check we've get all our required roles for role in self.WORKSPACE_ROLES_DICT.items(): if role[1] not in auth_info: raise AuthConfigValidationError(f"{strings.ACCESS_APP_IS_MISSING_ROLE} {role[0]}") return auth_info def get_identity_role_assignments(self, user_id: str) -> List[RoleAssignment]: identity_type = self._get_identity_type(user_id) if identity_type == "#microsoft.graph.user": graph_data = self._get_role_assignment_graph_data_for_user(user_id) elif identity_type == "#microsoft.graph.servicePrincipal": graph_data = self._get_role_assignment_graph_data_for_service_principal(user_id) else: raise AuthConfigValidationError(f"{strings.ACCESS_UNHANDLED_ACCOUNT_TYPE} {identity_type}") if 'value' not in graph_data: logger.debug(graph_data) raise AuthConfigValidationError(f"{strings.ACCESS_UNABLE_TO_GET_ROLE_ASSIGNMENTS_FOR_USER} {user_id}") logger.debug(graph_data) return [RoleAssignment(role_assignment['resourceId'], role_assignment['appRoleId']) for role_assignment in graph_data['value']] def get_workspace_role(self, user: User, workspace: Workspace, user_role_assignments: List[RoleAssignment]) -> WorkspaceRole: if 'sp_id' not in workspace.properties: raise AuthConfigValidationError(strings.AUTH_CONFIGURATION_NOT_AVAILABLE_FOR_WORKSPACE) workspace_sp_id = workspace.properties['sp_id'] for requiredRole in self.WORKSPACE_ROLES_DICT.values(): if requiredRole not in workspace.properties: raise AuthConfigValidationError(strings.AUTH_CONFIGURATION_NOT_AVAILABLE_FOR_WORKSPACE) if RoleAssignment(resource_id=workspace_sp_id, role_id=workspace.properties['app_role_id_workspace_owner']) in user_role_assignments: return WorkspaceRole.Owner if RoleAssignment(resource_id=workspace_sp_id, role_id=workspace.properties['app_role_id_workspace_researcher']) in user_role_assignments: return WorkspaceRole.Researcher if RoleAssignment(resource_id=workspace_sp_id, role_id=workspace.properties['app_role_id_workspace_airlock_manager']) in user_role_assignments: return WorkspaceRole.AirlockManager return WorkspaceRole.NoRole def merge_dict(d1, d2): dd = defaultdict(list) for d in (d1, d2): for key, value in d.items(): if isinstance(value, list): dd[key].extend(value) else: dd[key].append(value) return dict(dd)
AzureTRE/api_app/services/aad_authentication.py/0
{ "file_path": "AzureTRE/api_app/services/aad_authentication.py", "repo_id": "AzureTRE", "token_count": 10003 }
93
import copy from unittest.mock import AsyncMock import pytest import pytest_asyncio from mock import patch, MagicMock import uuid from db.errors import EntityDoesNotExist, InvalidInput, ResourceIsNotDeployed from db.repositories.operations import OperationRepository from db.repositories.workspaces import WorkspaceRepository from models.domain.resource import ResourceType from models.domain.workspace import Workspace from models.schemas.workspace import WorkspaceInCreate @pytest.fixture def basic_workspace_request(): return WorkspaceInCreate(templateName="base-tre", properties={"display_name": "test", "description": "test", "client_id": "123", "tre_id": "test"}) @pytest_asyncio.fixture async def workspace_repo(): with patch('api.dependencies.database.Database.get_container_proxy', return_value=MagicMock()): workspace_repo = await WorkspaceRepository().create() yield workspace_repo @pytest_asyncio.fixture async def operations_repo(): with patch('api.dependencies.database.Database.get_container_proxy', return_value=MagicMock()): operations_repo = await OperationRepository().create() yield operations_repo @pytest.fixture def workspace(): workspace = Workspace( id="000000d3-82da-4bfc-b6e9-9a7853ef753e", templateVersion="0.1.0", etag="", properties={}, templateName="my-workspace-service", resourcePath="test" ) return workspace @pytest.mark.asyncio async def test_get_workspaces_queries_db(workspace_repo): workspace_repo.container.query_items = MagicMock() expected_query = workspace_repo.workspaces_query_string() await workspace_repo.get_workspaces() workspace_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=None) @pytest.mark.asyncio async def test_get_active_workspaces_queries_db(workspace_repo): workspace_repo.container.query_items = MagicMock() expected_query = workspace_repo.active_workspaces_query_string() await workspace_repo.get_active_workspaces() workspace_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=None) @pytest.mark.asyncio async def test_get_deployed_workspace_by_id_raises_resource_is_not_deployed_if_not_deployed(workspace_repo, workspace, operations_repo): workspace_id = "000000d3-82da-4bfc-b6e9-9a7853ef753e" sample_workspace = workspace workspace_repo.get_workspace_by_id = AsyncMock(return_value=sample_workspace) operations_repo.resource_has_deployed_operation = AsyncMock(return_value=False) with pytest.raises(ResourceIsNotDeployed): await workspace_repo.get_deployed_workspace_by_id(workspace_id, operations_repo) @pytest.mark.asyncio async def test_get_workspace_by_id_raises_entity_does_not_exist_if_item_does_not_exist(workspace_repo): workspace_id = uuid.uuid4() workspace_repo.container.query_items = MagicMock() with pytest.raises(EntityDoesNotExist): await workspace_repo.get_workspace_by_id(workspace_id) @pytest.mark.asyncio async def test_get_workspace_by_id_queries_db(workspace_repo, workspace): workspace_query_item_result = AsyncMock() workspace_query_item_result.__aiter__.return_value = [workspace.dict()] workspace_repo.container.query_items = MagicMock(return_value=workspace_query_item_result) expected_query = f'SELECT * FROM c WHERE c.resourceType = "workspace" AND c.id = "{workspace.id}"' await workspace_repo.get_workspace_by_id(workspace.id) workspace_repo.container.query_items.assert_called_once_with(query=expected_query, parameters=None) @pytest.mark.asyncio @patch('db.repositories.workspaces.generate_new_cidr') 
@patch('db.repositories.workspaces.WorkspaceRepository.validate_input_against_template')
@patch('core.config.RESOURCE_LOCATION', "useast2")
@patch('core.config.TRE_ID', "9876")
async def test_create_workspace_item_creates_a_workspace_with_the_right_values(validate_input_mock, new_cidr_mock, workspace_repo, basic_workspace_request, basic_resource_template):
    workspace_to_create = basic_workspace_request

    # remove values that we expect the repository to populate itself
    workspace_to_create.properties.pop("address_space", None)
    workspace_to_create.properties.pop("address_spaces", None)
    workspace_to_create.properties.pop("workspace_owner_object_id", None)

    validate_input_mock.return_value = basic_resource_template
    new_cidr_mock.return_value = "1.2.3.4/24"

    workspace, _ = await workspace_repo.create_workspace_item(workspace_to_create, {}, "test_object_id", ["test_role"])

    assert workspace.templateName == workspace_to_create.templateName
    assert workspace.resourceType == ResourceType.Workspace

    for key in ["display_name", "description", "azure_location", "workspace_id", "tre_id", "address_space", "workspace_owner_object_id"]:
        assert key in workspace.properties
        assert len(workspace.properties[key]) > 0

    # need to make sure request doesn't override system param
    assert workspace.properties["tre_id"] != workspace_to_create.properties["tre_id"]

    # a new CIDR was allocated
    assert workspace.properties["address_space"] == "1.2.3.4/24"
    # TODO: uncomment with https://github.com/microsoft/AzureTRE/pull/2902
    # assert workspace.properties["address_spaces"] == ["1.2.3.4/24"]

    assert workspace.properties["workspace_owner_object_id"] == "test_object_id"


@pytest.mark.asyncio
@patch('core.config.RESOURCE_LOCATION', "useast2")
@patch('core.config.TRE_ID', "9876")
@patch('core.config.CORE_ADDRESS_SPACE', "10.1.0.0/22")
@patch('core.config.TRE_ADDRESS_SPACE', "10.0.0.0/12")
async def test_get_address_space_based_on_size_with_small_address_space(workspace_repo, basic_workspace_request):
    workspace_to_create = basic_workspace_request
    workspace_to_create.properties["address_space_size"] = "small"

    assert "10.1.4.0/24" == await workspace_repo.get_address_space_based_on_size(workspace_to_create.properties)


@pytest.mark.asyncio
@patch('core.config.RESOURCE_LOCATION', "useast2")
@patch('core.config.TRE_ID', "9876")
@patch('core.config.CORE_ADDRESS_SPACE', "10.1.0.0/22")
@patch('core.config.TRE_ADDRESS_SPACE', "10.0.0.0/12")
async def test_get_address_space_based_on_size_with_medium_address_space(workspace_repo, basic_workspace_request):
    workspace_to_create = basic_workspace_request
    workspace_to_create.properties["address_space_size"] = "medium"

    address_space = await workspace_repo.get_address_space_based_on_size(workspace_to_create.properties)
    assert "10.1.4.0/22" == address_space


@pytest.mark.asyncio
@patch('core.config.RESOURCE_LOCATION', "useast2")
@patch('core.config.TRE_ID', "9876")
@patch('core.config.CORE_ADDRESS_SPACE', "10.1.0.0/22")
@patch('core.config.TRE_ADDRESS_SPACE', "10.0.0.0/12")
async def test_get_address_space_based_on_size_with_large_address_space(workspace_repo, basic_workspace_request):
    workspace_to_create = basic_workspace_request
    workspace_to_create.properties["address_space_size"] = "large"

    address_space = await workspace_repo.get_address_space_based_on_size(workspace_to_create.properties)
    assert "10.0.0.0/16" == address_space


@pytest.mark.asyncio
@patch('db.repositories.workspaces.WorkspaceRepository.validate_input_against_template')
@patch('core.config.RESOURCE_LOCATION', "useast2")
@patch('core.config.TRE_ID', "9876") @patch('core.config.CORE_ADDRESS_SPACE', "10.1.0.0/22") @patch('core.config.TRE_ADDRESS_SPACE', "10.0.0.0/12") async def test_create_workspace_item_creates_a_workspace_with_custom_address_space(validate_input_mock, workspace_repo, basic_workspace_request, basic_resource_template): workspace_to_create = basic_workspace_request workspace_to_create.properties["address_space_size"] = "custom" workspace_to_create.properties["address_space"] = "10.2.4.0/24" validate_input_mock.return_value = basic_resource_template workspace, _ = await workspace_repo.create_workspace_item(workspace_to_create, {}, "test_object_id", ["test_role"]) assert workspace.properties["address_space"] == workspace_to_create.properties["address_space"] @pytest.mark.asyncio @patch('db.repositories.workspaces.WorkspaceRepository.validate_input_against_template') @patch('core.config.RESOURCE_LOCATION', "useast2") @patch('core.config.TRE_ID', "9876") @patch('core.config.CORE_ADDRESS_SPACE', "10.1.0.0/22") @patch('core.config.TRE_ADDRESS_SPACE', "10.0.0.0/12") async def test_create_workspace_item_throws_exception_with_bad_custom_address_space(validate_input_mock, workspace_repo, basic_workspace_request, basic_resource_template): workspace_to_create = basic_workspace_request workspace_to_create.properties["address_space_size"] = "custom" workspace_to_create.properties["address_space"] = "192.168.0.0/24" validate_input_mock.return_value = basic_resource_template with pytest.raises(InvalidInput): await workspace_repo.create_workspace_item(workspace_to_create, {}, "test_object_id", ["test_role"]) @pytest.mark.asyncio async def test_get_address_space_based_on_size_with_custom_address_space_and_missing_address(workspace_repo, basic_workspace_request): workspace_to_create = basic_workspace_request workspace_to_create.properties["address_space_size"] = "custom" workspace_to_create.properties.pop("address_space", None) with pytest.raises(InvalidInput): await workspace_repo.get_address_space_based_on_size(workspace_to_create.properties) @pytest.mark.asyncio @patch('db.repositories.workspaces.WorkspaceRepository.get_active_workspaces') @patch('core.config.RESOURCE_LOCATION', "useast2") @patch('core.config.TRE_ID', "9876") @patch('core.config.CORE_ADDRESS_SPACE', "10.1.0.0/22") @patch('core.config.TRE_ADDRESS_SPACE', "10.0.0.0/12") async def test_get_address_space_based_on_size_with_address_space_only(get_active_workspaces_mock, workspace_repo, basic_workspace_request, workspace): workspace_with_address_space = copy.deepcopy(workspace) workspace_with_address_space.properties["address_space"] = "10.1.4.0/24" get_active_workspaces_mock.return_value = [workspace_with_address_space] workspace_to_create = basic_workspace_request address_space = await workspace_repo.get_address_space_based_on_size(workspace_to_create.properties) assert "10.1.5.0/24" == address_space @pytest.mark.asyncio @patch('db.repositories.workspaces.WorkspaceRepository.get_active_workspaces') @patch('core.config.RESOURCE_LOCATION', "useast2") @patch('core.config.TRE_ID', "9876") @patch('core.config.CORE_ADDRESS_SPACE', "10.1.0.0/22") @patch('core.config.TRE_ADDRESS_SPACE', "10.0.0.0/12") async def test_get_address_space_based_on_size_with_address_space_and_address_spaces(get_active_workspaces_mock, workspace_repo, basic_workspace_request, workspace): workspace_with_address_space = copy.deepcopy(workspace) workspace_with_address_space.properties["address_space"] = "10.1.4.0/24" workspace_with_address_spaces = copy.deepcopy(workspace) 
workspace_with_address_spaces.properties["address_spaces"] = ["10.1.5.0/24", "10.1.6.0/24"] workspace_with_both = copy.deepcopy(workspace) workspace_with_both.properties["address_spaces"] = ["10.1.7.0/24", "10.1.8.0/24"] workspace_with_both.properties["address_space"] = "10.1.7.0/24" get_active_workspaces_mock.return_value = [workspace_with_address_space, workspace_with_address_spaces, workspace_with_both] workspace_to_create = basic_workspace_request address_space = await workspace_repo.get_address_space_based_on_size(workspace_to_create.properties) assert "10.1.9.0/24" == address_space @pytest.mark.asyncio @patch('db.repositories.workspaces.WorkspaceRepository.validate_input_against_template') async def test_create_workspace_item_raises_value_error_if_template_is_invalid(validate_input_mock, workspace_repo, basic_workspace_request): workspace_input = basic_workspace_request validate_input_mock.side_effect = ValueError with pytest.raises(ValueError): await workspace_repo.create_workspace_item(workspace_input, {}, "test_object_id", ["test_role"]) def test_automatically_create_application_registration_returns_true(workspace_repo): dictToTest = {"auth_type": "Automatic"} assert workspace_repo.automatically_create_application_registration(dictToTest) is True def test_automatically_create_application_registration_returns_false(workspace_repo): dictToTest = {"client_id": "12345"} assert workspace_repo.automatically_create_application_registration(dictToTest) is False def test_workspace_owner_is_set_if_not_present_in_workspace_properties(workspace_repo): dictToTest = {} expected_object_id = "Expected" assert workspace_repo.get_workspace_owner(dictToTest, expected_object_id) is expected_object_id def test_workspace_owner_is_not_overwritten_if_present_in_workspace_properties(workspace_repo): dictToTest = {"workspace_owner_object_id": "Expected"} not_expected_object_id = "Not Expected" assert workspace_repo.get_workspace_owner(dictToTest, not_expected_object_id) == "Expected"
AzureTRE/api_app/tests_ma/test_db/test_repositories/test_workpaces_repository.py/0
{ "file_path": "AzureTRE/api_app/tests_ma/test_db/test_repositories/test_workpaces_repository.py", "repo_id": "AzureTRE", "token_count": 4823 }
94
import pytest from mock import patch, call import services.schema_service @patch('services.schema_service.read_schema') @patch('services.schema_service.enrich_template') def test_enrich_workspace_template_enriches_with_workspace_defaults_and_aad(enrich_template_mock, read_schema_mock, basic_resource_template): workspace_template = basic_resource_template # read schema called twice - once for default props and once for aad default_props = (['description'], {'description': {'type': 'string'}}) aad_props = (['client_id'], {'client_id': {'type': 'string'}}) read_schema_mock.side_effect = [default_props, aad_props] services.schema_service.enrich_workspace_template(workspace_template) read_schema_mock.assert_has_calls([call('workspace.json'), call('azuread.json')]) enrich_template_mock.assert_called_once_with(workspace_template, [default_props, aad_props], is_update=False) @patch('services.schema_service.read_schema') @patch('services.schema_service.enrich_template') def test_enrich_workspace_service_template_enriches_with_workspace_service_defaults(enrich_template_mock, read_schema_mock, basic_resource_template): workspace_service_template = basic_resource_template default_props = (['description'], {'description': {'type': 'string'}}) read_schema_mock.return_value = default_props services.schema_service.enrich_workspace_service_template(workspace_service_template) read_schema_mock.assert_called_once_with('workspace_service.json') enrich_template_mock.assert_called_once_with(workspace_service_template, [default_props], is_update=False) @patch('services.schema_service.read_schema') @patch('services.schema_service.enrich_template') def test_enrich_user_resource_template_enriches_with_user_resource_defaults(enrich_template_mock, read_schema_mock, basic_user_resource_template): user_resource_template = basic_user_resource_template default_props = (['description'], {'description': {'type': 'string'}}) read_schema_mock.return_value = default_props services.schema_service.enrich_user_resource_template(user_resource_template) read_schema_mock.assert_called_once_with('user_resource.json') enrich_template_mock.assert_called_once_with(user_resource_template, [default_props], is_update=False) @pytest.mark.parametrize('original, extra1, extra2, expected', [ # basic scenario ( {'num_vms': {'type': 'string'}}, {'description': {'type': 'string'}, 'display_name': {'type': 'string'}}, {'client_id': {'type': 'string'}}, {'num_vms': {'type': 'string'}, 'description': {'type': 'string'}, 'display_name': {'type': 'string'}, 'client_id': {'type': 'string'}} ), # empty original ( {}, {'description': {'type': 'string'}, 'display_name': {'type': 'string'}}, {'client_id': {'type': 'string'}}, {'description': {'type': 'string'}, 'display_name': {'type': 'string'}, 'client_id': {'type': 'string'}} ), # duplicates ( {'description': {'type': 'string'}}, {'description': {'type': 'string'}, 'display_name': {'type': 'string'}}, {'client_id': {'type': 'string'}}, {'description': {'type': 'string'}, 'display_name': {'type': 'string'}, 'client_id': {'type': 'string'}} ), # duplicate names - different defaults ( {'description': {'type': 'string', 'default': 'service description'}, 'display_name': {'type': 'string'}}, {'description': {'type': 'string', 'default': ''}}, {'client_id': {'type': 'string'}}, {'description': {'type': 'string', 'default': 'service description'}, 'display_name': {'type': 'string'}, 'client_id': {'type': 'string'}} )]) def test_enrich_template_combines_properties(original, extra1, extra2, expected, 
basic_resource_template): original_template = basic_resource_template original_template.properties = original template = services.schema_service.enrich_template(original_template, [([], extra1), ([], extra2)]) assert template['properties'] == expected @pytest.mark.parametrize('original, extra1, extra2, expected', [ # basic scenario ( ['num_vms'], ['description', 'display_name'], ['client_id'], ['num_vms', 'description', 'display_name', 'client_id'] ), # empty original ( [], ['description', 'display_name'], ['client_id'], ['description', 'display_name', 'client_id'] ), # duplicates ( ['description'], ['description', 'display_name'], ['client_id'], ['description', 'display_name', 'client_id'] )]) def test_enrich_template_combines_required(original, extra1, extra2, expected, basic_resource_template): original_template = basic_resource_template original_template.required = original template = services.schema_service.enrich_template(original_template, [(extra1, {}), (extra2, {})]) # test that the list contents are expected (sorting doesn't matter) actual = template['required'] assert len(actual) == len(expected) for item in expected: assert item in actual def test_enrich_template_adds_system_properties(basic_resource_template): original_template = basic_resource_template template = services.schema_service.enrich_template(original_template, []) assert 'tre_id' in template['system_properties'] def test_enrich_template_adds_read_only_on_update(basic_resource_template): original_template = basic_resource_template template = services.schema_service.enrich_template(original_template, [], is_update=True) assert "readOnly" not in template["properties"]["updateable_property"].keys() assert template["properties"]["fixed_property"]["readOnly"] is True
AzureTRE/api_app/tests_ma/test_services/test_schema_service.py/0
{ "file_path": "AzureTRE/api_app/tests_ma/test_services/test_schema_service.py", "repo_id": "AzureTRE", "token_count": 2135 }
95
import click


class SharedServiceContext(object):
    def __init__(self, shared_service_id: str):
        self.shared_service_id = shared_service_id


pass_shared_service_context = click.make_pass_decorator(SharedServiceContext)


class SharedServiceOperationContext(object):
    def __init__(self, shared_service_id: str, operation_id: str):
        self.shared_service_id = shared_service_id
        self.operation_id = operation_id

    @staticmethod
    def add_operation_id_to_context_obj(ctx: click.Context, operation_id: str) -> "SharedServiceOperationContext":
        shared_service_context = ctx.find_object(SharedServiceContext)
        return SharedServiceOperationContext(shared_service_context.shared_service_id, operation_id)


pass_shared_service_operation_context = click.make_pass_decorator(SharedServiceOperationContext)
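# --- Illustrative usage (editor's addition, not part of the original module) ---
# A hypothetical command showing how click injects the nearest
# SharedServiceOperationContext via the pass decorator defined above; the
# invocation seeds the context object the way a parent group normally would.
if __name__ == "__main__":
    @click.command(name="show")
    @pass_shared_service_operation_context
    def _operation_show(op_ctx: SharedServiceOperationContext) -> None:
        click.echo(f"{op_ctx.shared_service_id}/{op_ctx.operation_id}")

    _operation_show(args=[], obj=SharedServiceOperationContext("svc-123", "op-456"), standalone_mode=False)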
AzureTRE/cli/tre/commands/shared_services/contexts.py/0
{ "file_path": "AzureTRE/cli/tre/commands/shared_services/contexts.py", "repo_id": "AzureTRE", "token_count": 278 }
96
import json
import logging

import click

from tre.api_client import ApiClient
from tre.output import output, output_option, query_option


@click.group(name="workspace-templates", help="Manage workspace templates")
def workspace_templates():
    pass


@click.command(name="list", help="List workspace-templates")
@output_option()
@query_option()
def workspace_templates_list(output_format, query):
    log = logging.getLogger(__name__)

    client = ApiClient.get_api_client_from_config()
    response = client.call_api(
        log,
        'GET',
        '/api/workspace-templates',
    )
    output(response, output_format=output_format, query=query, default_table_query=r"templates[].{name:name, title: title, description:description}")


@click.command(name="new", help="Register a new workspace template")
@click.option('--definition', help='JSON definition for the template', required=False)
@click.option('--definition-file', help='File containing JSON definition for the template', required=False, type=click.File("r"))
@output_option()
@query_option()
def workspace_templates_create(definition, definition_file, output_format, query):
    log = logging.getLogger(__name__)

    if definition is None:
        if definition_file is None:
            raise click.UsageError('Please specify either a definition or a definition file')
        definition = definition_file.read()

    definition_dict = json.loads(definition)

    client = ApiClient.get_api_client_from_config()
    click.echo("Registering template...", err=True)
    response = client.call_api(log, 'POST', '/api/workspace-templates', json_data=definition_dict)

    output(response, output_format=output_format, query=query, default_table_query=r"{id: id, name:name, title: title, description:description}")
    return response.text


workspace_templates.add_command(workspace_templates_list)
workspace_templates.add_command(workspace_templates_create)
AzureTRE/cli/tre/commands/workspace_templates/workspace_templates.py/0
{ "file_path": "AzureTRE/cli/tre/commands/workspace_templates/workspace_templates.py", "repo_id": "AzureTRE", "token_count": 623 }
97
import logging import click from tre.commands.operation import get_operation_id_completion, operation_show from tre.output import output_option, query_option from tre.api_client import ApiClient from .contexts import pass_user_resource_operation_context, UserResourceOperationContext def operation_id_completion(ctx: click.Context, param: click.Parameter, incomplete: str): log = logging.getLogger(__name__) parent_ctx = ctx.parent user_resource_id = parent_ctx.params["user_resource_id"] parent2_ctx = parent_ctx.parent workspace_service_id = parent2_ctx.params["workspace_service_id"] parent3_ctx = parent2_ctx.parent workspace_id = parent3_ctx.params["workspace_id"] list_url = f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}/user-resources/{user_resource_id}/operations' client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) return get_operation_id_completion(ctx, log, list_url, param, incomplete, scope_id=workspace_scope) @click.group(name="operation", invoke_without_command=True, help="Perform actions on an operation") @click.argument('operation_id', required=True, type=click.UUID, shell_complete=operation_id_completion) @click.pass_context def user_resource_operation(ctx: click.Context, operation_id) -> None: ctx.obj = UserResourceOperationContext.add_operation_id_to_context_obj(ctx, operation_id) @click.command(name="show", help="Show user resource operation") @click.option('--no-wait', help="If an operation is in progress, do not wait for it to complete", flag_value=True, default=False) @output_option() @query_option() @pass_user_resource_operation_context def user_resource_operation_show(user_resource_operation_context: UserResourceOperationContext, no_wait, output_format, query, suppress_output: bool = False) -> None: log = logging.getLogger(__name__) workspace_id = user_resource_operation_context.workspace_id if workspace_id is None: raise click.UsageError('Missing workspace ID') workspace_service_id = user_resource_operation_context.workspace_service_id if workspace_service_id is None: raise click.UsageError('Missing workspace-service ID') user_resource_id = user_resource_operation_context.user_resource_id if user_resource_id is None: raise click.UsageError('Missing user-resource ID') operation_id = user_resource_operation_context.operation_id if operation_id is None: raise click.UsageError('Missing operation ID') client = ApiClient.get_api_client_from_config() workspace_scope = client.get_workspace_scope(log, workspace_id) operation_url = f'/api/workspaces/{workspace_id}/workspace-services/{workspace_service_id}/user-resources/{user_resource_id}/operations/{operation_id}' operation_show(log, operation_url, no_wait, output_format, query, suppress_output, scope_id=workspace_scope) user_resource_operation.add_command(user_resource_operation_show)
AzureTRE/cli/tre/commands/workspaces/workspace_services/user_resources/operation.py/0
{ "file_path": "AzureTRE/cli/tre/commands/workspaces/workspace_services/user_resources/operation.py", "repo_id": "AzureTRE", "token_count": 1018 }
98
# Azure Provider source and version being used terraform { required_providers { azurerm = { source = "hashicorp/azurerm" version = "=3.74.0" } random = { source = "hashicorp/random" version = "~> 3.4.0" } local = { source = "hashicorp/local" version = "~> 2.4.0" } http = { source = "hashicorp/http" version = "~> 3.2.0" } azapi = { source = "Azure/azapi" version = "~> 1.9.0" } } backend "azurerm" {} } provider "azapi" { use_msi = var.arm_use_msi } provider "azurerm" { features { key_vault { # Don't purge on destroy (this would fail due to purge protection being enabled on keyvault) purge_soft_delete_on_destroy = false purge_soft_deleted_secrets_on_destroy = false purge_soft_deleted_certificates_on_destroy = false purge_soft_deleted_keys_on_destroy = false # When recreating an environment, recover any previously soft deleted secrets - set to true by default recover_soft_deleted_key_vaults = true recover_soft_deleted_secrets = true recover_soft_deleted_certificates = true recover_soft_deleted_keys = true } } } resource "azurerm_resource_group" "core" { location = var.location name = "rg-${var.tre_id}" tags = { project = "Azure Trusted Research Environment" tre_id = var.tre_id source = "https://github.com/microsoft/AzureTRE/" ci_git_ref = var.ci_git_ref # TODO: not include if empty } lifecycle { ignore_changes = [tags] } } module "azure_monitor" { source = "./azure-monitor" tre_id = var.tre_id location = var.location resource_group_name = azurerm_resource_group.core.name shared_subnet_id = module.network.shared_subnet_id azure_monitor_dns_zone_id = module.network.azure_monitor_dns_zone_id azure_monitor_oms_opinsights_dns_zone_id = module.network.azure_monitor_oms_opinsights_dns_zone_id azure_monitor_ods_opinsights_dns_zone_id = module.network.azure_monitor_ods_opinsights_dns_zone_id azure_monitor_agentsvc_dns_zone_id = module.network.azure_monitor_agentsvc_dns_zone_id blob_core_dns_zone_id = module.network.blob_core_dns_zone_id tre_core_tags = local.tre_core_tags enable_local_debugging = var.enable_local_debugging depends_on = [ module.network ] } module "network" { source = "./network" tre_id = var.tre_id location = var.location resource_group_name = azurerm_resource_group.core.name core_address_space = var.core_address_space arm_environment = var.arm_environment } module "appgateway" { source = "./appgateway" tre_id = var.tre_id location = var.location resource_group_name = azurerm_resource_group.core.name app_gw_subnet = module.network.app_gw_subnet_id shared_subnet = module.network.shared_subnet_id api_fqdn = azurerm_linux_web_app.api.default_hostname keyvault_id = azurerm_key_vault.kv.id static_web_dns_zone_id = module.network.static_web_dns_zone_id log_analytics_workspace_id = module.azure_monitor.log_analytics_workspace_id depends_on = [ module.network, azurerm_key_vault.kv, azurerm_key_vault_access_policy.deployer, azurerm_private_endpoint.api_private_endpoint ] } module "airlock_resources" { source = "./airlock" tre_id = var.tre_id location = var.location resource_group_name = azurerm_resource_group.core.name airlock_storage_subnet_id = module.network.airlock_storage_subnet_id airlock_events_subnet_id = module.network.airlock_events_subnet_id docker_registry_server = local.docker_registry_server mgmt_resource_group_name = var.mgmt_resource_group_name mgmt_acr_name = var.acr_name api_principal_id = azurerm_user_assigned_identity.id.principal_id airlock_app_service_plan_sku = var.core_app_service_plan_sku airlock_processor_subnet_id = module.network.airlock_processor_subnet_id 
airlock_servicebus = azurerm_servicebus_namespace.sb applicationinsights_connection_string = module.azure_monitor.app_insights_connection_string enable_malware_scanning = var.enable_airlock_malware_scanning arm_environment = var.arm_environment tre_core_tags = local.tre_core_tags log_analytics_workspace_id = module.azure_monitor.log_analytics_workspace_id blob_core_dns_zone_id = module.network.blob_core_dns_zone_id file_core_dns_zone_id = module.network.file_core_dns_zone_id queue_core_dns_zone_id = module.network.queue_core_dns_zone_id table_core_dns_zone_id = module.network.table_core_dns_zone_id enable_local_debugging = var.enable_local_debugging myip = local.myip depends_on = [ azurerm_servicebus_namespace.sb, module.network ] } module "resource_processor_vmss_porter" { count = var.resource_processor_type == "vmss_porter" ? 1 : 0 source = "./resource_processor/vmss_porter" tre_id = var.tre_id location = var.location resource_group_name = azurerm_resource_group.core.name acr_id = data.azurerm_container_registry.mgmt_acr.id app_insights_connection_string = module.azure_monitor.app_insights_connection_string resource_processor_subnet_id = module.network.resource_processor_subnet_id docker_registry_server = local.docker_registry_server resource_processor_vmss_porter_image_repository = var.resource_processor_vmss_porter_image_repository service_bus_namespace_id = azurerm_servicebus_namespace.sb.id service_bus_namespace_fqdn = local.service_bus_namespace_fqdn service_bus_resource_request_queue = azurerm_servicebus_queue.workspacequeue.name service_bus_deployment_status_update_queue = azurerm_servicebus_queue.service_bus_deployment_status_update_queue.name mgmt_storage_account_name = var.mgmt_storage_account_name mgmt_resource_group_name = var.mgmt_resource_group_name terraform_state_container_name = var.terraform_state_container_name key_vault_name = azurerm_key_vault.kv.name key_vault_url = azurerm_key_vault.kv.vault_uri key_vault_id = azurerm_key_vault.kv.id subscription_id = var.arm_subscription_id resource_processor_number_processes_per_instance = var.resource_processor_number_processes_per_instance resource_processor_vmss_sku = var.resource_processor_vmss_sku arm_environment = var.arm_environment logging_level = var.logging_level rp_bundle_values = var.rp_bundle_values depends_on = [ module.network, module.azure_monitor, azurerm_key_vault.kv, azurerm_key_vault_access_policy.deployer ] } module "terraform_azurerm_environment_configuration" { source = "git::https://github.com/microsoft/terraform-azurerm-environment-configuration.git?ref=0.2.0" arm_environment = var.arm_environment }
AzureTRE/core/terraform/main.tf/0
{ "file_path": "AzureTRE/core/terraform/main.tf", "repo_id": "AzureTRE", "token_count": 4102 }
99
variable "tre_id" {
  type = string
}
variable "location" {
  type = string
}
variable "acr_id" {
  type = string
}
variable "resource_group_name" {
  type = string
}
variable "resource_processor_subnet_id" {
  type = string
}
variable "resource_processor_vmss_porter_image_repository" {
  type = string
}
variable "docker_registry_server" {
  type = string
}
variable "service_bus_namespace_id" {
  type = string
}
variable "service_bus_namespace_fqdn" {
  type = string
}
variable "service_bus_resource_request_queue" {
  type = string
}
variable "service_bus_deployment_status_update_queue" {
  type = string
}
variable "mgmt_storage_account_name" {
  type = string
}
variable "mgmt_resource_group_name" {
  type = string
}
variable "terraform_state_container_name" {
  type = string
}
variable "app_insights_connection_string" {
  type = string
}
variable "key_vault_name" {
  type = string
}
variable "key_vault_url" {
  type = string
}
variable "key_vault_id" {
  type = string
}
variable "resource_processor_number_processes_per_instance" {
  type = string
}
variable "resource_processor_vmss_sku" {
  type = string
}
variable "arm_environment" {
  type = string
}
variable "subscription_id" {
  description = "The subscription id to create the resource processor permission/role. If not supplied will use the TF context."
  type        = string
  default     = ""
}
variable "logging_level" {
  type = string
}
variable "rp_bundle_values" {
  type = map(string)
}

locals {
  rp_bundle_values_formatted = join("\n ", [for key in keys(var.rp_bundle_values) : "RP_BUNDLE_${key}=${var.rp_bundle_values[key]}"])
}
AzureTRE/core/terraform/resource_processor/vmss_porter/variables.tf/0
{ "file_path": "AzureTRE/core/terraform/resource_processor/vmss_porter/variables.tf", "repo_id": "AzureTRE", "token_count": 578 }
100
#!/bin/bash

# This script is designed to be `source`d to create reusable helper functions

# Magic string for MSGraph
msGraphAppId="00000003-0000-0000-c000-000000000000"

function get_msgraph_scope() {
    oauthScope=$(az ad sp show --id ${msGraphAppId} --query "oauth2PermissionScopes[?value=='$1'].id | [0]" --output tsv --only-show-errors)
    jq -c . <<- JSON
    {
        "id": "${oauthScope}",
        "type": "Scope"
    }
JSON
}

function get_msgraph_role() {
    appRoleScope=$(az ad sp show --id ${msGraphAppId} --query "appRoles[?value=='$1'].id | [0]" --output tsv --only-show-errors)
    jq -c . <<- JSON
    {
        "id": "${appRoleScope}",
        "type": "Role"
    }
JSON
}
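For illustration only, a rough Python equivalent of `get_msgraph_scope` (assumes the Azure CLI is installed and logged in; the MS Graph app ID is the same well-known value as above):

```python
import json
import subprocess

# Well-known application ID for Microsoft Graph, as in the bash helper above.
MS_GRAPH_APP_ID = "00000003-0000-0000-c000-000000000000"

def get_msgraph_scope(scope_value: str) -> str:
    """Look up the OAuth2 permission scope ID for e.g. 'User.Read' and
    return the {id, type} JSON fragment used in app registration manifests."""
    query = f"oauth2PermissionScopes[?value=='{scope_value}'].id | [0]"
    scope_id = subprocess.check_output(
        ["az", "ad", "sp", "show", "--id", MS_GRAPH_APP_ID,
         "--query", query, "--output", "tsv", "--only-show-errors"],
        text=True,
    ).strip()
    return json.dumps({"id": scope_id, "type": "Scope"})

if __name__ == "__main__":
    print(get_msgraph_scope("User.Read"))
```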
AzureTRE/devops/scripts/aad/get_msgraph_access.sh/0
{ "file_path": "AzureTRE/devops/scripts/aad/get_msgraph_access.sh", "repo_id": "AzureTRE", "token_count": 297 }
101
#!/bin/bash

# This script deletes a specific deployment of TRE including resource
# groups of the management (ops) part, core as well as all workspace ones.
# It's doing this by finding all resource groups that start with the same
# name as the core one!
# If possible it will purge the keyvault making it possible to reuse the same
# TRE_ID for a later deployment.

set -o errexit
set -o pipefail
# set -o xtrace

function usage() {
    cat <<USAGE
Usage: $0 --core-tre-rg "something" [--no-wait]
Options:
    --core-tre-rg   The core resource group name of the TRE.
    --no-wait       Doesn't wait for delete operations to complete and exits asap.
USAGE
    exit 1
}

no_wait=false

while [ "$1" != "" ]; do
    case $1 in
    --core-tre-rg)
        shift
        core_tre_rg=$1
        ;;
    --no-wait)
        no_wait=true
        ;;
    *)
        echo "Unexpected argument: '$1'"
        usage
        ;;
    esac

    if [[ -z "$2" ]]; then
        # if no more args then stop processing
        break
    fi

    shift # remove the current value for `$1` and use the next
done

# done with processing args and can set this
set -o nounset

if [[ -z ${core_tre_rg:-} ]]; then
    if [[ -n ${TRE_ID:-} ]]; then
        core_tre_rg="rg-${TRE_ID}"
    else
        echo "Core TRE resource group name wasn't provided"
        usage
    fi
fi

no_wait_option=""
if ${no_wait}
then
    no_wait_option="--no-wait"
fi

group_show_result=$(az group show --name "${core_tre_rg}" > /dev/null 2>&1; echo $?)
if [[ "$group_show_result" != "0" ]]; then
    echo "Resource group ${core_tre_rg} not found - skipping destroy"
    exit 0
fi

locks=$(az group lock list -g "${core_tre_rg}" --query [].id -o tsv | tr -d \')
if [ -n "${locks:-}" ]
then
    for lock in $locks
    do
        echo "Deleting lock ${lock}..."
        az resource lock delete --ids "${lock}"
    done
fi

delete_resource_diagnostic() {
    # the command will return an error if the resource doesn't support this setting, so need to suppress it.
    # first line works on azcli 2.37, second line works on azcli 2.42
    { az monitor diagnostic-settings list --resource "$1" --query "value[].name" -o tsv 2> /dev/null \
        && az monitor diagnostic-settings list --resource "$1" --query "[].name" -o tsv 2> /dev/null ; } |
    while read -r diag_name; do
        echo "Deleting ${diag_name} on $1"
        az monitor diagnostic-settings delete --resource "$1" --name "${diag_name}"
    done
}
export -f delete_resource_diagnostic

echo "Looking for diagnostic settings..."
# sometimes, diagnostic settings aren't deleted with the resource group. we need to manually do that,
# and unfortunately, there's no easy way to list all that are present.
# using xargs to run in parallel.
az resource list --resource-group "${core_tre_rg}" --query '[].[id]' -o tsv | xargs -P 10 -I {} bash -c 'delete_resource_diagnostic "{}"'

tre_id=${core_tre_rg#"rg-"}

# purge keyvault if possible (makes it possible to reuse the same tre_id later)
# this has to be done before we delete the resource group since we might not wait for it to complete
keyvault_name="kv-${tre_id}"
keyvault=$(az keyvault show --name "${keyvault_name}" --resource-group "${core_tre_rg}" -o json || echo 0)
if [ "${keyvault}" != "0" ]; then
    secrets=$(az keyvault secret list --vault-name "${keyvault_name}" -o json | jq -r '.[].id')
    for secret_id in ${secrets}; do
        echo "Deleting ${secret_id}"
        az keyvault secret delete --id "${secret_id}"
    done

    keys=$(az keyvault key list --vault-name "${keyvault_name}" -o json | jq -r '.[].id')
    for key_id in ${keys}; do
        echo "Deleting ${key_id}"
        az keyvault key delete --id "${key_id}"
    done

    certificates=$(az keyvault certificate list --vault-name "${keyvault_name}" -o json | jq -r '.[].id')
    for certificate_id in ${certificates}; do
        echo "Deleting ${certificate_id}"
        az keyvault certificate delete --id "${certificate_id}"
    done

    echo "Removing access policies so if the vault is recovered they are not there"
    access_policies=$(echo "$keyvault" | jq -r '.properties.accessPolicies[].objectId')
    for access_policy_id in ${access_policies}; do
        echo "Attempting to delete access policy ${access_policy_id}"
        az keyvault delete-policy --name "${keyvault_name}" --resource-group "${core_tre_rg}" --object-id "${access_policy_id}" || echo "Not deleting access policy for ${access_policy_id}."
    done
fi

# Delete the vault if purge protection is not on.
if [[ $(az keyvault list --resource-group "${core_tre_rg}" --query "[?properties.enablePurgeProtection==\`null\`] | length (@)" -o tsv) != 0 ]]; then
    echo "Deleting keyvault: ${keyvault_name}"
    az keyvault delete --name "${keyvault_name}" --resource-group "${core_tre_rg}"
    echo "Purging keyvault: ${keyvault_name}"
    az keyvault purge --name "${keyvault_name}" ${no_wait_option}
else
    echo "Resource group ${core_tre_rg} doesn't have a keyvault without purge protection."
fi

# linked storage accounts don't get deleted with the workspace
workspace_name="log-${tre_id}"
workspace=$(az monitor log-analytics workspace show --workspace-name "${workspace_name}" --resource-group "${core_tre_rg}" || echo 0)
if [ "${workspace}" != "0" ]; then
    echo "Deleting Linked Storage accounts if present..."
    az monitor log-analytics workspace linked-storage list -g "${core_tre_rg}" --workspace-name "${workspace_name}" -o tsv --query '[].id' \
        | xargs -P 10 -I {} az rest --method delete --uri "{}?api-version=2020-08-01"
fi

# this will find the mgmt, core resource groups as well as any workspace ones
# we are reverse-sorting to first delete the workspace groups (might not be
# good enough because we use no-wait sometimes)
az group list --query "[?starts_with(name, '${core_tre_rg}')].[name]" -o tsv | sort -r | while read -r rg_item; do
    echo "Deleting resource group: ${rg_item}"
    az group delete --resource-group "${rg_item}" --yes ${no_wait_option}
done
AzureTRE/devops/scripts/destroy_env_no_terraform.sh/0
{ "file_path": "AzureTRE/devops/scripts/destroy_env_no_terraform.sh", "repo_id": "AzureTRE", "token_count": 2059 }
102
#!/bin/bash

set -e

function usage() {
    cat <<USAGE
Usage: $0 [-g | --mgmt-resource-group-name ] [-s | --mgmt-storage-account-name] [-n | --state-container-name] [-k | --key] [-c | --cmd] [-l | --logfile]
Options:
    -g, --mgmt-resource-group-name     Management resource group name
    -s, --mgmt-storage-account-name    Management storage account name
    -n, --state-container-name         State container name
    -k, --key                          Key
    -c, --cmd                          Command to execute
    -l, --logfile                      Log file to write output to
USAGE
    exit 1
}

# if no arguments are provided, return usage function
if [ $# -eq 0 ]; then
    usage # run usage function
fi

while [ "$1" != "" ]; do
    case $1 in
    -g | --mgmt-resource-group-name)
        shift
        mgmt_resource_group_name=$1
        ;;
    -s | --mgmt-storage-account-name)
        shift
        mgmt_storage_account_name=$1
        ;;
    -n | --state-container-name)
        shift
        container_name=$1
        ;;
    -k | --key)
        shift
        key=$1
        ;;
    -c | --cmd)
        shift
        tf_command=$1
        ;;
    -l | --logfile)
        shift
        tf_logfile=$1
        ;;
    *)
        usage
        ;;
    esac

    if [[ -z "$2" ]]; then
        # if no more args then stop processing
        break
    fi

    shift # remove the current value for `$1` and use the next
done

if [[ -z ${mgmt_resource_group_name+x} ]]; then
    echo -e "No terraform state resource group name provided\n"
    usage
fi

if [[ -z ${mgmt_storage_account_name+x} ]]; then
    echo -e "No terraform state storage account name provided\n"
    usage
fi

if [[ -z ${container_name+x} ]]; then
    echo -e "No terraform state container name provided\n"
    usage
fi

if [[ -z ${key+x} ]]; then
    echo -e "No key provided\n"
    usage
fi

if [[ -z ${tf_command+x} ]]; then
    echo -e "No command provided\n"
    usage
fi

if [[ -z ${tf_logfile+x} ]]; then
    tf_logfile="tmp$$.log"
    echo -e "No logfile provided, using ${tf_logfile}\n"
fi

terraform init -input=false -backend=true -reconfigure \
    -backend-config="resource_group_name=${mgmt_resource_group_name}" \
    -backend-config="storage_account_name=${mgmt_storage_account_name}" \
    -backend-config="container_name=${container_name}" \
    -backend-config="key=${key}"

RUN_COMMAND=1
while [ $RUN_COMMAND = 1 ]
do
    RUN_COMMAND=0
    TF_CMD="$tf_command"

    script -c "$TF_CMD" "$tf_logfile"

    # upload the log file?
    if [[ $TF_LOG == "DEBUG" ]] ; then
        az storage blob upload --file $tf_logfile \
            --container-name "tflogs" \
            --account-name "$mgmt_storage_account_name" \
            --auth-mode key
    fi

    LOCKED_STATE=$(cat ${tf_logfile} | grep -c 'Error acquiring the state lock') || true
    TF_ERROR=$(cat ${tf_logfile} | grep -c 'COMMAND_EXIT_CODE="1"') || true

    if [[ $LOCKED_STATE -gt 0 ]]; then
        RUN_COMMAND=1
        echo "Error acquiring the state lock"
        sleep 10
    elif [[ $TF_ERROR -gt 0 ]]; then
        echo "Terraform Error"
        exit 1
    fi
done
AzureTRE/devops/scripts/terraform_wrapper.sh/0
{ "file_path": "AzureTRE/devops/scripts/terraform_wrapper.sh", "repo_id": "AzureTRE", "token_count": 1468 }
103
# Cost Reporting

The exact running costs will depend on the number of workspaces you have deployed, the workspace services you have enabled within them, and the Azure data center region.

TRE users can use the Azure TRE cost API to get a report of the running costs of TRE resources according to their role. TRE provides a set of cost APIs which generate cost reports for a custom timeframe, with the level of detail determined by the user's role. The cost APIs are based on [Azure Cost Management](https://docs.microsoft.com/en-us/azure/cost-management-billing/) and [TRE template Azure resource tagging](#azure-resources-tagging); Azure resources must be tagged when authoring a new template.

## Cost APIs

| Method | Endpoint | Description | Allowed Roles | Level of details |
| --- | --- | --- | --- | --- |
| GET | /api/costs | Get overall costs of a TRE Instance | TRE Admin | Core services, Shared services, workspaces |
| GET | /api/workspace/{workspace_id}/costs | Get workspace costs | TRE Admin, Workspace Owner | Workspace, workspace services, user resources |

## Get overall cost report

**Description**

Get the overall cost report for a TRE instance (core services), per shared service and per workspace.

**Authorization**

Only a TRE Admin can call this URL; others will get an unauthorized response.

**Endpoint**

GET /api/costs

**Query Parameters**

| Parameter name | Type | Description | Default Value |
| --- | --- | --- | --- |
| from, to | datetime | Custom time period, up to a 1 year timeframe, iso-8601 format | Month-to-date period |
| granularity | Enum (Daily, None) | The granularity of rows in the query. | None |

**Output**

```json
{
  "coreServices": [
    {"date": "", "cost": "", "currency": ""}
  ],
  "sharedServices": [
    {
      "id": "shared service id",
      "name": "shared service name",
      "costs": [
        {"date": "", "cost": "", "currency": ""}
      ]
    }],
  "workspaces": [
    {
      "id": "workspace id",
      "name": "workspace name",
      "costs": [
        {"date": "", "cost": "", "currency": ""}
      ]}
  ]
}
```

## Get workspace cost report

**Description**

Get the overall cost report for a specific workspace.

**Authorization**

Only a TRE Admin and the workspace owner can call this URL; others will get an unauthorized response.

**Input**

GET /api/workspaces/{workspace_id}/costs

**Query Parameters**

| Parameter name | Type | Description | Default Value |
| --- | --- | --- | --- |
| from, to | datetime | Custom time period, up to a 1 year timeframe, iso-8601 format | Month-to-date period |
| granularity | Enum (Daily, None) | The granularity of rows in the query. | None |
| workspace_id | Guid | The workspace id to generate the report for | Required field for workspace and user resource level APIs |

**Output**

```json
{
  "id": "workspace id",
  "name": "workspace name",
  "workspaceServices": [{
    "id": "workspace service id",
    "name": "workspace service name",
    "costs": [{"date": "", "cost": "", "currency": ""}],
    "userResources": [{
      "id": "user resource id",
      "name": "user resource name",
      "costs": [{"date": "", "cost": "", "currency": ""}]
    }]
  }]
}
```

## Sequence Diagram

[source](https://swimlanes.io/u/D8oktWo4j?rev=4)

[![Sequence Diagram](../assets/cost-reporting-sequence-diagram.png)](../assets/cost-reporting-sequence-diagram.png)

## Limitations and notes

* Cost and usage data is typically available in Cost Management within 8-24 hours.
* Tags aren't applied to historical data; template authors need to make sure all relevant [Azure resources of a TRE resource are tagged as instructed](#azure-resources-tagging).
* Cost records might include [multiple currencies](https://azure.microsoft.com/en-us/blog/azure-cost-management-updates-july-2019/#currency) for the same date and TRE resource.
* Once cost data becomes available in Cost Management, it will be retained for at least seven years. Only the last 13 months is available from the TRE Cost API and the Azure Portal. For historical data older than 13 months, please use [Exports](https://docs.microsoft.com/en-us/azure/cost-management-billing/costs/tutorial-export-acm-data?tabs=azure-portal) or the [UsageDetails API](https://docs.microsoft.com/en-us/rest/api/consumption/usage-details/list?tabs=HTTP).
* Several Azure Offers (types of Azure subscriptions) are not supported yet. For the full list of supported and unsupported Azure Offers, please refer to [Supported Microsoft Azure offers](https://learn.microsoft.com/en-us/azure/cost-management-billing/costs/understand-cost-mgt-data#supported-microsoft-azure-offers); Azure TRE will not display costs for unsupported Azure subscriptions.
* For more information please refer to [Understand Cost Management data](https://docs.microsoft.com/en-us/azure/cost-management-billing/costs/understand-cost-mgt-data) and the Cost API swagger docs.

## Azure Resources Tagging

TRE Cost Reporting is based on Azure tagging to be able to generate cost reports for core services, shared services, workspaces, workspace services and user resources. Template authors need to make sure that the underlying Azure resources are tagged with the following tags:

| <div style="width:160px">Tag</div> | Value | Applies to |
| ---------------------------------- | ----- | ---------- |
| `tre_id` | Unique ID of the TRE instance | All resources of a TRE instance |
| `tre_core_service_id` | Unique ID of the TRE instance | All TRE core azure resources |
| `shared_service_id` | The shared service unique ID | Shared Services |
| `workspace_id` | The workspace unique ID | Workspaces, Workspace Services and User Resources |
| `workspace_service_id` | The workspace service unique ID | Workspace Services and User Resources |
| `user_resource_id` | The user resource unique ID | User Resources |

!!! Notes
    - The main Azure Container Registry and Storage Account are not tagged, as those resources are used to spin up more than one Azure TRE instance.
    - There are some cases in which Azure resources cannot be tagged by the template author, for example services deployed outside of TRE (such as CycleCloud or Cromwell on Azure), or services which don't support tagging for cost management (for example Azure ML compute). [See this article for the full list of Azure tag support](https://learn.microsoft.com/en-us/azure/azure-resource-manager/management/tag-support).

[![Resource tagging example](../assets/resource-tagging-example.png)](../assets/resource-tagging-example.png)

## TRE Cost API Logic

The Cost Management query API, which the Azure TRE cost APIs are based upon, returns a flat result of all costs with every tag combination that exists on the resources matched by the provided tag filter, meaning that a resource which has tre_id, tre_core_service_id, tre_workspace_id, tre_workspace_service_id and tre_user_resource_id will be summarized by all of those tags. If the filtered resources have more tags, those tags will appear in the result.
To roll up untagged resources into workspace costs, the Azure TRE cost API first calls Azure Resource Manager to get all resource group names which are tagged with the workspace_id, and passes those names into the Azure Cost Management query API as a filter, grouping by resource group along with the tag name. Untagged cost results will appear with an empty tag name and get aggregated using the resource group and the relevant workspace ID. The Azure TRE cost API then joins this response with the hierarchical structure of the requested report.

**Cost management query API Request example**

```text
@SUBSCRIPTION = FILL_YOUR_SUBSCRIPTION_ID
@COST_API_URI = https://management.azure.com/subscriptions/{{SUBSCRIPTION}}/providers/Microsoft.CostManagement
@COST_API_VERSION = 2021-10-01
@TRE_ID = mytre
@TIME_FROM = MM/DD/YYYY
@TIME_TO = MM/DD/YYYY

POST {{COST_API_URI}}/query?api-version={{COST_API_VERSION}}&$expand=properties/data
Authorization: {{$aadToken $aadV2TenantId}}
Content-Type: application/json
```

Payload

```json
{
    "type": "ActualCost",
    "timeframe": "Custom", // can also be BillingMonthToDate|MonthToDate|TheLastBillingMonth|TheLastMonth|WeekToDate according to input
    "timePeriod": {
        "from": "{{TIME_FROM}}",
        "to": "{{TIME_TO}}"
    },
    "dataset": {
        "granularity": "None", // can also be "Daily" for daily totals
        "aggregation": {
            "totalCost": {
                "name": "PreTaxCost", // can also be 'UsageQuantity','Cost','CostUSD','PreTaxCostUSD' (up to two aggregations)
                "function": "Sum"
            }
        },
        "filter": {
            "or": [
                {
                    "dimensions": {
                        "name": "ResourceGroup",
                        "operator": "In",
                        "values": ["{{RG_NAME}}"]
                    }
                },
                {
                    "tags": {
                        "name": "{{TAG_KEY}}",
                        "operator": "In",
                        "values": ["{{TAG_VALUE}}"]
                    }
                }
            ]
        },
        "grouping": [
            {
                "type": "Dimension",
                "name": "ResourceGroup"
            },
            {
                "type": "Tag"
            }
        ]
    }
}
```

Response

```json
{
    "id": "subscriptions/xxxxxxxxxxxxx/providers/Microsoft.CostManagement/query/ec1f0eae-9343-4f4e-a0c0-dd219a66efc4",
    "name": "ec1f0eae-9343-4f4e-a0c0-dd219a66efc4",
    "type": "Microsoft.CostManagement/query",
    "location": null,
    "sku": null,
    "eTag": null,
    "properties": {
        "nextLink": null,
        "columns": [
            {
                "name": "PreTaxCost",
                "type": "Number"
            },
            {
                "name": "ResourceGroup",
                "type": "String"
            },
            {
                "name": "Tag",
                "type": "String"
            },
            {
                "name": "Currency",
                "type": "String"
            }
        ],
        "rows": [
            [
                0.00055748658857886549,
                "rg-mytre",
                "",
                "USD"
            ],
            [
                3.9306515711531222,
                "rg-mytre",
                "\"tre_core_service\":\"mytre\"",
                "USD"
            ],
            [
                11.66335497490112,
                "rg-mytre",
                "\"tre_id\":\"mytre\"",
                "USD"
            ],
            [
                0.033,
                "rg-mytre",
                "\"tre_shared_service\":\"2fea\"",
                "USD"
            ],
            [
                3.62175702964083,
                "rg-mytre",
                "\"tre_shared_service\":\"4a5b\"",
                "USD"
            ],
            [
                0.093523886643774659,
                "rg-mytre-ws-5e86",
                "\"tre_user_resource_id\":\"126d\"",
                "USD"
            ],
            [
                0.078465743393993009,
                "rg-mytre-ws-5e86",
                "\"tre_user_resource_id\":\"2627\"",
                "USD"
            ],
            [
                0.20275980676694544,
                "rg-mytre-ws-5e86",
                "\"tre_user_resource_id\":\"319e\"",
                "USD"
            ],
            [
                0.17165788614506686,
                "rg-mytre-ws-af30",
                "\"tre_user_resource_id\":\"3370\"",
                "USD"
            ],
            [
                0.17518017912599823,
                "rg-mytre-ws-af30",
                "\"tre_user_resource_id\":\"b2be\"",
                "USD"
            ],
            [
                0.00015748658857886549,
                "rg-mytre-ws-5e86",
                "",
                "USD"
            ],
            [
                0.39905729127776768,
                "rg-mytre-ws-5e86",
                "\"tre_workspace_id\":\"5e86\"",
                "USD"
            ],
            [
                0.98272254462049369,
                "rg-mytre-ws-af30",
                "\"tre_workspace_id\":\"af30\"",
                "USD"
            ],
            [
                0.17198963003776765,
                "rg-mytre-ws-5e86",
                "\"tre_workspace_service\":\"8d0a\"",
                "USD"
            ],
            [
                0.54959787203801058,
                "rg-mytre-ws-af30",
                "\"tre_workspace_service\":\"e70d\"",
                "USD"
            ]
        ]
    }
}
```
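To make that join concrete, here is a small illustrative sketch (not the actual API code) that folds flat rows shaped like the response above into per-workspace totals, attributing untagged rows via a resource-group lookup:

```python
from collections import defaultdict

# Illustrative only: each tuple is (cost, resource_group, tag, currency),
# mirroring the shape of the Cost Management rows in the response above.
rows = [
    (11.66, "rg-mytre", '"tre_id":"mytre"', "USD"),
    (0.39, "rg-mytre-ws-5e86", '"tre_workspace_id":"5e86"', "USD"),
    (0.0001, "rg-mytre-ws-5e86", "", "USD"),  # untagged cost in a workspace RG
]

# Hypothetical mapping from resource group to workspace id; in the real
# flow this comes from the Azure Resource Manager call described above.
rg_to_workspace = {"rg-mytre-ws-5e86": "5e86"}

workspace_costs = defaultdict(float)
for cost, rg, tag, _currency in rows:
    if tag.startswith('"tre_workspace_id"'):
        workspace_costs[tag.split(":")[1].strip('"')] += cost
    elif tag == "" and rg in rg_to_workspace:
        # roll untagged costs up into the owning workspace
        workspace_costs[rg_to_workspace[rg]] += cost

print(dict(workspace_costs))  # {'5e86': 0.3901}
```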
AzureTRE/docs/azure-tre-overview/cost-reporting.md/0
{ "file_path": "AzureTRE/docs/azure-tre-overview/cost-reporting.md", "repo_id": "AzureTRE", "token_count": 4830 }
104
# Manually creating Microsoft Entra ID identities

This guide is here if you want to create these Application Registrations manually. These should be created in order (if applicable) as some applications are then granted permission to other applications.

## Manual Creation Guides

| Application | Comments |
| ----------- | ---- |
| [Application Admin](./application_admin.md) | This is required |
| [TRE API](./api.md) | This is required |
| [UI Client](./client.md) | This is created when you create the TRE API. |
| [Automation Account](./test-account.md) | This is optional |
| [Workspace](./workspace.md) | You need one of these per Workspace if you wish to have different users in each workspace. |
AzureTRE/docs/tre-admins/identities/auth-manual.md/0
{ "file_path": "AzureTRE/docs/tre-admins/identities/auth-manual.md", "repo_id": "AzureTRE", "token_count": 178 }
105
# Setup Auth configuration

Next, you will set the configuration variables for the specific Azure TRE instance:

1. Open the `/config.sample.yaml` file and then save it without the .sample extension. You should now have a file called `config.yaml` located in the root folder. The file contains configuration variables. In this part you will add the configuration required for the shared management infrastructure which is used to support the deployment of one or more Azure TRE instances.

1. Provide the values for the following variables under the management section in your `config.yaml` file:

    | Variable | Description |
    | -------- | ----------- |
    | `location` | The [Azure location (region)](https://azure.microsoft.com/global-infrastructure/geographies/#geographies) for all resources. E.g., `westeurope` |
    | `mgmt_resource_group_name` | The shared resource group for all management resources, including the storage account. |
    | `mgmt_storage_account_name` | The name of the storage account to hold the Terraform state and other deployment artifacts. |
    | `acr_name` | A globally unique name for the [Azure Container Registry (ACR)](https://docs.microsoft.com/azure/container-registry/) that will be created to store deployment images. |
    | `arm_subscription_id` | The Azure subscription ID for all resources. |

    !!! tip
        To retrieve your Azure subscription ID, use the `az` command line interface available in the development container. In the terminal window in Visual Studio Code, type `az login` followed by `az account show` to see your default subscription. Please refer to `az account --help` for further details on how to change your active subscription.

    The rest of the variables can have their default values.

1. Decide on a name for your `tre_id`, the ID for the Azure TRE instance. The value will be used in various Azure resources and Microsoft Entra ID application names. It **needs to be globally unique and less than 12 characters in length**. Use **only** lowercase alphanumerics. Choose wisely!

1. Once you have decided on which AD tenant paradigm to use, you should be able to set `aad_tenant_id` in the authentication section in your `config.yaml` file.

1. Your Microsoft Entra ID Tenant Admin can now use the terminal window in Visual Studio Code to execute the following script from within the development container to create all the Microsoft Entra ID Applications that are used for TRE. The details of the script are covered in the [auth document](../auth.md).

    ```bash
    make auth
    ```

    !!! note
        Credentials created by the `make auth` command will be added under the authentication section in your `config.yaml` file

    !!! note
        In case you have several subscriptions and would like to change your default subscription use `az account set --subscription <desired subscription ID>`

    !!! note
        The full functionality of the script requires directory admin privileges. You may need to contact your friendly Microsoft Entra ID admin to complete this step. The app registrations can be created manually in the Azure Portal too. For more information, see [Authentication and authorization](../auth.md).

All other variables can have their default values for now.

## Add admin user

Make sure the **TRE Administrators** and **TRE Users** roles, defined by the API app registration, are assigned to your user and others as required. See [Enabling users](../auth.md#enabling-users) for instructions.
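As a final sanity check of `config.yaml` before deploying, a small illustrative script (assumes PyYAML is installed; the section and key names mirror the tables above and are not authoritative):

```python
import yaml  # pip install pyyaml

# Keys from the management table above; illustrative, not an official list.
REQUIRED_MGMT_KEYS = [
    "location",
    "mgmt_resource_group_name",
    "mgmt_storage_account_name",
    "acr_name",
    "arm_subscription_id",
]

with open("config.yaml") as f:
    config = yaml.safe_load(f)

# Assumes the variables live under a `management:` section, as in config.sample.yaml.
mgmt = config.get("management", {})
missing = [k for k in REQUIRED_MGMT_KEYS if not mgmt.get(k)]
if missing:
    raise SystemExit(f"config.yaml is missing management values: {missing}")
print("management section looks complete")
```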
AzureTRE/docs/tre-admins/setup-instructions/setup-auth-entities.md/0
{ "file_path": "AzureTRE/docs/tre-admins/setup-instructions/setup-auth-entities.md", "repo_id": "AzureTRE", "token_count": 853 }
106
# Resource Processor (VMSS)

Resource Processor is the Azure TRE component automating [Porter](https://porter.sh) bundle deployments. It hosts Porter and its dependencies.

This page is a guide for a developer looking to make a change to the Resource Processor and debug it.

## Overview

The logic in Resource Processor is written in Python. The Resource Processor implementation is located in the [`resource_processor` folder](https://github.com/microsoft/AzureTRE/blob/main/resource_processor/) of the repository.

Read [how a workspace is provisioned using Porter](./../azure-tre-overview/architecture.md#provisioning-a-workspace)

## Local debugging

To set up local debugging, first run, if you haven't done so already (make sure `ENABLE_LOCAL_DEBUGGING` is set to `true` in your `.env` file):

```cmd
az login
make setup-local-debugging
```

This will allowlist your local IP against Azure resources and create a Service Principal for the Resource Processor.

Next, disable the existing Resource Processor from running in your deployment. The easiest way to do this is to stop the VM scale set:

[![Stop RP VM scale set](../assets/rp_stop_vm_scale_set.png)](../assets/rp_stop_vm_scale_set.png)

Now, go to the "Run and Debug" panel in VSCode, and select Resource Processor.

[![VSCode RP debugging screenshot](../assets/rp_local_debugging_vscode_screenshot.png)](../assets/rp_local_debugging_vscode_screenshot.png)

!!! info
    If you get a credential error when trying to connect to Service Bus, make sure you've authenticated in the AZ CLI first as it uses your local credentials.

!!! info
    If you get an error similar to `Environment variable 'ARM_CLIENT_ID' is not set correctly`, make sure you have run `make setup-local-debugging`

You can use an API instance deployed in your environment to create deployment requests, and debug your locally running Resource Processor. For more information on how to use the API, refer to the [API documentation](./api.md#using-swagger-ui).

## Cloud instance

In the Azure Portal, find a virtual machine scale set named `vmss-rp-porter-${TRE_ID}`.

### Connect to the Resource Processor terminal

The processor runs in a VNET, and you cannot connect to it directly. To SSH to this instance, use Bastion.

1. Navigate to the VMSS instance named `vmss-rp-porter-${TRE_ID}` in the Azure Portal.
1. Click on `Instances` in the left menu.
1. Click on an instance name.
1. Click on `Connect -> Bastion` in the top menu.

    ![Bastion](../assets/bastion.png "Bastion")

1. Set `Authentication type` to `Password from Azure Key Vault`.
1. Set Username to `adminuser`.
1. Set the Key Vault to `kv-${TRE_ID}` and the Azure Key Vault Secret to `resource-processor-vmss-password`. If you don't have permissions to see the secret, add yourself to the Access Policy of this keyvault with a permission to read secrets:

    [![Keyvault access policy](../assets/rp_kv_access_policy.png)](../assets/rp_kv_access_policy.png)

1. Click `Connect`.

### Getting container logs

1. SSH into the Resource Processor VM as described above.
1. Check the status of the container using `docker ps`. If you see nothing (and the container was pulled) then the processor has either not started yet or it has crashed.
1. Get the logs from the container using the `docker logs <container_id>` command.

### Starting container manually

1. Find the **runner_image:tag** by running ``docker ps``
1. Execute the following command from the root (/) of the file system

```cmd
sudo docker run -v /var/run/docker.sock:/var/run/docker.sock --env-file .env --name resource_processor_vmss_porter_debug [runner_image:tag]
```

!!! info
    If you start a container manually you will probably want to install software, for example, an editor. However, the firewall blocks all ingress traffic, so you cannot run `sudo apt update`. You need to add an override rule in the firewall to allow the traffic.

!!! caution
    Remember to remove this rule when debugging is done.

## Troubleshooting

### No container logs on the instance

1. If you don't see container logs, you should check the status of **cloud-init**, which is used to bootstrap the machine with docker and start the processor. Log files for cloud-init are:
    - `/var/log/cloud-init.log`
    - `/var/log/cloud-init-output.log`

    If the Docker container is pulled as shown in the logs then the resource processor should start.

1. Check the status of all Docker processes using `docker ps -a`, which should show you if the container terminated prematurely.

## Implementation details

### Porter

Azure TRE needed a solution for implementing and deploying workspaces and workspace services with the following properties:

* Means for packaging and versioning workspaces and workspace services
* Providing a unified structure for deployment definitions (scripts, pipelines) so that the process can be easily automated
* Solid developer experience - easy to use and learn

Porter meets all these requirements well. Porter packages a cloud application into a versioned, self-contained Docker container called a Porter bundle.

<!-- markdownlint-disable MD013 -->
The CNAB spec defines actions that Porter [implements](https://porter.sh/author-bundles/#bundle-actions): **install, upgrade and uninstall**. The developer has practically complete freedom on how to implement the logic for these actions. The deployment pipeline definition is created in YAML. The YAML file is called the [Porter manifest](https://porter.sh/author-bundles/) and, in addition to the actions, it contains the name, version and description of the bundle and defines the input parameters, possible credentials and output. Furthermore, Porter provides a set of [mixins](https://porter.sh/mixins/) - analogous to the concrete actions in GitHub workflows and tasks in Azure DevOps pipelines - which simplify and reduce the development cost when implementing deployment logic. For example, the Terraform mixin installs the required tools and provides a clean step in the pipeline to execute Terraform deployments. The [Exec mixin](https://porter.sh/mixins/exec/) allows running any command or script; especially useful if no suitable mixin for a specific technology is available. Implementing custom mixins is possible too.
<!-- markdownlint-enable MD013 -->

### Porter Azure plugin

Resource Processor uses the [Porter Azure plugin](https://github.com/getporter/azure-plugins) to access secrets in Azure Key Vault.

### Porter bundle inputs

When Porter runs bundle actions, it passes input parameters. The full set of inputs that Porter passes can be found in [config.py](https://github.com/microsoft/AzureTRE/blob/main/resource_processor/shared/config.py).

!!! info
    Note that Resource Processor does not pass any location-related attributes when running bundle actions. Instead, a `location` attribute is passed from the API. This is so that different TRE resources could potentially be deployed to different regions.
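For a feel of what those inputs look like in code, a stripped-down, illustrative sketch of assembling bundle parameters from the environment (the authoritative list lives in `resource_processor/shared/config.py`; the names here are examples only, not the real set):

```python
import os

def get_porter_parameters() -> dict:
    """Illustrative only: collect a couple of the inputs the Resource
    Processor could pass to Porter when running a bundle action."""
    return {
        "tre_id": os.environ["TRE_ID"],
        "arm_use_msi": os.environ.get("ARM_USE_MSI", "false"),
        # `location` is intentionally absent: per the note above, it
        # arrives from the API per-resource, not from the processor's env.
    }

if __name__ == "__main__":
    os.environ.setdefault("TRE_ID", "mytre-dev-3142")
    print(get_porter_parameters())
```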
AzureTRE/docs/tre-developers/resource-processor.md/0
{ "file_path": "AzureTRE/docs/tre-developers/resource-processor.md", "repo_id": "AzureTRE", "token_count": 1785 }
107
# Guacamole Service bundle

See: [https://guacamole.apache.org/](https://guacamole.apache.org/)

## Firewall Rules

Please be aware that the following Firewall rules are opened for the workspace when this service is deployed:

Service Tags:

- AzureActiveDirectory

## Prerequisites

- [A base workspace bundle installed](../workspaces/base.md)

## Guacamole Workspace Service Configuration

When deploying a Guacamole service into a workspace, the following properties need to be configured.

### Optional Properties

| Property | Options | Description |
| -------- | ------- | ----------- |
| `guac_disable_copy` | `true`/`false` (Default: `true`) | Disable Copy functionality |
| `guac_disable_paste` | `true`/`false` (Default: `false`) | Disable Paste functionality |
| `guac_enable_drive` | `true`/`false` (Default: `true`) | Enable mounted drive |
| `guac_disable_download` | `true`/`false` (Default: `true`) | Disable files download |
| `guac_disable_upload` | `true`/`false` (Default: `true`) | Disable files upload |
| `is_exposed_externally` | `true`/`false` (Default: `true`) | Is the Guacamole service exposed outside of the vnet |
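For illustration, a request body that sets a few of these properties when creating the service might look like this (sketch only: the template name, host, workspace ID and token are placeholders, and the endpoint shape follows the API examples elsewhere in these docs):

```python
import requests  # illustrative; any HTTP client works

# Hypothetical values; replace with your TRE host, workspace id and token.
payload = {
    "templateName": "tre-service-guacamole",  # assumed template name
    "properties": {
        "display_name": "Virtual Desktops",
        "guac_disable_copy": True,
        "guac_disable_paste": False,
        "is_exposed_externally": True,
    },
}
resp = requests.post(
    "https://mytre.westeurope.cloudapp.azure.com/api/workspaces/<workspace_id>/workspace-services",
    json=payload,
    headers={"Authorization": "Bearer <token>"},
    timeout=30,
)
print(resp.status_code)
```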
AzureTRE/docs/tre-templates/workspace-services/guacamole.md/0
{ "file_path": "AzureTRE/docs/tre-templates/workspace-services/guacamole.md", "repo_id": "AzureTRE", "token_count": 345 }
108
# Checking the logs in App Insights

Every component of TRE should send its trace logs to the App Insights logging backend, and you should be able to see the process flow by using a suitable query.

!!! note
    AppTraces can take time to appear so be patient.

To find logs in Application Insights, go to your resource group, then to the Application Insights instance, which is named like `appi-${TRE_ID}`. Click on *Logs* in the left menu. Under Queries, select either `TRE Resource Processor Logs` or `TRE API Logs`.

![App Insights Queries](app_insights_queries.png)

A manual query can also be created, such as:

```kusto
traces
| where cloud_RoleName == "resource_processor"
| order by timestamp desc
```

## Check the logs for a specific deployment

You can run the following example query to get the logs for a specific deployment.

```kusto
let tracking_id="<workspace_id>";
AppTraces
| where Message has tracking_id or OperationId == tracking_id
| sort by TimeGenerated desc
```

[![App Insights](../assets/app_insights.png)](../assets/app_insights.png)

For a successful deployment you should see a message similar to:

```text
Received deployment status update message with correlation ID 70b09db1-30c4-475c-b1a1-8f9599a5a2f4: {'id': '70b09db1-30c4-475c-b1a1-8f9599a5a2f4', 'status': 'deployed', 'message': 'cse-msr-dev-b7f6: Workspace was deployed successfully...'}
```

It should also be evident from the message flow where the current processing is stuck or failed. A failed deployment status should also be available in the `GET /api/workspaces/{workspace_id}` response, which is just another way to confirm it.
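If you prefer to run such queries outside the portal, a minimal sketch using the `azure-monitor-query` package (the Log Analytics workspace GUID and tracking ID are placeholders):

```python
from datetime import timedelta

from azure.identity import DefaultAzureCredential
from azure.monitor.query import LogsQueryClient

client = LogsQueryClient(DefaultAzureCredential())

tracking_id = "<workspace_id>"  # placeholder, as in the query above
query = f"""
AppTraces
| where Message has '{tracking_id}' or OperationId == '{tracking_id}'
| sort by TimeGenerated desc
"""

# Note: this is the Log Analytics workspace GUID, not the TRE workspace id.
response = client.query_workspace("<log-analytics-workspace-id>", query, timespan=timedelta(days=1))
for table in response.tables:
    for row in table.rows:
        print(row)
```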
AzureTRE/docs/troubleshooting-faq/app-insights-logs.md/0
{ "file_path": "AzureTRE/docs/troubleshooting-faq/app-insights-logs.md", "repo_id": "AzureTRE", "token_count": 490 }
109
# API parameters
# --------------
# These keys should be copied into /workspaces/AzureTRE/e2e_tests/.env for you
# TRE_ID=cse-msr-dev
# AAD_TENANT_ID=<auth tenant id>
# API_CLIENT_ID=<Client id of the API app. This is defined here - /workspaces/AzureTRE/config.yaml>
# TEST_ACCOUNT_CLIENT_ID=<test user>
# TEST_ACCOUNT_CLIENT_SECRET=<test user password>
RESOURCE_LOCATION=westeurope
TEST_APP_ID=<client_id for the app that allows direct username and password login>
TEST_USER_NAME=<username for custom e2e tester user>
TEST_USER_PASSWORD=<password for custom e2e tester user>
TEST_WORKSPACE_APP_ID=<workspace app registration client id>
TEST_WORKSPACE_APP_SECRET=<workspace app registration client secret>

# TODO: move to RP default with https://github.com/microsoft/AzureTRE/pull/2634
WORKSPACE_APP_SERVICE_PLAN_SKU="P1v2"

TEST_WORKSPACE_ID=<ID of pre-created workspace>
TEST_AAD_WORKSPACE_ID=<ID of pre-created Microsoft Entra ID workspace>

# Run tests sequentially. Change this value if you want to run tests in parallel locally
E2E_TESTS_NUMBER_PROCESSES=1
AzureTRE/e2e_tests/.env.sample/0
{ "file_path": "AzureTRE/e2e_tests/.env.sample", "repo_id": "AzureTRE", "token_count": 378 }
110
# Retrieve Bearer token
# @name auth
POST https://login.microsoftonline.com/<CHANGE_ME_TENANT_ID>/oauth2/token HTTP/1.1
Content-type: application/x-www-form-urlencoded

grant_type=password
&resource=__CHANGE_ME_
&username=__CHANGE_ME_
&password=__CHANGE_ME_
&scope=__CHANGE_ME_
&client_id=__CHANGE_ME_

#############################
# Create workspace template

### Get all workspace templates
GET https://cse-msr-dev.westeurope.cloudapp.azure.com/api/workspace-templates
Authorization: Bearer {{auth.response.body.access_token}}
Content-type: application/json

### Create a new workspace with an id
# @name newworkspace
POST https://cse-msr-dev.westeurope.cloudapp.azure.com/api/workspaces
Authorization: Bearer {{auth.response.body.access_token}}
Content-type: application/json

{
  "templateName": "tre-workspace-base",
  "properties": {
    "display_name": "E2E test",
    "description": "workspace for E2E",
    "client_id": "<CHANGE_ME_CLIENT_ID>",
    "address_space_size": "small"
  }
}

### Get workspace info for the provisioned workspace
GET https://cse-msr-dev.westeurope.cloudapp.azure.com/api/workspaces/{{newworkspace.response.body.workspaceId}}
Authorization: Bearer {{auth.response.body.access_token}}
Content-type: application/json

### Disable workspace
PATCH https://cse-msr-dev.westeurope.cloudapp.azure.com/api/workspaces/{{newworkspace.response.body.workspaceId}}
Authorization: Bearer {{auth.response.body.access_token}}
Content-type: application/json

{
  "enabled": false
}

### Delete workspace
DELETE https://cse-msr-dev.westeurope.cloudapp.azure.com/api/workspaces/{{newworkspace.response.body.workspaceId}}
Authorization: Bearer {{auth.response.body.access_token}}
Content-type: application/json

### Create a workspace service template
POST https://cse-msr-dev.westeurope.cloudapp.azure.com/api/workspace-service-templates
Authorization: Bearer {{auth.response.body.access_token}}
Content-type: application/json

{
  "name": "e2e-test-workspace-service",
  "version": "0.0.1",
  "current": "true",
  "json_schema": {
    "$schema": "http://json-schema.org/draft-07/schema",
    "$id": "https://github.com/microsoft/AzureTRE/templates/workspaces/myworkspace/workspace_service.json",
    "type": "object",
    "title": "DONOTUSE - E2E workspace service",
    "description": "DO NOT USE",
    "required": [],
    "properties": {}
  }
}
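An equivalent of the workspace-creation call in Python, for scripting outside the REST client (the token acquisition and host are placeholders; the `workspaceId` field in the response mirrors the `newworkspace.response.body.workspaceId` reference above):

```python
import requests

API = "https://cse-msr-dev.westeurope.cloudapp.azure.com/api"
TOKEN = "<bearer-token>"  # acquired as in the auth request above
HEADERS = {"Authorization": f"Bearer {TOKEN}", "Content-Type": "application/json"}

workspace = {
    "templateName": "tre-workspace-base",
    "properties": {
        "display_name": "E2E test",
        "description": "workspace for E2E",
        "client_id": "<CHANGE_ME_CLIENT_ID>",
        "address_space_size": "small",
    },
}

resp = requests.post(f"{API}/workspaces", json=workspace, headers=HEADERS, timeout=30)
resp.raise_for_status()
print(resp.json()["workspaceId"])
```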
AzureTRE/e2e_tests/test.http/0
{ "file_path": "AzureTRE/e2e_tests/test.http", "repo_id": "AzureTRE", "token_count": 849 }
111
__version__ = "0.8.4"
AzureTRE/resource_processor/_version.py/0
{ "file_path": "AzureTRE/resource_processor/_version.py", "repo_id": "AzureTRE", "token_count": 12 }
112
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/azurerm" {
  version     = "3.23.0"
  constraints = "3.23.0"
  hashes = [
    "h1:gL/GB7M9xFNr8SxyalWCkTUaYach2k1/0voy6hAqA0A=",
    "zh:5856ee393eea2c5807d71794020ec16492b6a219c50fec816376c9c4c50d964a",
    "zh:58883757371208106ae56b591759a7c308d9a3fd74fb38aefaebe00fee4e380f",
    "zh:69aca3a6929e629d6fe1dc0c0ff105ee943e5d0f95fe0d8abb89965d5dbc07ce",
    "zh:6cf146b28a5b82da39ee9b5def88e649ed831824b4e669aa60884743d4b08200",
    "zh:756dfe6d54e54879bed340f0e05e72a3882a7e8f8f2e783b2064d8c27c9fa1c1",
    "zh:76ed418c25160fe0be9158c82c425375d79e2427f4a8adbac061d4540c56de79",
    "zh:8c4f42f67157619fdd21525049f5daf9eb4eb3681b43603570a65bd5d52939ff",
    "zh:ae15665e0ebe6cb40894cd6b7b7345fe4a7d46ca024fc209e03caee3dff6b51d",
    "zh:ba21ef2d27e6f96fccfa234201151e0ad195ec798cb5486fa70664e0f707b48a",
    "zh:d0d9b21b65c7b110e599b78abfdc90cec0bafe2d1b1e0bb36c3a3d5d12ad63b4",
    "zh:ea02b2e6de9ea4184db300903d75871b16b7e240f6b38c46f8b149878f47c739",
    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
  ]
}

provider "registry.terraform.io/hashicorp/random" {
  version     = "3.4.3"
  constraints = "3.4.3"
  hashes = [
    "h1:xZGZf18JjMS06pFa4NErzANI98qi59SEcBsOcS2P2yQ=",
    "zh:41c53ba47085d8261590990f8633c8906696fa0a3c4b384ff6a7ecbf84339752",
    "zh:59d98081c4475f2ad77d881c4412c5129c56214892f490adf11c7e7a5a47de9b",
    "zh:686ad1ee40b812b9e016317e7f34c0d63ef837e084dea4a1f578f64a6314ad53",
    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
    "zh:84103eae7251384c0d995f5a257c72b0096605048f757b749b7b62107a5dccb3",
    "zh:8ee974b110adb78c7cd18aae82b2729e5124d8f115d484215fd5199451053de5",
    "zh:9dd4561e3c847e45de603f17fa0c01ae14cae8c4b7b4e6423c9ef3904b308dda",
    "zh:bb07bb3c2c0296beba0beec629ebc6474c70732387477a65966483b5efabdbc6",
    "zh:e891339e96c9e5a888727b45b2e1bb3fcbdfe0fd7c5b4396e4695459b38c8cb1",
    "zh:ea4739860c24dfeaac6c100b2a2e357106a89d18751f7693f3c31ecf6a996f8d",
    "zh:f0c76ac303fd0ab59146c39bc121c5d7d86f878e9a69294e29444d4c653786f8",
    "zh:f143a9a5af42b38fed328a161279906759ff39ac428ebcfe55606e05e1518b93",
  ]
}
AzureTRE/templates/shared_services/admin-vm/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/templates/shared_services/admin-vm/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 1362 }
113
--- schemaVersion: 1.0.0 name: tre-shared-service-airlock-notifier version: 0.9.0 description: "A shared service notifying on Airlock Operations" registry: azuretre dockerfile: Dockerfile.tmpl credentials: - name: azure_tenant_id env: ARM_TENANT_ID - name: azure_subscription_id env: ARM_SUBSCRIPTION_ID - name: azure_client_id env: ARM_CLIENT_ID - name: azure_client_secret env: ARM_CLIENT_SECRET parameters: - name: tre_id type: string description: "The ID of the parent TRE instance e.g., mytre-dev-3142" - name: tre_url type: string description: "The URL of the parent TRE instance, e.g. https://mytre-dev-3142.uksouth.cloudapp.com" default: "" - name: id type: string description: "Resource ID" - name: azure_environment type: string default: "AzureCloud" description: "Used by Azure CLI to set the Azure environment" - name: tfstate_resource_group_name type: string description: "Resource group containing the Terraform state storage account" - name: tfstate_storage_account_name type: string description: "The name of the Terraform state storage account" - name: tfstate_container_name type: string default: "tfstate" description: "The name of the Terraform state storage container" - name: smtp_server_address env: SMTP_SERVER_ADDRESS type: string - name: smtp_server_port env: SMTP_SERVER_PORT type: string default: "25" - name: smtp_username env: SMTP_USERNAME type: string - name: smtpPassword env: SMTP_PASSWORD type: string sensitive: true - name: smtp_server_enable_ssl env: SMTP_SERVER_ENABLE_SSL type: boolean default: true - name: smtp_from_email env: SMTP_FROM_EMAIL type: string - name: arm_use_msi env: ARM_USE_MSI type: boolean default: false - name: arm_environment env: ARM_ENVIRONMENT type: string default: "public" mixins: - exec - az: clientVersion: 2.37.0 - terraform: clientVersion: 1.4.6 install: - terraform: description: "Deploy shared service" vars: tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } tre_url: ${ bundle.parameters.tre_url } smtp_server_address: ${ bundle.parameters.smtp_server_address } smtp_server_port: ${ bundle.parameters.smtp_server_port } smtp_username: ${ bundle.parameters.smtp_username } smtp_password: ${ bundle.parameters.smtpPassword } smtp_server_enable_ssl: ${ bundle.parameters.smtp_server_enable_ssl } smtp_from_email: ${ bundle.parameters.smtp_from_email } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: ${ bundle.parameters.tre_id }-shared-airlock-notifier outputs: - name: airlock_notifier_logic_app_name - name: airlock_notifier_logic_app_resource_group_name - az: description: "Set Azure Cloud Environment" arguments: - cloud - set flags: name: ${ bundle.parameters.azure_environment } - az: description: "Login to Azure" arguments: - login flags: identity: username: ${ bundle.credentials.azure_client_id } - az: arguments: - resource - update flags: resource-group: ${ bundle.outputs.airlock_notifier_logic_app_resource_group_name } name: scm namespace: Microsoft.Web resource-type: basicPublishingCredentialsPolicies parent: sites/${ bundle.outputs.airlock_notifier_logic_app_name } set: "properties.allow=true" - exec: description: Wait for SCM Auth settings to kick in command: sleep arguments: - "60" - az: description: "Deploy logic app" arguments: - functionapp - deployment - source - config-zip flags: name: ${ bundle.outputs.airlock_notifier_logic_app_name } 
resource-group: ${ bundle.outputs.airlock_notifier_logic_app_resource_group_name } subscription: ${ bundle.credentials.azure_subscription_id } src: /cnab/app/LogicApp.zip upgrade: - exec: description: "Upgrade shared service" command: echo arguments: - "This shared service does not implement upgrade action" uninstall: - terraform: description: "Tear down shared service" vars: tre_id: ${ bundle.parameters.tre_id } tre_resource_id: ${ bundle.parameters.id } tre_url: ${ bundle.parameters.tre_url } smtp_server_address: ${ bundle.parameters.smtp_server_address } smtp_server_port: ${ bundle.parameters.smtp_server_port } smtp_username: ${ bundle.parameters.smtp_username } smtp_password: ${ bundle.parameters.smtpPassword } smtp_server_enable_ssl: ${ bundle.parameters.smtp_server_enable_ssl } smtp_from_email: ${ bundle.parameters.smtp_from_email } backendConfig: resource_group_name: ${ bundle.parameters.tfstate_resource_group_name } storage_account_name: ${ bundle.parameters.tfstate_storage_account_name } container_name: ${ bundle.parameters.tfstate_container_name } key: ${ bundle.parameters.tre_id }-shared-airlock-notifier
AzureTRE/templates/shared_services/airlock_notifier/porter.yaml/0
{ "file_path": "AzureTRE/templates/shared_services/airlock_notifier/porter.yaml", "repo_id": "AzureTRE", "token_count": 2237 }
114
---
schemaVersion: 1.0.0
name: tre-shared-service-certs
version: 0.5.1
description: "An Azure TRE shared service to generate certificates for a specified internal domain using Letsencrypt"
registry: azuretre
dockerfile: Dockerfile.tmpl

credentials:
  - name: azure_tenant_id
    env: ARM_TENANT_ID
  - name: azure_subscription_id
    env: ARM_SUBSCRIPTION_ID
  - name: azure_client_id
    env: ARM_CLIENT_ID
  - name: azure_client_secret
    env: ARM_CLIENT_SECRET

parameters:
  - name: tre_id
    type: string
    description: "The ID of the parent TRE instance e.g., mytre-dev-3142"
  - name: azure_environment
    type: string
    default: "AzureCloud"
    description: "Used by Azure CLI to set the Azure environment"
  - name: tfstate_resource_group_name
    type: string
    description: "Resource group containing the Terraform state storage account"
  - name: tfstate_storage_account_name
    type: string
    description: "The name of the Terraform state storage account"
  - name: tfstate_container_name
    type: string
    default: "tfstate"
    description: "The name of the Terraform state storage container"
  - name: arm_use_msi
    env: ARM_USE_MSI
    type: boolean
    default: false
  - name: arm_environment
    env: ARM_ENVIRONMENT
    type: string
    default: "public"
  - name: domain_prefix
    type: string
    description: "The FQDN prefix (prepended to {TRE_ID}.{LOCATION}.cloudapp.azure.com) to generate certificate for"
  - name: cert_name
    type: string
    description: "What to call the certificate exported to KeyVault (alphanumeric and '-' only)"
  - name: id
    type: string
    description: "Resource ID"

mixins:
  - exec
  - terraform:
      clientVersion: 1.3.6
  - az:
      clientVersion: 2.37.0

install:
  - terraform:
      description: "Deploy shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        domain_prefix: ${ bundle.parameters.domain_prefix }
        cert_name: ${ bundle.parameters.cert_name }
        tre_resource_id: ${ bundle.parameters.id }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-certs
      outputs:
        - name: fqdn
        - name: application_gateway_name
        - name: storage_account_name
        - name: resource_group_name
        - name: keyvault_name
  - az:
      description: "Set Azure Cloud Environment"
      arguments:
        - cloud
        - set
      flags:
        name: ${ bundle.parameters.azure_environment }
  - az:
      description: "Login to Azure"
      arguments:
        - login
      flags:
        identity:
        username: ${ bundle.credentials.azure_client_id }
  - exec:
      description: "Generate certificate"
      command: bash
      arguments:
        - ./scripts/letsencrypt.sh
      flags:
        fqdn: ${ bundle.outputs.fqdn }
        application_gateway_name: ${ bundle.outputs.application_gateway_name }
        storage_account_name: ${ bundle.outputs.storage_account_name }
        resource_group_name: ${ bundle.outputs.resource_group_name }
        keyvault_name: ${ bundle.outputs.keyvault_name }
        cert_name: ${ bundle.parameters.cert_name }
  - az:
      description: "Stop application gateway"
      arguments:
        - network
        - application-gateway
        - stop
      flags:
        resource-group: ${ bundle.outputs.resource_group_name }
        name: ${ bundle.outputs.application_gateway_name }

upgrade:
  - exec:
      description: "Upgrade shared service"
      command: echo
      arguments:
        - "This shared service does not implement upgrade action"

uninstall:
  - terraform:
      description: "Tear down shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        domain_prefix: ${ bundle.parameters.domain_prefix }
        cert_name: ${ bundle.parameters.cert_name }
        tre_resource_id: ${ bundle.parameters.id }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-certs

renew:
  - terraform:
      arguments:
        - "output"
      description: "Get Terraform output variables"
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-certs
      outputs:
        - name: fqdn
        - name: application_gateway_name
        - name: storage_account_name
        - name: resource_group_name
        - name: keyvault_name
  - az:
      description: "Set Azure Cloud Environment"
      arguments:
        - cloud
        - set
      flags:
        name: ${ bundle.parameters.azure_environment }
  - az:
      description: "Login to Azure"
      arguments:
        - login
      flags:
        identity:
        username: ${ bundle.credentials.azure_client_id }
  - az:
      description: "Start application gateway"
      arguments:
        - network
        - application-gateway
        - start
      flags:
        resource-group: ${ bundle.outputs.resource_group_name }
        name: ${ bundle.outputs.application_gateway_name }
  - exec:
      description: "Renew certificate"
      command: bash
      arguments:
        - ./scripts/letsencrypt.sh
      flags:
        fqdn: ${ bundle.outputs.fqdn }
        application_gateway_name: ${ bundle.outputs.application_gateway_name }
        storage_account_name: ${ bundle.outputs.storage_account_name }
        resource_group_name: ${ bundle.outputs.resource_group_name }
        keyvault_name: ${ bundle.outputs.keyvault_name }
        cert_name: ${ bundle.parameters.cert_name }
  - az:
      description: "Stop application gateway"
      arguments:
        - network
        - application-gateway
        - stop
      flags:
        resource-group: ${ bundle.outputs.resource_group_name }
        name: ${ bundle.outputs.application_gateway_name }
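# Usage sketch (illustrative only): with the Porter v1 CLI, and assuming a
# credential set named "azure" and a parameter set named "certs" have been
# created (both names and the image tag are assumptions, not defined in this
# bundle), an install and a later certificate renewal could look like:
#
#   porter install tre-shared-service-certs \
#     --reference azuretre/tre-shared-service-certs:v0.5.1 \
#     --credential-set azure --parameter-set certs
#
#   porter invoke tre-shared-service-certs --action renew \
#     --credential-set azure --parameter-set certs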
AzureTRE/templates/shared_services/certs/porter.yaml/0
{ "file_path": "AzureTRE/templates/shared_services/certs/porter.yaml", "repo_id": "AzureTRE", "token_count": 2574 }
115
{ "schemaType": "ParameterSet", "schemaVersion": "1.0.1", "namespace": "", "name": "tre-shared-service-cyclecloud", "parameters": [ { "name": "tre_id", "source": { "env": "TRE_ID" } }, { "name": "tfstate_container_name", "source": { "env": "TERRAFORM_STATE_CONTAINER_NAME" } }, { "name": "tfstate_resource_group_name", "source": { "env": "MGMT_RESOURCE_GROUP_NAME" } }, { "name": "tfstate_storage_account_name", "source": { "env": "MGMT_STORAGE_ACCOUNT_NAME" } }, { "name": "id", "source": { "env": "ID" } }, { "name": "azure_environment", "source": { "env": "AZURE_ENVIRONMENT" } }, { "name": "arm_environment", "source": { "env": "ARM_ENVIRONMENT" } } ] }
AzureTRE/templates/shared_services/cyclecloud/parameters.json/0
{ "file_path": "AzureTRE/templates/shared_services/cyclecloud/parameters.json", "repo_id": "AzureTRE", "token_count": 503 }
116
{ "$schema": "http://json-schema.org/draft-07/schema", "$id": "https://github.com/microsoft/AzureTRE/templates/shared_services/databricks-auth/template_schema.json", "type": "object", "title": "Azure Databricks Private Authentication Shared Service", "description": "Azure Databricks Private Authentication Shared Service", "required": [], "properties": { "display_name": { "type": "string", "title": "Name for the shared service", "description": "The name of the shared service to be displayed to users", "default": "Azure Databricks Private Authentication Shared Service", "updateable": true }, "description": { "type": "string", "title": "Description of the shared service", "description": "The description of the shared service to be displayed to users", "default": "Hosts the browser authentication private endpoint connection for your Azure Databricks workspaces.", "updateable": true }, "overview": { "type": "string", "title": "Overview for the shared service", "description": "Long form description of the shared service, in markdown syntax", "default": "Azure Databricks Private Authentication Shared Service is a Databricks workspace that in the same region as your Azure Databricks workspaces, and its only purpose is hosting the browser authentication private endpoint connection for your Azure Databricks workspace services.", "updateable": true } } }
AzureTRE/templates/shared_services/databricks-auth/template_schema.json/0
{ "file_path": "AzureTRE/templates/shared_services/databricks-auth/template_schema.json", "repo_id": "AzureTRE", "token_count": 455 }
117
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/azurerm" {
  version     = "3.53.0"
  constraints = "3.53.0"
  hashes = [
    "h1:bK70LV1NldhodSm58cUpawKwdUL1A5AKKglAV2wZ/QY=",
    "zh:078ece8318ad7d6c1cd2e5f2044188e74af63921b93223c7f8d477539fa91888",
    "zh:1bdc98ff8c2d3f3e81a746762e03d39794b2f5c90dc478cdb23dcc3d3f9947b6",
    "zh:20b51cfc0ffc4ff368e6eb2eaece0b6bb99ade09e4b91b3444b50e94fc54c119",
    "zh:233eed91279a9473825ba02d66487388d66dfc719b7249112d085dece0c2b594",
    "zh:397ac8194ecc2f8d34d42600d6bf9e20399b222170dc1443b5800db3135ebc99",
    "zh:3af3a2d8485d6c1ffcd26848af9ab087dfcb6cb045cc624e51f4db5144b53a9c",
    "zh:5d0b9a346b57cccc369e2076556274225ec7f1c9044a2503dcfd8c117cdc2f79",
    "zh:6e762dcef4ba14985f93af5f3fd195c9ee7d27de8de3bebdeefe761e53e79bb9",
    "zh:73f9be719aa867985b1744c1f4fab834d01eb2069ec7a78b3a1bfa87c8256a40",
    "zh:756deed30c20ffc9b4756c239e1675d3693f7175851e5ef946948a8bfb0b7935",
    "zh:c279f99902a45a5b88d25d609a73709d101af3ce71222efbab9d4706c8a538b4",
    "zh:f569b65999264a9416862bca5cd2a6177d94ccb0424f3a4ef424428912b9cb3c",
  ]
}
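# Sketch of how entries like the above are typically (re)generated; the
# commands are standard Terraform CLI, and the platform below is an
# illustrative assumption rather than something recorded in this file:
#   terraform init                                   # records/verifies hashes
#   terraform providers lock -platform=linux_amd64   # adds hashes per platform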
AzureTRE/templates/shared_services/firewall/terraform/.terraform.lock.hcl/0
{ "file_path": "AzureTRE/templates/shared_services/firewall/terraform/.terraform.lock.hcl", "repo_id": "AzureTRE", "token_count": 697 }
118
---
schemaVersion: 1.0.0
name: tre-shared-service-gitea
version: 1.0.1
description: "A Gitea shared service"
dockerfile: Dockerfile.tmpl
registry: azuretre

custom:
  runtime_image:
    name: gitea
    build:
      version_file: docker/version.txt
      docker_file: docker/Dockerfile
      docker_context: docker

credentials:
  - name: azure_tenant_id
    env: ARM_TENANT_ID
  - name: azure_subscription_id
    env: ARM_SUBSCRIPTION_ID
  - name: azure_client_id
    env: ARM_CLIENT_ID
  - name: azure_client_secret
    env: ARM_CLIENT_SECRET

parameters:
  - name: tre_id
    type: string
    description: "The ID of the parent TRE instance e.g., mytre-dev-3142"
  - name: id
    type: string
    description: "Resource ID"
  - name: mgmt_acr_name
    type: string
    description: "The name of the Azure Container Registry"
  - name: tfstate_resource_group_name
    type: string
    description: "Resource group containing the Terraform state storage account"
  - name: tfstate_storage_account_name
    type: string
    description: "The name of the Terraform state storage account"
  - name: tfstate_container_name
    type: string
    default: "tfstate"
    description: "The name of the Terraform state storage container"
  - name: arm_use_msi
    env: ARM_USE_MSI
    type: boolean
    default: false
  - name: arm_environment
    env: ARM_ENVIRONMENT
    type: string
    default: "public"
  - name: sql_sku
    type: string
    default: "B | 4GB 2vCores"

mixins:
  - terraform:
      clientVersion: 1.3.6

outputs:
  - name: gitea_allowed_fqdns_list
    type: string
    default: ""
    applyTo:
      - install
      - upgrade
  - name: address_prefixes
    type: string
    default: ""
    applyTo:
      - install
      - upgrade
  - name: connection_uri
    type: string
    applyTo:
      - install
      - upgrade
  - name: is_exposed_externally
    type: boolean
    applyTo:
      - install
      - upgrade

install:
  - terraform:
      description: "Deploy shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        tre_resource_id: ${ bundle.parameters.id }
        mgmt_resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        acr_name: ${ bundle.parameters.mgmt_acr_name }
        arm_environment: ${ bundle.parameters.arm_environment }
        sql_sku: ${ bundle.parameters.sql_sku }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-gitea
      outputs:
        - name: gitea_allowed_fqdns_list
        - name: address_prefixes
        - name: connection_uri
        - name: is_exposed_externally

upgrade:
  - terraform:
      description: "Upgrade shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        tre_resource_id: ${ bundle.parameters.id }
        mgmt_resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        acr_name: ${ bundle.parameters.mgmt_acr_name }
        arm_environment: ${ bundle.parameters.arm_environment }
        sql_sku: ${ bundle.parameters.sql_sku }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-gitea
      outputs:
        - name: gitea_allowed_fqdns_list
        - name: address_prefixes
        - name: connection_uri
        - name: is_exposed_externally

uninstall:
  - terraform:
      description: "Tear down shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        tre_resource_id: ${ bundle.parameters.id }
        mgmt_resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        acr_name: ${ bundle.parameters.mgmt_acr_name }
        arm_environment: ${ bundle.parameters.arm_environment }
        sql_sku: ${ bundle.parameters.sql_sku }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-gitea
AzureTRE/templates/shared_services/gitea/porter.yaml/0
{ "file_path": "AzureTRE/templates/shared_services/gitea/porter.yaml", "repo_id": "AzureTRE", "token_count": 1832 }
119
---
schemaVersion: 1.0.0
name: tre-shared-service-sonatype-nexus
version: 2.8.13
description: "A Sonatype Nexus shared service"
dockerfile: Dockerfile.tmpl
registry: azuretre

credentials:
  - name: azure_tenant_id
    env: ARM_TENANT_ID
  - name: azure_subscription_id
    env: ARM_SUBSCRIPTION_ID
  - name: azure_client_id
    env: ARM_CLIENT_ID
  - name: azure_client_secret
    env: ARM_CLIENT_SECRET

parameters:
  - name: tre_id
    type: string
    description: "The ID of the parent TRE instance e.g., mytre-dev-3142"
  - name: id
    type: string
    description: "Resource ID"
  - name: mgmt_acr_name
    type: string
    description: "The name of the Azure Container Registry"
  - name: tfstate_resource_group_name
    type: string
    description: "Resource group containing the Terraform state storage account"
  - name: tfstate_storage_account_name
    type: string
    description: "The name of the Terraform state storage account"
  - name: tfstate_container_name
    type: string
    default: "tfstate"
    description: "The name of the Terraform state storage container"
  - name: arm_use_msi
    env: ARM_USE_MSI
    type: boolean
    default: false
  - name: arm_environment
    env: ARM_ENVIRONMENT
    type: string
    default: "public"
  - name: ssl_cert_name
    type: string
    default: "nexus-ssl"
    description: "Name of the certificate for configuring Nexus SSL with (stored in the core KeyVault)"

outputs:
  - name: workspace_vm_allowed_fqdns_list
    type: string
    applyTo:
      - install
      - upgrade
  - name: nexus_allowed_fqdns_list
    type: string
    applyTo:
      - install
      - upgrade
  - name: private_ip_addresses
    applyTo:
      - install
      - upgrade
  - name: connection_uri
    type: string
    applyTo:
      - install
      - upgrade
  - name: is_exposed_externally
    type: boolean
    applyTo:
      - install
      - upgrade

mixins:
  - exec
  - terraform:
      clientVersion: 1.4.5

install:
  - terraform:
      description: "Deploy shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        tre_resource_id: ${ bundle.parameters.id }
        ssl_cert_name: ${ bundle.parameters.ssl_cert_name }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-sonatype-nexus-vm
      outputs:
        - name: workspace_vm_allowed_fqdns_list
        - name: nexus_allowed_fqdns_list
        - name: private_ip_addresses
        - name: connection_uri
        - name: is_exposed_externally

upgrade:
  - terraform:
      description: "Upgrade shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        tre_resource_id: ${ bundle.parameters.id }
        ssl_cert_name: ${ bundle.parameters.ssl_cert_name }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-sonatype-nexus-vm
      outputs:
        - name: workspace_vm_allowed_fqdns_list
        - name: nexus_allowed_fqdns_list
        - name: private_ip_addresses
        - name: connection_uri
        - name: is_exposed_externally

uninstall:
  - terraform:
      description: "Tear down shared service"
      vars:
        tre_id: ${ bundle.parameters.tre_id }
        tre_resource_id: ${ bundle.parameters.id }
        ssl_cert_name: ${ bundle.parameters.ssl_cert_name }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: ${ bundle.parameters.tre_id }-shared-service-sonatype-nexus-vm
AzureTRE/templates/shared_services/sonatype-nexus-vm/porter.yaml/0
{ "file_path": "AzureTRE/templates/shared_services/sonatype-nexus-vm/porter.yaml", "repo_id": "AzureTRE", "token_count": 1646 }
120
# GUID to identify the workspace service
ID=__CHANGE_ME__

# GUID to identify the workspace bundle
WORKSPACE_ID="__CHANGE_ME__"

# AML workspace settings
DISPLAY_NAME="__CHANGE_ME__"
DESCRIPTION="__CHANGE_ME__"
IS_EXPOSED_EXTERNALLY="false"
ADDRESS_SPACE="__CHANGE_ME__"
AzureTRE/templates/workspace_services/azureml/.env.sample/0
{ "file_path": "AzureTRE/templates/workspace_services/azureml/.env.sample", "repo_id": "AzureTRE", "token_count": 105 }
121
data "azurerm_key_vault_secret" "workspace_client_id" { name = "workspace-client-id" key_vault_id = data.azurerm_key_vault.ws.id } data "external" "app_role_members" { program = ["bash", "${path.module}/get_app_role_members.sh"] query = { auth_client_id = var.auth_client_id auth_client_secret = var.auth_client_secret auth_tenant_id = var.auth_tenant_id workspace_client_id = data.azurerm_key_vault_secret.workspace_client_id.value azure_environment = var.azure_environment } } data "azurerm_role_definition" "azure_ml_data_scientist" { name = "AzureML Data Scientist" } resource "azurerm_role_assignment" "app_role_members_aml_data_scientist" { for_each = (data.external.app_role_members.result.principals == "") ? [] : toset(split("\n", data.external.app_role_members.result.principals)) scope = azurerm_machine_learning_workspace.aml_workspace.id role_definition_id = data.azurerm_role_definition.azure_ml_data_scientist.id principal_id = each.value }
AzureTRE/templates/workspace_services/azureml/terraform/roles.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/azureml/terraform/roles.tf", "repo_id": "AzureTRE", "token_count": 438 }
122
variable "tre_id" { type = string description = "Unique TRE ID" } variable "tre_resource_id" { type = string description = "Unique TRE Resource ID" } variable "workspace_id" { type = string description = "Unique TRE WORKSPACE ID" } variable "address_space" { type = string description = "The address space that is used by the databricks subnets." } variable "is_exposed_externally" { type = bool description = "If the databricks workspace is exposed externally or not." } variable "arm_environment" { type = string }
AzureTRE/templates/workspace_services/databricks/terraform/variables.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/databricks/terraform/variables.tf", "repo_id": "AzureTRE", "token_count": 204 }
123
locals {
  short_service_id               = substr(var.id, -4, -1)
  short_workspace_id             = substr(var.workspace_id, -4, -1)
  workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
  service_resource_name_suffix   = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}"
  storage_name                   = lower(replace("stg${substr(local.service_resource_name_suffix, -8, -1)}", "-", ""))
  webapp_name                    = "gitea-${local.service_resource_name_suffix}"
  core_resource_group_name       = "rg-${var.tre_id}"
  keyvault_name                  = lower("kv-${substr(local.workspace_resource_name_suffix, -20, -1)}")
  version                        = replace(replace(replace(data.local_file.version.content, "__version__ = \"", ""), "\"", ""), "\n", "")

  sql_sku = {
    "B | 4GB 2vCores"   = { value = "B_Standard_B2s" },
    "GP | 8GB 2vCores"  = { value = "GP_Standard_D2ds_v4" },
    "BC | 16GB 2vCores" = { value = "MO_Standard_E2ds_v4" }
  }

  workspace_service_tags = {
    tre_id                   = var.tre_id
    tre_workspace_id         = var.workspace_id
    tre_workspace_service_id = var.id
  }

  web_app_diagnostic_categories_enabled = [
    "AppServiceHTTPLogs",
    "AppServiceConsoleLogs",
    "AppServiceAppLogs",
    "AppServiceFileAuditLogs",
    "AppServiceAuditLogs",
    "AppServiceIPSecAuditLogs",
    "AppServicePlatformLogs",
    "AppServiceAntivirusScanAuditLogs"
  ]

  gitea_openid_auth = "${var.aad_authority_url}/${data.azurerm_key_vault_secret.aad_tenant_id.value}/v2.0"
}
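# Lookup sketch (illustrative; the consuming resource lives in another file of
# this template): a database resource would resolve the human-readable SKU
# label chosen in the bundle parameters with an expression such as
#   sku_name = local.sql_sku[var.sql_sku].value   # e.g. "B_Standard_B2s"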
AzureTRE/templates/workspace_services/gitea/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/gitea/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 731 }
124
#!/usr/bin/with-contenv sh

echo >&2 "starting guacd"
exec /opt/guacamole/sbin/guacd -f -b 0.0.0.0 -L "$GUACD_LOG_LEVEL" -l 4822
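# Context sketch (assumptions about the surrounding s6-overlay image, not part
# of this script's own logic): the "with-contenv" shebang imports the container
# environment so $GUACD_LOG_LEVEL is visible here, and "exec" replaces the
# shell so the s6 supervisor manages guacd (listening on port 4822) directly.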
AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/guacd/run/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/docker/services/guacd/run", "repo_id": "AzureTRE", "token_count": 67 }
125
/**
 * Connection handling for the Azure TRE Guacamole authentication extension.
 */
package org.apache.guacamole.auth.azuretre.connection;
AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/connection/package-info.java/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/guacamole-server/guacamole-auth-azure/src/main/java/org/apache/guacamole/auth/azuretre/connection/package-info.java", "repo_id": "AzureTRE", "token_count": 25 }
126
---
schemaVersion: 1.0.0
name: tre-service-guacamole
version: 0.10.6
description: "An Azure TRE service for Guacamole"
dockerfile: Dockerfile.tmpl
registry: azuretre

custom:
  runtime_image:
    name: guac-server
    build:
      version_file: guacamole-server/docker/version.txt
      docker_file: guacamole-server/docker/Dockerfile
      docker_context: guacamole-server

credentials:
  - name: azure_tenant_id
    env: ARM_TENANT_ID
  - name: azure_subscription_id
    env: ARM_SUBSCRIPTION_ID
  - name: azure_client_id
    env: ARM_CLIENT_ID
  - name: azure_client_secret
    env: ARM_CLIENT_SECRET

parameters:
  - name: workspace_id
    type: string
  - name: tre_id
    type: string
  - name: image_tag
    type: string
    default: ""   # will use the value in the version.txt file unless provided
    description: "The tag of the guacamole image"
  - name: mgmt_acr_name
    type: string
    env: mgmt_acr_name
    description: "The devops ACR name"
  - name: mgmt_resource_group_name
    type: string
    description: "Resource group containing the devops ACR"
    env: MGMT_RESOURCE_GROUP_NAME
  - name: guac_disable_copy
    type: boolean
    default: true
    env: GUAC_DISABLE_COPY
    description: "Guacamole disable copy configuration"
  - name: guac_disable_paste
    type: boolean
    default: false
    env: GUAC_DISABLE_PASTE
    description: "Guacamole disable paste configuration"
  - name: guac_enable_drive
    type: boolean
    default: true
    env: GUAC_ENABLE_DRIVE
    description: "Guacamole enable drive configuration"
  - name: guac_drive_name
    type: string
    default: "transfer"
    env: GUAC_DRIVE_NAME
    description: "Guacamole drive name configuration"
  - name: guac_drive_path
    type: string
    default: "/guac-transfer"
    env: GUAC_DRIVE_PATH
    description: "Guacamole drive path configuration"
  - name: guac_disable_download
    type: boolean
    default: true
    env: GUAC_DISABLE_DOWNLOAD
    description: "Guacamole disable download configuration"
  - name: guac_disable_upload
    type: boolean
    default: true
    env: GUAC_DISABLE_UPLOAD
  - name: is_exposed_externally
    type: boolean
    default: true
    env: IS_EXPOSED_EXTERNALLY
    description: "Determines if the web app will be available over public/internet or private networks"
  - name: aad_authority_url
    type: string
    default: "https://login.microsoftonline.com"
  # the following are added automatically by the resource processor
  - name: id
    type: string
    description: "An Id for this installation"
    env: id
  - name: tfstate_resource_group_name
    type: string
    description: "Resource group containing the Terraform state storage account"
  - name: tfstate_storage_account_name
    type: string
    description: "The name of the Terraform state storage account"
  - name: tfstate_container_name
    env: tfstate_container_name
    type: string
    default: "tfstate"
    description: "The name of the Terraform state storage container"
  - name: arm_use_msi
    env: ARM_USE_MSI
    type: boolean
    default: false
  - name: arm_environment
    type: string

outputs:
  - name: authentication_callback_uri
    type: string
    applyTo:
      - install
      - upgrade
  - name: web_apps_addresses
    type: string
    applyTo:
      - install
      - upgrade
  - name: admin_connection_uri
    type: string
    applyTo:
      - install
      - upgrade

mixins:
  - terraform:
      clientVersion: 1.4.6

install:
  - terraform:
      description: "Deploy Guacamole Service"
      vars:
        workspace_id: ${ bundle.parameters.workspace_id }
        tre_id: ${ bundle.parameters.tre_id }
        image_name: ${ bundle.custom.runtime_image.name }
        image_tag: ${ bundle.parameters.image_tag }
        mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name }
        mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name }
        guac_disable_copy: ${ bundle.parameters.guac_disable_copy }
        guac_disable_paste: ${ bundle.parameters.guac_disable_paste }
        guac_enable_drive: ${ bundle.parameters.guac_enable_drive }
        guac_drive_name: ${ bundle.parameters.guac_drive_name }
        guac_drive_path: ${ bundle.parameters.guac_drive_path }
        guac_disable_download: ${ bundle.parameters.guac_disable_download }
        guac_disable_upload: ${ bundle.parameters.guac_disable_upload }
        is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
        tre_resource_id: ${ bundle.parameters.id }
        aad_authority_url: ${ bundle.parameters.aad_authority_url }
        arm_environment: ${ bundle.parameters.arm_environment }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: tre-service-guacamole-${ bundle.parameters.id }
      outputs:
        - name: authentication_callback_uri
        - name: web_apps_addresses
        - name: admin_connection_uri

upgrade:
  - terraform:
      description: "Upgrade Guacamole Service"
      vars:
        workspace_id: ${ bundle.parameters.workspace_id }
        tre_id: ${ bundle.parameters.tre_id }
        image_name: ${ bundle.custom.runtime_image.name }
        image_tag: ${ bundle.parameters.image_tag }
        mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name }
        mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name }
        guac_disable_copy: ${ bundle.parameters.guac_disable_copy }
        guac_disable_paste: ${ bundle.parameters.guac_disable_paste }
        guac_enable_drive: ${ bundle.parameters.guac_enable_drive }
        guac_drive_name: ${ bundle.parameters.guac_drive_name }
        guac_drive_path: ${ bundle.parameters.guac_drive_path }
        guac_disable_download: ${ bundle.parameters.guac_disable_download }
        guac_disable_upload: ${ bundle.parameters.guac_disable_upload }
        is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
        tre_resource_id: ${ bundle.parameters.id }
        aad_authority_url: ${ bundle.parameters.aad_authority_url }
        arm_environment: ${ bundle.parameters.arm_environment }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: tre-service-guacamole-${ bundle.parameters.id }
      outputs:
        - name: authentication_callback_uri
        - name: web_apps_addresses
        - name: admin_connection_uri

uninstall:
  - terraform:
      description: "Delete the Guacamole Service"
      vars:
        workspace_id: ${ bundle.parameters.workspace_id }
        tre_id: ${ bundle.parameters.tre_id }
        image_name: ${ bundle.custom.runtime_image.name }
        image_tag: ${ bundle.parameters.image_tag }
        mgmt_acr_name: ${ bundle.parameters.mgmt_acr_name }
        mgmt_resource_group_name: ${ bundle.parameters.mgmt_resource_group_name }
        guac_disable_copy: ${ bundle.parameters.guac_disable_copy }
        guac_disable_paste: ${ bundle.parameters.guac_disable_paste }
        guac_enable_drive: ${ bundle.parameters.guac_enable_drive }
        guac_drive_name: ${ bundle.parameters.guac_drive_name }
        guac_drive_path: ${ bundle.parameters.guac_drive_path }
        guac_disable_download: ${ bundle.parameters.guac_disable_download }
        guac_disable_upload: ${ bundle.parameters.guac_disable_upload }
        is_exposed_externally: ${ bundle.parameters.is_exposed_externally }
        tre_resource_id: ${ bundle.parameters.id }
        aad_authority_url: ${ bundle.parameters.aad_authority_url }
        arm_environment: ${ bundle.parameters.arm_environment }
      backendConfig:
        resource_group_name: ${ bundle.parameters.tfstate_resource_group_name }
        storage_account_name: ${ bundle.parameters.tfstate_storage_account_name }
        container_name: ${ bundle.parameters.tfstate_container_name }
        key: tre-service-guacamole-${ bundle.parameters.id }
AzureTRE/templates/workspace_services/guacamole/porter.yaml/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/porter.yaml", "repo_id": "AzureTRE", "token_count": 3226 }
127
#!/bin/bash

sudo tee /etc/pip.conf > /dev/null <<'EOF'
[global]
index = ${nexus_proxy_url}/repository/pypi/pypi
index-url = ${nexus_proxy_url}/repository/pypi/simple
trusted-host = ${nexus_proxy_url}
EOF
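# Verification sketch (illustrative; assumes pip is installed on the VM):
#   pip config list          # should show the index/index-url written above
#   pip download requests    # should resolve via the Nexus PyPI proxy
# Note: the quoted 'EOF' stops bash from expanding ${nexus_proxy_url}; given
# this file's location under terraform/, the value is presumably substituted
# by Terraform templating before the script runs.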
AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/pypi_sources_config.sh/0
{ "file_path": "AzureTRE/templates/workspace_services/guacamole/user_resources/guacamole-azure-linuxvm/terraform/pypi_sources_config.sh", "repo_id": "AzureTRE", "token_count": 89 }
128
data "azurerm_key_vault_secret" "workspace_client_id" { name = "workspace-client-id" key_vault_id = data.azurerm_key_vault.ws.id } data "external" "app_role_members" { program = ["bash", "${path.module}/get_app_role_members.sh"] query = { auth_client_id = var.auth_client_id auth_client_secret = var.auth_client_secret auth_tenant_id = var.auth_tenant_id workspace_client_id = data.azurerm_key_vault_secret.workspace_client_id.value } } data "azurerm_role_definition" "azure_fhir_contributor" { name = "FHIR Data Contributor" } data "azurerm_role_definition" "azure_dicom_data_owner" { name = "DICOM Data Owner" } resource "azurerm_role_assignment" "app_role_members_fhir_contributor" { for_each = !var.deploy_fhir || (data.external.app_role_members.result.principals == "") ? [] : toset(split("\n", data.external.app_role_members.result.principals)) scope = azurerm_healthcare_fhir_service.fhir[0].id role_definition_id = data.azurerm_role_definition.azure_fhir_contributor.id principal_id = each.value } resource "azurerm_role_assignment" "app_role_members_dicom_data_owner" { for_each = !var.deploy_dicom || (data.external.app_role_members.result.principals == "") ? [] : toset(split("\n", data.external.app_role_members.result.principals)) scope = azurerm_healthcare_dicom_service.dicom[0].id role_definition_id = data.azurerm_role_definition.azure_dicom_data_owner.id principal_id = each.value }
AzureTRE/templates/workspace_services/health-services/terraform/roles.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/health-services/terraform/roles.tf", "repo_id": "AzureTRE", "token_count": 662 }
129
# Random unique id
locals {
  short_service_id               = substr(var.tre_resource_id, -4, -1)
  short_workspace_id             = substr(var.workspace_id, -4, -1)
  core_resource_group_name       = "rg-${var.tre_id}"
  workspace_resource_name_suffix = "${var.tre_id}-ws-${local.short_workspace_id}"
  service_resource_name_suffix   = "${var.tre_id}-ws-${local.short_workspace_id}-svc-${local.short_service_id}"
  aml_workspace_name             = lower("ml-${substr(local.service_resource_name_suffix, -30, -1)}")
  aml_compute_id                 = substr("${var.tre_id}${var.workspace_id}${local.short_service_id}", -12, -1)
  aml_compute_cluster_name       = "cp-${local.aml_compute_id}"
  azureml_acr_name               = lower(replace("acr${substr(local.service_resource_name_suffix, -8, -1)}", "-", ""))

  tre_workspace_service_tags = {
    tre_id                   = var.tre_id
    tre_workspace_id         = var.workspace_id
    tre_workspace_service_id = var.tre_resource_id
  }
}
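# substr() semantics used above (standard Terraform behaviour; the example
# value is illustrative): a negative offset counts back from the end of the
# string and a length of -1 extends to the end, so
#   substr("mytre-dev-3142", -4, -1)   # => "3142"
# i.e. short_service_id is the last four characters of the resource ID.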
AzureTRE/templates/workspace_services/innereye/terraform/locals.tf/0
{ "file_path": "AzureTRE/templates/workspace_services/innereye/terraform/locals.tf", "repo_id": "AzureTRE", "token_count": 478 }
130
{ "schemaType": "ParameterSet", "schemaVersion": "1.0.1", "namespace": "", "name": "tre-service-mlflow", "parameters": [ { "name": "workspace_id", "source": { "env": "WORKSPACE_ID" } }, { "name": "tre_id", "source": { "env": "TRE_ID" } }, { "name": "id", "source": { "env": "ID" } }, { "name": "mgmt_acr_name", "source": { "env": "ACR_NAME" } }, { "name": "mgmt_resource_group_name", "source": { "env": "MGMT_RESOURCE_GROUP_NAME" } }, { "name": "tfstate_container_name", "source": { "env": "TERRAFORM_STATE_CONTAINER_NAME" } }, { "name": "tfstate_resource_group_name", "source": { "env": "MGMT_RESOURCE_GROUP_NAME" } }, { "name": "tfstate_storage_account_name", "source": { "env": "MGMT_STORAGE_ACCOUNT_NAME" } }, { "name": "arm_environment", "source": { "env": "ARM_ENVIRONMENT" } } ] }
AzureTRE/templates/workspace_services/mlflow/parameters.json/0
{ "file_path": "AzureTRE/templates/workspace_services/mlflow/parameters.json", "repo_id": "AzureTRE", "token_count": 629 }
131
{ "schemaType": "ParameterSet", "schemaVersion": "1.0.1", "namespace": "", "name": "tre-workspace-service-mysql", "parameters": [ { "name": "tre_id", "source": { "env": "TRE_ID" } }, { "name": "id", "source": { "env": "ID" } }, { "name": "tfstate_container_name", "source": { "env": "TERRAFORM_STATE_CONTAINER_NAME" } }, { "name": "tfstate_resource_group_name", "source": { "env": "MGMT_RESOURCE_GROUP_NAME" } }, { "name": "tfstate_storage_account_name", "source": { "env": "MGMT_STORAGE_ACCOUNT_NAME" } }, { "name": "sql_sku", "source": { "env": "SQL_SKU" } }, { "name": "storage_mb", "source": { "env": "STORAGE_MB" } }, { "name": "db_name", "source": { "env": "DB_NAME" } }, { "name": "workspace_id", "source": { "env": "WORKSPACE_ID" } }, { "name": "arm_environment", "source": { "env": "ARM_ENVIRONMENT" } } ] }
AzureTRE/templates/workspace_services/mysql/parameters.json/0
{ "file_path": "AzureTRE/templates/workspace_services/mysql/parameters.json", "repo_id": "AzureTRE", "token_count": 676 }
132
#!/bin/bash
set -o errexit
set -o pipefail
set -o nounset

admin_user_password="${OHDSI_ADMIN_PASSWORD}${OHDSI_ADMIN_USERNAME}"
app_user_password="${OHDSI_APP_PASSWORD}${OHDSI_APP_USERNAME}"

admin_md5="md5$(echo -n "$admin_user_password" | md5sum | awk '{ print $1 }')"
export admin_md5
app_md5="md5$(echo -n "$app_user_password" | md5sum | awk '{ print $1 }')"
export app_md5

printf 'Creating roles and users\n'
envsubst < ../sql/atlas_create_roles_users.sql | psql -v ON_ERROR_STOP=0 -e "$MAIN_CONNECTION_STRING"
printf 'Creating roles and users: done.\n'

printf 'Creating schema\n'
envsubst < ../sql/atlas_create_schema.sql | psql -v ON_ERROR_STOP=1 -e "$OHDSI_ADMIN_CONNECTION_STRING"
printf 'Creating schema: done.\n'

printf 'Done\n'
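# Background sketch for the md5 values above (standard PostgreSQL md5 password
# format; the example user and password are made up): the stored password is
# the literal prefix "md5" followed by md5(password || username), so for user
# "atlas" with password "secret" the value fed into CREATE ROLE would be:
#   echo -n "secretatlas" | md5sum | awk '{ print "md5" $1 }'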
AzureTRE/templates/workspace_services/ohdsi/scripts/atlas_db_init.sh/0
{ "file_path": "AzureTRE/templates/workspace_services/ohdsi/scripts/atlas_db_init.sh", "repo_id": "AzureTRE", "token_count": 303 }
133