Columns: python_code (string, 0 to 992k chars), repo_name (string, 8 to 46 chars), file_path (string, 5 to 162 chars)
import collections import time from typing import Any, Dict, List, Optional, Tuple, Union import torch from packaging import version from torch import nn from torch.utils.data import DataLoader, Dataset from transformers.trainer_utils import EvalPrediction, PredictionOutput, speed_metrics from transformers.utils import logging from .funsd_trainer import FunsdTrainer if version.parse(torch.__version__) >= version.parse("1.6"): _is_native_amp_available = True from torch.cuda.amp import autocast logger = logging.get_logger(__name__) class XfunSerTrainer(FunsdTrainer): pass class XfunReTrainer(FunsdTrainer): def __init__(self, **kwargs): super().__init__(**kwargs) self.label_names.append("relations") def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None, ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: inputs = self._prepare_inputs(inputs) with torch.no_grad(): if self.use_amp: with autocast(): outputs = model(**inputs) else: outputs = model(**inputs) labels = tuple(inputs.get(name) for name in self.label_names) return outputs, labels def prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> PredictionOutput: """ Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`. Works both with or without labels. """ if not isinstance(dataloader.dataset, collections.abc.Sized): raise ValueError("dataset must implement __len__") prediction_loss_only = ( prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only ) if self.args.deepspeed and not self.args.do_train: # no harm, but flagging to the user that deepspeed config is ignored for eval # flagging only for when --do_train wasn't passed as only then it's redundant logger.info("Detected the deepspeed argument but it will not be used for evaluation") model = self._wrap_model(self.model, training=False) # if full fp16 is wanted on eval and this ``evaluation`` or ``predict`` isn't called while # ``train`` is running, half it first and then put on device if not self.is_in_train and self.args.fp16_full_eval: model = model.half().to(self.args.device) batch_size = dataloader.batch_size num_examples = self.num_examples(dataloader) logger.info("***** Running %s *****", description) logger.info(" Num examples = %d", num_examples) logger.info(" Batch size = %d", batch_size) model.eval() self.callback_handler.eval_dataloader = dataloader re_labels = None pred_relations = None entities = None for step, inputs in enumerate(dataloader): outputs, labels = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys) re_labels = labels[1] if re_labels is None else re_labels + labels[1] pred_relations = ( outputs.pred_relations if pred_relations is None else pred_relations + outputs.pred_relations ) entities = outputs.entities if entities is None else entities + outputs.entities self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control) gt_relations = [] for b in range(len(re_labels)): rel_sent = [] for head, tail in zip(re_labels[b]["head"], re_labels[b]["tail"]): rel = {} rel["head_id"] = head rel["head"] = (entities[b]["start"][rel["head_id"]], entities[b]["end"][rel["head_id"]]) rel["head_type"] = entities[b]["label"][rel["head_id"]] rel["tail_id"] = tail rel["tail"] = 
(entities[b]["start"][rel["tail_id"]], entities[b]["end"][rel["tail_id"]]) rel["tail_type"] = entities[b]["label"][rel["tail_id"]] rel["type"] = 1 rel_sent.append(rel) gt_relations.append(rel_sent) re_metrics = self.compute_metrics(EvalPrediction(predictions=pred_relations, label_ids=gt_relations)) re_metrics = { "precision": re_metrics["ALL"]["p"], "recall": re_metrics["ALL"]["r"], "f1": re_metrics["ALL"]["f1"], } re_metrics[f"{metric_key_prefix}_loss"] = outputs.loss.mean().item() metrics = {} # # Prefix all keys with metric_key_prefix + '_' for key in list(re_metrics.keys()): if not key.startswith(f"{metric_key_prefix}_"): metrics[f"{metric_key_prefix}_{key}"] = re_metrics.pop(key) else: metrics[f"{key}"] = re_metrics.pop(key) return metrics def evaluate( self, eval_dataset: Optional[Dataset] = None, ignore_keys: Optional[List[str]] = None, metric_key_prefix: str = "eval", ) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init :obj:`compute_metrics` argument). You can also subclass and override this method to inject custom behavior. Args: eval_dataset (:obj:`Dataset`, `optional`): Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the :obj:`__len__` method. ignore_keys (:obj:`Lst[str]`, `optional`): A list of keys in the output of your model (if it is a dictionary) that should be ignored when gathering predictions. metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`): An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named "eval_bleu" if the prefix is "eval" (default) Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. """ if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") self.args.local_rank = -1 eval_dataloader = self.get_eval_dataloader(eval_dataset) self.args.local_rank = torch.distributed.get_rank() start_time = time.time() metrics = self.prediction_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if self.compute_metrics is None else None, ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix, ) n_samples = len(eval_dataset if eval_dataset is not None else self.eval_dataset) metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples)) self.log(metrics) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics) return metrics
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/trainers/xfun_trainer.py
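XfunReTrainer.prediction_loop above accumulates outputs.pred_relations and rebuilds gold relations as dicts with "head"/"tail" spans, then hands both lists to self.compute_metrics (the RE example script wires this to re_score with mode="boundaries"). The sketch below is not the repo's re_score; it is a hypothetical stand-in showing how boundary matching over that relation format could yield precision/recall/F1.

# Minimal boundary-matching scorer over the relation dicts built by prediction_loop.
# A predicted relation counts as correct when its (head, tail) spans match a gold relation.
def boundary_f1(pred_relations, gt_relations):
    tp = n_pred = n_gt = 0
    for preds, golds in zip(pred_relations, gt_relations):
        gold_spans = {(tuple(r["head"]), tuple(r["tail"])) for r in golds}
        pred_spans = {(tuple(r["head"]), tuple(r["tail"])) for r in preds}
        tp += len(pred_spans & gold_spans)
        n_pred += len(pred_spans)
        n_gt += len(gold_spans)
    p = tp / n_pred if n_pred else 0.0
    r = tp / n_gt if n_gt else 0.0
    f1 = 2 * p * r / (p + r) if p + r else 0.0
    return {"p": p, "r": r, "f1": f1}

# Toy documents: one correct prediction and one spurious one.
gold = [[{"head": (0, 3), "tail": (4, 7), "type": 1}]]
pred = [[{"head": (0, 3), "tail": (4, 7), "type": 1},
         {"head": (0, 3), "tail": (8, 10), "type": 1}]]
print(boundary_f1(pred, gold))  # {'p': 0.5, 'r': 1.0, 'f1': 0.666...}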
from .funsd_trainer import FunsdTrainer
from .xfun_trainer import XfunReTrainer, XfunSerTrainer
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/trainers/__init__.py
from typing import Any, Dict, Union

import torch

from transformers import Trainer


class FunsdTrainer(Trainer):
    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
        """
        Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already
        and handling potential state.
        """
        for k, v in inputs.items():
            if hasattr(v, "to") and hasattr(v, "device"):
                inputs[k] = v.to(self.args.device)

        if self.args.past_index >= 0 and self._past is not None:
            inputs["mems"] = self._past

        return inputs
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/trainers/funsd_trainer.py
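FunsdTrainer._prepare_inputs moves every value that exposes both .to and .device onto the trainer's device and leaves plain Python objects (such as the "relations" lists used by the RE head) untouched. The standalone snippet below replays that loop outside a Trainer; "cpu" is a stand-in for self.args.device.

import torch

device = torch.device("cpu")
inputs = {
    "input_ids": torch.tensor([[0, 5, 2]]),
    "bbox": torch.zeros(1, 3, 4, dtype=torch.long),
    "relations": [{"head": [0], "tail": [1]}],  # non-tensor entries are left as-is
}
for k, v in inputs.items():
    if hasattr(v, "to") and hasattr(v, "device"):
        inputs[k] = v.to(device)
print({k: type(v).__name__ for k, v in inputs.items()})
# {'input_ids': 'Tensor', 'bbox': 'Tensor', 'relations': 'list'}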
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/modules/__init__.py
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/modules/decoders/__init__.py
import copy

import torch
from torch import nn
from torch.nn import CrossEntropyLoss


class BiaffineAttention(torch.nn.Module):
    """Implements a biaffine attention operator for binary relation classification.

    PyTorch implementation of the biaffine attention operator from "End-to-end neural relation
    extraction using deep biaffine attention" (https://arxiv.org/abs/1812.11275) which can be
    used as a classifier for binary relation classification.

    Args:
        in_features (int): The size of the feature dimension of the inputs.
        out_features (int): The size of the feature dimension of the output.

    Shape:
        - x_1: `(N, *, in_features)` where `N` is the batch dimension and `*` means any number of
          additional dimensions.
        - x_2: `(N, *, in_features)`, where `N` is the batch dimension and `*` means any number of
          additional dimensions.
        - Output: `(N, *, out_features)`, where `N` is the batch dimension and `*` means any number
          of additional dimensions.

    Examples:
        >>> batch_size, in_features, out_features = 32, 100, 4
        >>> biaffine_attention = BiaffineAttention(in_features, out_features)
        >>> x_1 = torch.randn(batch_size, in_features)
        >>> x_2 = torch.randn(batch_size, in_features)
        >>> output = biaffine_attention(x_1, x_2)
        >>> print(output.size())
        torch.Size([32, 4])
    """

    def __init__(self, in_features, out_features):
        super(BiaffineAttention, self).__init__()

        self.in_features = in_features
        self.out_features = out_features

        self.bilinear = torch.nn.Bilinear(in_features, in_features, out_features, bias=False)
        self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)

        self.reset_parameters()

    def forward(self, x_1, x_2):
        return self.bilinear(x_1, x_2) + self.linear(torch.cat((x_1, x_2), dim=-1))

    def reset_parameters(self):
        self.bilinear.reset_parameters()
        self.linear.reset_parameters()


class REDecoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.entity_emb = nn.Embedding(3, config.hidden_size, scale_grad_by_freq=True)
        projection = nn.Sequential(
            nn.Linear(config.hidden_size * 2, config.hidden_size),
            nn.ReLU(),
            nn.Dropout(config.hidden_dropout_prob),
            nn.Linear(config.hidden_size, config.hidden_size // 2),
            nn.ReLU(),
            nn.Dropout(config.hidden_dropout_prob),
        )
        self.ffnn_head = copy.deepcopy(projection)
        self.ffnn_tail = copy.deepcopy(projection)
        self.rel_classifier = BiaffineAttention(config.hidden_size // 2, 2)
        self.loss_fct = CrossEntropyLoss()

    def build_relation(self, relations, entities):
        batch_size = len(relations)
        new_relations = []
        for b in range(batch_size):
            if len(entities[b]["start"]) <= 2:
                entities[b] = {"end": [1, 1], "label": [0, 0], "start": [0, 0]}
            all_possible_relations = set(
                [
                    (i, j)
                    for i in range(len(entities[b]["label"]))
                    for j in range(len(entities[b]["label"]))
                    if entities[b]["label"][i] == 1 and entities[b]["label"][j] == 2
                ]
            )
            if len(all_possible_relations) == 0:
                all_possible_relations = set([(0, 1)])
            positive_relations = set(list(zip(relations[b]["head"], relations[b]["tail"])))
            negative_relations = all_possible_relations - positive_relations
            positive_relations = set([i for i in positive_relations if i in all_possible_relations])
            reordered_relations = list(positive_relations) + list(negative_relations)
            relation_per_doc = {"head": [], "tail": [], "label": []}
            relation_per_doc["head"] = [i[0] for i in reordered_relations]
            relation_per_doc["tail"] = [i[1] for i in reordered_relations]
            relation_per_doc["label"] = [1] * len(positive_relations) + [0] * (
                len(reordered_relations) - len(positive_relations)
            )
            assert len(relation_per_doc["head"]) != 0
            new_relations.append(relation_per_doc)
        return new_relations, entities

    def get_predicted_relations(self, logits, relations, entities):
        pred_relations = []
        for i, pred_label in enumerate(logits.argmax(-1)):
            if pred_label != 1:
                continue
            rel = {}
            rel["head_id"] = relations["head"][i]
            rel["head"] = (entities["start"][rel["head_id"]], entities["end"][rel["head_id"]])
            rel["head_type"] = entities["label"][rel["head_id"]]
            rel["tail_id"] = relations["tail"][i]
            rel["tail"] = (entities["start"][rel["tail_id"]], entities["end"][rel["tail_id"]])
            rel["tail_type"] = entities["label"][rel["tail_id"]]
            rel["type"] = 1
            pred_relations.append(rel)
        return pred_relations

    def forward(self, hidden_states, entities, relations):
        batch_size, max_n_words, context_dim = hidden_states.size()
        device = hidden_states.device
        relations, entities = self.build_relation(relations, entities)
        loss = 0
        all_pred_relations = []
        for b in range(batch_size):
            head_entities = torch.tensor(relations[b]["head"], device=device)
            tail_entities = torch.tensor(relations[b]["tail"], device=device)
            relation_labels = torch.tensor(relations[b]["label"], device=device)
            entities_start_index = torch.tensor(entities[b]["start"], device=device)
            entities_labels = torch.tensor(entities[b]["label"], device=device)
            head_index = entities_start_index[head_entities]
            head_label = entities_labels[head_entities]
            head_label_repr = self.entity_emb(head_label)

            tail_index = entities_start_index[tail_entities]
            tail_label = entities_labels[tail_entities]
            tail_label_repr = self.entity_emb(tail_label)

            head_repr = torch.cat(
                (hidden_states[b][head_index], head_label_repr),
                dim=-1,
            )
            tail_repr = torch.cat(
                (hidden_states[b][tail_index], tail_label_repr),
                dim=-1,
            )
            heads = self.ffnn_head(head_repr)
            tails = self.ffnn_tail(tail_repr)
            logits = self.rel_classifier(heads, tails)
            loss += self.loss_fct(logits, relation_labels)
            pred_relations = self.get_predicted_relations(logits, relations[b], entities[b])
            all_pred_relations.append(pred_relations)
        return loss, all_pred_relations
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/modules/decoders/re.py
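REDecoder.build_relation enumerates all (question, answer) entity index pairs as candidates, marks the gold pairs with label 1 and the rest with label 0. The toy walk-through below replays that logic for one document in pure Python; the entity labels follow the ClassLabel order declared in the XFUN builder later in this section (0 = HEADER, 1 = QUESTION, 2 = ANSWER).

entities = {"start": [0, 4, 9, 15], "end": [3, 8, 14, 20], "label": [1, 2, 1, 2]}
gold = {"head": [0], "tail": [1]}  # entity 0 (a question) links to entity 1 (an answer)

num = len(entities["label"])
all_possible = {
    (i, j)
    for i in range(num)
    for j in range(num)
    if entities["label"][i] == 1 and entities["label"][j] == 2
}
positive = set(zip(gold["head"], gold["tail"])) & all_possible
negative = all_possible - positive
reordered = list(positive) + list(negative)
labels = [1] * len(positive) + [0] * len(negative)
print(reordered)  # e.g. [(0, 1), (0, 3), (2, 1), (2, 3)] with the gold pair first
print(labels)     # [1, 0, 0, 0]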
# flake8: noqa from .data_collator import DataCollatorForKeyValueExtraction from .datasets import *
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/data/__init__.py
import torch

from detectron2.data.detection_utils import read_image
from detectron2.data.transforms import ResizeTransform, TransformList


def normalize_bbox(bbox, size):
    return [
        int(1000 * bbox[0] / size[0]),
        int(1000 * bbox[1] / size[1]),
        int(1000 * bbox[2] / size[0]),
        int(1000 * bbox[3] / size[1]),
    ]


def simplify_bbox(bbox):
    return [
        min(bbox[0::2]),
        min(bbox[1::2]),
        max(bbox[2::2]),
        max(bbox[3::2]),
    ]


def merge_bbox(bbox_list):
    x0, y0, x1, y1 = list(zip(*bbox_list))
    return [min(x0), min(y0), max(x1), max(y1)]


def load_image(image_path):
    image = read_image(image_path, format="BGR")
    h = image.shape[0]
    w = image.shape[1]
    img_trans = TransformList([ResizeTransform(h=h, w=w, new_h=224, new_w=224)])
    image = torch.tensor(img_trans.apply_image(image).copy()).permute(2, 0, 1)  # copy to make it writeable
    return image, (w, h)
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/data/utils.py
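A hand-worked example of the coordinate helpers above: OCR boxes (e.g. a 4-point polygon) are flattened to a rectangle, merged, and scaled to the 0-1000 grid relative to page size (w, h) that LayoutLM-style models expect. It assumes layoutlmft (and its detectron2 dependency) is importable in your environment; the box values are made up.

from layoutlmft.data.utils import merge_bbox, normalize_bbox, simplify_bbox

page_size = (800, 1000)                            # (width, height) as returned by load_image
quad = [100, 200, 180, 200, 180, 230, 100, 230]    # 4-point polygon from the OCR output
rect = simplify_bbox(quad)
print(rect)                                        # [100, 200, 180, 230]
line_box = merge_bbox([rect, [190, 200, 260, 235]])
print(line_box)                                    # [100, 200, 260, 235]
print(normalize_bbox(line_box, page_size))         # [125, 200, 325, 235]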
from dataclasses import dataclass, field from typing import Optional @dataclass class DataTrainingArguments: """ Arguments pertaining to what data we are going to input our model for training and eval. """ task_name: Optional[str] = field(default="ner", metadata={"help": "The name of the task (ner, pos...)."}) dataset_name: Optional[str] = field( default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."} ) dataset_config_name: Optional[str] = field( default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} ) train_file: Optional[str] = field( default=None, metadata={"help": "The input training data file (a csv or JSON file)."} ) validation_file: Optional[str] = field( default=None, metadata={"help": "An optional input evaluation data file to evaluate on (a csv or JSON file)."}, ) test_file: Optional[str] = field( default=None, metadata={"help": "An optional input test data file to predict on (a csv or JSON file)."}, ) overwrite_cache: bool = field( default=False, metadata={"help": "Overwrite the cached training and evaluation sets"} ) preprocessing_num_workers: Optional[int] = field( default=None, metadata={"help": "The number of processes to use for the preprocessing."}, ) pad_to_max_length: bool = field( default=True, metadata={ "help": "Whether to pad all samples to model maximum sentence length. " "If False, will pad the samples dynamically when batching to the maximum length in the batch. More " "efficient on GPU but very bad for TPU." }, ) max_train_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of training examples to this " "value if set." }, ) max_val_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of validation examples to this " "value if set." }, ) max_test_samples: Optional[int] = field( default=None, metadata={ "help": "For debugging purposes or quicker training, truncate the number of test examples to this " "value if set." }, ) label_all_tokens: bool = field( default=False, metadata={ "help": "Whether to put the label for one word on all tokens of generated by that word or just on the " "one (in which case the other tokens will have a padding index)." }, ) return_entity_level_metrics: bool = field( default=False, metadata={"help": "Whether to return all the entity levels during evaluation or just the overall ones."}, ) @dataclass class XFUNDataTrainingArguments(DataTrainingArguments): lang: Optional[str] = field(default="en") additional_langs: Optional[str] = field(default=None)
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/data/data_args.py
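A minimal sketch of parsing the dataclasses above with transformers' HfArgumentParser, the same mechanism the example scripts use. The flag values here are made up for illustration.

from transformers import HfArgumentParser

from layoutlmft.data.data_args import XFUNDataTrainingArguments

parser = HfArgumentParser(XFUNDataTrainingArguments)
(data_args,) = parser.parse_args_into_dataclasses(
    args=["--lang", "zh", "--additional_langs", "de+fr", "--max_train_samples", "100"]
)
print(data_args.lang, data_args.additional_langs, data_args.max_train_samples)
# zh de+fr 100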
from dataclasses import dataclass from typing import Optional, Union import torch from detectron2.structures import ImageList from transformers import PreTrainedTokenizerBase from transformers.file_utils import PaddingStrategy @dataclass class DataCollatorForKeyValueExtraction: """ Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (:obj:`int`, `optional`, defaults to -100): The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None label_pad_token_id: int = -100 def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None has_image_input = "image" in features[0] has_bbox_input = "bbox" in features[0] if has_image_input: image = ImageList.from_tensors([torch.tensor(feature["image"]) for feature in features], 32) for feature in features: del feature["image"] batch = self.tokenizer.pad( features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, # Conversion to tensors will fail if we have labels as they are not of the same length yet. return_tensors="pt" if labels is None else None, ) if labels is None: return batch sequence_length = torch.tensor(batch["input_ids"]).shape[1] padding_side = self.tokenizer.padding_side if padding_side == "right": batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels] if has_bbox_input: batch["bbox"] = [bbox + [[0, 0, 0, 0]] * (sequence_length - len(bbox)) for bbox in batch["bbox"]] else: batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels] if has_bbox_input: batch["bbox"] = [[[0, 0, 0, 0]] * (sequence_length - len(bbox)) + bbox for bbox in batch["bbox"]] batch = {k: torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) else v for k, v in batch.items()} if has_image_input: batch["image"] = image return batch
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/data/data_collator.py
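A full run of DataCollatorForKeyValueExtraction needs a real tokenizer checkpoint, so the snippet below only replays its right-padding step for labels and bboxes in isolation. sequence_length is a stand-in value; the real collator takes it from the output of tokenizer.pad.

label_pad_token_id = -100
sequence_length = 6
labels = [[0, 3, 3], [0, 1, 2, 2, 1]]
bboxes = [[[12, 20, 40, 32]] * 3, [[5, 8, 30, 18]] * 5]

padded_labels = [label + [label_pad_token_id] * (sequence_length - len(label)) for label in labels]
padded_bboxes = [bbox + [[0, 0, 0, 0]] * (sequence_length - len(bbox)) for bbox in bboxes]
print(padded_labels[0])      # [0, 3, 3, -100, -100, -100]
print(padded_bboxes[0][-1])  # [0, 0, 0, 0]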
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/data/datasets/__init__.py
# Lint as: python3 import json import logging import os import datasets from layoutlmft.data.utils import load_image, merge_bbox, normalize_bbox, simplify_bbox from transformers import AutoTokenizer _URL = "https://github.com/doc-analysis/XFUN/releases/download/v1.0/" _LANG = ["zh", "de", "es", "fr", "en", "it", "ja", "pt"] logger = logging.getLogger(__name__) class XFUNConfig(datasets.BuilderConfig): """BuilderConfig for XFUN.""" def __init__(self, lang, additional_langs=None, **kwargs): """ Args: lang: string, language for the input text **kwargs: keyword arguments forwarded to super. """ super(XFUNConfig, self).__init__(**kwargs) self.lang = lang self.additional_langs = additional_langs class XFUN(datasets.GeneratorBasedBuilder): """XFUN dataset.""" BUILDER_CONFIGS = [XFUNConfig(name=f"xfun.{lang}", lang=lang) for lang in _LANG] tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base") def _info(self): return datasets.DatasetInfo( features=datasets.Features( { "id": datasets.Value("string"), "input_ids": datasets.Sequence(datasets.Value("int64")), "bbox": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))), "labels": datasets.Sequence( datasets.ClassLabel( names=["O", "B-QUESTION", "B-ANSWER", "B-HEADER", "I-ANSWER", "I-QUESTION", "I-HEADER"] ) ), "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"), "entities": datasets.Sequence( { "start": datasets.Value("int64"), "end": datasets.Value("int64"), "label": datasets.ClassLabel(names=["HEADER", "QUESTION", "ANSWER"]), } ), "relations": datasets.Sequence( { "head": datasets.Value("int64"), "tail": datasets.Value("int64"), "start_index": datasets.Value("int64"), "end_index": datasets.Value("int64"), } ), } ), supervised_keys=None, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" urls_to_download = { "train": [f"{_URL}{self.config.lang}.train.json", f"{_URL}{self.config.lang}.train.zip"], "val": [f"{_URL}{self.config.lang}.val.json", f"{_URL}{self.config.lang}.val.zip"], # "test": [f"{_URL}{self.config.lang}.test.json", f"{_URL}{self.config.lang}.test.zip"], } downloaded_files = dl_manager.download_and_extract(urls_to_download) train_files_for_many_langs = [downloaded_files["train"]] val_files_for_many_langs = [downloaded_files["val"]] # test_files_for_many_langs = [downloaded_files["test"]] if self.config.additional_langs: additional_langs = self.config.additional_langs.split("+") if "all" in additional_langs: additional_langs = [lang for lang in _LANG if lang != self.config.lang] for lang in additional_langs: urls_to_download = {"train": [f"{_URL}{lang}.train.json", f"{_URL}{lang}.train.zip"]} additional_downloaded_files = dl_manager.download_and_extract(urls_to_download) train_files_for_many_langs.append(additional_downloaded_files["train"]) logger.info(f"Training on {self.config.lang} with additional langs({self.config.additional_langs})") logger.info(f"Evaluating on {self.config.lang}") logger.info(f"Testing on {self.config.lang}") return [ datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": train_files_for_many_langs}), datasets.SplitGenerator( name=datasets.Split.VALIDATION, gen_kwargs={"filepaths": val_files_for_many_langs} ), # datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepaths": test_files_for_many_langs}), ] def _generate_examples(self, filepaths): for filepath in filepaths: logger.info("Generating examples from = %s", filepath) with open(filepath[0], "r", encoding="utf-8") as f: data = json.load(f) for doc in data["documents"]: 
doc["img"]["fpath"] = os.path.join(filepath[1], doc["img"]["fname"]) image, size = load_image(doc["img"]["fpath"]) document = doc["document"] tokenized_doc = {"input_ids": [], "bbox": [], "labels": []} entities = [] relations = [] id2label = {} entity_id_to_index_map = {} empty_entity = set() for line in document: if len(line["text"]) == 0: empty_entity.add(line["id"]) continue id2label[line["id"]] = line["label"] relations.extend([tuple(sorted(l)) for l in line["linking"]]) tokenized_inputs = self.tokenizer( line["text"], add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False, ) text_length = 0 ocr_length = 0 bbox = [] last_box = None for token_id, offset in zip(tokenized_inputs["input_ids"], tokenized_inputs["offset_mapping"]): if token_id == 6: bbox.append(None) continue text_length += offset[1] - offset[0] tmp_box = [] while ocr_length < text_length: ocr_word = line["words"].pop(0) ocr_length += len( self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word["text"].strip()) ) tmp_box.append(simplify_bbox(ocr_word["box"])) if len(tmp_box) == 0: tmp_box = last_box bbox.append(normalize_bbox(merge_bbox(tmp_box), size)) last_box = tmp_box bbox = [ [bbox[i + 1][0], bbox[i + 1][1], bbox[i + 1][0], bbox[i + 1][1]] if b is None else b for i, b in enumerate(bbox) ] if line["label"] == "other": label = ["O"] * len(bbox) else: label = [f"I-{line['label'].upper()}"] * len(bbox) label[0] = f"B-{line['label'].upper()}" tokenized_inputs.update({"bbox": bbox, "labels": label}) if label[0] != "O": entity_id_to_index_map[line["id"]] = len(entities) entities.append( { "start": len(tokenized_doc["input_ids"]), "end": len(tokenized_doc["input_ids"]) + len(tokenized_inputs["input_ids"]), "label": line["label"].upper(), } ) for i in tokenized_doc: tokenized_doc[i] = tokenized_doc[i] + tokenized_inputs[i] relations = list(set(relations)) relations = [rel for rel in relations if rel[0] not in empty_entity and rel[1] not in empty_entity] kvrelations = [] for rel in relations: pair = [id2label[rel[0]], id2label[rel[1]]] if pair == ["question", "answer"]: kvrelations.append( {"head": entity_id_to_index_map[rel[0]], "tail": entity_id_to_index_map[rel[1]]} ) elif pair == ["answer", "question"]: kvrelations.append( {"head": entity_id_to_index_map[rel[1]], "tail": entity_id_to_index_map[rel[0]]} ) else: continue def get_relation_span(rel): bound = [] for entity_index in [rel["head"], rel["tail"]]: bound.append(entities[entity_index]["start"]) bound.append(entities[entity_index]["end"]) return min(bound), max(bound) relations = sorted( [ { "head": rel["head"], "tail": rel["tail"], "start_index": get_relation_span(rel)[0], "end_index": get_relation_span(rel)[1], } for rel in kvrelations ], key=lambda x: x["head"], ) chunk_size = 512 for chunk_id, index in enumerate(range(0, len(tokenized_doc["input_ids"]), chunk_size)): item = {} for k in tokenized_doc: item[k] = tokenized_doc[k][index : index + chunk_size] entities_in_this_span = [] global_to_local_map = {} for entity_id, entity in enumerate(entities): if ( index <= entity["start"] < index + chunk_size and index <= entity["end"] < index + chunk_size ): entity["start"] = entity["start"] - index entity["end"] = entity["end"] - index global_to_local_map[entity_id] = len(entities_in_this_span) entities_in_this_span.append(entity) relations_in_this_span = [] for relation in relations: if ( index <= relation["start_index"] < index + chunk_size and index <= relation["end_index"] < index + chunk_size ): relations_in_this_span.append( { "head": 
global_to_local_map[relation["head"]], "tail": global_to_local_map[relation["tail"]], "start_index": relation["start_index"] - index, "end_index": relation["end_index"] - index, } ) item.update( { "id": f"{doc['id']}_{chunk_id}", "image": image, "entities": entities_in_this_span, "relations": relations_in_this_span, } ) yield f"{doc['id']}_{chunk_id}", item
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/data/datasets/xfun.py
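The example scripts below load this builder by pointing load_dataset straight at the xfun.py file, selecting the language via the config name; this mirrors the call in run_xfun_ser.py and assumes the datasets version this repo targets (script-based builders), network access for the XFUN release, and the xlm-roberta-base tokenizer.

import os

from datasets import load_dataset

import layoutlmft.data.datasets.xfun

datasets = load_dataset(
    os.path.abspath(layoutlmft.data.datasets.xfun.__file__),
    "xfun.zh",
    additional_langs=None,
    keep_in_memory=True,
)
print(datasets["train"].column_names)
# ['id', 'input_ids', 'bbox', 'labels', 'image', 'entities', 'relations']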
# coding=utf-8 import json import os import datasets from layoutlmft.data.utils import load_image, normalize_bbox logger = datasets.logging.get_logger(__name__) _CITATION = """\ @article{Jaume2019FUNSDAD, title={FUNSD: A Dataset for Form Understanding in Noisy Scanned Documents}, author={Guillaume Jaume and H. K. Ekenel and J. Thiran}, journal={2019 International Conference on Document Analysis and Recognition Workshops (ICDARW)}, year={2019}, volume={2}, pages={1-6} } """ _DESCRIPTION = """\ https://guillaumejaume.github.io/FUNSD/ """ class FunsdConfig(datasets.BuilderConfig): """BuilderConfig for FUNSD""" def __init__(self, **kwargs): """BuilderConfig for FUNSD. Args: **kwargs: keyword arguments forwarded to super. """ super(FunsdConfig, self).__init__(**kwargs) class Funsd(datasets.GeneratorBasedBuilder): """Conll2003 dataset.""" BUILDER_CONFIGS = [ FunsdConfig(name="funsd", version=datasets.Version("1.0.0"), description="FUNSD dataset"), ] def _info(self): return datasets.DatasetInfo( description=_DESCRIPTION, features=datasets.Features( { "id": datasets.Value("string"), "tokens": datasets.Sequence(datasets.Value("string")), "bboxes": datasets.Sequence(datasets.Sequence(datasets.Value("int64"))), "ner_tags": datasets.Sequence( datasets.features.ClassLabel( names=["O", "B-HEADER", "I-HEADER", "B-QUESTION", "I-QUESTION", "B-ANSWER", "I-ANSWER"] ) ), "image": datasets.Array3D(shape=(3, 224, 224), dtype="uint8"), } ), supervised_keys=None, homepage="https://guillaumejaume.github.io/FUNSD/", citation=_CITATION, ) def _split_generators(self, dl_manager): """Returns SplitGenerators.""" downloaded_file = dl_manager.download_and_extract("https://guillaumejaume.github.io/FUNSD/dataset.zip") return [ datasets.SplitGenerator( name=datasets.Split.TRAIN, gen_kwargs={"filepath": f"{downloaded_file}/dataset/training_data/"} ), datasets.SplitGenerator( name=datasets.Split.TEST, gen_kwargs={"filepath": f"{downloaded_file}/dataset/testing_data/"} ), ] def _generate_examples(self, filepath): logger.info("⏳ Generating examples from = %s", filepath) ann_dir = os.path.join(filepath, "annotations") img_dir = os.path.join(filepath, "images") for guid, file in enumerate(sorted(os.listdir(ann_dir))): tokens = [] bboxes = [] ner_tags = [] file_path = os.path.join(ann_dir, file) with open(file_path, "r", encoding="utf8") as f: data = json.load(f) image_path = os.path.join(img_dir, file) image_path = image_path.replace("json", "png") image, size = load_image(image_path) for item in data["form"]: words, label = item["words"], item["label"] words = [w for w in words if w["text"].strip() != ""] if len(words) == 0: continue if label == "other": for w in words: tokens.append(w["text"]) ner_tags.append("O") bboxes.append(normalize_bbox(w["box"], size)) else: tokens.append(words[0]["text"]) ner_tags.append("B-" + label.upper()) bboxes.append(normalize_bbox(words[0]["box"], size)) for w in words[1:]: tokens.append(w["text"]) ner_tags.append("I-" + label.upper()) bboxes.append(normalize_bbox(w["box"], size)) yield guid, {"id": str(guid), "tokens": tokens, "bboxes": bboxes, "ner_tags": ner_tags, "image": image}
EXA-1-master
exa/models/unilm-master/layoutlmft/layoutlmft/data/datasets/funsd.py
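The _generate_examples loop above emits BIO tags per word: the first word of a labeled entity gets "B-<LABEL>", the remaining words "I-<LABEL>", and "other" entities get "O". The toy rendition below condenses that branching for a single hand-made entity.

item = {"label": "question", "words": [{"text": "Date"}, {"text": "of"}, {"text": "birth:"}]}

label = item["label"]
tokens = [w["text"] for w in item["words"]]
if label == "other":
    ner_tags = ["O"] * len(tokens)
else:
    ner_tags = ["B-" + label.upper()] + ["I-" + label.upper()] * (len(tokens) - 1)
print(list(zip(tokens, ner_tags)))
# [('Date', 'B-QUESTION'), ('of', 'I-QUESTION'), ('birth:', 'I-QUESTION')]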
#!/usr/bin/env python # coding=utf-8 import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.xfun import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.data.data_args import XFUNDataTrainingArguments from layoutlmft.models.model_args import ModelArguments from layoutlmft.trainers import XfunSerTrainer from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.5.0") logger = logging.getLogger(__name__) def main(): parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. 
set_seed(training_args.seed) datasets = load_dataset( os.path.abspath(layoutlmft.data.datasets.xfun.__file__), f"xfun.{data_args.lang}", additional_langs=data_args.additional_langs, keep_in_memory=True, ) if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["validation"].column_names features = datasets["validation"].features text_column_name = "input_ids" label_column_name = "labels" remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. 
Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = XfunSerTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) # Remove 
ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/layoutlmft/examples/run_xfun_ser.py
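The compute_metrics function above drops every position labeled -100 (special and padding tokens) before handing label strings to seqeval. The snippet below runs just that filtering step on dummy logits, using the 7-label list declared by the XFUN builder earlier in this section.

import numpy as np

label_list = ["O", "B-QUESTION", "B-ANSWER", "B-HEADER", "I-ANSWER", "I-QUESTION", "I-HEADER"]
predictions = np.full((1, 4, 7), 0.1)     # (batch=1, seq=4, num_labels=7) dummy logits
predictions[0, 1, 1] = 1.0                # predict B-QUESTION at position 1
predictions[0, 2, 5] = 1.0                # predict I-QUESTION at position 2
labels = np.array([[-100, 1, 5, -100]])   # special tokens carry -100

pred_ids = np.argmax(predictions, axis=2)
true_predictions = [
    [label_list[p] for (p, l) in zip(prediction, label) if l != -100]
    for prediction, label in zip(pred_ids, labels)
]
true_labels = [
    [label_list[l] for (p, l) in zip(prediction, label) if l != -100]
    for prediction, label in zip(pred_ids, labels)
]
print(true_predictions)  # [['B-QUESTION', 'I-QUESTION']]
print(true_labels)       # [['B-QUESTION', 'I-QUESTION']]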
#!/usr/bin/env python # coding=utf-8 import logging import os import sys from dataclasses import dataclass, field from typing import Optional import numpy as np from datasets import ClassLabel, load_dataset, load_metric import layoutlmft.data.datasets.funsd import transformers from layoutlmft.data import DataCollatorForKeyValueExtraction from layoutlmft.data.data_args import DataTrainingArguments from layoutlmft.models.model_args import ModelArguments from layoutlmft.trainers import FunsdTrainer as Trainer from transformers import ( AutoConfig, AutoModelForTokenClassification, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process from transformers.utils import check_min_version # Will error if the minimal version of Transformers is not installed. Remove at your own risks. check_min_version("4.5.0") logger = logging.getLogger(__name__) def main(): # See all possible arguments in layoutlmft/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. 
set_seed(training_args.seed) datasets = load_dataset(os.path.abspath(layoutlmft.data.datasets.funsd.__file__)) if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["validation"].column_names features = datasets["validation"].features text_column_name = "tokens" if "tokens" in column_names else column_names[0] label_column_name = ( f"{data_args.task_name}_tags" if f"{data_args.task_name}_tags" in column_names else column_names[1] ) remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForTokenClassification.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False # Tokenize all texts and align the labels with them. def tokenize_and_align_labels(examples): tokenized_inputs = tokenizer( examples[text_column_name], padding=padding, truncation=True, return_overflowing_tokens=True, # We use this argument because the texts in our dataset are lists of words (with a label for each word). 
is_split_into_words=True, ) labels = [] bboxes = [] images = [] for batch_index in range(len(tokenized_inputs["input_ids"])): word_ids = tokenized_inputs.word_ids(batch_index=batch_index) org_batch_index = tokenized_inputs["overflow_to_sample_mapping"][batch_index] label = examples[label_column_name][org_batch_index] bbox = examples["bboxes"][org_batch_index] image = examples["image"][org_batch_index] previous_word_idx = None label_ids = [] bbox_inputs = [] for word_idx in word_ids: # Special tokens have a word id that is None. We set the label to -100 so they are automatically # ignored in the loss function. if word_idx is None: label_ids.append(-100) bbox_inputs.append([0, 0, 0, 0]) # We set the label for the first token of each word. elif word_idx != previous_word_idx: label_ids.append(label_to_id[label[word_idx]]) bbox_inputs.append(bbox[word_idx]) # For the other tokens in a word, we set the label to either the current label or -100, depending on # the label_all_tokens flag. else: label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100) bbox_inputs.append(bbox[word_idx]) previous_word_idx = word_idx labels.append(label_ids) bboxes.append(bbox_inputs) images.append(image) tokenized_inputs["labels"] = labels tokenized_inputs["bbox"] = bboxes tokenized_inputs["image"] = images return tokenized_inputs if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) train_dataset = train_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_eval: if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) eval_dataset = eval_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) test_dataset = test_dataset.map( tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=not data_args.overwrite_cache, ) # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) # Metrics metric = load_metric("seqeval") def compute_metrics(p): predictions, labels = p predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] true_labels = [ [label_list[l] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] results = metric.compute(predictions=true_predictions, references=true_labels) if data_args.return_entity_level_metrics: # Unpack nested dictionaries final_results = {} for key, 
value in results.items(): if isinstance(value, dict): for n, v in value.items(): final_results[f"{key}_{n}"] = v else: final_results[key] = value return final_results else: return { "precision": results["overall_precision"], "recall": results["overall_recall"], "f1": results["overall_f1"], "accuracy": results["overall_accuracy"], } # Initialize our Trainer trainer = Trainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) # Predict if training_args.do_predict: logger.info("*** Predict ***") predictions, labels, metrics = trainer.predict(test_dataset) predictions = np.argmax(predictions, axis=2) # Remove ignored index (special tokens) true_predictions = [ [label_list[p] for (p, l) in zip(prediction, label) if l != -100] for prediction, label in zip(predictions, labels) ] trainer.log_metrics("test", metrics) trainer.save_metrics("test", metrics) # Save predictions output_test_predictions_file = os.path.join(training_args.output_dir, "test_predictions.txt") if trainer.is_world_process_zero(): with open(output_test_predictions_file, "w") as writer: for prediction in true_predictions: writer.write(" ".join(prediction) + "\n") def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/layoutlmft/examples/run_funsd.py
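The label-alignment rule inside tokenize_and_align_labels above, replayed on a hand-made word_ids sequence (None marks special tokens, repeated ids are sub-word pieces of the same word); the label values are placeholders.

label_all_tokens = False
label_to_id = {0: 0, 1: 1, 2: 2}          # identity mapping, as when labels are already ints
word_labels = [1, 0, 2]                   # per-word labels for a 3-word example
word_ids = [None, 0, 0, 1, 2, 2, None]    # what a fast tokenizer's word_ids() could return

previous_word_idx = None
label_ids = []
for word_idx in word_ids:
    if word_idx is None:
        label_ids.append(-100)            # special tokens are ignored by the loss
    elif word_idx != previous_word_idx:
        label_ids.append(label_to_id[word_labels[word_idx]])
    else:
        label_ids.append(label_to_id[word_labels[word_idx]] if label_all_tokens else -100)
    previous_word_idx = word_idx
print(label_ids)  # [-100, 1, -100, 0, 2, -100, -100]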
#!/usr/bin/env python # coding=utf-8 import logging import os import sys import numpy as np from datasets import ClassLabel, load_dataset import layoutlmft.data.datasets.xfun import transformers from layoutlmft import AutoModelForRelationExtraction from layoutlmft.data.data_args import XFUNDataTrainingArguments from layoutlmft.data.data_collator import DataCollatorForKeyValueExtraction from layoutlmft.evaluation import re_score from layoutlmft.models.model_args import ModelArguments from layoutlmft.trainers import XfunReTrainer from transformers import ( AutoConfig, AutoTokenizer, HfArgumentParser, PreTrainedTokenizerFast, TrainingArguments, set_seed, ) from transformers.trainer_utils import get_last_checkpoint, is_main_process logger = logging.getLogger(__name__) def main(): # See all possible arguments in src/transformers/training_args.py # or by passing the --help flag to this script. # We now keep distinct sets of args, for a cleaner separation of concerns. parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): # If we pass only one argument to the script and it's the path to a json file, # let's parse it to get our arguments. model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() # Detecting last checkpoint. last_checkpoint = None if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir: last_checkpoint = get_last_checkpoint(training_args.output_dir) if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0: raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. " "Use --overwrite_output_dir to overcome." ) elif last_checkpoint is not None: logger.info( f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change " "the `--output_dir` or add `--overwrite_output_dir` to train from scratch." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", handlers=[logging.StreamHandler(sys.stdout)], ) logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN) # Log on each process the small summary: logger.warning( f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}" + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}" ) # Set the verbosity to info of the Transformers logger (on main process only): if is_main_process(training_args.local_rank): transformers.utils.logging.set_verbosity_info() transformers.utils.logging.enable_default_handler() transformers.utils.logging.enable_explicit_format() logger.info(f"Training/evaluation parameters {training_args}") # Set seed before initializing model. 
set_seed(training_args.seed) datasets = load_dataset( os.path.abspath(layoutlmft.data.datasets.xfun.__file__), f"xfun.{data_args.lang}", additional_langs=data_args.additional_langs, keep_in_memory=True, ) if training_args.do_train: column_names = datasets["train"].column_names features = datasets["train"].features else: column_names = datasets["validation"].column_names features = datasets["validation"].features text_column_name = "input_ids" label_column_name = "labels" remove_columns = column_names # In the event the labels are not a `Sequence[ClassLabel]`, we will need to go through the dataset to get the # unique labels. def get_label_list(labels): unique_labels = set() for label in labels: unique_labels = unique_labels | set(label) label_list = list(unique_labels) label_list.sort() return label_list if isinstance(features[label_column_name].feature, ClassLabel): label_list = features[label_column_name].feature.names # No need to convert the labels since they are already ints. label_to_id = {i: i for i in range(len(label_list))} else: label_list = get_label_list(datasets["train"][label_column_name]) label_to_id = {l: i for i, l in enumerate(label_list)} num_labels = len(label_list) # Load pretrained model and tokenizer # # Distributed training: # The .from_pretrained methods guarantee that only one local process can concurrently # download model & vocab. config = AutoConfig.from_pretrained( model_args.config_name if model_args.config_name else model_args.model_name_or_path, num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) tokenizer = AutoTokenizer.from_pretrained( model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path, cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) model = AutoModelForRelationExtraction.from_pretrained( model_args.model_name_or_path, from_tf=bool(".ckpt" in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=True if model_args.use_auth_token else None, ) # Tokenizer check: this script requires a fast tokenizer. if not isinstance(tokenizer, PreTrainedTokenizerFast): raise ValueError( "This example script only works for models that have a fast tokenizer. 
Checkout the big table of models " "at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this " "requirement" ) # Preprocessing the dataset # Padding strategy padding = "max_length" if data_args.pad_to_max_length else False if training_args.do_train: if "train" not in datasets: raise ValueError("--do_train requires a train dataset") train_dataset = datasets["train"] if data_args.max_train_samples is not None: train_dataset = train_dataset.select(range(data_args.max_train_samples)) if training_args.do_eval: if "validation" not in datasets: raise ValueError("--do_eval requires a validation dataset") eval_dataset = datasets["validation"] if data_args.max_val_samples is not None: eval_dataset = eval_dataset.select(range(data_args.max_val_samples)) if training_args.do_predict: if "test" not in datasets: raise ValueError("--do_predict requires a test dataset") test_dataset = datasets["test"] if data_args.max_test_samples is not None: test_dataset = test_dataset.select(range(data_args.max_test_samples)) # Data collator data_collator = DataCollatorForKeyValueExtraction( tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, padding=padding, max_length=512, ) def compute_metrics(p): pred_relations, gt_relations = p score = re_score(pred_relations, gt_relations, mode="boundaries") return score # Initialize our Trainer trainer = XfunReTrainer( model=model, args=training_args, train_dataset=train_dataset if training_args.do_train else None, eval_dataset=eval_dataset if training_args.do_eval else None, tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics, ) # Training if training_args.do_train: checkpoint = last_checkpoint if last_checkpoint else None train_result = trainer.train(resume_from_checkpoint=checkpoint) metrics = train_result.metrics trainer.save_model() # Saves the tokenizer too for easy upload max_train_samples = ( data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset) ) metrics["train_samples"] = min(max_train_samples, len(train_dataset)) trainer.log_metrics("train", metrics) trainer.save_metrics("train", metrics) trainer.save_state() # Evaluation if training_args.do_eval: logger.info("*** Evaluate ***") metrics = trainer.evaluate() max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset) metrics["eval_samples"] = min(max_val_samples, len(eval_dataset)) trainer.log_metrics("eval", metrics) trainer.save_metrics("eval", metrics) def _mp_fn(index): # For xla_spawn (TPUs) main() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/layoutlmft/examples/run_xfun_re.py
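The script above can be driven either by command-line flags or by a single JSON file handed to HfArgumentParser.parse_json_file. A hedged sketch of such a file follows; the checkpoint name, paths, and hyperparameter values are placeholders, and the field names are assumed to match the dataclasses referenced in the script (model_name_or_path, lang, and standard TrainingArguments fields).

# Hedged sketch: write a JSON argument file and launch the script with it as the only argument.
import json

args = {
    "model_name_or_path": "microsoft/layoutxlm-base",  # assumed checkpoint name, not taken from the repo
    "output_dir": "/tmp/xfun_re",                      # placeholder output directory
    "lang": "zh",                                      # consumed as data_args.lang above
    "do_train": True,
    "do_eval": True,
    "overwrite_output_dir": True,
}

with open("xfun_re_args.json", "w") as f:
    json.dump(args, f, indent=2)

# Then: python run_xfun_re.py xfun_re_args.json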
import os import json import tqdm import numpy as np import torch import argparse from datasets import Dataset from typing import List, Dict from functools import partial from transformers import AutoModel, AutoTokenizer, PreTrainedTokenizerFast, BatchEncoding, DataCollatorWithPadding from transformers.modeling_outputs import BaseModelOutput from torch.utils.data import DataLoader from mteb import MTEB, AbsTaskRetrieval, DRESModel from utils import pool, logger, move_to_cuda parser = argparse.ArgumentParser(description='evaluation for BEIR benchmark') parser.add_argument('--model-name-or-path', default='bert-base-uncased', type=str, metavar='N', help='which model to use') parser.add_argument('--output-dir', default='tmp-outputs/', type=str, metavar='N', help='output directory') parser.add_argument('--doc-as-query', action='store_true', help='use query prefix for passages') parser.add_argument('--pool-type', default='avg', help='pool type') args = parser.parse_args() logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4))) assert args.pool_type in ['cls', 'avg'], 'pool_type should be cls or avg' assert args.output_dir, 'output_dir should be set' os.makedirs(args.output_dir, exist_ok=True) def _transform_func(tokenizer: PreTrainedTokenizerFast, examples: Dict[str, List]) -> BatchEncoding: return tokenizer(examples['contents'], max_length=512, padding=True, return_token_type_ids=False, truncation=True) class RetrievalModel(DRESModel): # Refer to the code of DRESModel for the methods to overwrite def __init__(self, **kwargs): self.encoder = AutoModel.from_pretrained(args.model_name_or_path) self.tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) self.gpu_count = torch.cuda.device_count() if self.gpu_count > 1: self.encoder = torch.nn.DataParallel(self.encoder) self.encoder.cuda() self.encoder.eval() def encode_queries(self, queries: List[str], **kwargs) -> np.ndarray: input_texts = ['query: {}'.format(q) for q in queries] return self._do_encode(input_texts) def encode_corpus(self, corpus: List[Dict[str, str]], **kwargs) -> np.ndarray: if args.doc_as_query: return self.encode_queries([d['text'] for d in corpus], **kwargs) input_texts = ['{} {}'.format(doc.get('title', ''), doc['text']).strip() for doc in corpus] input_texts = ['passage: {}'.format(t) for t in input_texts] return self._do_encode(input_texts) @torch.no_grad() def _do_encode(self, input_texts: List[str]) -> np.ndarray: dataset: Dataset = Dataset.from_dict({'contents': input_texts}) dataset.set_transform(partial(_transform_func, self.tokenizer)) data_collator = DataCollatorWithPadding(self.tokenizer, pad_to_multiple_of=8) data_loader = DataLoader( dataset, batch_size=128 * self.gpu_count, shuffle=False, drop_last=False, num_workers=4, collate_fn=data_collator, pin_memory=True) encoded_embeds = [] for batch_dict in tqdm.tqdm(data_loader, desc='encoding', mininterval=10): batch_dict = move_to_cuda(batch_dict) with torch.cuda.amp.autocast(): outputs: BaseModelOutput = self.encoder(**batch_dict) embeds = pool(outputs.last_hidden_state, batch_dict['attention_mask'], args.pool_type) encoded_embeds.append(embeds.cpu().numpy()) return np.concatenate(encoded_embeds, axis=0) def main(): assert AbsTaskRetrieval.is_dres_compatible(RetrievalModel) model = RetrievalModel() task_names = [t.description["name"] for t in MTEB(task_types=['Retrieval'], task_langs=['en']).tasks] logger.info('Tasks: {}'.format(task_names)) for task in task_names: logger.info('Processing task: {}'.format(task)) 
args.doc_as_query = task in ['QuoraRetrieval'] if task in ['MSMARCOv2']: logger.warning('Skip task: {}, since it has no test split'.format(task)) continue evaluation = MTEB(tasks=[task], task_langs=['en']) evaluation.run(model, eval_splits=["test" if task not in ['MSMARCO'] else 'dev'], output_folder=args.output_dir, overwrite_results=False) if __name__ == '__main__': main()
EXA-1-master
exa/models/unilm-master/e5/mteb_beir_eval.py
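A small sketch of the text formats encode_queries and encode_corpus expect above: queries are plain strings, corpus entries are dicts with an optional "title" and a required "text", and the "query: " / "passage: " prefixes are added before encoding. The sample strings are placeholders.

# Hedged sketch mirroring the prefixing done in encode_queries / encode_corpus above.
queries = ["what does average pooling do"]
corpus = [
    {"title": "Pooling", "text": "Average pooling turns token states into one sentence vector."},
    {"text": "Entries without a title are handled via dict.get."},
]

query_inputs = ['query: {}'.format(q) for q in queries]
passage_inputs = ['passage: {}'.format('{} {}'.format(d.get('title', ''), d['text']).strip()) for d in corpus]
print(query_inputs)
print(passage_inputs)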
import torch
import logging

from torch import Tensor
from typing import Mapping


def _setup_logger():
    log_format = logging.Formatter("[%(asctime)s %(levelname)s] %(message)s")
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)

    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_format)
    logger.handlers = [console_handler]

    return logger


logger = _setup_logger()


def move_to_cuda(sample):
    if len(sample) == 0:
        return {}

    def _move_to_cuda(maybe_tensor):
        if torch.is_tensor(maybe_tensor):
            return maybe_tensor.cuda(non_blocking=True)
        elif isinstance(maybe_tensor, dict):
            return {key: _move_to_cuda(value) for key, value in maybe_tensor.items()}
        elif isinstance(maybe_tensor, list):
            return [_move_to_cuda(x) for x in maybe_tensor]
        elif isinstance(maybe_tensor, tuple):
            return tuple([_move_to_cuda(x) for x in maybe_tensor])
        elif isinstance(maybe_tensor, Mapping):
            return type(maybe_tensor)({k: _move_to_cuda(v) for k, v in maybe_tensor.items()})
        else:
            return maybe_tensor

    return _move_to_cuda(sample)


def pool(last_hidden_states: Tensor, attention_mask: Tensor, pool_type: str) -> Tensor:
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)

    if pool_type == "avg":
        emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
    elif pool_type == "cls":
        emb = last_hidden[:, 0]
    else:
        raise ValueError(f"pool_type {pool_type} not supported")

    return emb
EXA-1-master
exa/models/unilm-master/e5/utils.py
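A minimal usage sketch for the pooling helper above, assuming the file is importable as `utils`; the tensors are synthetic placeholders.

# Hedged sketch: "avg" pooling ignores padded positions via the attention mask,
# while "cls" pooling just returns the first token's hidden state.
import torch
from utils import pool  # assumes the module above is on sys.path as utils.py

batch, seq_len, hidden = 2, 4, 8
last_hidden_states = torch.randn(batch, seq_len, hidden)
attention_mask = torch.tensor([[1, 1, 1, 0],
                               [1, 1, 0, 0]])  # trailing zeros mark padding

avg_emb = pool(last_hidden_states, attention_mask, pool_type="avg")
cls_emb = pool(last_hidden_states, attention_mask, pool_type="cls")
print(avg_emb.shape, cls_emb.shape)  # torch.Size([2, 8]) torch.Size([2, 8])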
import os import torch import torch.nn.functional as F import tqdm import json import numpy as np import argparse from functools import partial from torch.utils.data import DataLoader from datasets import Dataset from transformers import AutoModel, AutoTokenizer, DataCollatorWithPadding, PreTrainedTokenizerFast, BatchEncoding from transformers.modeling_outputs import BaseModelOutput from typing import List, Dict from mteb import MTEB from utils import logger, pool, move_to_cuda parser = argparse.ArgumentParser(description='evaluation for MTEB benchmark except its Retrieval category') parser.add_argument('--task-types', nargs='+', default=['sts'], help='task types to evaluate') parser.add_argument('--output-dir', default='', type=str, metavar='N', help='output directory') parser.add_argument('--model-name-or-path', default='tmp-outputs/', type=str, metavar='N', help='which model to use') parser.add_argument('--l2-normalize', action='store_true', help='whether to l2 normalize embeddings') parser.add_argument('--pool-type', default='avg', help='pool type') parser.add_argument('--prompt', default='query: ', help='prompt') args = parser.parse_args() logger.info('Args: {}'.format(json.dumps(args.__dict__, ensure_ascii=False, indent=4))) assert args.prompt in ['', 'query: ', 'passage: '] assert args.output_dir, 'output_dir should be specified' os.makedirs(args.output_dir, exist_ok=True) def _transform_func(tokenizer: PreTrainedTokenizerFast, examples: Dict[str, List]) -> BatchEncoding: if args.prompt: examples['input_texts'] = [args.prompt + t for t in examples['input_texts']] batch_dict = tokenizer(examples['input_texts'], max_length=512, padding=True, truncation=True) return batch_dict class DenseEncoder(torch.nn.Module): def __init__(self, **kwargs): super().__init__() self.encoder = AutoModel.from_pretrained(args.model_name_or_path) self.tokenizer = AutoTokenizer.from_pretrained(args.model_name_or_path) self.gpu_count = torch.cuda.device_count() self.encoder.eval() self.encoder.cuda() if self.gpu_count > 1: self.encoder = torch.nn.DataParallel(self.encoder) @torch.no_grad() def encode(self, sentences, **kwargs) -> np.ndarray: """ Returns a list of embeddings for the given sentences. 
Args: sentences (`List[str]`): List of sentences to encode batch_size (`int`): Batch size for the encoding Returns: `List[np.ndarray]` or `List[tensor]`: List of embeddings for the given sentences """ dataset: Dataset = Dataset.from_dict({'input_texts': sentences}) dataset.set_transform(partial(_transform_func, self.tokenizer)) data_collator = DataCollatorWithPadding(self.tokenizer, pad_to_multiple_of=8) data_loader = DataLoader( dataset, batch_size=128 * self.gpu_count, shuffle=False, drop_last=False, num_workers=2, collate_fn=data_collator, pin_memory=True) encoded_embeds = [] for batch_dict in tqdm.tqdm(data_loader, desc='encoding', mininterval=10, disable=len(sentences) < 128): batch_dict = move_to_cuda(batch_dict) with torch.cuda.amp.autocast(): outputs: BaseModelOutput = self.encoder(**batch_dict) embeds = pool(outputs.last_hidden_state, batch_dict['attention_mask'], args.pool_type) if args.l2_normalize: embeds = F.normalize(embeds, p=2, dim=-1) encoded_embeds.append(embeds.cpu().numpy()) return np.concatenate(encoded_embeds, axis=0) def main(): model = DenseEncoder() args.task_types = [t for t in args.task_types if t.strip()] evaluation = MTEB( task_types=args.task_types or None, task_langs=['en']) evaluation.run(model, eval_splits=["test"], output_folder=args.output_dir, overwrite_results=False) if __name__ == '__main__': main()
EXA-1-master
exa/models/unilm-master/e5/mteb_eval.py
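The core of DenseEncoder.encode above is prompt + tokenize + mask-aware pooling + optional L2 normalization. A hedged stand-alone sketch of that recipe, using bert-base-uncased as a placeholder checkpoint and the pool helper from the neighbouring utils.py.

# Hedged sketch of the embedding recipe used above; downloads a placeholder checkpoint.
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer
from utils import pool  # helper defined earlier in this repo

model_name = "bert-base-uncased"  # placeholder for --model-name-or-path
tokenizer = AutoTokenizer.from_pretrained(model_name)
encoder = AutoModel.from_pretrained(model_name)

sentences = ["query: what is average pooling?", "query: how are sentence embeddings built?"]
batch = tokenizer(sentences, max_length=512, padding=True, truncation=True, return_tensors="pt")

with torch.no_grad():
    outputs = encoder(**batch)

embeddings = pool(outputs.last_hidden_state, batch["attention_mask"], pool_type="avg")
embeddings = F.normalize(embeddings, p=2, dim=-1)  # the effect of --l2-normalize
print(embeddings.shape)  # (2, hidden_size)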
""" Simple check list from AllenNLP repo: https://github.com/allenai/allennlp/blob/master/setup.py To create the package for pypi. 1. Change the version in __init__.py and setup.py. 2. Commit these changes with the message: "Release: VERSION" 3. Add a tag in git to mark the release: "git tag VERSION -m'Adds tag VERSION for pypi' " Push the tag to git: git push --tags origin master 4. Build both the sources and the wheel. Do not change anything in setup.py between creating the wheel and the source distribution (obviously). For the wheel, run: "python setup.py bdist_wheel" in the top level allennlp directory. (this will build a wheel for the python version you use to build it - make sure you use python 3.x). For the sources, run: "python setup.py sdist" You should now have a /dist directory with both .whl and .tar.gz source versions of allennlp. 5. Check that everything looks correct by uploading the package to the pypi test server: twine upload dist/* -r pypitest (pypi suggest using twine as other methods upload files via plaintext.) Check that you can install it in a virtualenv by running: pip install -i https://testpypi.python.org/pypi allennlp 6. Upload the final version to actual pypi: twine upload dist/* -r pypi 7. Copy the release notes from RELEASE.md to the tag in github once everything is looking hunky-dory. """ from setuptools import find_packages, setup setup( name="pytorch_pretrained_bert", version="0.4.0", author="Thomas Wolf, Victor Sanh, Tim Rault, Google AI Language Team Authors", author_email="[email protected]", description="PyTorch version of Google AI BERT model with script to load Google pre-trained models", long_description="pytorch", long_description_content_type="text/markdown", keywords='BERT NLP deep learning google', license='Apache', url="https://github.com/huggingface/pytorch-pretrained-BERT", packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]), install_requires=['numpy', 'boto3', 'requests', 'tqdm'], entry_points={ 'console_scripts': [ "pytorch_pretrained_bert=pytorch_pretrained_bert.__main__:main" ] }, python_requires='>=3.5.0', tests_require=['pytest'], classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: Apache Software License', 'Programming Language :: Python :: 3', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], )
EXA-1-master
exa/models/unilm-master/unilm-v1/src/setup.py
import torch from torch.nn import DataParallel from torch.cuda._utils import _get_device_index from torch.nn.parallel._functions import Scatter from itertools import chain def scatter_imbalance(inputs, target_gpus, dim=0): r""" Slices tensors into approximately equal chunks and distributes them across given GPUs. Duplicates references to objects that are not tensors. """ def scatter_map(obj): if isinstance(obj, torch.Tensor): if (len(target_gpus) == 4) and (obj.size(dim) == 22): return Scatter.apply(target_gpus, (4, 6, 6, 6), dim, obj) if (len(target_gpus) == 4) and (obj.size(dim) == 60): return Scatter.apply(target_gpus, (12, 16, 16, 16), dim, obj) elif (len(target_gpus) == 4) and (obj.size(dim) == 144): return Scatter.apply(target_gpus, (24, 40, 40, 40), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 46): return Scatter.apply(target_gpus, (4, 6, 6, 6, 6, 6, 6, 6), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 62): return Scatter.apply(target_gpus, (6, 8, 8, 8, 8, 8, 8, 8), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 94): return Scatter.apply(target_gpus, (10, 12, 12, 12, 12, 12, 12, 12), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 110): return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 118): return Scatter.apply(target_gpus, (13, 15, 15, 15, 15, 15, 15, 15), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 126): return Scatter.apply(target_gpus, (14, 16, 16, 16, 16, 16, 16, 16), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 134): return Scatter.apply(target_gpus, (15, 17, 17, 17, 17, 17, 17, 17), dim, obj) elif (len(target_gpus) == 8) and (obj.size(dim) == 142): return Scatter.apply(target_gpus, (16, 18, 18, 18, 18, 18, 18, 18), dim, obj) elif (len(target_gpus) == 16) and (obj.size(dim) == 222): return Scatter.apply(target_gpus, (12, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14), dim, obj) return Scatter.apply(target_gpus, None, dim, obj) if isinstance(obj, tuple) and len(obj) > 0: return list(zip(*map(scatter_map, obj))) if isinstance(obj, list) and len(obj) > 0: return list(map(list, zip(*map(scatter_map, obj)))) if isinstance(obj, dict) and len(obj) > 0: return list(map(type(obj), zip(*map(scatter_map, obj.items())))) return [obj for targets in target_gpus] # After scatter_map is called, a scatter_map cell will exist. This cell # has a reference to the actual function scatter_map, which has references # to a closure that has a reference to the scatter_map cell (because the # fn is recursive). 
To avoid this reference cycle, we set the function to # None, clearing the cell try: return scatter_map(inputs) finally: scatter_map = None def scatter_kwargs_imbalance(inputs, kwargs, target_gpus, dim=0): r"""Scatter with support for kwargs dictionary""" inputs = scatter_imbalance(inputs, target_gpus, dim) if inputs else [] kwargs = scatter_imbalance(kwargs, target_gpus, dim) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) return inputs, kwargs class DataParallelImbalance(DataParallel): def __init__(self, module, device_ids=None, output_device=None, dim=0): super(DataParallelImbalance, self).__init__( module, device_ids, output_device, dim) if not torch.cuda.is_available(): self.module = module self.device_ids = [] return if device_ids is None: device_ids = list(range(torch.cuda.device_count())) if output_device is None: output_device = device_ids[0] if not all(t.is_cuda and t.device.index == device_ids[0] for t in chain(module.parameters(), module.buffers())): raise RuntimeError("module must have its parameters and buffers " "on device %d (device_ids[0])" % device_ids[0]) self.dim = dim self.module = module self.device_ids = list( map(lambda x: _get_device_index(x, True), device_ids)) self.output_device = _get_device_index(output_device, True) if len(self.device_ids) == 1: self.module.cuda(device_ids[0]) def forward(self, *inputs, **kwargs): if not self.device_ids: return self.module(*inputs, **kwargs) inputs, kwargs = self.scatter_imbalance( inputs, kwargs, self.device_ids) if len(self.device_ids) == 1: return self.module(*inputs[0], **kwargs[0]) replicas = self.replicate(self.module, self.device_ids[:len(inputs)]) outputs = self.parallel_apply(replicas, inputs, kwargs) return self.gather(outputs, self.output_device) def scatter_imbalance(self, inputs, kwargs, device_ids): return scatter_kwargs_imbalance(inputs, kwargs, device_ids, dim=self.dim)
EXA-1-master
exa/models/unilm-master/unilm-v1/src/nn/data_parallel.py
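A hedged usage sketch for DataParallelImbalance: it is a drop-in replacement for torch.nn.DataParallel, and the hand-tuned uneven splits only trigger for the specific (GPU count, batch size) pairs hard-coded in scatter_imbalance; everything else falls back to the default even scatter. This assumes an older PyTorch (the module imports torch.cuda._utils._get_device_index) and at least four GPUs.

# Hedged sketch; requires >= 4 CUDA devices and an older PyTorch where the imports above resolve.
import torch
from torch import nn
from data_parallel import DataParallelImbalance  # assumes the module above is on sys.path

if torch.cuda.is_available() and torch.cuda.device_count() >= 4:
    model = nn.Linear(16, 4).cuda()
    parallel_model = DataParallelImbalance(model, device_ids=list(range(4)))

    x = torch.randn(60, 16).cuda()  # a batch of 60 is split as (12, 16, 16, 16) across 4 GPUs
    out = parallel_model(x)
    print(out.shape)  # torch.Size([60, 4])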
EXA-1-master
exa/models/unilm-master/unilm-v1/src/nn/__init__.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math import torch from torch.optim import Optimizer from torch.optim.optimizer import required from torch.nn.utils import clip_grad_norm_ from collections import defaultdict from torch._six import container_abcs from copy import deepcopy from itertools import chain def warmup_cosine(x, warmup=0.002): if x < warmup: return x/warmup return 0.5 * (1.0 + torch.cos(math.pi * x)) def warmup_constant(x, warmup=0.002): if x < warmup: return x/warmup return 1.0 def warmup_linear(x, warmup=0.002): if x < warmup: return x/warmup return max((x-1.)/(warmup-1.), 0) SCHEDULES = { 'warmup_cosine': warmup_cosine, 'warmup_constant': warmup_constant, 'warmup_linear': warmup_linear, } class BertAdam(Optimizer): """Implements BERT version of Adam algorithm with weight decay fix. Params: lr: learning rate warmup: portion of t_total for the warmup, -1 means no warmup. Default: -1 t_total: total number of training steps for the learning rate schedule, -1 means constant learning rate. Default: -1 schedule: schedule to use for the warmup (see above). Default: 'warmup_linear' b1: Adams b1. Default: 0.9 b2: Adams b2. Default: 0.999 e: Adams epsilon. Default: 1e-6 weight_decay: Weight decay. Default: 0.01 max_grad_norm: Maximum norm for the gradients (-1 means no clipping). Default: 1.0 """ def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0): if lr is not required and lr < 0.0: raise ValueError( "Invalid learning rate: {} - should be >= 0.0".format(lr)) if schedule not in SCHEDULES: raise ValueError("Invalid schedule parameter: {}".format(schedule)) if not 0.0 <= warmup < 1.0 and not warmup == -1: raise ValueError( "Invalid warmup: {} - should be in [0.0, 1.0[ or -1".format(warmup)) if not 0.0 <= b1 < 1.0: raise ValueError( "Invalid b1 parameter: {} - should be in [0.0, 1.0[".format(b1)) if not 0.0 <= b2 < 1.0: raise ValueError( "Invalid b2 parameter: {} - should be in [0.0, 1.0[".format(b2)) if not e >= 0.0: raise ValueError( "Invalid epsilon value: {} - should be >= 0.0".format(e)) defaults = dict(lr=lr, schedule=schedule, warmup=warmup, t_total=t_total, b1=b1, b2=b2, e=e, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(BertAdam, self).__init__(params, defaults) def get_lr(self): lr = [] for group in self.param_groups: for p in group['params']: state = self.state[p] if len(state) == 0: return [0] if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct( state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] lr.append(lr_scheduled) return lr def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError( 'Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['next_m'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['next_v'] = torch.zeros_like(p.data) next_m, next_v = state['next_m'], state['next_v'] beta1, beta2 = group['b1'], group['b2'] # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm']) # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group['e']) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if group['weight_decay'] > 0.0: update += group['weight_decay'] * p.data if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct( state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] update_with_lr = lr_scheduled * update p.data.add_(-update_with_lr) state['step'] += 1 # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 # No bias correction # bias_correction1 = 1 - beta1 ** state['step'] # bias_correction2 = 1 - beta2 ** state['step'] return loss class BertAdamFineTune(BertAdam): def __init__(self, params, lr=required, warmup=-1, t_total=-1, schedule='warmup_linear', b1=0.9, b2=0.999, e=1e-6, weight_decay=0.01, max_grad_norm=1.0): self.init_param_group = [] super(BertAdamFineTune, self).__init__(params, lr, warmup, t_total, schedule, b1, b2, e, weight_decay, max_grad_norm) def save_init_param_group(self, param_groups, name_groups, missing_keys): self.init_param_group = [] for group, name in zip(param_groups, name_groups): if group['weight_decay'] > 0.0: init_p_list = [] for p, n in zip(group['params'], name): init_p = p.data.clone().detach() if any(mk in n for mk in missing_keys): print("[no finetuning weight decay]", n) # should use the original weight decay init_p.zero_() init_p_list.append(init_p) self.init_param_group.append(init_p_list) else: # placeholder self.init_param_group.append([]) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for i_group, group in enumerate(self.param_groups): for i_p, p in enumerate(group['params']): if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError( 'Adam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['next_m'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['next_v'] = torch.zeros_like(p.data) next_m, next_v = state['next_m'], state['next_v'] beta1, beta2 = group['b1'], group['b2'] # Add grad clipping if group['max_grad_norm'] > 0: clip_grad_norm_(p, group['max_grad_norm']) # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time next_m.mul_(beta1).add_(1 - beta1, grad) next_v.mul_(beta2).addcmul_(1 - beta2, grad, grad) update = next_m / (next_v.sqrt() + group['e']) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. if group['weight_decay'] > 0.0: if self.init_param_group: update += group['weight_decay'] * \ (2.0 * p.data - self.init_param_group[i_group][i_p]) else: update += group['weight_decay'] * p.data if group['t_total'] != -1: schedule_fct = SCHEDULES[group['schedule']] lr_scheduled = group['lr'] * schedule_fct( state['step']/group['t_total'], group['warmup']) else: lr_scheduled = group['lr'] update_with_lr = lr_scheduled * update p.data.add_(-update_with_lr) state['step'] += 1 # step_size = lr_scheduled * math.sqrt(bias_correction2) / bias_correction1 # No bias correction # bias_correction1 = 1 - beta1 ** state['step'] # bias_correction2 = 1 - beta2 ** state['step'] return loss def load_state_dict_subset_finetune(self, state_dict, num_load_group): r"""Loads the optimizer state. Arguments: state_dict (dict): optimizer state. Should be an object returned from a call to :meth:`state_dict`. """ # deepcopy, to be consistent with module API state_dict = deepcopy(state_dict) # Validate the state_dict groups = self.param_groups saved_groups = state_dict['param_groups'] if len(groups) < num_load_group or len(saved_groups) < num_load_group: raise ValueError("loaded state dict has a different number of " "parameter groups") param_lens = (len(g['params']) for g in groups[:num_load_group]) saved_lens = (len(g['params']) for g in saved_groups[:num_load_group]) if any(p_len != s_len for p_len, s_len in zip(param_lens, saved_lens)): raise ValueError("loaded state dict contains a parameter group " "that doesn't match the size of optimizer's group") # Update the state id_map = {old_id: p for old_id, p in zip(chain(*(g['params'] for g in saved_groups[:num_load_group])), chain(*(g['params'] for g in groups[:num_load_group])))} def cast(param, value): r"""Make a deep copy of value, casting all tensors to device of param.""" if isinstance(value, torch.Tensor): # Floating-point types are a bit special here. They are the only ones # that are assumed to always match the type of params. 
if param.is_floating_point(): value = value.to(param.dtype) value = value.to(param.device) return value elif isinstance(value, dict): return {k: cast(param, v) for k, v in value.items()} elif isinstance(value, container_abcs.Iterable): return type(value)(cast(param, v) for v in value) else: return value # Copy state assigned to params (and cast tensors to appropriate types). # State that is not assigned to params is copied as is (needed for # backward compatibility). state = defaultdict(dict) for k, v in state_dict['state'].items(): if k in id_map: param = id_map[k] state[param] = cast(param, v) else: state[k] = v # handle additional params for k, v in self.state: if k not in state: state[k] = v # do not change groups: {'weight_decay': 0.01, 'lr': 9.995e-06, 'schedule': 'warmup_linear', 'warmup': 0.1, 't_total': 400000, 'b1': 0.9, 'b2': 0.999, 'e': 1e-06, 'max_grad_norm': 1.0, 'params': [...]} # # Update parameter groups, setting their 'params' value # def update_group(group, new_group): # new_group['params'] = group['params'] # return new_group # param_groups = [ # update_group(g, ng) for g, ng in zip(groups[:num_load_group], saved_groups[:num_load_group])] # # handle additional params # param_groups.extend(groups[num_load_group:]) self.__setstate__({'state': state, 'param_groups': groups}) def find_state_dict_subset_finetune(org_state_dict, org_name_list, no_decay, param_optimizer): # only use the bert encoder and embeddings want_name_set = set() for n in org_name_list: if ('bert.encoder' in n) or ('bert.embeddings' in n): want_name_set.add(n) # original: name to pid, pid to name org_grouped_names = [[n for n in org_name_list if not any(nd in n for nd in no_decay)], [n for n in org_name_list if any(nd in n for nd in no_decay)]] org_n2id, org_id2n = {}, {} for ng, pg in zip(org_grouped_names, org_state_dict['param_groups']): for n, pid in zip(ng, pg['params']): org_n2id[n] = pid org_id2n[pid] = n # group by: whether pretrained; whether weight decay g_np_list = [ [(n, p) for n, p in param_optimizer if n in want_name_set and not any( nd in n for nd in no_decay)], [(n, p) for n, p in param_optimizer if n in want_name_set and any( nd in n for nd in no_decay)], [(n, p) for n, p in param_optimizer if n not in want_name_set and not any( nd in n for nd in no_decay)], [(n, p) for n, p in param_optimizer if n not in want_name_set and any( nd in n for nd in no_decay)], ] optimizer_grouped_parameters = [ {'params': [p for n, p in g_np_list[0]], 'weight_decay': 0.01}, {'params': [p for n, p in g_np_list[1]], 'weight_decay': 0.0}, {'params': [p for n, p in g_np_list[2]], 'weight_decay': 0.01}, {'params': [p for n, p in g_np_list[3]], 'weight_decay': 0.0} ] new_state_dict = {} # regroup the original state_dict new_state_dict['state'] = {pid: v for pid, v in org_state_dict['state'].items( ) if pid not in org_id2n or org_id2n[pid] in want_name_set} # reset step count to 0 for pid, st in new_state_dict['state'].items(): st['step'] = 0 def _filter_group(group, g_np_list, i, org_n2id): packed = {k: v for k, v in group.items() if k != 'params'} packed['params'] = [pid for pid in group['params'] if pid in org_id2n and org_id2n[pid] in want_name_set] assert len(g_np_list[i]) == len(packed['params']) # keep them the same order packed['params'] = [org_n2id[n] for n, p in g_np_list[i]] return packed new_state_dict['param_groups'] = [_filter_group( g, g_np_list, i, org_n2id) for i, g in enumerate(org_state_dict['param_groups'])] return new_state_dict, optimizer_grouped_parameters
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/optimization.py
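A hedged usage sketch for BertAdam with the linear warmup schedule, plus the multiplier warmup_linear produces at two points of training. It assumes an older PyTorch where the module's torch._six import still resolves.

# Hedged sketch; optimization.py imports torch._six, so this targets older PyTorch versions.
import torch
from optimization import BertAdam, warmup_linear  # assumes the module above is on sys.path

model = torch.nn.Linear(10, 2)
num_train_steps = 1000
optimizer = BertAdam(model.parameters(),
                     lr=5e-5,
                     warmup=0.1,               # ramp the lr over the first 10% of steps
                     t_total=num_train_steps,
                     schedule='warmup_linear',
                     weight_decay=0.01)

for _ in range(3):
    loss = model(torch.randn(4, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()
    optimizer.zero_grad()

# Schedule multiplier at a given fraction of training:
print(warmup_linear(0.05, warmup=0.1))  # 0.5, halfway through warmup
print(warmup_linear(0.55, warmup=0.1))  # 0.5, halfway through the linear decay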
__version__ = "0.4.0" from .tokenization import BertTokenizer, BasicTokenizer, WordpieceTokenizer from .modeling import (BertConfig, BertModel, BertForPreTraining, BertForMaskedLM, BertForNextSentencePrediction, BertForSequenceClassification, BertForMultipleChoice, BertForTokenClassification, BertForQuestionAnswering, BertForPreTrainingLossMask, BertPreTrainingPairRel, BertPreTrainingPairTransform) from .optimization import BertAdam, BertAdamFineTune from .optimization_fp16 import FP16_Optimizer_State from .file_utils import PYTORCH_PRETRAINED_BERT_CACHE
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/__init__.py
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import os import logging from .file_utils import cached_path logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, } VOCAB_NAME = 'vocab.txt' def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" # mapping unused tokens to special tokens extra_map = {} extra_map['[unused1]'] = '[X_SEP]' for i in range(10): extra_map['[unused{}]'.format(i+2)] = '[SEP_{}]'.format(i) extra_map['[unused12]'] = '[S2S_SEP]' extra_map['[unused13]'] = '[S2S_CLS]' extra_map['[unused14]'] = '[L2R_SEP]' extra_map['[unused15]'] = '[L2R_CLS]' extra_map['[unused16]'] = '[R2L_SEP]' extra_map['[unused17]'] = '[R2L_CLS]' extra_map['[unused18]'] = '[S2S_SOS]' vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r", encoding="utf-8") as reader: while True: token = reader.readline() if not token: break token = token.strip() if token in extra_map: token = extra_map[token] vocab[token] = index index += 1 return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a peice of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None, never_split=("[UNK]", "[SEP]", "[X_SEP]", "[PAD]", "[CLS]", "[MASK]")): if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.basic_tokenizer = BasicTokenizer( do_lower_case=do_lower_case, never_split=never_split) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: raise ValueError( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format( len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens @classmethod def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name] else: vocab_file = pretrained_model_name if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except FileNotFoundError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer class WhitespaceTokenizer(object): def tokenize(self, text): return whitespace_tokenize(text) class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True, never_split=("[UNK]", "[SEP]", "[PAD]", "[CLS]", "[MASK]")): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case self.never_split = never_split def tokenize(self, text): """Tokenizes a piece of text.""" text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. 
This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case and token not in self.never_split: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" if text in self.never_split: return [text] chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/tokenization.py
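A hedged round-trip sketch for the tokenizer above, assuming the pytorch_pretrained_bert package (and the dependencies its __init__ pulls in, including apex for the fp16 optimizer re-export) is installed; from_pretrained downloads the vocabulary on first use.

# Hedged sketch; vocabulary download and package installation are assumed.
from pytorch_pretrained_bert.tokenization import BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

tokens = tokenizer.tokenize("Unaffable weather today!")  # lower-cases, splits punctuation, then WordPiece
ids = tokenizer.convert_tokens_to_ids(tokens)
assert tokenizer.convert_ids_to_tokens(ids) == tokens

# load_vocab remaps reserved [unusedN] entries to seq2seq special tokens:
print('[X_SEP]' in tokenizer.vocab, '[S2S_SEP]' in tokenizer.vocab)  # True True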
# coding=utf-8 """PyTorch optimization for BERT model.""" from apex.optimizers import FP16_Optimizer class FP16_Optimizer_State(FP16_Optimizer): def __init__(self, init_optimizer, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True): super(FP16_Optimizer_State, self).__init__(init_optimizer, static_loss_scale, dynamic_loss_scale, dynamic_loss_args, verbose) def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict['optimizer_state_dict'] = self.optimizer.state_dict() state_dict['fp32_groups_flat'] = self.fp32_groups_flat return state_dict def load_state_dict(self, state_dict): """ Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ # I think it should actually be ok to reload the optimizer before the model. self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] self.cur_scale = state_dict['cur_scale'] self.cur_iter = state_dict['cur_iter'] if state_dict['dynamic_loss_scale']: self.last_overflow_iter = state_dict['last_overflow_iter'] self.scale_factor = state_dict['scale_factor'] self.scale_window = state_dict['scale_window'] self.optimizer.load_state_dict(state_dict['optimizer_state_dict']) # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 2. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']): current.data.copy_(saved.data)
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/optimization_fp16.py
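The docstrings above already describe the intended save/load order; a condensed, hedged sketch of that round trip follows, requiring NVIDIA apex and a CUDA device.

# Hedged sketch of the checkpoint round trip described in the docstrings above.
import torch
from optimization_fp16 import FP16_Optimizer_State  # assumes apex and this module are importable

model = torch.nn.Linear(8, 8).cuda().half()
inner = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer_State(inner, dynamic_loss_scale=True)

checkpoint = {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint, 'saved.pth')

# Restore the model first, then the optimizer, so the fp32 master copies line up.
checkpoint = torch.load('saved.pth')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])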
# coding=utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import torch
import torch.nn.functional as F
from torch.nn.modules.loss import _Loss


class LabelSmoothingLoss(_Loss):
    """
    With label smoothing,
    KL-divergence between q_{smoothed ground truth prob.}(w)
    and p_{prob. computed by model}(w) is minimized.
    """

    def __init__(self, label_smoothing=0, tgt_vocab_size=0, ignore_index=0, size_average=None,
                 reduce=None, reduction='mean'):
        assert 0.0 < label_smoothing <= 1.0
        self.ignore_index = ignore_index
        super(LabelSmoothingLoss, self).__init__(
            size_average=size_average, reduce=reduce, reduction=reduction)

        assert label_smoothing > 0
        assert tgt_vocab_size > 0

        smoothing_value = label_smoothing / (tgt_vocab_size - 2)
        one_hot = torch.full((tgt_vocab_size,), smoothing_value)
        one_hot[self.ignore_index] = 0
        self.register_buffer('one_hot', one_hot.unsqueeze(0))
        self.confidence = 1.0 - label_smoothing
        self.tgt_vocab_size = tgt_vocab_size

    def forward(self, output, target):
        """
        output (FloatTensor): batch_size * num_pos * n_classes
        target (LongTensor): batch_size * num_pos
        """
        assert self.tgt_vocab_size == output.size(2)
        batch_size, num_pos = target.size(0), target.size(1)
        output = output.view(-1, self.tgt_vocab_size)
        target = target.view(-1)

        model_prob = self.one_hot.repeat(target.size(0), 1)
        model_prob.scatter_(1, target.unsqueeze(1), self.confidence)
        model_prob.masked_fill_((target == self.ignore_index).unsqueeze(1), 0)

        return F.kl_div(output, model_prob, reduction='none').view(batch_size, num_pos, -1).sum(2)
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/loss.py
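A hedged usage sketch for LabelSmoothingLoss above. F.kl_div takes log-probabilities as its first argument, so the logits are log-softmaxed before the call; the forward returns one smoothed-KL value per target position.

# Hedged sketch with synthetic shapes: batch_size x num_pos x vocab_size log-probabilities.
import torch
import torch.nn.functional as F
from loss import LabelSmoothingLoss  # assumes the module above is on sys.path as loss.py

vocab_size, batch_size, num_pos = 10, 2, 3
criterion = LabelSmoothingLoss(label_smoothing=0.1, tgt_vocab_size=vocab_size, ignore_index=0)

logits = torch.randn(batch_size, num_pos, vocab_size)
log_probs = F.log_softmax(logits, dim=-1)
target = torch.randint(1, vocab_size, (batch_size, num_pos))  # 0 is the ignored padding id

loss = criterion(log_probs, target)
print(loss.shape)  # torch.Size([2, 3]), one value per target position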
""" Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import os import logging import shutil import tempfile import json from urllib.parse import urlparse from pathlib import Path from typing import Optional, Tuple, Union, IO, Callable, Set from hashlib import sha256 from functools import wraps from tqdm import tqdm import boto3 from botocore.exceptions import ClientError import requests logger = logging.getLogger(__name__) # pylint: disable=invalid-name PYTORCH_PRETRAINED_BERT_CACHE = Path(os.getenv('PYTORCH_PRETRAINED_BERT_CACHE', Path.home() / '.pytorch_pretrained_bert')) def url_to_filename(url: str, etag: str = None) -> str: """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. """ url_bytes = url.encode('utf-8') url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode('utf-8') etag_hash = sha256(etag_bytes) filename += '.' + etag_hash.hexdigest() return filename def filename_to_url(filename: str, cache_dir: Union[str, Path] = None) -> Tuple[str, str]: """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``FileNotFoundError`` if `filename` or its stored metadata do not exist. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise FileNotFoundError("file {} not found".format(cache_path)) meta_path = cache_path + '.json' if not os.path.exists(meta_path): raise FileNotFoundError("file {} not found".format(meta_path)) with open(meta_path) as meta_file: metadata = json.load(meta_file) url = metadata['url'] etag = metadata['etag'] return url, etag def cached_path(url_or_filename: Union[str, Path], cache_dir: Union[str, Path] = None) -> str: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) parsed = urlparse(url_or_filename) if parsed.scheme in ('http', 'https', 's3'): # URL, so get it from the cache (downloading if necessary) return get_from_cache(url_or_filename, cache_dir) elif os.path.exists(url_or_filename): # File, and it exists. return url_or_filename elif parsed.scheme == '': # File, but it doesn't exist. raise FileNotFoundError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) def split_s3_path(url: str) -> Tuple[str, str]: """Split a full s3 path into the bucket name and path.""" parsed = urlparse(url) if not parsed.netloc or not parsed.path: raise ValueError("bad s3 path {}".format(url)) bucket_name = parsed.netloc s3_path = parsed.path # Remove '/' at beginning of path. if s3_path.startswith("/"): s3_path = s3_path[1:] return bucket_name, s3_path def s3_request(func: Callable): """ Wrapper function for s3 requests in order to create more helpful error messages. 
""" @wraps(func) def wrapper(url: str, *args, **kwargs): try: return func(url, *args, **kwargs) except ClientError as exc: if int(exc.response["Error"]["Code"]) == 404: raise FileNotFoundError("file {} not found".format(url)) else: raise return wrapper @s3_request def s3_etag(url: str) -> Optional[str]: """Check ETag on S3 object.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_object = s3_resource.Object(bucket_name, s3_path) return s3_object.e_tag @s3_request def s3_get(url: str, temp_file: IO) -> None: """Pull a file directly from S3.""" s3_resource = boto3.resource("s3") bucket_name, s3_path = split_s3_path(url) s3_resource.Bucket(bucket_name).download_fileobj(s3_path, temp_file) def http_get(url: str, temp_file: IO) -> None: req = requests.get(url, stream=True) content_length = req.headers.get('Content-Length') total = int(content_length) if content_length is not None else None progress = tqdm(unit="B", total=total) for chunk in req.iter_content(chunk_size=1024): if chunk: # filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache(url: str, cache_dir: Union[str, Path] = None) -> str: """ Given a URL, look for the corresponding dataset in the local cache. If it's not there, download it. Then return the path to the cached file. """ if cache_dir is None: cache_dir = PYTORCH_PRETRAINED_BERT_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) # Get eTag to add to filename, if it exists. if url.startswith("s3://"): etag = s3_etag(url) else: response = requests.head(url, allow_redirects=True) if response.status_code != 200: raise IOError("HEAD request failed for url {} with status code {}" .format(url, response.status_code)) etag = response.headers.get("ETag") filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. with tempfile.NamedTemporaryFile() as temp_file: logger.info("%s not found in cache, downloading to %s", url, temp_file.name) # GET file object if url.startswith("s3://"): s3_get(url, temp_file) else: http_get(url, temp_file) # we are copying the file before closing it, so flush to avoid truncation temp_file.flush() # shutil.copyfileobj() starts at the current position, so go to the start temp_file.seek(0) logger.info("copying %s to cache at %s", temp_file.name, cache_path) with open(cache_path, 'wb') as cache_file: shutil.copyfileobj(temp_file, cache_file) logger.info("creating metadata file for %s", cache_path) meta = {'url': url, 'etag': etag} meta_path = cache_path + '.json' with open(meta_path, 'w') as meta_file: json.dump(meta, meta_file) logger.info("removing temp file %s", temp_file.name) return cache_path def read_set_from_file(filename: str) -> Set[str]: ''' Extract a de-duped collection (set) of text from a file. Expected file format is one item per line. ''' collection = set() with open(filename, 'r', encoding='utf-8') as file_: for line in file_: collection.add(line.rstrip()) return collection def get_file_extension(path: str, dot=True, lower: bool = True): ext = os.path.splitext(path)[1] ext = ext if dot else ext[1:] return ext.lower() if lower else ext
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/file_utils.py
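A minimal usage sketch of the cache helpers defined in file_utils.py above, not part of the original file: the module import path and the URL are illustrative assumptions (they depend on how the package is installed and on the archive actually existing).

import os
# Assumed module path, mirroring src/pytorch_pretrained_bert/file_utils.py above.
from pytorch_pretrained_bert.file_utils import cached_path, filename_to_url

# Placeholder URL: on a cache miss, cached_path() downloads the archive and returns the
# local path under PYTORCH_PRETRAINED_BERT_CACHE, named sha256(url) plus, when an ETag
# is available, '.' + sha256(etag).
local_path = cached_path("https://example.com/some-model.tar.gz")

# The '.json' sidecar written by get_from_cache() records the origin URL and ETag.
url, etag = filename_to_url(os.path.basename(local_path))
print(local_path, url, etag)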
# coding=utf-8 """PyTorch BERT model.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import copy import json import math import logging import tarfile import tempfile import shutil import numpy as np from scipy.stats import truncnorm import torch from torch import nn from torch.nn import CrossEntropyLoss, MSELoss import torch.nn.functional as F from .file_utils import cached_path from .loss import LabelSmoothingLoss logger = logging.getLogger(__name__) PRETRAINED_MODEL_ARCHIVE_MAP = { 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased.tar.gz", 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased.tar.gz", 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased.tar.gz", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased.tar.gz", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased.tar.gz", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased.tar.gz", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese.tar.gz", } CONFIG_NAME = 'bert_config.json' WEIGHTS_NAME = 'pytorch_model.bin' def gelu(x): """Implementation of the gelu activation function. For information: OpenAI GPT's gelu is slightly different (and gives slightly different results): 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3)))) """ return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0))) def swish(x): return x * torch.sigmoid(x) ACT2FN = {"gelu": gelu, "relu": torch.nn.functional.relu, "swish": swish} class BertConfig(object): """Configuration class to store the configuration of a `BertModel`. """ def __init__(self, vocab_size_or_config_json_file, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, relax_projection=0, new_pos_ids=False, initializer_range=0.02, task_idx=None, fp32_embedding=False, ffn_type=0, label_smoothing=None, num_qkv=0, seg_emb=False): """Constructs BertConfig. Args: vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in `BertModel`. hidden_size: Size of the encoder layers and the pooler layer. num_hidden_layers: Number of hidden layers in the Transformer encoder. num_attention_heads: Number of attention heads for each attention layer in the Transformer encoder. intermediate_size: The size of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. hidden_act: The non-linear activation function (function or string) in the encoder and pooler. If string, "gelu", "relu" and "swish" are supported. hidden_dropout_prob: The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. attention_probs_dropout_prob: The dropout ratio for the attention probabilities. max_position_embeddings: The maximum sequence length that this model might ever be used with. Typically set this to something large just in case (e.g., 512 or 1024 or 2048). type_vocab_size: The vocabulary size of the `token_type_ids` passed into `BertModel`. initializer_range: The sttdev of the truncated_normal_initializer for initializing all weight matrices. 
""" if isinstance(vocab_size_or_config_json_file, str): with open(vocab_size_or_config_json_file, "r", encoding='utf-8') as reader: json_config = json.loads(reader.read()) for key, value in json_config.items(): self.__dict__[key] = value elif isinstance(vocab_size_or_config_json_file, int): self.vocab_size = vocab_size_or_config_json_file self.hidden_size = hidden_size self.num_hidden_layers = num_hidden_layers self.num_attention_heads = num_attention_heads self.hidden_act = hidden_act self.intermediate_size = intermediate_size self.hidden_dropout_prob = hidden_dropout_prob self.attention_probs_dropout_prob = attention_probs_dropout_prob self.max_position_embeddings = max_position_embeddings self.type_vocab_size = type_vocab_size self.relax_projection = relax_projection self.new_pos_ids = new_pos_ids self.initializer_range = initializer_range self.task_idx = task_idx self.fp32_embedding = fp32_embedding self.ffn_type = ffn_type self.label_smoothing = label_smoothing self.num_qkv = num_qkv self.seg_emb = seg_emb else: raise ValueError("First argument must be either a vocabulary size (int)" "or the path to a pretrained model config file (str)") @classmethod def from_dict(cls, json_object): """Constructs a `BertConfig` from a Python dictionary of parameters.""" config = BertConfig(vocab_size_or_config_json_file=-1) for key, value in json_object.items(): config.__dict__[key] = value return config @classmethod def from_json_file(cls, json_file): """Constructs a `BertConfig` from a json file of parameters.""" with open(json_file, "r", encoding='utf-8') as reader: text = reader.read() return cls.from_dict(json.loads(text)) def __repr__(self): return str(self.to_json_string()) def to_dict(self): """Serializes this instance to a Python dictionary.""" output = copy.deepcopy(self.__dict__) return output def to_json_string(self): """Serializes this instance to a JSON string.""" return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n" try: from apex.normalization.fused_layer_norm import FusedLayerNorm as BertLayerNorm except ImportError: print("Better speed can be achieved with apex installed from https://www.github.com/nvidia/apex.") class BertLayerNorm(nn.Module): def __init__(self, hidden_size, eps=1e-5): """Construct a layernorm module in the TF style (epsilon inside the square root). """ super(BertLayerNorm, self).__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.bias = nn.Parameter(torch.zeros(hidden_size)) self.variance_epsilon = eps def forward(self, x): u = x.mean(-1, keepdim=True) s = (x - u).pow(2).mean(-1, keepdim=True) x = (x - u) / torch.sqrt(s + self.variance_epsilon) return self.weight * x + self.bias class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class BertEmbeddings(nn.Module): """Construct the embeddings from word, position and token_type embeddings. 
""" def __init__(self, config): super(BertEmbeddings, self).__init__() self.word_embeddings = nn.Embedding( config.vocab_size, config.hidden_size) self.token_type_embeddings = nn.Embedding( config.type_vocab_size, config.hidden_size) if hasattr(config, 'fp32_embedding'): self.fp32_embedding = config.fp32_embedding else: self.fp32_embedding = False if hasattr(config, 'new_pos_ids') and config.new_pos_ids: self.num_pos_emb = 4 else: self.num_pos_emb = 1 self.position_embeddings = nn.Embedding( config.max_position_embeddings, config.hidden_size*self.num_pos_emb) # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load # any TensorFlow checkpoint file self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, input_ids, token_type_ids=None, position_ids=None, task_idx=None): seq_length = input_ids.size(1) if position_ids is None: position_ids = torch.arange( seq_length, dtype=torch.long, device=input_ids.device) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) words_embeddings = self.word_embeddings(input_ids) position_embeddings = self.position_embeddings(position_ids) token_type_embeddings = self.token_type_embeddings(token_type_ids) if self.num_pos_emb > 1: num_batch = position_embeddings.size(0) num_pos = position_embeddings.size(1) position_embeddings = position_embeddings.view( num_batch, num_pos, self.num_pos_emb, -1)[torch.arange(0, num_batch).long(), :, task_idx, :] embeddings = words_embeddings + position_embeddings + token_type_embeddings if self.fp32_embedding: embeddings = embeddings.half() embeddings = self.LayerNorm(embeddings) embeddings = self.dropout(embeddings) return embeddings class BertSelfAttention(nn.Module): def __init__(self, config): super(BertSelfAttention, self).__init__() if config.hidden_size % config.num_attention_heads != 0: raise ValueError( "The hidden size (%d) is not a multiple of the number of attention " "heads (%d)" % (config.hidden_size, config.num_attention_heads)) self.num_attention_heads = config.num_attention_heads self.attention_head_size = int( config.hidden_size / config.num_attention_heads) self.all_head_size = self.num_attention_heads * self.attention_head_size if hasattr(config, 'num_qkv') and (config.num_qkv > 1): self.num_qkv = config.num_qkv else: self.num_qkv = 1 self.query = nn.Linear( config.hidden_size, self.all_head_size*self.num_qkv) self.key = nn.Linear(config.hidden_size, self.all_head_size*self.num_qkv) self.value = nn.Linear( config.hidden_size, self.all_head_size*self.num_qkv) self.dropout = nn.Dropout(config.attention_probs_dropout_prob) self.uni_debug_flag = True if os.getenv( 'UNI_DEBUG_FLAG', '') else False if self.uni_debug_flag: self.register_buffer('debug_attention_probs', torch.zeros((512, 512))) if hasattr(config, 'seg_emb') and config.seg_emb: self.b_q_s = nn.Parameter(torch.zeros( 1, self.num_attention_heads, 1, self.attention_head_size)) self.seg_emb = nn.Embedding( config.type_vocab_size, self.all_head_size) else: self.b_q_s = None self.seg_emb = None def transpose_for_scores(self, x, mask_qkv=None): if self.num_qkv > 1: sz = x.size()[:-1] + (self.num_qkv, self.num_attention_heads, self.all_head_size) # (batch, pos, num_qkv, head, head_hid) x = x.view(*sz) if mask_qkv is None: x = x[:, :, 0, :, :] elif isinstance(mask_qkv, int): x = x[:, :, mask_qkv, :, :] else: # mask_qkv: (batch, pos) if mask_qkv.size(1) > sz[1]: 
mask_qkv = mask_qkv[:, :sz[1]] # -> x: (batch, pos, head, head_hid) x = x.gather(2, mask_qkv.view(sz[0], sz[1], 1, 1, 1).expand( sz[0], sz[1], 1, sz[3], sz[4])).squeeze(2) else: sz = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size) # (batch, pos, head, head_hid) x = x.view(*sz) # (batch, head, pos, head_hid) return x.permute(0, 2, 1, 3) def forward(self, hidden_states, attention_mask, history_states=None, mask_qkv=None, seg_ids=None): if history_states is None: mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(hidden_states) mixed_value_layer = self.value(hidden_states) else: x_states = torch.cat((history_states, hidden_states), dim=1) mixed_query_layer = self.query(hidden_states) mixed_key_layer = self.key(x_states) mixed_value_layer = self.value(x_states) query_layer = self.transpose_for_scores(mixed_query_layer, mask_qkv) key_layer = self.transpose_for_scores(mixed_key_layer, mask_qkv) value_layer = self.transpose_for_scores(mixed_value_layer, mask_qkv) # Take the dot product between "query" and "key" to get the raw attention scores. # (batch, head, pos, pos) attention_scores = torch.matmul( query_layer / math.sqrt(self.attention_head_size), key_layer.transpose(-1, -2)) if self.seg_emb is not None: seg_rep = self.seg_emb(seg_ids) # (batch, pos, head, head_hid) seg_rep = seg_rep.view(seg_rep.size(0), seg_rep.size( 1), self.num_attention_heads, self.attention_head_size) qs = torch.einsum('bnih,bjnh->bnij', query_layer+self.b_q_s, seg_rep) attention_scores = attention_scores + qs # attention_scores = attention_scores / math.sqrt(self.attention_head_size) # Apply the attention mask is (precomputed for all layers in BertModel forward() function) attention_scores = attention_scores + attention_mask # Normalize the attention scores to probabilities. attention_probs = nn.Softmax(dim=-1)(attention_scores) if self.uni_debug_flag: _pos = attention_probs.size(-1) self.debug_attention_probs[:_pos, :_pos].copy_( attention_probs[0].mean(0).view(_pos, _pos)) # This is actually dropping out entire tokens to attend to, which might # seem a bit unusual, but is taken from the original Transformer paper. 
attention_probs = self.dropout(attention_probs) context_layer = torch.matmul(attention_probs, value_layer) context_layer = context_layer.permute(0, 2, 1, 3).contiguous() new_context_layer_shape = context_layer.size()[ :-2] + (self.all_head_size,) context_layer = context_layer.view(*new_context_layer_shape) return context_layer class BertSelfOutput(nn.Module): def __init__(self, config): super(BertSelfOutput, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class BertAttention(nn.Module): def __init__(self, config): super(BertAttention, self).__init__() self.self = BertSelfAttention(config) self.output = BertSelfOutput(config) def forward(self, input_tensor, attention_mask, history_states=None, mask_qkv=None, seg_ids=None): self_output = self.self( input_tensor, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids) attention_output = self.output(self_output, input_tensor) return attention_output class BertIntermediate(nn.Module): def __init__(self, config): super(BertIntermediate, self).__init__() self.dense = nn.Linear(config.hidden_size, config.intermediate_size) self.intermediate_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.intermediate_act_fn(hidden_states) return hidden_states class BertOutput(nn.Module): def __init__(self, config): super(BertOutput, self).__init__() self.dense = nn.Linear(config.intermediate_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, hidden_states, input_tensor): hidden_states = self.dense(hidden_states) hidden_states = self.dropout(hidden_states) hidden_states = self.LayerNorm(hidden_states + input_tensor) return hidden_states class TransformerFFN(nn.Module): def __init__(self, config): super(TransformerFFN, self).__init__() self.ffn_type = config.ffn_type assert self.ffn_type in (1, 2) if self.ffn_type in (1, 2): self.wx0 = nn.Linear(config.hidden_size, config.hidden_size) if self.ffn_type in (2,): self.wx1 = nn.Linear(config.hidden_size, config.hidden_size) if self.ffn_type in (1, 2): self.output = nn.Linear(config.hidden_size, config.hidden_size) self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) self.dropout = nn.Dropout(config.hidden_dropout_prob) def forward(self, x): if self.ffn_type in (1, 2): x0 = self.wx0(x) if self.ffn_type == 1: x1 = x elif self.ffn_type == 2: x1 = self.wx1(x) out = self.output(x0 * x1) out = self.dropout(out) out = self.LayerNorm(out + x) return out class BertLayer(nn.Module): def __init__(self, config): super(BertLayer, self).__init__() self.attention = BertAttention(config) self.ffn_type = config.ffn_type if self.ffn_type: self.ffn = TransformerFFN(config) else: self.intermediate = BertIntermediate(config) self.output = BertOutput(config) def forward(self, hidden_states, attention_mask, history_states=None, mask_qkv=None, seg_ids=None): attention_output = self.attention( hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids) if 
self.ffn_type: layer_output = self.ffn(attention_output) else: intermediate_output = self.intermediate(attention_output) layer_output = self.output(intermediate_output, attention_output) return layer_output class BertEncoder(nn.Module): def __init__(self, config): super(BertEncoder, self).__init__() layer = BertLayer(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.num_hidden_layers)]) def forward(self, hidden_states, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, seg_ids=None): # history embedding and encoded layer must be simultanously given assert (prev_embedding is None) == (prev_encoded_layers is None) all_encoder_layers = [] if (prev_embedding is not None) and (prev_encoded_layers is not None): history_states = prev_embedding for i, layer_module in enumerate(self.layer): hidden_states = layer_module( hidden_states, attention_mask, history_states=history_states, mask_qkv=mask_qkv, seg_ids=seg_ids) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if prev_encoded_layers is not None: history_states = prev_encoded_layers[i] else: for layer_module in self.layer: hidden_states = layer_module( hidden_states, attention_mask, mask_qkv=mask_qkv, seg_ids=seg_ids) if output_all_encoded_layers: all_encoder_layers.append(hidden_states) if not output_all_encoded_layers: all_encoder_layers.append(hidden_states) return all_encoder_layers class BertPooler(nn.Module): def __init__(self, config): super(BertPooler, self).__init__() self.dense = nn.Linear(config.hidden_size, config.hidden_size) self.activation = nn.Tanh() def forward(self, hidden_states): # We "pool" the model by simply taking the hidden state corresponding # to the first token. first_token_tensor = hidden_states[:, 0] pooled_output = self.dense(first_token_tensor) pooled_output = self.activation(pooled_output) return pooled_output class BertPredictionHeadTransform(nn.Module): def __init__(self, config): super(BertPredictionHeadTransform, self).__init__() self.transform_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act hid_size = config.hidden_size if hasattr(config, 'relax_projection') and (config.relax_projection > 1): hid_size *= config.relax_projection self.dense = nn.Linear(config.hidden_size, hid_size) self.LayerNorm = BertLayerNorm(hid_size, eps=1e-5) def forward(self, hidden_states): hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertLMPredictionHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertLMPredictionHead, self).__init__() self.transform = BertPredictionHeadTransform(config) # The output weights are the same as the input embeddings, but there is # an output-only bias for each token. 
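# Descriptive note (not in the original source): the tying below is done by pointing decoder.weight at the embedding matrix; because the Linear is created with bias=False, the per-token output bias is kept as a separate Parameter and applied in forward().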
self.decoder = nn.Linear(bert_model_embedding_weights.size(1), bert_model_embedding_weights.size(0), bias=False) self.decoder.weight = bert_model_embedding_weights self.bias = nn.Parameter(torch.zeros( bert_model_embedding_weights.size(0))) if hasattr(config, 'relax_projection') and (config.relax_projection > 1): self.relax_projection = config.relax_projection else: self.relax_projection = 0 self.fp32_embedding = config.fp32_embedding def convert_to_type(tensor): if self.fp32_embedding: return tensor.half() else: return tensor self.type_converter = convert_to_type self.converted = False def forward(self, hidden_states, task_idx=None): if not self.converted: self.converted = True if self.fp32_embedding: self.transform.half() hidden_states = self.transform(self.type_converter(hidden_states)) if self.relax_projection > 1: num_batch = hidden_states.size(0) num_pos = hidden_states.size(1) # (batch, num_pos, relax_projection*hid) -> (batch, num_pos, relax_projection, hid) -> (batch, num_pos, hid) hidden_states = hidden_states.view( num_batch, num_pos, self.relax_projection, -1)[torch.arange(0, num_batch).long(), :, task_idx, :] if self.fp32_embedding: hidden_states = F.linear(self.type_converter(hidden_states), self.type_converter( self.decoder.weight), self.type_converter(self.bias)) else: hidden_states = self.decoder(hidden_states) + self.bias return hidden_states class BertOnlyMLMHead(nn.Module): def __init__(self, config, bert_model_embedding_weights): super(BertOnlyMLMHead, self).__init__() self.predictions = BertLMPredictionHead( config, bert_model_embedding_weights) def forward(self, sequence_output): prediction_scores = self.predictions(sequence_output) return prediction_scores class BertOnlyNSPHead(nn.Module): def __init__(self, config): super(BertOnlyNSPHead, self).__init__() self.seq_relationship = nn.Linear(config.hidden_size, 2) def forward(self, pooled_output): seq_relationship_score = self.seq_relationship(pooled_output) return seq_relationship_score class BertPreTrainingHeads(nn.Module): def __init__(self, config, bert_model_embedding_weights, num_labels=2): super(BertPreTrainingHeads, self).__init__() self.predictions = BertLMPredictionHead( config, bert_model_embedding_weights) self.seq_relationship = nn.Linear(config.hidden_size, num_labels) def forward(self, sequence_output, pooled_output, task_idx=None): prediction_scores = self.predictions(sequence_output, task_idx) if pooled_output is None: seq_relationship_score = None else: seq_relationship_score = self.seq_relationship(pooled_output) return prediction_scores, seq_relationship_score class PreTrainedBertModel(nn.Module): """ An abstract class to handle weights initialization and a simple interface for dowloading and loading pretrained models. """ def __init__(self, config, *inputs, **kwargs): super(PreTrainedBertModel, self).__init__() if not isinstance(config, BertConfig): raise ValueError( "Parameter config in `{}(config)` should be an instance of class `BertConfig`. " "To create a model from a Google pretrained model use " "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format( self.__class__.__name__, self.__class__.__name__ )) self.config = config def init_bert_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, (nn.Linear, nn.Embedding)): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, BertLayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() @classmethod def from_pretrained(cls, pretrained_model_name, state_dict=None, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file or a pytorch state dict. Download and cache the pre-trained model file if needed. Params: pretrained_model_name: either: - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-base-multilingual` . `bert-base-chinese` - a path or url to a pretrained model archive containing: . `bert_config.json` a configuration file for the model . `pytorch_model.bin` a PyTorch dump of a BertForPreTraining instance cache_dir: an optional path to a folder in which the pre-trained models will be cached. state_dict: an optional state dictionnary (collections.OrderedDict object) to use instead of Google pre-trained models *inputs, **kwargs: additional input for the specific Bert class (ex: num_labels for BertForSequenceClassification) """ if pretrained_model_name in PRETRAINED_MODEL_ARCHIVE_MAP: archive_file = PRETRAINED_MODEL_ARCHIVE_MAP[pretrained_model_name] else: archive_file = pretrained_model_name # redirect to the cache, if necessary try: resolved_archive_file = cached_path( archive_file, cache_dir=cache_dir) except FileNotFoundError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name, ', '.join(PRETRAINED_MODEL_ARCHIVE_MAP.keys()), archive_file)) return None if resolved_archive_file == archive_file: logger.info("loading archive file {}".format(archive_file)) else: logger.info("loading archive file {} from cache at {}".format( archive_file, resolved_archive_file)) tempdir = None if os.path.isdir(resolved_archive_file): serialization_dir = resolved_archive_file else: # Extract archive to temp dir tempdir = tempfile.mkdtemp() logger.info("extracting archive file {} to temp dir {}".format( resolved_archive_file, tempdir)) with tarfile.open(resolved_archive_file, 'r:gz') as archive: archive.extractall(tempdir) serialization_dir = tempdir # Load config if ('config_path' in kwargs) and kwargs['config_path']: config_file = kwargs['config_path'] else: config_file = os.path.join(serialization_dir, CONFIG_NAME) config = BertConfig.from_json_file(config_file) # define new type_vocab_size (there might be different numbers of segment ids) if 'type_vocab_size' in kwargs: config.type_vocab_size = kwargs['type_vocab_size'] # define new relax_projection if ('relax_projection' in kwargs) and kwargs['relax_projection']: config.relax_projection = kwargs['relax_projection'] # new position embedding if ('new_pos_ids' in kwargs) and kwargs['new_pos_ids']: config.new_pos_ids = kwargs['new_pos_ids'] # define new relax_projection if ('task_idx' in kwargs) and kwargs['task_idx']: config.task_idx = kwargs['task_idx'] # define new max position embedding for length expansion if ('max_position_embeddings' in kwargs) and kwargs['max_position_embeddings']: config.max_position_embeddings = kwargs['max_position_embeddings'] # use fp32 for embeddings if ('fp32_embedding' in kwargs) and kwargs['fp32_embedding']: 
config.fp32_embedding = kwargs['fp32_embedding'] # type of FFN in transformer blocks if ('ffn_type' in kwargs) and kwargs['ffn_type']: config.ffn_type = kwargs['ffn_type'] # label smoothing if ('label_smoothing' in kwargs) and kwargs['label_smoothing']: config.label_smoothing = kwargs['label_smoothing'] # dropout if ('hidden_dropout_prob' in kwargs) and kwargs['hidden_dropout_prob']: config.hidden_dropout_prob = kwargs['hidden_dropout_prob'] if ('attention_probs_dropout_prob' in kwargs) and kwargs['attention_probs_dropout_prob']: config.attention_probs_dropout_prob = kwargs['attention_probs_dropout_prob'] # different QKV if ('num_qkv' in kwargs) and kwargs['num_qkv']: config.num_qkv = kwargs['num_qkv'] # segment embedding for self-attention if ('seg_emb' in kwargs) and kwargs['seg_emb']: config.seg_emb = kwargs['seg_emb'] # initialize word embeddings _word_emb_map = None if ('word_emb_map' in kwargs) and kwargs['word_emb_map']: _word_emb_map = kwargs['word_emb_map'] logger.info("Model config {}".format(config)) # clean the arguments in kwargs for arg_clean in ('config_path', 'type_vocab_size', 'relax_projection', 'new_pos_ids', 'task_idx', 'max_position_embeddings', 'fp32_embedding', 'ffn_type', 'label_smoothing', 'hidden_dropout_prob', 'attention_probs_dropout_prob', 'num_qkv', 'seg_emb', 'word_emb_map'): if arg_clean in kwargs: del kwargs[arg_clean] # Instantiate model. model = cls(config, *inputs, **kwargs) if state_dict is None: weights_path = os.path.join(serialization_dir, WEIGHTS_NAME) state_dict = torch.load(weights_path) old_keys = [] new_keys = [] for key in state_dict.keys(): new_key = None if 'gamma' in key: new_key = key.replace('gamma', 'weight') if 'beta' in key: new_key = key.replace('beta', 'bias') if new_key: old_keys.append(key) new_keys.append(new_key) for old_key, new_key in zip(old_keys, new_keys): state_dict[new_key] = state_dict.pop(old_key) # initialize new segment embeddings _k = 'bert.embeddings.token_type_embeddings.weight' if (_k in state_dict) and (config.type_vocab_size != state_dict[_k].shape[0]): logger.info("config.type_vocab_size != state_dict[bert.embeddings.token_type_embeddings.weight] ({0} != {1})".format( config.type_vocab_size, state_dict[_k].shape[0])) if config.type_vocab_size > state_dict[_k].shape[0]: # state_dict[_k].data = state_dict[_k].data.resize_(config.type_vocab_size, state_dict[_k].shape[1]) state_dict[_k].resize_( config.type_vocab_size, state_dict[_k].shape[1]) # L2R if config.type_vocab_size >= 3: state_dict[_k].data[2, :].copy_(state_dict[_k].data[0, :]) # R2L if config.type_vocab_size >= 4: state_dict[_k].data[3, :].copy_(state_dict[_k].data[0, :]) # S2S if config.type_vocab_size >= 6: state_dict[_k].data[4, :].copy_(state_dict[_k].data[0, :]) state_dict[_k].data[5, :].copy_(state_dict[_k].data[1, :]) if config.type_vocab_size >= 7: state_dict[_k].data[6, :].copy_(state_dict[_k].data[1, :]) elif config.type_vocab_size < state_dict[_k].shape[0]: state_dict[_k].data = state_dict[_k].data[:config.type_vocab_size, :] _k = 'bert.embeddings.position_embeddings.weight' n_config_pos_emb = 4 if config.new_pos_ids else 1 if (_k in state_dict) and (n_config_pos_emb*config.hidden_size != state_dict[_k].shape[1]): logger.info("n_config_pos_emb*config.hidden_size != state_dict[bert.embeddings.position_embeddings.weight] ({0}*{1} != {2})".format( n_config_pos_emb, config.hidden_size, state_dict[_k].shape[1])) assert state_dict[_k].shape[1] % config.hidden_size == 0 n_state_pos_emb = int(state_dict[_k].shape[1]/config.hidden_size) assert 
(n_state_pos_emb == 1) != (n_config_pos_emb == 1), "!!!!n_state_pos_emb == 1 xor n_config_pos_emb == 1!!!!" if n_state_pos_emb == 1: state_dict[_k].data = state_dict[_k].data.unsqueeze(1).repeat( 1, n_config_pos_emb, 1).reshape((config.max_position_embeddings, n_config_pos_emb*config.hidden_size)) elif n_config_pos_emb == 1: if hasattr(config, 'task_idx') and (config.task_idx is not None) and (0 <= config.task_idx <= 3): _task_idx = config.task_idx else: _task_idx = 0 state_dict[_k].data = state_dict[_k].data.view( config.max_position_embeddings, n_state_pos_emb, config.hidden_size).select(1, _task_idx) # initialize new position embeddings _k = 'bert.embeddings.position_embeddings.weight' if _k in state_dict and config.max_position_embeddings != state_dict[_k].shape[0]: logger.info("config.max_position_embeddings != state_dict[bert.embeddings.position_embeddings.weight] ({0} - {1})".format( config.max_position_embeddings, state_dict[_k].shape[0])) if config.max_position_embeddings > state_dict[_k].shape[0]: old_size = state_dict[_k].shape[0] # state_dict[_k].data = state_dict[_k].data.resize_(config.max_position_embeddings, state_dict[_k].shape[1]) state_dict[_k].resize_( config.max_position_embeddings, state_dict[_k].shape[1]) start = old_size while start < config.max_position_embeddings: chunk_size = min( old_size, config.max_position_embeddings - start) state_dict[_k].data[start:start+chunk_size, :].copy_(state_dict[_k].data[:chunk_size, :]) start += chunk_size elif config.max_position_embeddings < state_dict[_k].shape[0]: state_dict[_k].data = state_dict[_k].data[:config.max_position_embeddings, :] # initialize relax projection _k = 'cls.predictions.transform.dense.weight' n_config_relax = 1 if (config.relax_projection < 1) else config.relax_projection if (_k in state_dict) and (n_config_relax*config.hidden_size != state_dict[_k].shape[0]): logger.info("n_config_relax*config.hidden_size != state_dict[cls.predictions.transform.dense.weight] ({0}*{1} != {2})".format( n_config_relax, config.hidden_size, state_dict[_k].shape[0])) assert state_dict[_k].shape[0] % config.hidden_size == 0 n_state_relax = int(state_dict[_k].shape[0]/config.hidden_size) assert (n_state_relax == 1) != (n_config_relax == 1), "!!!!n_state_relax == 1 xor n_config_relax == 1!!!!" 
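# Descriptive note on the branch below (not in the original source): when the checkpoint stores a single prediction-head projection (n_state_relax == 1) but the config requests several (relax_projection > 1), the pretrained dense/LayerNorm weights and biases are tiled n_config_relax times; conversely, when collapsing back to a single projection, only the slice for config.task_idx (falling back to 0) is kept via .select().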
if n_state_relax == 1: _k = 'cls.predictions.transform.dense.weight' state_dict[_k].data = state_dict[_k].data.unsqueeze(0).repeat( n_config_relax, 1, 1).reshape((n_config_relax*config.hidden_size, config.hidden_size)) for _k in ('cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias'): state_dict[_k].data = state_dict[_k].data.unsqueeze( 0).repeat(n_config_relax, 1).view(-1) elif n_config_relax == 1: if hasattr(config, 'task_idx') and (config.task_idx is not None) and (0 <= config.task_idx <= 3): _task_idx = config.task_idx else: _task_idx = 0 _k = 'cls.predictions.transform.dense.weight' state_dict[_k].data = state_dict[_k].data.view( n_state_relax, config.hidden_size, config.hidden_size).select(0, _task_idx) for _k in ('cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias'): state_dict[_k].data = state_dict[_k].data.view( n_state_relax, config.hidden_size).select(0, _task_idx) # initialize QKV _all_head_size = config.num_attention_heads * \ int(config.hidden_size / config.num_attention_heads) n_config_num_qkv = 1 if (config.num_qkv < 1) else config.num_qkv for qkv_name in ('query', 'key', 'value'): _k = 'bert.encoder.layer.0.attention.self.{0}.weight'.format( qkv_name) if (_k in state_dict) and (n_config_num_qkv*_all_head_size != state_dict[_k].shape[0]): logger.info("n_config_num_qkv*_all_head_size != state_dict[_k] ({0}*{1} != {2})".format( n_config_num_qkv, _all_head_size, state_dict[_k].shape[0])) for layer_idx in range(config.num_hidden_layers): _k = 'bert.encoder.layer.{0}.attention.self.{1}.weight'.format( layer_idx, qkv_name) assert state_dict[_k].shape[0] % _all_head_size == 0 n_state_qkv = int(state_dict[_k].shape[0]/_all_head_size) assert (n_state_qkv == 1) != (n_config_num_qkv == 1), "!!!!n_state_qkv == 1 xor n_config_num_qkv == 1!!!!" 
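# Descriptive note on the per-layer handling below (not in the original source): when the checkpoint has one set of query/key/value weights (n_state_qkv == 1) but the config requests num_qkv copies, each layer's weight and bias are tiled n_config_num_qkv times; when the config wants a single set, the copy indexed by _qkv_idx (0 for task_idx 0, otherwise 1) is selected.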
if n_state_qkv == 1: _k = 'bert.encoder.layer.{0}.attention.self.{1}.weight'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.unsqueeze(0).repeat( n_config_num_qkv, 1, 1).reshape((n_config_num_qkv*_all_head_size, _all_head_size)) _k = 'bert.encoder.layer.{0}.attention.self.{1}.bias'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.unsqueeze( 0).repeat(n_config_num_qkv, 1).view(-1) elif n_config_num_qkv == 1: if hasattr(config, 'task_idx') and (config.task_idx is not None) and (0 <= config.task_idx <= 3): _task_idx = config.task_idx else: _task_idx = 0 assert _task_idx != 3, "[INVALID] _task_idx=3: n_config_num_qkv=1 (should be 2)" if _task_idx == 0: _qkv_idx = 0 else: _qkv_idx = 1 _k = 'bert.encoder.layer.{0}.attention.self.{1}.weight'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.view( n_state_qkv, _all_head_size, _all_head_size).select(0, _qkv_idx) _k = 'bert.encoder.layer.{0}.attention.self.{1}.bias'.format( layer_idx, qkv_name) state_dict[_k].data = state_dict[_k].data.view( n_state_qkv, _all_head_size).select(0, _qkv_idx) if _word_emb_map: _k = 'bert.embeddings.word_embeddings.weight' for _tgt, _src in _word_emb_map: state_dict[_k].data[_tgt, :].copy_( state_dict[_k].data[_src, :]) missing_keys = [] unexpected_keys = [] error_msgs = [] # copy state_dict so _load_from_state_dict can modify it metadata = getattr(state_dict, '_metadata', None) state_dict = state_dict.copy() if metadata is not None: state_dict._metadata = metadata def load(module, prefix=''): local_metadata = {} if metadata is None else metadata.get( prefix[:-1], {}) module._load_from_state_dict( state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs) for name, child in module._modules.items(): if child is not None: load(child, prefix + name + '.') load(model, prefix='' if hasattr(model, 'bert') else 'bert.') model.missing_keys = missing_keys if len(missing_keys) > 0: logger.info("Weights of {} not initialized from pretrained model: {}".format( model.__class__.__name__, missing_keys)) if len(unexpected_keys) > 0: logger.info("Weights from pretrained model not used in {}: {}".format( model.__class__.__name__, unexpected_keys)) if len(error_msgs) > 0: logger.info('\n'.join(error_msgs)) if tempdir: # Clean up temp dir shutil.rmtree(tempdir) return model class BertModel(PreTrainedBertModel): """BERT model ("Bidirectional Embedding Representations from a Transformer"). Params: config: a BertConfig class instance with the configuration to build a new model Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `output_all_encoded_layers`: boolean which controls the content of the `encoded_layers` output as described below. Default: `True`. 
Outputs: Tuple of (encoded_layers, pooled_output) `encoded_layers`: controled by `output_all_encoded_layers` argument: - `output_all_encoded_layers=True`: outputs a list of the full sequences of encoded-hidden-states at the end of each attention block (i.e. 12 full sequences for BERT-base, 24 for BERT-large), each encoded-hidden-state is a torch.FloatTensor of size [batch_size, sequence_length, hidden_size], - `output_all_encoded_layers=False`: outputs only the full sequence of hidden-states corresponding to the last attention block of shape [batch_size, sequence_length, hidden_size], `pooled_output`: a torch.FloatTensor of size [batch_size, hidden_size] which is the output of a classifier pretrained on top of the hidden state associated to the first character of the input (`CLF`) to train on the Next-Sentence task (see BERT's paper). ``` """ def __init__(self, config): super(BertModel, self).__init__(config) self.embeddings = BertEmbeddings(config) self.encoder = BertEncoder(config) self.pooler = BertPooler(config) self.apply(self.init_bert_weights) def rescale_some_parameters(self): for layer_id, layer in enumerate(self.encoder.layer): layer.attention.output.dense.weight.data.div_( math.sqrt(2.0*(layer_id + 1))) layer.output.dense.weight.data.div_(math.sqrt(2.0*(layer_id + 1))) def get_extended_attention_mask(self, input_ids, token_type_ids, attention_mask): if attention_mask is None: attention_mask = torch.ones_like(input_ids) if token_type_ids is None: token_type_ids = torch.zeros_like(input_ids) # We create a 3D attention mask from a 2D tensor mask. # Sizes are [batch_size, 1, 1, to_seq_length] # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] # this attention mask is more simple than the triangular masking of causal attention # used in OpenAI GPT, we just need to prepare the broadcast dimension here. if attention_mask.dim() == 2: extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2) elif attention_mask.dim() == 3: extended_attention_mask = attention_mask.unsqueeze(1) else: raise NotImplementedError # Since attention_mask is 1.0 for positions we want to attend and 0.0 for # masked positions, this operation will create a tensor which is 0.0 for # positions we want to attend and -10000.0 for masked positions. # Since we are adding it to the raw scores before the softmax, this is # effectively the same as removing these entirely. 
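# Illustrative example (not in the original source): a mask row [1, 1, 0] becomes additive scores [0.0, 0.0, -10000.0] after the (1.0 - mask) * -10000.0 transform applied below.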
extended_attention_mask = extended_attention_mask.to( dtype=next(self.parameters()).dtype) # fp16 compatibility extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0 return extended_attention_mask def forward(self, input_ids, token_type_ids=None, attention_mask=None, output_all_encoded_layers=True, mask_qkv=None, task_idx=None): extended_attention_mask = self.get_extended_attention_mask( input_ids, token_type_ids, attention_mask) embedding_output = self.embeddings( input_ids, token_type_ids, task_idx=task_idx) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, mask_qkv=mask_qkv, seg_ids=token_type_ids) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return encoded_layers, pooled_output class BertModelIncr(BertModel): def __init__(self, config): super(BertModelIncr, self).__init__(config) def forward(self, input_ids, token_type_ids, position_ids, attention_mask, output_all_encoded_layers=True, prev_embedding=None, prev_encoded_layers=None, mask_qkv=None, task_idx=None): extended_attention_mask = self.get_extended_attention_mask( input_ids, token_type_ids, attention_mask) embedding_output = self.embeddings( input_ids, token_type_ids, position_ids, task_idx=task_idx) encoded_layers = self.encoder(embedding_output, extended_attention_mask, output_all_encoded_layers=output_all_encoded_layers, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv, seg_ids=token_type_ids) sequence_output = encoded_layers[-1] pooled_output = self.pooler(sequence_output) if not output_all_encoded_layers: encoded_layers = encoded_layers[-1] return embedding_output, encoded_layers, pooled_output class BertForPreTraining(PreTrainedBertModel): """BERT model with pre-training heads. This module comprises the BERT model followed by the two pre-training heads: - the masked language modeling head, and - the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. 
Outputs: if `masked_lm_labels` and `next_sentence_label` are not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `masked_lm_labels` or `next_sentence_label` is `None`: Outputs a tuple comprising - the masked language modeling logits of shape [batch_size, sequence_length, vocab_size], and - the next sentence classification logits of shape [batch_size, 2]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForPreTraining(config) masked_lm_logits_scores, seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForPreTraining, self).__init__(config) self.bert = BertModel(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, mask_qkv=None, task_idx=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) prediction_scores, seq_relationship_score = self.cls( sequence_output, pooled_output) if masked_lm_labels is not None and next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) next_sentence_loss = loss_fct( seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) total_loss = masked_lm_loss + next_sentence_loss return total_loss else: return prediction_scores, seq_relationship_score class BertPreTrainingPairTransform(nn.Module): def __init__(self, config): super(BertPreTrainingPairTransform, self).__init__() self.dense = nn.Linear(config.hidden_size*2, config.hidden_size) self.transform_act_fn = ACT2FN[config.hidden_act] \ if isinstance(config.hidden_act, str) else config.hidden_act # self.LayerNorm = BertLayerNorm(config.hidden_size, eps=1e-5) def forward(self, pair_x, pair_y): hidden_states = torch.cat([pair_x, pair_y], dim=-1) hidden_states = self.dense(hidden_states) hidden_states = self.transform_act_fn(hidden_states) # hidden_states = self.LayerNorm(hidden_states) return hidden_states class BertPreTrainingPairRel(nn.Module): def __init__(self, config, num_rel=0): super(BertPreTrainingPairRel, self).__init__() self.R_xy = BertPreTrainingPairTransform(config) self.rel_emb = nn.Embedding(num_rel, config.hidden_size) def forward(self, pair_x, pair_y, pair_r, pair_pos_neg_mask): # (batch, num_pair, hidden) xy = self.R_xy(pair_x, pair_y) r = self.rel_emb(pair_r) _batch, _num_pair, _hidden = xy.size() pair_score = (xy * r).sum(-1) # torch.bmm(xy.view(-1, 1, _hidden),r.view(-1, _hidden, 1)).view(_batch, _num_pair) # .mul_(-1.0): objective to loss return F.logsigmoid(pair_score * pair_pos_neg_mask.type_as(pair_score)).mul_(-1.0) class BertForPreTrainingLossMask(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config, num_labels=2, num_rel=0, num_sentlvl_labels=0, no_nsp=False): super(BertForPreTrainingLossMask, self).__init__(config) self.bert = 
BertModel(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels) self.num_sentlvl_labels = num_sentlvl_labels self.cls2 = None if self.num_sentlvl_labels > 0: self.secondary_pred_proj = nn.Embedding( num_sentlvl_labels, config.hidden_size) self.cls2 = BertPreTrainingHeads( config, self.secondary_pred_proj.weight, num_labels=num_sentlvl_labels) self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none') if no_nsp: self.crit_next_sent = None else: self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1) self.num_labels = num_labels self.num_rel = num_rel if self.num_rel > 0: self.crit_pair_rel = BertPreTrainingPairRel( config, num_rel=num_rel) if hasattr(config, 'label_smoothing') and config.label_smoothing: self.crit_mask_lm_smoothed = LabelSmoothingLoss( config.label_smoothing, config.vocab_size, ignore_index=0, reduction='none') else: self.crit_mask_lm_smoothed = None self.apply(self.init_bert_weights) self.bert.rescale_some_parameters() def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, next_sentence_label=None, masked_pos=None, masked_weights=None, task_idx=None, pair_x=None, pair_x_mask=None, pair_y=None, pair_y_mask=None, pair_r=None, pair_pos_neg_mask=None, pair_loss_mask=None, masked_pos_2=None, masked_weights_2=None, masked_labels_2=None, num_tokens_a=None, num_tokens_b=None, mask_qkv=None): if token_type_ids is None and attention_mask is None: task_0 = (task_idx == 0) task_1 = (task_idx == 1) task_2 = (task_idx == 2) task_3 = (task_idx == 3) sequence_length = input_ids.shape[-1] index_matrix = torch.arange(sequence_length).view( 1, sequence_length).to(input_ids.device) num_tokens = num_tokens_a + num_tokens_b base_mask = (index_matrix < num_tokens.view(-1, 1) ).type_as(input_ids) segment_a_mask = ( index_matrix < num_tokens_a.view(-1, 1)).type_as(input_ids) token_type_ids = ( task_idx + 1 + task_3.type_as(task_idx)).view(-1, 1) * base_mask token_type_ids = token_type_ids - segment_a_mask * \ (task_0 | task_3).type_as(segment_a_mask).view(-1, 1) index_matrix = index_matrix.view(1, 1, sequence_length) index_matrix_t = index_matrix.view(1, sequence_length, 1) tril = index_matrix <= index_matrix_t attention_mask_task_0 = ( index_matrix < num_tokens.view(-1, 1, 1)) & (index_matrix_t < num_tokens.view(-1, 1, 1)) attention_mask_task_1 = tril & attention_mask_task_0 attention_mask_task_2 = torch.transpose( tril, dim0=-2, dim1=-1) & attention_mask_task_0 attention_mask_task_3 = ( (index_matrix < num_tokens_a.view(-1, 1, 1)) | tril) & attention_mask_task_0 attention_mask = (attention_mask_task_0 & task_0.view(-1, 1, 1)) | \ (attention_mask_task_1 & task_1.view(-1, 1, 1)) | \ (attention_mask_task_2 & task_2.view(-1, 1, 1)) | \ (attention_mask_task_3 & task_3.view(-1, 1, 1)) attention_mask = attention_mask.type_as(input_ids) sequence_output, pooled_output = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) def gather_seq_out_by_pos(seq, pos): return torch.gather(seq, 1, pos.unsqueeze(2).expand(-1, -1, seq.size(-1))) def gather_seq_out_by_pos_average(seq, pos, mask): # pos/mask: (batch, num_pair, max_token_num) batch_size, max_token_num = pos.size(0), pos.size(-1) # (batch, num_pair, max_token_num, seq.size(-1)) pos_vec = torch.gather(seq, 1, pos.view(batch_size, -1).unsqueeze( 2).expand(-1, -1, seq.size(-1))).view(batch_size, -1, max_token_num, seq.size(-1)) # (batch, num_pair, seq.size(-1)) mask = 
mask.type_as(pos_vec) pos_vec_masked_sum = ( pos_vec * mask.unsqueeze(3).expand_as(pos_vec)).sum(2) return pos_vec_masked_sum / mask.sum(2, keepdim=True).expand_as(pos_vec_masked_sum) def loss_mask_and_normalize(loss, mask): mask = mask.type_as(loss) loss = loss * mask denominator = torch.sum(mask) + 1e-5 return (loss / denominator).sum() if masked_lm_labels is None: if masked_pos is None: prediction_scores, seq_relationship_score = self.cls( sequence_output, pooled_output, task_idx=task_idx) else: sequence_output_masked = gather_seq_out_by_pos( sequence_output, masked_pos) prediction_scores, seq_relationship_score = self.cls( sequence_output_masked, pooled_output, task_idx=task_idx) return prediction_scores, seq_relationship_score # masked lm sequence_output_masked = gather_seq_out_by_pos( sequence_output, masked_pos) prediction_scores_masked, seq_relationship_score = self.cls( sequence_output_masked, pooled_output, task_idx=task_idx) if self.crit_mask_lm_smoothed: masked_lm_loss = self.crit_mask_lm_smoothed( F.log_softmax(prediction_scores_masked.float(), dim=-1), masked_lm_labels) else: masked_lm_loss = self.crit_mask_lm( prediction_scores_masked.transpose(1, 2).float(), masked_lm_labels) masked_lm_loss = loss_mask_and_normalize( masked_lm_loss.float(), masked_weights) # next sentence if self.crit_next_sent is None or next_sentence_label is None: next_sentence_loss = 0.0 else: next_sentence_loss = self.crit_next_sent( seq_relationship_score.view(-1, self.num_labels).float(), next_sentence_label.view(-1)) if self.cls2 is not None and masked_pos_2 is not None: sequence_output_masked_2 = gather_seq_out_by_pos( sequence_output, masked_pos_2) prediction_scores_masked_2, _ = self.cls2( sequence_output_masked_2, None) masked_lm_loss_2 = self.crit_mask_lm( prediction_scores_masked_2.transpose(1, 2).float(), masked_labels_2) masked_lm_loss_2 = loss_mask_and_normalize( masked_lm_loss_2.float(), masked_weights_2) masked_lm_loss = masked_lm_loss + masked_lm_loss_2 if pair_x is None or pair_y is None or pair_r is None or pair_pos_neg_mask is None or pair_loss_mask is None: return masked_lm_loss, next_sentence_loss # pair and relation if pair_x_mask is None or pair_y_mask is None: pair_x_output_masked = gather_seq_out_by_pos( sequence_output, pair_x) pair_y_output_masked = gather_seq_out_by_pos( sequence_output, pair_y) else: pair_x_output_masked = gather_seq_out_by_pos_average( sequence_output, pair_x, pair_x_mask) pair_y_output_masked = gather_seq_out_by_pos_average( sequence_output, pair_y, pair_y_mask) pair_loss = self.crit_pair_rel( pair_x_output_masked, pair_y_output_masked, pair_r, pair_pos_neg_mask) pair_loss = loss_mask_and_normalize( pair_loss.float(), pair_loss_mask) return masked_lm_loss, next_sentence_loss, pair_loss class BertForExtractiveSummarization(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config): super(BertForExtractiveSummarization, self).__init__(config) self.bert = BertModel(config) self.secondary_pred_proj = nn.Embedding(2, config.hidden_size) self.cls2 = BertPreTrainingHeads( config, self.secondary_pred_proj.weight, num_labels=2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_pos_2=None, masked_weights_2=None, task_idx=None, mask_qkv=None): sequence_output, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) def gather_seq_out_by_pos(seq, pos): return torch.gather(seq, 1, 
pos.unsqueeze(2).expand(-1, -1, seq.size(-1))) sequence_output_masked_2 = gather_seq_out_by_pos( sequence_output, masked_pos_2) prediction_scores_masked_2, _ = self.cls2( sequence_output_masked_2, None, task_idx=task_idx) predicted_probs = torch.nn.functional.softmax( prediction_scores_masked_2, dim=-1) return predicted_probs, masked_pos_2, masked_weights_2 class BertForSeq2SeqDecoder(PreTrainedBertModel): """refer to BertForPreTraining""" def __init__(self, config, mask_word_id=0, num_labels=2, num_rel=0, search_beam_size=1, length_penalty=1.0, eos_id=0, sos_id=0, forbid_duplicate_ngrams=False, forbid_ignore_set=None, not_predict_set=None, ngram_size=3, min_len=0, mode="s2s", pos_shift=False): super(BertForSeq2SeqDecoder, self).__init__(config) self.bert = BertModelIncr(config) self.cls = BertPreTrainingHeads( config, self.bert.embeddings.word_embeddings.weight, num_labels=num_labels) self.apply(self.init_bert_weights) self.crit_mask_lm = nn.CrossEntropyLoss(reduction='none') self.crit_next_sent = nn.CrossEntropyLoss(ignore_index=-1) self.mask_word_id = mask_word_id self.num_labels = num_labels self.num_rel = num_rel if self.num_rel > 0: self.crit_pair_rel = BertPreTrainingPairRel( config, num_rel=num_rel) self.search_beam_size = search_beam_size self.length_penalty = length_penalty self.eos_id = eos_id self.sos_id = sos_id self.forbid_duplicate_ngrams = forbid_duplicate_ngrams self.forbid_ignore_set = forbid_ignore_set self.not_predict_set = not_predict_set self.ngram_size = ngram_size self.min_len = min_len assert mode in ("s2s", "l2r") self.mode = mode self.pos_shift = pos_shift def forward(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None): if self.search_beam_size > 1: return self.beam_search(input_ids, token_type_ids, position_ids, attention_mask, task_idx=task_idx, mask_qkv=mask_qkv) input_shape = list(input_ids.size()) batch_size = input_shape[0] input_length = input_shape[1] output_shape = list(token_type_ids.size()) output_length = output_shape[1] output_ids = [] prev_embedding = None prev_encoded_layers = None curr_ids = input_ids mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id) next_pos = input_length if self.pos_shift: sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id) while next_pos < output_length: curr_length = list(curr_ids.size())[1] if self.pos_shift: if next_pos == input_length: x_input_ids = torch.cat((curr_ids, sos_ids), dim=1) start_pos = 0 else: x_input_ids = curr_ids start_pos = next_pos else: start_pos = next_pos - curr_length x_input_ids = torch.cat((curr_ids, mask_ids), dim=1) curr_token_type_ids = token_type_ids[:, start_pos:next_pos+1] curr_attention_mask = attention_mask[:, start_pos:next_pos+1, :next_pos+1] curr_position_ids = position_ids[:, start_pos:next_pos+1] new_embedding, new_encoded_layers, _ = \ self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask, output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv) last_hidden = new_encoded_layers[-1][:, -1:, :] prediction_scores, _ = self.cls( last_hidden, None, task_idx=task_idx) if self.not_predict_set: for token_id in self.not_predict_set: prediction_scores[:, :, token_id].fill_(-10000.0) _, max_ids = torch.max(prediction_scores, dim=-1) output_ids.append(max_ids) if self.pos_shift: if prev_embedding is None: prev_embedding = new_embedding else: prev_embedding = torch.cat( (prev_embedding, new_embedding), dim=1) if prev_encoded_layers is None: 
prev_encoded_layers = [x for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip( prev_encoded_layers, new_encoded_layers)] else: if prev_embedding is None: prev_embedding = new_embedding[:, :-1, :] else: prev_embedding = torch.cat( (prev_embedding, new_embedding[:, :-1, :]), dim=1) if prev_encoded_layers is None: prev_encoded_layers = [x[:, :-1, :] for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1) for x in zip(prev_encoded_layers, new_encoded_layers)] curr_ids = max_ids next_pos += 1 return torch.cat(output_ids, dim=1) def beam_search(self, input_ids, token_type_ids, position_ids, attention_mask, task_idx=None, mask_qkv=None): input_shape = list(input_ids.size()) batch_size = input_shape[0] input_length = input_shape[1] output_shape = list(token_type_ids.size()) output_length = output_shape[1] output_ids = [] prev_embedding = None prev_encoded_layers = None curr_ids = input_ids mask_ids = input_ids.new(batch_size, 1).fill_(self.mask_word_id) next_pos = input_length if self.pos_shift: sos_ids = input_ids.new(batch_size, 1).fill_(self.sos_id) K = self.search_beam_size total_scores = [] beam_masks = [] step_ids = [] step_back_ptrs = [] partial_seqs = [] forbid_word_mask = None buf_matrix = None while next_pos < output_length: curr_length = list(curr_ids.size())[1] if self.pos_shift: if next_pos == input_length: x_input_ids = torch.cat((curr_ids, sos_ids), dim=1) start_pos = 0 else: x_input_ids = curr_ids start_pos = next_pos else: start_pos = next_pos - curr_length x_input_ids = torch.cat((curr_ids, mask_ids), dim=1) curr_token_type_ids = token_type_ids[:, start_pos:next_pos + 1] curr_attention_mask = attention_mask[:, start_pos:next_pos + 1, :next_pos + 1] curr_position_ids = position_ids[:, start_pos:next_pos + 1] new_embedding, new_encoded_layers, _ = \ self.bert(x_input_ids, curr_token_type_ids, curr_position_ids, curr_attention_mask, output_all_encoded_layers=True, prev_embedding=prev_embedding, prev_encoded_layers=prev_encoded_layers, mask_qkv=mask_qkv) last_hidden = new_encoded_layers[-1][:, -1:, :] prediction_scores, _ = self.cls( last_hidden, None, task_idx=task_idx) log_scores = torch.nn.functional.log_softmax( prediction_scores, dim=-1) if forbid_word_mask is not None: log_scores += (forbid_word_mask * -10000.0) if self.min_len and (next_pos-input_length+1 <= self.min_len): log_scores[:, :, self.eos_id].fill_(-10000.0) if self.not_predict_set: for token_id in self.not_predict_set: log_scores[:, :, token_id].fill_(-10000.0) kk_scores, kk_ids = torch.topk(log_scores, k=K) if len(total_scores) == 0: k_ids = torch.reshape(kk_ids, [batch_size, K]) back_ptrs = torch.zeros(batch_size, K, dtype=torch.long) k_scores = torch.reshape(kk_scores, [batch_size, K]) else: last_eos = torch.reshape( beam_masks[-1], [batch_size * K, 1, 1]) last_seq_scores = torch.reshape( total_scores[-1], [batch_size * K, 1, 1]) kk_scores += last_eos * (-10000.0) + last_seq_scores kk_scores = torch.reshape(kk_scores, [batch_size, K * K]) k_scores, k_ids = torch.topk(kk_scores, k=K) back_ptrs = torch.div(k_ids, K) kk_ids = torch.reshape(kk_ids, [batch_size, K * K]) k_ids = torch.gather(kk_ids, 1, k_ids) step_back_ptrs.append(back_ptrs) step_ids.append(k_ids) beam_masks.append(torch.eq(k_ids, self.eos_id).float()) total_scores.append(k_scores) def first_expand(x): input_shape = list(x.size()) expanded_shape = input_shape[:1] + [1] + input_shape[1:] x = torch.reshape(x, expanded_shape) repeat_count = [1, K] + [1] * 
(len(input_shape) - 1) x = x.repeat(*repeat_count) x = torch.reshape(x, [input_shape[0] * K] + input_shape[1:]) return x def select_beam_items(x, ids): id_shape = list(ids.size()) id_rank = len(id_shape) assert len(id_shape) == 2 x_shape = list(x.size()) x = torch.reshape(x, [batch_size, K] + x_shape[1:]) x_rank = len(x_shape) + 1 assert x_rank >= 2 if id_rank < x_rank: ids = torch.reshape( ids, id_shape + [1] * (x_rank - id_rank)) ids = ids.expand(id_shape + x_shape[1:]) y = torch.gather(x, 1, ids) y = torch.reshape(y, x_shape) return y is_first = (prev_embedding is None) if self.pos_shift: if prev_embedding is None: prev_embedding = first_expand(new_embedding) else: prev_embedding = torch.cat( (prev_embedding, new_embedding), dim=1) prev_embedding = select_beam_items( prev_embedding, back_ptrs) if prev_encoded_layers is None: prev_encoded_layers = [first_expand( x) for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1]), dim=1) for x in zip( prev_encoded_layers, new_encoded_layers)] prev_encoded_layers = [select_beam_items( x, back_ptrs) for x in prev_encoded_layers] else: if prev_embedding is None: prev_embedding = first_expand(new_embedding[:, :-1, :]) else: prev_embedding = torch.cat( (prev_embedding, new_embedding[:, :-1, :]), dim=1) prev_embedding = select_beam_items( prev_embedding, back_ptrs) if prev_encoded_layers is None: prev_encoded_layers = [first_expand( x[:, :-1, :]) for x in new_encoded_layers] else: prev_encoded_layers = [torch.cat((x[0], x[1][:, :-1, :]), dim=1) for x in zip(prev_encoded_layers, new_encoded_layers)] prev_encoded_layers = [select_beam_items( x, back_ptrs) for x in prev_encoded_layers] curr_ids = torch.reshape(k_ids, [batch_size * K, 1]) if is_first: token_type_ids = first_expand(token_type_ids) position_ids = first_expand(position_ids) attention_mask = first_expand(attention_mask) mask_ids = first_expand(mask_ids) if mask_qkv is not None: mask_qkv = first_expand(mask_qkv) if self.forbid_duplicate_ngrams: wids = step_ids[-1].tolist() ptrs = step_back_ptrs[-1].tolist() if is_first: partial_seqs = [] for b in range(batch_size): for k in range(K): partial_seqs.append([wids[b][k]]) else: new_partial_seqs = [] for b in range(batch_size): for k in range(K): new_partial_seqs.append( partial_seqs[ptrs[b][k] + b * K] + [wids[b][k]]) partial_seqs = new_partial_seqs def get_dup_ngram_candidates(seq, n): cands = set() if len(seq) < n: return [] tail = seq[-(n-1):] if self.forbid_ignore_set and any(tk in self.forbid_ignore_set for tk in tail): return [] for i in range(len(seq) - (n - 1)): mismatch = False for j in range(n - 1): if tail[j] != seq[i + j]: mismatch = True break if (not mismatch) and not(self.forbid_ignore_set and (seq[i + n - 1] in self.forbid_ignore_set)): cands.add(seq[i + n - 1]) return list(sorted(cands)) if len(partial_seqs[0]) >= self.ngram_size: dup_cands = [] for seq in partial_seqs: dup_cands.append( get_dup_ngram_candidates(seq, self.ngram_size)) if max(len(x) for x in dup_cands) > 0: if buf_matrix is None: vocab_size = list(log_scores.size())[-1] buf_matrix = np.zeros( (batch_size * K, vocab_size), dtype=float) else: buf_matrix.fill(0) for bk, cands in enumerate(dup_cands): for i, wid in enumerate(cands): buf_matrix[bk, wid] = 1.0 forbid_word_mask = torch.tensor( buf_matrix, dtype=log_scores.dtype) forbid_word_mask = torch.reshape( forbid_word_mask, [batch_size * K, 1, vocab_size]).cuda() else: forbid_word_mask = None next_pos += 1 # [(batch, beam)] total_scores = [x.tolist() for x in total_scores] step_ids = 
[x.tolist() for x in step_ids] step_back_ptrs = [x.tolist() for x in step_back_ptrs] # back tracking traces = {'pred_seq': [], 'scores': [], 'wids': [], 'ptrs': []} for b in range(batch_size): # [(beam,)] scores = [x[b] for x in total_scores] wids_list = [x[b] for x in step_ids] ptrs = [x[b] for x in step_back_ptrs] traces['scores'].append(scores) traces['wids'].append(wids_list) traces['ptrs'].append(ptrs) # first we need to find the eos frame where all symbols are eos # any frames after the eos frame are invalid last_frame_id = len(scores) - 1 for i, wids in enumerate(wids_list): if all(wid == self.eos_id for wid in wids): last_frame_id = i break max_score = -math.inf frame_id = -1 pos_in_frame = -1 for fid in range(last_frame_id + 1): for i, wid in enumerate(wids_list[fid]): if wid == self.eos_id or fid == last_frame_id: s = scores[fid][i] if self.length_penalty > 0: s /= math.pow((5 + fid + 1) / 6.0, self.length_penalty) if s > max_score: max_score = s frame_id = fid pos_in_frame = i if frame_id == -1: traces['pred_seq'].append([0]) else: seq = [wids_list[frame_id][pos_in_frame]] for fid in range(frame_id, 0, -1): pos_in_frame = ptrs[fid][pos_in_frame] seq.append(wids_list[fid - 1][pos_in_frame]) seq.reverse() traces['pred_seq'].append(seq) def _pad_sequence(sequences, max_len, padding_value=0): trailing_dims = sequences[0].size()[1:] out_dims = (len(sequences), max_len) + trailing_dims out_tensor = sequences[0].data.new(*out_dims).fill_(padding_value) for i, tensor in enumerate(sequences): length = tensor.size(0) # use index notation to prevent duplicate references to the tensor out_tensor[i, :length, ...] = tensor return out_tensor # convert to tensors for DataParallel for k in ('pred_seq', 'scores', 'wids', 'ptrs'): ts_list = traces[k] if not isinstance(ts_list[0], torch.Tensor): dt = torch.float if k == 'scores' else torch.long ts_list = [torch.tensor(it, dtype=dt) for it in ts_list] traces[k] = _pad_sequence( ts_list, output_length, padding_value=0).to(input_ids.device) return traces class BertForMaskedLM(PreTrainedBertModel): """BERT model with the masked language modeling head. This module comprises the BERT model followed by the masked language modeling head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `masked_lm_labels`: masked language modeling labels: torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [-1, 0, ..., vocab_size]. All labels set to -1 are ignored (masked), the loss is only computed for the labels set in [0, ..., vocab_size] Outputs: if `masked_lm_labels` is `None`: Outputs the masked language modeling loss. 
if `masked_lm_labels` is `None`: Outputs the masked language modeling logits of shape [batch_size, sequence_length, vocab_size]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForMaskedLM(config) masked_lm_logits_scores = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForMaskedLM, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyMLMHead( config, self.bert.embeddings.word_embeddings.weight) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, masked_lm_labels=None, mask_qkv=None, task_idx=None): sequence_output, _ = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) prediction_scores = self.cls(sequence_output) if masked_lm_labels is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) masked_lm_loss = loss_fct( prediction_scores.view(-1, self.config.vocab_size), masked_lm_labels.view(-1)) return masked_lm_loss else: return prediction_scores class BertForNextSentencePrediction(PreTrainedBertModel): """BERT model with next sentence prediction head. This module comprises the BERT model followed by the next sentence classification head. Params: config: a BertConfig class instance with the configuration to build a new model. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `next_sentence_label`: next sentence classification loss: torch.LongTensor of shape [batch_size] with indices selected in [0, 1]. 0 => next sentence is the continuation, 1 => next sentence is a random sentence. Outputs: if `next_sentence_label` is not `None`: Outputs the total_loss which is the sum of the masked language modeling loss and the next sentence classification loss. if `next_sentence_label` is `None`: Outputs the next sentence classification logits of shape [batch_size, 2]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForNextSentencePrediction(config) seq_relationship_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForNextSentencePrediction, self).__init__(config) self.bert = BertModel(config) self.cls = BertOnlyNSPHead(config) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, next_sentence_label=None, mask_qkv=None, task_idx=None): _, pooled_output = self.bert(input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) seq_relationship_score = self.cls(pooled_output) if next_sentence_label is not None: loss_fct = CrossEntropyLoss(ignore_index=-1) next_sentence_loss = loss_fct( seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)) return next_sentence_loss else: return seq_relationship_score class BertForSequenceClassification(PreTrainedBertModel): """BERT model for classification. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForSequenceClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels=2): super(BertForSequenceClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, mask_qkv=None, task_idx=None): _, pooled_output = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) if labels is not None: if labels.dtype == torch.long: loss_fct = CrossEntropyLoss() loss = loss_fct( logits.view(-1, self.num_labels), labels.view(-1)) elif labels.dtype == torch.half or labels.dtype == torch.float: loss_fct = MSELoss() loss = loss_fct(logits.view(-1), labels.view(-1)) else: print('unkown labels.dtype') loss = None return loss else: return logits class BertForMultipleChoice(PreTrainedBertModel): """BERT model for multiple choice tasks. This module is composed of the BERT model with a linear layer on top of the pooled output. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_choices`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, num_choices, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_choices]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[[31, 51, 99], [15, 5, 0]], [[12, 16, 42], [14, 28, 57]]]) input_mask = torch.LongTensor([[[1, 1, 1], [1, 1, 0]],[[1,1,0], [1, 0, 0]]]) token_type_ids = torch.LongTensor([[[0, 0, 1], [0, 1, 0]],[[0, 1, 1], [0, 0, 1]]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_choices = 2 model = BertForMultipleChoice(config, num_choices) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_choices=2): super(BertForMultipleChoice, self).__init__(config) self.num_choices = num_choices self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, 1) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, mask_qkv=None, task_idx=None): flat_input_ids = input_ids.view(-1, input_ids.size(-1)) flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) _, pooled_output = self.bert( flat_input_ids, flat_token_type_ids, flat_attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) pooled_output = self.dropout(pooled_output) logits = self.classifier(pooled_output) reshaped_logits = logits.view(-1, self.num_choices) if labels is not None: loss_fct = CrossEntropyLoss() loss = loss_fct(reshaped_logits, labels) return loss else: return reshaped_logits class BertForTokenClassification(PreTrainedBertModel): """BERT model for token-level classification. This module is composed of the BERT model with a linear layer on top of the full hidden state of the last layer. Params: `config`: a BertConfig class instance with the configuration to build a new model. `num_labels`: the number of classes for the classifier. Default = 2. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `labels`: labels for the classification output: torch.LongTensor of shape [batch_size] with indices selected in [0, ..., num_labels]. Outputs: if `labels` is not `None`: Outputs the CrossEntropy classification loss of the output with the labels. if `labels` is `None`: Outputs the classification logits of shape [batch_size, sequence_length, num_labels]. 
Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) num_labels = 2 model = BertForTokenClassification(config, num_labels) logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config, num_labels=2): super(BertForTokenClassification, self).__init__(config) self.num_labels = num_labels self.bert = BertModel(config) self.dropout = nn.Dropout(config.hidden_dropout_prob) self.classifier = nn.Linear(config.hidden_size, num_labels) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, labels=None, mask_qkv=None, task_idx=None): sequence_output, _ = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, mask_qkv=mask_qkv, task_idx=task_idx) sequence_output = self.dropout(sequence_output) logits = self.classifier(sequence_output) if labels is not None: loss_fct = CrossEntropyLoss() # Only keep active parts of the loss if attention_mask is not None: active_loss = attention_mask.view(-1) == 1 active_logits = logits.view(-1, self.num_labels)[active_loss] active_labels = labels.view(-1)[active_loss] loss = loss_fct(active_logits, active_labels) else: loss = loss_fct( logits.view(-1, self.num_labels), labels.view(-1)) return loss else: return logits class BertForQuestionAnswering(PreTrainedBertModel): """BERT model for Question Answering (span extraction). This module is composed of the BERT model with a linear layer on top of the sequence output that computes start_logits and end_logits Params: `config`: either - a BertConfig class instance with the configuration to build a new model, or - a str with the name of a pre-trained model to load selected in the list of: . `bert-base-uncased` . `bert-large-uncased` . `bert-base-cased` . `bert-base-multilingual` . `bert-base-chinese` The pre-trained model will be downloaded and cached if needed. Inputs: `input_ids`: a torch.LongTensor of shape [batch_size, sequence_length] with the word token indices in the vocabulary(see the tokens preprocessing logic in the scripts `extract_features.py`, `run_classifier.py` and `run_squad.py`) `token_type_ids`: an optional torch.LongTensor of shape [batch_size, sequence_length] with the token types indices selected in [0, 1]. Type 0 corresponds to a `sentence A` and type 1 corresponds to a `sentence B` token (see BERT paper for more details). `attention_mask`: an optional torch.LongTensor of shape [batch_size, sequence_length] with indices selected in [0, 1]. It's a mask to be used if the input sequence length is smaller than the max input sequence length in the current batch. It's the mask that we typically use for attention when a batch has varying length sentences. `start_positions`: position of the first token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. `end_positions`: position of the last token for the labeled span: torch.LongTensor of shape [batch_size]. Positions are clamped to the length of the sequence and position outside of the sequence are not taken into account for computing the loss. 
Outputs: if `start_positions` and `end_positions` are not `None`: Outputs the total_loss which is the sum of the CrossEntropy loss for the start and end token positions. if `start_positions` or `end_positions` is `None`: Outputs a tuple of start_logits, end_logits which are the logits respectively for the start and end position tokens of shape [batch_size, sequence_length]. Example usage: ```python # Already been converted into WordPiece token ids input_ids = torch.LongTensor([[31, 51, 99], [15, 5, 0]]) input_mask = torch.LongTensor([[1, 1, 1], [1, 1, 0]]) token_type_ids = torch.LongTensor([[0, 0, 1], [0, 1, 0]]) config = BertConfig(vocab_size_or_config_json_file=32000, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072) model = BertForQuestionAnswering(config) start_logits, end_logits = model(input_ids, token_type_ids, input_mask) ``` """ def __init__(self, config): super(BertForQuestionAnswering, self).__init__(config) self.bert = BertModel(config) # self.dropout = nn.Dropout(config.hidden_dropout_prob) self.qa_outputs = nn.Linear(config.hidden_size, 2) self.apply(self.init_bert_weights) def forward(self, input_ids, token_type_ids=None, attention_mask=None, start_positions=None, end_positions=None, task_idx=None): sequence_output, _ = self.bert( input_ids, token_type_ids, attention_mask, output_all_encoded_layers=False, task_idx=task_idx) logits = self.qa_outputs(sequence_output) start_logits, end_logits = logits.split(1, dim=-1) start_logits = start_logits.squeeze(-1) end_logits = end_logits.squeeze(-1) if start_positions is not None and end_positions is not None: # If we are on multi-GPU, split add a dimension if len(start_positions.size()) > 1: start_positions = start_positions.squeeze(-1) if len(end_positions.size()) > 1: end_positions = end_positions.squeeze(-1) # sometimes the start/end positions are outside our model inputs, we ignore these terms ignored_index = start_logits.size(1) start_positions.clamp_(0, ignored_index) end_positions.clamp_(0, ignored_index) loss_fct = CrossEntropyLoss(ignore_index=ignored_index) start_loss = loss_fct(start_logits, start_positions) end_loss = loss_fct(end_logits, end_positions) total_loss = (start_loss + end_loss) / 2 return total_loss else: return start_logits, end_logits
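# --- Illustrative sketch (added for this write-up; not part of the original
# modeling.py). A hypothetical helper showing how the start/end logits
# returned by BertForQuestionAnswering can be greedily decoded into a single
# answer span. The max_answer_len cutoff and the single-example batch
# indexing are assumptions made for the example only.
import torch


def decode_best_span(start_logits, end_logits, max_answer_len=30):
    """Return (start, end) maximizing start_logits[0, s] + end_logits[0, e] with e >= s."""
    seq_len = start_logits.size(-1)
    best_start, best_end, best_score = 0, 0, float("-inf")
    for s in range(seq_len):
        for e in range(s, min(s + max_answer_len, seq_len)):
            score = start_logits[0, s].item() + end_logits[0, e].item()
            if score > best_score:
                best_start, best_end, best_score = s, e, score
    return best_start, best_end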
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/modeling.py
# coding: utf8
def main():
    import sys
    try:
        from .convert_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
    except ModuleNotFoundError:
        print("pytorch_pretrained_bert can only be used from the commandline to convert TensorFlow models to PyTorch. "
              "In that case, it requires TensorFlow to be installed. Please see "
              "https://www.tensorflow.org/install/ for installation instructions.")
        raise

    if len(sys.argv) != 5:
        # pylint: disable=line-too-long
        print("Should be used as `pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch TF_CHECKPOINT TF_CONFIG PYTORCH_DUMP_OUTPUT`")
    else:
        PYTORCH_DUMP_OUTPUT = sys.argv.pop()
        TF_CONFIG = sys.argv.pop()
        TF_CHECKPOINT = sys.argv.pop()
        convert_tf_checkpoint_to_pytorch(TF_CHECKPOINT, TF_CONFIG, PYTORCH_DUMP_OUTPUT)


if __name__ == '__main__':
    main()
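# Example invocation (added for illustration; the checkpoint, config, and
# output paths below are placeholders, not files shipped with the repo):
#
#   python -m pytorch_pretrained_bert convert_tf_checkpoint_to_pytorch \
#       /path/to/bert_model.ckpt /path/to/bert_config.json /path/to/pytorch_model.bin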
EXA-1-master
exa/models/unilm-master/unilm-v1/src/pytorch_pretrained_bert/__main__.py
#!/usr/bin/env python from __future__ import print_function __author__ = 'xinya' from bleu.bleu import Bleu from meteor.meteor import Meteor from rouge.rouge import Rouge from cider.cider import Cider from collections import defaultdict from argparse import ArgumentParser import string import sys reload(sys) sys.setdefaultencoding('utf-8') _tok_dict = {"(": "-lrb-", ")": "-rrb-", "[": "-lsb-", "]": "-rsb-", "{": "-lcb-", "}": "-rcb-", "[UNK]": "UNK", '&': '&amp;', '<': '&lt;', '>': '&gt;'} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) class QGEvalCap: def __init__(self, gts, res): self.gts = gts self.res = res def evaluate(self): output = [] scorers = [ (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]), (Meteor(), "METEOR"), (Rouge(), "ROUGE_L"), # (Cider(), "CIDEr") ] # ================================================= # Compute scores # ================================================= for scorer, method in scorers: # print 'computing %s score...'%(scorer.method()) score, scores = scorer.compute_score(self.gts, self.res) if type(method) == list: for sc, scs, m in zip(score, scores, method): print("%s: %0.5f" % (m, sc)) output.append(sc) else: print("%s: %0.5f" % (method, score)) output.append(score) return output def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500): """ Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt """ pairs = [] with open(src_file, 'r') as infile: for line in infile: pair = {} pair['tokenized_sentence'] = line[:-1].strip().lower() pairs.append(pair) with open(tgt_file, "r") as infile: cnt = 0 for line in infile: pairs[cnt]['tokenized_question'] = " ".join( detokenize(line[:-1].strip().split())).lower() cnt += 1 output = [] with open(out_file, 'r') as infile: for line in infile: line = line[:-1].strip().lower() output.append(line) for idx, pair in enumerate(pairs): pair['prediction'] = output[idx] # eval from eval import QGEvalCap import json from json import encoder encoder.FLOAT_REPR = lambda o: format(o, '.4f') res = defaultdict(lambda: []) gts = defaultdict(lambda: []) for pair in pairs[:]: key = pair['tokenized_sentence'] res[key] = [pair['prediction'].encode('utf-8')] # gts gts[key].append(pair['tokenized_question'].encode('utf-8')) QGEval = QGEvalCap(gts, res) return QGEval.evaluate() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("-out", "--out_file", dest="out_file", default="./output/pred.txt", help="output file to compare") parser.add_argument("-src", "--src_file", dest="src_file", default="./qg_data/test/test.pa.txt", help="src file") parser.add_argument("-tgt", "--tgt_file", dest="tgt_file", default="./qg_data/test/test.q.tok.txt", help="target file") args = parser.parse_args() print("scores: \n") eval(args.out_file, args.src_file, args.tgt_file)
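# Illustrative note (added; not part of the original script): detokenize()
# merges WordPiece continuation pieces back into whole words, e.g.
#
#   detokenize(["what", "is", "tok", "##eni", "##zation", "?"])
#   -> ["what", "is", "tokenization", "?"]
#
# fix_tokenization() then applies PTB-style normalization to the joined
# string (quote pairing into ``/'', "n't" splitting, "U . N ." -> "U.N.",
# digit grouping such as "3 , 000" -> "3,000", and so on).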
EXA-1-master
exa/models/unilm-master/unilm-v1/src/qg/eval_on_unilm_tokenized_ref.py
#!/usr/bin/env python from __future__ import print_function __author__ = 'xinya' from bleu.bleu import Bleu from meteor.meteor import Meteor from rouge.rouge import Rouge from cider.cider import Cider from collections import defaultdict from argparse import ArgumentParser import string import sys reload(sys) sys.setdefaultencoding('utf-8') _tok_dict = {"(": "-lrb-", ")": "-rrb-", "[": "-lsb-", "]": "-rsb-", "{": "-lcb-", "}": "-rcb-", "[UNK]": "UNK", '&': '&amp;', '<': '&lt;', '>': '&gt;'} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) class QGEvalCap: def __init__(self, gts, res): self.gts = gts self.res = res def evaluate(self): output = [] scorers = [ (Bleu(4), ["Bleu_1", "Bleu_2", "Bleu_3", "Bleu_4"]), (Meteor(), "METEOR"), (Rouge(), "ROUGE_L"), # (Cider(), "CIDEr") ] # ================================================= # Compute scores # ================================================= for scorer, method in scorers: # print 'computing %s score...'%(scorer.method()) score, scores = scorer.compute_score(self.gts, self.res) if type(method) == list: for sc, scs, m in zip(score, scores, method): print("%s: %0.5f" % (m, sc)) output.append(sc) else: print("%s: %0.5f" % (method, score)) output.append(score) return output def eval(out_file, src_file, tgt_file, isDIn=False, num_pairs=500): """ Given a filename, calculate the metric scores for that prediction file isDin: boolean value to check whether input file is DirectIn.txt """ pairs = [] with open(src_file, 'r') as infile: for line in infile: pair = {} pair['tokenized_sentence'] = line[:-1].strip().lower() pairs.append(pair) with open(tgt_file, "r") as infile: cnt = 0 for line in infile: pairs[cnt]['tokenized_question'] = line[:-1].strip() cnt += 1 output = [] with open(out_file, 'r') as infile: for line in infile: line = fix_tokenization(line[:-1].strip()).lower() output.append(line) for idx, pair in enumerate(pairs): pair['prediction'] = output[idx] # eval from eval import QGEvalCap import json from json import encoder encoder.FLOAT_REPR = lambda o: format(o, '.4f') res = defaultdict(lambda: []) gts = defaultdict(lambda: []) for pair in pairs[:]: key = pair['tokenized_sentence'] res[key] = [pair['prediction'].encode('utf-8')] # gts gts[key].append(pair['tokenized_question'].encode('utf-8')) QGEval = QGEvalCap(gts, res) return QGEval.evaluate() if __name__ == "__main__": parser = ArgumentParser() parser.add_argument("-out", "--out_file", dest="out_file", default="./output/pred.txt", help="output file to compare") parser.add_argument("-src", "--src_file", dest="src_file", default="./qg_data/test/test.pa.txt", help="src file") parser.add_argument("-tgt", "--tgt_file", dest="tgt_file", default="./qg_data/nqg_processed_data/tgt-test.txt", help="target file") args = parser.parse_args() print("scores: \n") eval(args.out_file, args.src_file, args.tgt_file)
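# Illustrative usage (added; file paths are just the script's argparse
# defaults used as placeholders): eval() can also be called programmatically,
# returning the metric values in the order they are computed.
#
#   scores = eval("./output/pred.txt",
#                 "./qg_data/test/test.pa.txt",
#                 "./qg_data/nqg_processed_data/tgt-test.txt")
#   # -> [Bleu_1, Bleu_2, Bleu_3, Bleu_4, METEOR, ROUGE_L]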
EXA-1-master
exa/models/unilm-master/unilm-v1/src/qg/eval.py
EXA-1-master
exa/models/unilm-master/unilm-v1/src/gigaword/__init__.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np # pip install py-rouge import rouge import time import tempfile import shutil from pytorch_pretrained_bert.tokenization import BertTokenizer # pip install pyrouge from gigaword.bs_pyrouge import Rouge155 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--gold", type=str, help="Gold output file.") parser.add_argument("--pred", type=str, help="Input prediction file.") parser.add_argument("--split", type=str, default="", help="Data split (train/dev/test).") parser.add_argument("--save_best", action='store_true', help="Save best epoch.") parser.add_argument("--only_eval_best", action='store_true', help="Only evaluate best epoch.") parser.add_argument("--trunc_len", type=int, default=0, help="Truncate line by the maximum length.") default_process_count = max(1, cpu_count() - 1) parser.add_argument("--processes", type=int, default=default_process_count, help="Number of processes to use (default %(default)s)") parser.add_argument("--perl", action='store_true', help="Using the perl script.") parser.add_argument('--lazy_eval', action='store_true', help="Skip evaluation if the .rouge file exists.") args = parser.parse_args() SPECIAL_TOKEN = ["[UNK]", "[PAD]", "[CLS]", "[MASK]"] evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def rouge_results_to_str(results_dict): return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format( results_dict["rouge_1_f_score"] * 100, results_dict["rouge_2_f_score"] * 100, results_dict["rouge_l_f_score"] * 100, results_dict["rouge_1_recall"] * 100, results_dict["rouge_2_recall"] * 100, results_dict["rouge_l_recall"] * 100 ) def count_tokens(tokens): counter = {} for t in tokens: if t in counter.keys(): counter[t] += 1 else: counter[t] = 1 return counter def get_f1(text_a, text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or 
len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5) _tok_dict = {"(": "-lrb-", ")": "-rrb-", "[": "-lsb-", "]": "-rsb-", "{": "-lcb-", "}": "-rcb-", "[UNK]": "UNK", '&': '&amp;', '<': '&lt;', '>': '&gt;'} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip() gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] sentence = fix_tokenization(l.strip()).replace('1', '#') buf.append(sentence) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores def main(): if args.perl: eval_fn_list = list(glob.glob(args.pred)) else: eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not( args.lazy_eval and Path(eval_fn+".rouge").exists())] eval_fn_list = list(filter(lambda fn: not(fn.endswith( '.post') or fn.endswith('.rouge')), eval_fn_list)) if args.only_eval_best: best_epoch_dict = {} for dir_path in set(Path(fn).parent for fn in eval_fn_list): fn_save = os.path.join(dir_path, 'save_best.dev') if Path(fn_save).exists(): with open(fn_save, 'r') as f_in: __, o_name, __ = f_in.read().strip().split('\n') epoch = o_name.split('.')[1] best_epoch_dict[dir_path] = epoch new_eval_fn_list = [] for fn in eval_fn_list: dir_path = Path(fn).parent if dir_path in best_epoch_dict: if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]: new_eval_fn_list.append(fn) eval_fn_list = new_eval_fn_list logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list)) num_pool = min(args.processes, len(eval_fn_list)) p = Pool(num_pool) r_list = p.imap_unordered(process_eval, eval_fn_list) r_list = sorted([(fn, scores) for fn, scores in r_list], key=lambda x: x[0]) rg2_dict = {} for fn, scores in r_list: print(fn) if args.perl: print(rouge_results_to_str(scores)) else: rg2_dict[fn] = scores['rouge-2']['f'] print( "ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f'])) with open(fn+".rouge", 'w') as f_out: f_out.write(json.dumps( {'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']})) p.close() p.join() if args.save_best: # find best results group_dict = {} for k, v in rg2_dict.items(): d_name, o_name = Path(k).parent, Path(k).name if (d_name not in group_dict) or (v > group_dict[d_name][1]): group_dict[d_name] = 
(o_name, v) # compare and save the best result for k, v in group_dict.items(): fn = os.path.join(k, 'save_best.'+args.split) o_name_s, rst_s = v should_save = True if Path(fn).exists(): with open(fn, 'r') as f_in: rst_f = float(f_in.read().strip().split('\n')[-1]) if rst_s <= rst_f: should_save = False if should_save: with open(fn, 'w') as f_out: f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s)) if __name__ == "__main__": main()
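# Illustrative invocation (added; assumes the script is run from the src/
# directory so that the gigaword package is importable, and the paths are
# placeholders):
#
#   python -m gigaword.eval --gold /path/to/test.tgt.txt \
#       --pred "/path/to/output/ckpt-*/test.out" --split test --save_best
#
# Each matched prediction file gets a post-processed "*.post" copy and, when
# the Python rouge backend is used (no --perl), a "*.rouge" JSON with its
# ROUGE-1/ROUGE-2 F scores; --save_best records the best-scoring file per
# directory in "save_best.<split>".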
EXA-1-master
exa/models/unilm-master/unilm-v1/src/gigaword/eval.py
from __future__ import print_function, unicode_literals, division import os import re import codecs import platform from subprocess import check_output from tempfile import mkdtemp from functools import partial try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser from pyrouge.utils import log from pyrouge.utils.file_utils import verify_dir REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}", "-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'} def clean(x): return re.sub( r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''", lambda m: REMAP.get(m.group()), x) class DirectoryProcessor: @staticmethod def process(input_dir, output_dir, function): """ Apply function to all files in input_dir and save the resulting ouput files in output_dir. """ if not os.path.exists(output_dir): os.makedirs(output_dir) logger = log.get_global_console_logger() logger.info("Processing files in {}.".format(input_dir)) input_file_names = os.listdir(input_dir) for input_file_name in input_file_names: input_file = os.path.join(input_dir, input_file_name) with codecs.open(input_file, "r", encoding="UTF-8") as f: input_string = f.read() output_string = function(input_string) output_file = os.path.join(output_dir, input_file_name) with codecs.open(output_file, "w", encoding="UTF-8") as f: f.write(clean(output_string.lower())) logger.info("Saved processed files to {}.".format(output_dir)) class Rouge155(object): """ This is a wrapper for the ROUGE 1.5.5 summary evaluation package. This class is designed to simplify the evaluation process by: 1) Converting summaries into a format ROUGE understands. 2) Generating the ROUGE configuration file automatically based on filename patterns. This class can be used within Python like this: rouge = Rouge155() rouge.system_dir = 'test/systems' rouge.model_dir = 'test/models' # The system filename pattern should contain one group that # matches the document ID. rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html' # The model filename pattern has '#ID#' as a placeholder for the # document ID. If there are multiple model summaries, pyrouge # will use the provided regex to automatically match them with # the corresponding system summary. Here, [A-Z] matches # multiple model summaries for a given #ID#. rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate() print(rouge_output) output_dict = rouge.output_to_dict(rouge_ouput) print(output_dict) -> {'rouge_1_f_score': 0.95652, 'rouge_1_f_score_cb': 0.95652, 'rouge_1_f_score_ce': 0.95652, 'rouge_1_precision': 0.95652, [...] To evaluate multiple systems: rouge = Rouge155() rouge.system_dir = '/PATH/TO/systems' rouge.model_dir = 'PATH/TO/models' for system_id in ['id1', 'id2', 'id3']: rouge.system_filename_pattern = \ 'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id) rouge.model_filename_pattern = \ 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate(system_id) print(rouge_output) """ def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None): """ Create a Rouge155 object. rouge_dir: Directory containing Rouge-1.5.5.pl rouge_args: Arguments to pass through to ROUGE if you don't want to use the default pyrouge arguments. 
""" self.temp_dir = temp_dir self.log = log.get_global_console_logger() self.__set_dir_properties() self._config_file = None self._settings_file = self.__get_config_path() self.__set_rouge_dir(rouge_dir) self.args = self.__clean_rouge_args(rouge_args) self._system_filename_pattern = None self._model_filename_pattern = None def save_home_dir(self): config = ConfigParser() section = 'pyrouge settings' config.add_section(section) config.set(section, 'home_dir', self._home_dir) with open(self._settings_file, 'w') as f: config.write(f) self.log.info("Set ROUGE home directory to {}.".format(self._home_dir)) @property def settings_file(self): """ Path of the setttings file, which stores the ROUGE home dir. """ return self._settings_file @property def bin_path(self): """ The full path of the ROUGE binary (although it's technically a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl """ if self._bin_path is None: raise Exception( "ROUGE path not set. Please set the ROUGE home directory " "and ensure that ROUGE-1.5.5.pl exists in it.") return self._bin_path @property def system_filename_pattern(self): """ The regular expression pattern for matching system summary filenames. The regex string. E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. Currently, there is no support for multiple systems. """ return self._system_filename_pattern @system_filename_pattern.setter def system_filename_pattern(self, pattern): self._system_filename_pattern = pattern @property def model_filename_pattern(self): """ The regular expression pattern for matching model summary filenames. The pattern needs to contain the string "#ID#", which is a placeholder for the document ID. E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. "#ID#" is a placeholder for the document ID which has been matched by the "(\d+)" part of the system filename pattern. The different model summaries for a given document ID are matched by the "[A-Z]" part. """ return self._model_filename_pattern @model_filename_pattern.setter def model_filename_pattern(self, pattern): self._model_filename_pattern = pattern @property def config_file(self): return self._config_file @config_file.setter def config_file(self, path): config_dir, _ = os.path.split(path) verify_dir(config_dir, "configuration file") self._config_file = path def split_sentences(self): """ ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used. """ from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter() def sent_split_to_string(s): return "\n".join(ss.split(s)) process_func = partial( DirectoryProcessor.process, function=sent_split_to_string) self.__process_summaries(process_func) @staticmethod def convert_summaries_to_rouge_format(input_dir, output_dir): """ Convert all files in input_dir into a format ROUGE understands and saves the files to output_dir. The input files are assumed to be plain text with one sentence per line. input_dir: Path of directory containing the input files. output_dir: Path of directory in which the converted files will be saved. 
""" DirectoryProcessor.process( input_dir, output_dir, Rouge155.convert_text_to_rouge_format) @staticmethod def convert_text_to_rouge_format(text, title="dummy title"): """ Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line. text: The text to convert, containg one sentence per line. title: Optional title for the text. The title will appear in the converted file, but doesn't seem to have any other relevance. Returns: The converted text as string. """ sentences = text.split("\n") sent_elems = [ "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>" "{text}</a>".format(i=i, text=sent) for i, sent in enumerate(sentences, start=1)] html = """<html> <head> <title>{title}</title> </head> <body bgcolor="white"> {elems} </body> </html>""".format(title=title, elems="\n".join(sent_elems)) return html @staticmethod def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ system_filenames = [f for f in os.listdir(system_dir)] system_models_tuples = [] system_filename_pattern = re.compile(system_filename_pattern) for system_filename in sorted(system_filenames): match = system_filename_pattern.match(system_filename) if match: id = match.groups(0)[0] model_filenames = [model_filename_pattern.replace('#ID#', id)] # model_filenames = Rouge155.__get_model_filenames_for_id( # id, model_dir, model_filename_pattern) system_models_tuples.append( (system_filename, sorted(model_filenames))) if not system_models_tuples: raise Exception( "Did not find any files matching the pattern {} " "in the system summaries directory {}.".format( system_filename_pattern.pattern, system_dir)) with codecs.open(config_file_path, 'w', encoding='utf-8') as f: f.write('<ROUGE-EVAL version="1.55">') for task_id, (system_filename, model_filenames) in enumerate( system_models_tuples, start=1): eval_string = Rouge155.__get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames) f.write(eval_string) f.write("</ROUGE-EVAL>") def write_config(self, config_file_path=None, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. 
""" if not system_id: system_id = 1 if (not config_file_path) or (not self._config_dir): self._config_dir = mkdtemp(dir=self.temp_dir) config_filename = "rouge_conf.xml" else: config_dir, config_filename = os.path.split(config_file_path) verify_dir(config_dir, "configuration file") self._config_file = os.path.join(self._config_dir, config_filename) Rouge155.write_config_static( self._system_dir, self._system_filename_pattern, self._model_dir, self._model_filename_pattern, self._config_file, system_id) self.log.info( "Written ROUGE configuration to {}".format(self._config_file)) def evaluate(self, system_id=1, rouge_args=None): """ Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string. """ self.write_config(system_id=system_id) options = self.__get_options(rouge_args) command = [self._bin_path] + options self.log.info( "Running ROUGE with command {}".format(" ".join(command))) rouge_output = check_output(command).decode("UTF-8") return rouge_output def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None): """ Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string. """ if split_sentences: self.split_sentences() self.__write_summaries() rouge_output = self.evaluate(system_id, rouge_args) return rouge_output def output_to_dict(self, output): """ Convert the ROUGE output into python dictionary for further processing. """ # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) " r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)") results = {} for line in output.split("\n"): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace("-", '_') key = "{}_{}".format(rouge_type, measure) results[key] = float(result) results["{}_cb".format(key)] = float(conf_begin) results["{}_ce".format(key)] = float(conf_end) return results ################################################################### # Private methods def __set_rouge_dir(self, home_dir=None): """ Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths. """ if not home_dir: self._home_dir = self.__get_rouge_home_dir_from_settings() else: self._home_dir = home_dir self.save_home_dir() self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl') self.data_dir = os.path.join(self._home_dir, 'data') if not os.path.exists(self._bin_path): raise Exception( "ROUGE binary not found at {}. 
Please set the " "correct path by running pyrouge_set_rouge_path " "/path/to/rouge/home.".format(self._bin_path)) def __get_rouge_home_dir_from_settings(self): config = ConfigParser() with open(self._settings_file) as f: if hasattr(config, "read_file"): config.read_file(f) else: # use deprecated python 2.x method config.readfp(f) rouge_home_dir = config.get('pyrouge settings', 'home_dir') return rouge_home_dir @staticmethod def __get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames): """ ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries. """ peer_elems = "<P ID=\"{id}\">{name}</P>".format( id=system_id, name=system_filename) model_elems = ["<M ID=\"{id}\">{name}</M>".format( id=chr(65 + i), name=name) for i, name in enumerate(model_filenames)] model_elems = "\n\t\t\t".join(model_elems) eval_string = """ <EVAL ID="{task_id}"> <MODEL-ROOT>{model_root}</MODEL-ROOT> <PEER-ROOT>{peer_root}</PEER-ROOT> <INPUT-FORMAT TYPE="SEE"> </INPUT-FORMAT> <PEERS> {peer_elems} </PEERS> <MODELS> {model_elems} </MODELS> </EVAL> """.format( task_id=task_id, model_root=model_dir, model_elems=model_elems, peer_root=system_dir, peer_elems=peer_elems) return eval_string def __process_summaries(self, process_func): """ Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders. """ temp_dir = mkdtemp(dir=self.temp_dir) new_system_dir = os.path.join(temp_dir, "system") os.mkdir(new_system_dir) new_model_dir = os.path.join(temp_dir, "model") os.mkdir(new_model_dir) self.log.info( "Processing summaries. Saving system files to {} and " "model files to {}.".format(new_system_dir, new_model_dir)) process_func(self._system_dir, new_system_dir) process_func(self._model_dir, new_model_dir) self._system_dir = new_system_dir self._model_dir = new_model_dir def __write_summaries(self): self.log.info("Writing summaries.") self.__process_summaries(self.convert_summaries_to_rouge_format) @staticmethod def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern): pattern = re.compile(model_filenames_pattern.replace('#ID#', id)) model_filenames = [ f for f in os.listdir(model_dir) if pattern.match(f)] if not model_filenames: raise Exception( "Could not find any model summaries for the system" " summary with ID {}. Specified model filename pattern was: " "{}".format(id, model_filenames_pattern)) return model_filenames def __get_options(self, rouge_args=None): """ Get supplied command line arguments for ROUGE or use default ones. """ if self.args: options = self.args.split() elif rouge_args: options = rouge_args.split() else: options = [ '-e', self._data_dir, '-c', 95, # '-2', # '-1', # '-U', '-m', # '-v', '-r', 1000, '-n', 2, # '-w', 1.2, '-a', ] options = list(map(str, options)) options = self.__add_config_option(options) return options def __create_dir_property(self, dir_name, docstring): """ Generate getter and setter for a directory property. 
""" property_name = "{}_dir".format(dir_name) private_name = "_" + property_name setattr(self, private_name, None) def fget(self): return getattr(self, private_name) def fset(self, path): verify_dir(path, dir_name) setattr(self, private_name, path) p = property(fget=fget, fset=fset, doc=docstring) setattr(self.__class__, property_name, p) def __set_dir_properties(self): """ Automatically generate the properties for directories. """ directories = [ ("home", "The ROUGE home directory."), ("data", "The path of the ROUGE 'data' directory."), ("system", "Path of the directory containing system summaries."), ("model", "Path of the directory containing model summaries."), ] for (dirname, docstring) in directories: self.__create_dir_property(dirname, docstring) def __clean_rouge_args(self, rouge_args): """ Remove enclosing quotation marks, if any. """ if not rouge_args: return quot_mark_pattern = re.compile('"(.+)"') match = quot_mark_pattern.match(rouge_args) if match: cleaned_args = match.group(1) return cleaned_args else: return rouge_args def __add_config_option(self, options): return options + [self._config_file] def __get_config_path(self): if platform.system() == "Windows": parent_dir = os.getenv("APPDATA") config_dir_name = "pyrouge" elif os.name == "posix": parent_dir = os.path.expanduser("~") config_dir_name = ".pyrouge" else: parent_dir = os.path.dirname(__file__) config_dir_name = "" config_dir = os.path.join(parent_dir, config_dir_name) if not os.path.exists(config_dir): os.makedirs(config_dir) return os.path.join(config_dir, 'settings.ini') if __name__ == "__main__": import argparse from utils.argparsers import rouge_path_parser parser = argparse.ArgumentParser(parents=[rouge_path_parser]) args = parser.parse_args() rouge = Rouge155(args.rouge_home) rouge.save_home_dir()
EXA-1-master
exa/models/unilm-master/unilm-v1/src/gigaword/bs_pyrouge.py
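A minimal usage sketch for the Rouge155 wrapper above, assuming ROUGE-1.5.5 is installed and its home directory has already been registered (e.g. via pyrouge_set_rouge_path); the directory paths and filename patterns below are hypothetical placeholders, not part of the original file.

import tempfile

from gigaword.bs_pyrouge import Rouge155  # the wrapper defined above

# Hypothetical directories holding one plain-text summary per file,
# one sentence per line (e.g. cand.0.txt / ref.0.txt, ...).
system_dir = "/tmp/summaries/candidate"   # placeholder path
model_dir = "/tmp/summaries/reference"    # placeholder path

rouge = Rouge155(temp_dir=tempfile.mkdtemp())
rouge.system_dir = system_dir
rouge.model_dir = model_dir
# The group (\d+) captures the document ID; #ID# is its placeholder
# in the model (reference) filename pattern.
rouge.system_filename_pattern = r"cand.(\d+).txt"
rouge.model_filename_pattern = "ref.#ID#.txt"

# Converts the plain-text files to ROUGE's HTML format, writes the
# config file, runs ROUGE-1.5.5.pl, and returns its raw text output.
output = rouge.convert_and_evaluate()
print(output)
print(rouge.output_to_dict(output))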
EXA-1-master
exa/models/unilm-master/unilm-v1/src/cnndm/__init__.py
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import json import argparse import math import string from multiprocessing import Pool, cpu_count from tqdm import tqdm, trange from pathlib import Path import numpy as np # pip install py-rouge import rouge import time import tempfile import shutil from pytorch_pretrained_bert.tokenization import BertTokenizer # pip install pyrouge from cnndm.bs_pyrouge import Rouge155 logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--gold", type=str, help="Gold output file.") parser.add_argument("--pred", type=str, help="Input prediction file.") parser.add_argument("--split", type=str, default="", help="Data split (train/dev/test).") parser.add_argument("--save_best", action='store_true', help="Save best epoch.") parser.add_argument("--only_eval_best", action='store_true', help="Only evaluate best epoch.") parser.add_argument("--trunc_len", type=int, default=60, help="Truncate line by the maximum length.") parser.add_argument("--duplicate_rate", type=float, default=0.7, help="If the duplicat rate (compared with history) is large, we can discard the current sentence.") default_process_count = max(1, cpu_count() - 1) parser.add_argument("--processes", type=int, default=default_process_count, help="Number of processes to use (default %(default)s)") parser.add_argument("--perl", action='store_true', help="Using the perl script.") parser.add_argument('--lazy_eval', action='store_true', help="Skip evaluation if the .rouge file exists.") args = parser.parse_args() SPECIAL_TOKEN = ["[UNK]", "[PAD]", "[CLS]", "[MASK]"] evaluator = rouge.Rouge(metrics=['rouge-n', 'rouge-l'], max_n=2, limit_length=False, apply_avg=True, weight_factor=1.2) def test_rouge(cand, ref): temp_dir = tempfile.mkdtemp() candidates = cand references = ref assert len(candidates) == len(references) cnt = len(candidates) current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime()) tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time)) if not os.path.isdir(tmp_dir): os.mkdir(tmp_dir) os.mkdir(tmp_dir + "/candidate") os.mkdir(tmp_dir + "/reference") try: for i in range(cnt): if len(references[i]) < 1: continue with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(candidates[i]) with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w", encoding="utf-8") as f: f.write(references[i]) r = Rouge155(temp_dir=temp_dir) r.model_dir = tmp_dir + "/reference/" r.system_dir = tmp_dir + "/candidate/" r.model_filename_pattern = 'ref.#ID#.txt' r.system_filename_pattern = r'cand.(\d+).txt' rouge_results = r.convert_and_evaluate() print(rouge_results) results_dict = r.output_to_dict(rouge_results) finally: if os.path.isdir(tmp_dir): shutil.rmtree(tmp_dir) return results_dict def rouge_results_to_str(results_dict): return ">> ROUGE-F(1/2/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format( results_dict["rouge_1_f_score"] * 100, results_dict["rouge_2_f_score"] * 100, results_dict["rouge_l_f_score"] * 100, results_dict["rouge_1_recall"] * 100, results_dict["rouge_2_recall"] * 100, results_dict["rouge_l_recall"] * 100 ) def count_tokens(tokens): counter = {} for t in tokens: if t in counter.keys(): counter[t] += 1 
else: counter[t] = 1 return counter def get_f1(text_a, text_b): tokens_a = text_a.lower().split() tokens_b = text_b.lower().split() if len(tokens_a) == 0 or len(tokens_b) == 0: return 1 if len(tokens_a) == len(tokens_b) else 0 set_a = count_tokens(tokens_a) set_b = count_tokens(tokens_b) match = 0 for token in set_a.keys(): if token in set_b.keys(): match += min(set_a[token], set_b[token]) p = match / len(tokens_a) r = match / len(tokens_b) return 2.0 * p * r / (p + r + 1e-5) _tok_dict = {"(": "-LRB-", ")": "-RRB-", "[": "-LSB-", "]": "-RSB-", "{": "-LCB-", "}": "-RCB-"} def _is_digit(w): for ch in w: if not(ch.isdigit() or ch == ','): return False return True def fix_tokenization(text): input_tokens = text.split() output_tokens = [] has_left_quote = False has_left_single_quote = False i = 0 prev_dash = False while i < len(input_tokens): tok = input_tokens[i] flag_prev_dash = False if tok in _tok_dict.keys(): output_tokens.append(_tok_dict[tok]) i += 1 elif tok == "\"": if has_left_quote: output_tokens.append("''") else: output_tokens.append("``") has_left_quote = not has_left_quote i += 1 elif tok == "'" and len(output_tokens) > 0 and output_tokens[-1].endswith("n") and i < len(input_tokens) - 1 and input_tokens[i + 1] == "t": output_tokens[-1] = output_tokens[-1][:-1] output_tokens.append("n't") i += 2 elif tok == "'" and i < len(input_tokens) - 1 and input_tokens[i + 1] in ("s", "d", "ll"): output_tokens.append("'"+input_tokens[i + 1]) i += 2 elif tok == "'": if has_left_single_quote: output_tokens.append("'") else: output_tokens.append("`") has_left_single_quote = not has_left_single_quote i += 1 elif tok == "." and i < len(input_tokens) - 2 and input_tokens[i + 1] == "." and input_tokens[i + 2] == ".": output_tokens.append("...") i += 3 elif tok == "," and len(output_tokens) > 0 and _is_digit(output_tokens[-1]) and i < len(input_tokens) - 1 and _is_digit(input_tokens[i + 1]): # $ 3 , 000 -> $ 3,000 output_tokens[-1] += ','+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and output_tokens[-1].isdigit() and i < len(input_tokens) - 1 and input_tokens[i + 1].isdigit(): # 3 . 03 -> $ 3.03 output_tokens[-1] += '.'+input_tokens[i + 1] i += 2 elif tok == "." and len(output_tokens) > 0 and len(output_tokens[-1]) == 1 and output_tokens[-1].isupper() and i < len(input_tokens) - 2 and len(input_tokens[i + 1]) == 1 and input_tokens[i + 1].isupper() and input_tokens[i + 2] == '.': # U . N . -> U.N. 
k = i+3 while k+2 < len(input_tokens): if len(input_tokens[k + 1]) == 1 and input_tokens[k + 1].isupper() and input_tokens[k + 2] == '.': k += 2 else: break output_tokens[-1] += ''.join(input_tokens[i:k]) i += 2 elif tok == "-": if i < len(input_tokens) - 1 and input_tokens[i + 1] == "-": output_tokens.append("--") i += 2 elif i == len(input_tokens) - 1 or i == 0: output_tokens.append("-") i += 1 elif output_tokens[-1] not in string.punctuation and input_tokens[i + 1][0] not in string.punctuation: output_tokens[-1] += "-" i += 1 flag_prev_dash = True else: output_tokens.append("-") i += 1 elif prev_dash and len(output_tokens) > 0 and tok[0] not in string.punctuation: output_tokens[-1] += tok i += 1 else: output_tokens.append(tok) i += 1 prev_dash = flag_prev_dash return " ".join(output_tokens) def remove_duplicate(l_list, duplicate_rate): tk_list = [l.lower().split() for l in l_list] r_list = [] history_set = set() for i, w_list in enumerate(tk_list): w_set = set(w_list) if len(w_set & history_set)/len(w_set) <= duplicate_rate: r_list.append(l_list[i]) history_set |= w_set return r_list def process_eval(eval_fn): gold_list = [] with open(args.gold, "r", encoding="utf-8") as f_in: for l in f_in: line = l.strip().replace(" <S_SEP> ", '\n') gold_list.append(line) pred_list = [] with open(eval_fn, "r", encoding="utf-8") as f_in: for l in f_in: buf = [] for sentence in l.strip().split("[X_SEP]"): sentence = fix_tokenization(sentence) if any(get_f1(sentence, s) > 1.0 for s in buf): continue s_len = len(sentence.split()) if s_len <= 4: continue buf.append(sentence) if args.duplicate_rate and args.duplicate_rate < 1: buf = remove_duplicate(buf, args.duplicate_rate) if args.trunc_len: num_left = args.trunc_len trunc_list = [] for bit in buf: tk_list = bit.split() n = min(len(tk_list), num_left) trunc_list.append(' '.join(tk_list[:n])) num_left -= n if num_left <= 0: break else: trunc_list = buf line = "\n".join(trunc_list) pred_list.append(line) with open(eval_fn+'.post', 'w', encoding='utf-8') as f_out: for l in pred_list: f_out.write(l.replace('\n', ' [X_SEP] ').strip()) f_out.write('\n') # rouge scores if len(pred_list) < len(gold_list): # evaluate subset gold_list = gold_list[:len(pred_list)] assert len(pred_list) == len(gold_list) if args.perl: scores = test_rouge(pred_list, gold_list) else: scores = evaluator.get_scores(pred_list, [[it] for it in gold_list]) return eval_fn, scores def main(): if args.perl: eval_fn_list = list(glob.glob(args.pred)) else: eval_fn_list = [eval_fn for eval_fn in glob.glob(args.pred) if not( args.lazy_eval and Path(eval_fn+".rouge").exists())] eval_fn_list = list(filter(lambda fn: not(fn.endswith( '.post') or fn.endswith('.rouge')), eval_fn_list)) if args.only_eval_best: best_epoch_dict = {} for dir_path in set(Path(fn).parent for fn in eval_fn_list): fn_save = os.path.join(dir_path, 'save_best.dev') if Path(fn_save).exists(): with open(fn_save, 'r') as f_in: __, o_name, __ = f_in.read().strip().split('\n') epoch = o_name.split('.')[1] best_epoch_dict[dir_path] = epoch new_eval_fn_list = [] for fn in eval_fn_list: dir_path = Path(fn).parent if dir_path in best_epoch_dict: if Path(fn).name.split('.')[1] == best_epoch_dict[dir_path]: new_eval_fn_list.append(fn) eval_fn_list = new_eval_fn_list logger.info("***** Evaluation: %s *****", ','.join(eval_fn_list)) num_pool = min(args.processes, len(eval_fn_list)) p = Pool(num_pool) r_list = p.imap_unordered(process_eval, eval_fn_list) r_list = sorted([(fn, scores) for fn, scores in r_list], key=lambda x: x[0]) rg2_dict = 
{} for fn, scores in r_list: print(fn) if args.perl: print(rouge_results_to_str(scores)) else: rg2_dict[fn] = scores['rouge-2']['f'] print( "ROUGE-1: {}\tROUGE-2: {}\n".format(scores['rouge-1']['f'], scores['rouge-2']['f'])) with open(fn+".rouge", 'w') as f_out: f_out.write(json.dumps( {'rg1': scores['rouge-1']['f'], 'rg2': scores['rouge-2']['f']})) p.close() p.join() if args.save_best: # find best results group_dict = {} for k, v in rg2_dict.items(): d_name, o_name = Path(k).parent, Path(k).name if (d_name not in group_dict) or (v > group_dict[d_name][1]): group_dict[d_name] = (o_name, v) # compare and save the best result for k, v in group_dict.items(): fn = os.path.join(k, 'save_best.'+args.split) o_name_s, rst_s = v should_save = True if Path(fn).exists(): with open(fn, 'r') as f_in: rst_f = float(f_in.read().strip().split('\n')[-1]) if rst_s <= rst_f: should_save = False if should_save: with open(fn, 'w') as f_out: f_out.write('{0}\n{1}\n{2}\n'.format(k, o_name_s, rst_s)) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/unilm-v1/src/cnndm/eval.py
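A small self-contained sketch of the post-processing rules used in eval.py above: the bag-of-words F1 that filters near-duplicate decoded sentences, and the word-set overlap test from remove_duplicate. The function bodies mirror the logic above rather than importing the module (eval.py parses command-line arguments at import time), and the sentences are made-up examples.

def token_f1(text_a, text_b):
    # Bag-of-words F1 between two sentences (mirrors get_f1 above).
    tokens_a, tokens_b = text_a.lower().split(), text_b.lower().split()
    if not tokens_a or not tokens_b:
        return 1 if len(tokens_a) == len(tokens_b) else 0
    counts_b = {}
    for t in tokens_b:
        counts_b[t] = counts_b.get(t, 0) + 1
    match = sum(min(tokens_a.count(t), counts_b.get(t, 0)) for t in set(tokens_a))
    p, r = match / len(tokens_a), match / len(tokens_b)
    return 2.0 * p * r / (p + r + 1e-5)

def drop_duplicates(sentences, duplicate_rate=0.7):
    # Mirrors remove_duplicate above: a sentence is discarded when more
    # than duplicate_rate of its word set already appeared in history.
    kept, history = [], set()
    for s in sentences:
        words = set(s.lower().split())
        if len(words & history) / len(words) <= duplicate_rate:
            kept.append(s)
            history |= words
    return kept

print(token_f1("the cat sat on the mat", "the cat sat on a mat"))  # ~0.83
print(drop_duplicates(["the cat sat on the mat",
                       "the cat sat on the mat again",  # mostly repeated words
                       "dogs chase cats"]))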
from __future__ import print_function, unicode_literals, division import os import re import codecs import platform from subprocess import check_output from tempfile import mkdtemp from functools import partial try: from configparser import ConfigParser except ImportError: from ConfigParser import ConfigParser from pyrouge.utils import log from pyrouge.utils.file_utils import verify_dir REMAP = {"-lrb-": "(", "-rrb-": ")", "-lcb-": "{", "-rcb-": "}", "-lsb-": "[", "-rsb-": "]", "``": '"', "''": '"'} def clean(x): return re.sub( r"-lrb-|-rrb-|-lcb-|-rcb-|-lsb-|-rsb-|``|''", lambda m: REMAP.get(m.group()), x) class DirectoryProcessor: @staticmethod def process(input_dir, output_dir, function): """ Apply function to all files in input_dir and save the resulting ouput files in output_dir. """ if not os.path.exists(output_dir): os.makedirs(output_dir) logger = log.get_global_console_logger() logger.info("Processing files in {}.".format(input_dir)) input_file_names = os.listdir(input_dir) for input_file_name in input_file_names: input_file = os.path.join(input_dir, input_file_name) with codecs.open(input_file, "r", encoding="UTF-8") as f: input_string = f.read() output_string = function(input_string) output_file = os.path.join(output_dir, input_file_name) with codecs.open(output_file, "w", encoding="UTF-8") as f: f.write(clean(output_string.lower())) logger.info("Saved processed files to {}.".format(output_dir)) class Rouge155(object): """ This is a wrapper for the ROUGE 1.5.5 summary evaluation package. This class is designed to simplify the evaluation process by: 1) Converting summaries into a format ROUGE understands. 2) Generating the ROUGE configuration file automatically based on filename patterns. This class can be used within Python like this: rouge = Rouge155() rouge.system_dir = 'test/systems' rouge.model_dir = 'test/models' # The system filename pattern should contain one group that # matches the document ID. rouge.system_filename_pattern = 'SL.P.10.R.11.SL062003-(\d+).html' # The model filename pattern has '#ID#' as a placeholder for the # document ID. If there are multiple model summaries, pyrouge # will use the provided regex to automatically match them with # the corresponding system summary. Here, [A-Z] matches # multiple model summaries for a given #ID#. rouge.model_filename_pattern = 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate() print(rouge_output) output_dict = rouge.output_to_dict(rouge_ouput) print(output_dict) -> {'rouge_1_f_score': 0.95652, 'rouge_1_f_score_cb': 0.95652, 'rouge_1_f_score_ce': 0.95652, 'rouge_1_precision': 0.95652, [...] To evaluate multiple systems: rouge = Rouge155() rouge.system_dir = '/PATH/TO/systems' rouge.model_dir = 'PATH/TO/models' for system_id in ['id1', 'id2', 'id3']: rouge.system_filename_pattern = \ 'SL.P/.10.R.{}.SL062003-(\d+).html'.format(system_id) rouge.model_filename_pattern = \ 'SL.P.10.R.[A-Z].SL062003-#ID#.html' rouge_output = rouge.evaluate(system_id) print(rouge_output) """ def __init__(self, rouge_dir=None, rouge_args=None, temp_dir=None): """ Create a Rouge155 object. rouge_dir: Directory containing Rouge-1.5.5.pl rouge_args: Arguments to pass through to ROUGE if you don't want to use the default pyrouge arguments. 
""" self.temp_dir = temp_dir self.log = log.get_global_console_logger() self.__set_dir_properties() self._config_file = None self._settings_file = self.__get_config_path() self.__set_rouge_dir(rouge_dir) self.args = self.__clean_rouge_args(rouge_args) self._system_filename_pattern = None self._model_filename_pattern = None def save_home_dir(self): config = ConfigParser() section = 'pyrouge settings' config.add_section(section) config.set(section, 'home_dir', self._home_dir) with open(self._settings_file, 'w') as f: config.write(f) self.log.info("Set ROUGE home directory to {}.".format(self._home_dir)) @property def settings_file(self): """ Path of the setttings file, which stores the ROUGE home dir. """ return self._settings_file @property def bin_path(self): """ The full path of the ROUGE binary (although it's technically a script), i.e. rouge_home_dir/ROUGE-1.5.5.pl """ if self._bin_path is None: raise Exception( "ROUGE path not set. Please set the ROUGE home directory " "and ensure that ROUGE-1.5.5.pl exists in it.") return self._bin_path @property def system_filename_pattern(self): """ The regular expression pattern for matching system summary filenames. The regex string. E.g. "SL.P.10.R.11.SL062003-(\d+).html" will match the system filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. Currently, there is no support for multiple systems. """ return self._system_filename_pattern @system_filename_pattern.setter def system_filename_pattern(self, pattern): self._system_filename_pattern = pattern @property def model_filename_pattern(self): """ The regular expression pattern for matching model summary filenames. The pattern needs to contain the string "#ID#", which is a placeholder for the document ID. E.g. "SL.P.10.R.[A-Z].SL062003-#ID#.html" will match the model filenames in the SPL2003/system folder of the ROUGE SPL example in the "sample-test" folder. "#ID#" is a placeholder for the document ID which has been matched by the "(\d+)" part of the system filename pattern. The different model summaries for a given document ID are matched by the "[A-Z]" part. """ return self._model_filename_pattern @model_filename_pattern.setter def model_filename_pattern(self, pattern): self._model_filename_pattern = pattern @property def config_file(self): return self._config_file @config_file.setter def config_file(self, path): config_dir, _ = os.path.split(path) verify_dir(config_dir, "configuration file") self._config_file = path def split_sentences(self): """ ROUGE requires texts split into sentences. In case the texts are not already split, this method can be used. """ from pyrouge.utils.sentence_splitter import PunktSentenceSplitter self.log.info("Splitting sentences.") ss = PunktSentenceSplitter() def sent_split_to_string(s): return "\n".join(ss.split(s)) process_func = partial( DirectoryProcessor.process, function=sent_split_to_string) self.__process_summaries(process_func) @staticmethod def convert_summaries_to_rouge_format(input_dir, output_dir): """ Convert all files in input_dir into a format ROUGE understands and saves the files to output_dir. The input files are assumed to be plain text with one sentence per line. input_dir: Path of directory containing the input files. output_dir: Path of directory in which the converted files will be saved. 
""" DirectoryProcessor.process( input_dir, output_dir, Rouge155.convert_text_to_rouge_format) @staticmethod def convert_text_to_rouge_format(text, title="dummy title"): """ Convert a text to a format ROUGE understands. The text is assumed to contain one sentence per line. text: The text to convert, containg one sentence per line. title: Optional title for the text. The title will appear in the converted file, but doesn't seem to have any other relevance. Returns: The converted text as string. """ sentences = text.split("\n") sent_elems = [ "<a name=\"{i}\">[{i}]</a> <a href=\"#{i}\" id={i}>" "{text}</a>".format(i=i, text=sent) for i, sent in enumerate(sentences, start=1)] html = """<html> <head> <title>{title}</title> </head> <body bgcolor="white"> {elems} </body> </html>""".format(title=title, elems="\n".join(sent_elems)) return html @staticmethod def write_config_static(system_dir, system_filename_pattern, model_dir, model_filename_pattern, config_file_path, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their corresponding model summary files. pyrouge uses regular expressions to automatically find the matching model summary files for a given system summary file (cf. docstrings for system_filename_pattern and model_filename_pattern). system_dir: Path of directory containing system summaries. system_filename_pattern: Regex string for matching system summary filenames. model_dir: Path of directory containing model summaries. model_filename_pattern: Regex string for matching model summary filenames. config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. """ system_filenames = [f for f in os.listdir(system_dir)] system_models_tuples = [] system_filename_pattern = re.compile(system_filename_pattern) for system_filename in sorted(system_filenames): match = system_filename_pattern.match(system_filename) if match: id = match.groups(0)[0] model_filenames = [model_filename_pattern.replace('#ID#', id)] # model_filenames = Rouge155.__get_model_filenames_for_id( # id, model_dir, model_filename_pattern) system_models_tuples.append( (system_filename, sorted(model_filenames))) if not system_models_tuples: raise Exception( "Did not find any files matching the pattern {} " "in the system summaries directory {}.".format( system_filename_pattern.pattern, system_dir)) with codecs.open(config_file_path, 'w', encoding='utf-8') as f: f.write('<ROUGE-EVAL version="1.55">') for task_id, (system_filename, model_filenames) in enumerate( system_models_tuples, start=1): eval_string = Rouge155.__get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames) f.write(eval_string) f.write("</ROUGE-EVAL>") def write_config(self, config_file_path=None, system_id=None): """ Write the ROUGE configuration file, which is basically a list of system summary files and their matching model summary files. This is a non-static version of write_config_file_static(). config_file_path: Path of the configuration file. system_id: Optional system ID string which will appear in the ROUGE output. 
""" if not system_id: system_id = 1 if (not config_file_path) or (not self._config_dir): self._config_dir = mkdtemp(dir=self.temp_dir) config_filename = "rouge_conf.xml" else: config_dir, config_filename = os.path.split(config_file_path) verify_dir(config_dir, "configuration file") self._config_file = os.path.join(self._config_dir, config_filename) Rouge155.write_config_static( self._system_dir, self._system_filename_pattern, self._model_dir, self._model_filename_pattern, self._config_file, system_id) self.log.info( "Written ROUGE configuration to {}".format(self._config_file)) def evaluate(self, system_id=1, rouge_args=None): """ Run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. The summaries are assumed to be in the one-sentence-per-line HTML format ROUGE understands. system_id: Optional system ID which will be printed in ROUGE's output. Returns: Rouge output as string. """ self.write_config(system_id=system_id) options = self.__get_options(rouge_args) command = [self._bin_path] + options self.log.info( "Running ROUGE with command {}".format(" ".join(command))) rouge_output = check_output(command).decode("UTF-8") return rouge_output def convert_and_evaluate(self, system_id=1, split_sentences=False, rouge_args=None): """ Convert plain text summaries to ROUGE format and run ROUGE to evaluate the system summaries in system_dir against the model summaries in model_dir. Optionally split texts into sentences in case they aren't already. This is just a convenience method combining convert_summaries_to_rouge_format() and evaluate(). split_sentences: Optional argument specifying if sentences should be split. system_id: Optional system ID which will be printed in ROUGE's output. Returns: ROUGE output as string. """ if split_sentences: self.split_sentences() self.__write_summaries() rouge_output = self.evaluate(system_id, rouge_args) return rouge_output def output_to_dict(self, output): """ Convert the ROUGE output into python dictionary for further processing. """ # 0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632) pattern = re.compile( r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) " r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)") results = {} for line in output.split("\n"): match = pattern.match(line) if match: sys_id, rouge_type, measure, result, conf_begin, conf_end = \ match.groups() measure = { 'Average_R': 'recall', 'Average_P': 'precision', 'Average_F': 'f_score' }[measure] rouge_type = rouge_type.lower().replace("-", '_') key = "{}_{}".format(rouge_type, measure) results[key] = float(result) results["{}_cb".format(key)] = float(conf_begin) results["{}_ce".format(key)] = float(conf_end) return results ################################################################### # Private methods def __set_rouge_dir(self, home_dir=None): """ Verfify presence of ROUGE-1.5.5.pl and data folder, and set those paths. """ if not home_dir: self._home_dir = self.__get_rouge_home_dir_from_settings() else: self._home_dir = home_dir self.save_home_dir() self._bin_path = os.path.join(self._home_dir, 'ROUGE-1.5.5.pl') self.data_dir = os.path.join(self._home_dir, 'data') if not os.path.exists(self._bin_path): raise Exception( "ROUGE binary not found at {}. 
Please set the " "correct path by running pyrouge_set_rouge_path " "/path/to/rouge/home.".format(self._bin_path)) def __get_rouge_home_dir_from_settings(self): config = ConfigParser() with open(self._settings_file) as f: if hasattr(config, "read_file"): config.read_file(f) else: # use deprecated python 2.x method config.readfp(f) rouge_home_dir = config.get('pyrouge settings', 'home_dir') return rouge_home_dir @staticmethod def __get_eval_string( task_id, system_id, system_dir, system_filename, model_dir, model_filenames): """ ROUGE can evaluate several system summaries for a given text against several model summaries, i.e. there is an m-to-n relation between system and model summaries. The system summaries are listed in the <PEERS> tag and the model summaries in the <MODELS> tag. pyrouge currently only supports one system summary per text, i.e. it assumes a 1-to-n relation between system and model summaries. """ peer_elems = "<P ID=\"{id}\">{name}</P>".format( id=system_id, name=system_filename) model_elems = ["<M ID=\"{id}\">{name}</M>".format( id=chr(65 + i), name=name) for i, name in enumerate(model_filenames)] model_elems = "\n\t\t\t".join(model_elems) eval_string = """ <EVAL ID="{task_id}"> <MODEL-ROOT>{model_root}</MODEL-ROOT> <PEER-ROOT>{peer_root}</PEER-ROOT> <INPUT-FORMAT TYPE="SEE"> </INPUT-FORMAT> <PEERS> {peer_elems} </PEERS> <MODELS> {model_elems} </MODELS> </EVAL> """.format( task_id=task_id, model_root=model_dir, model_elems=model_elems, peer_root=system_dir, peer_elems=peer_elems) return eval_string def __process_summaries(self, process_func): """ Helper method that applies process_func to the files in the system and model folders and saves the resulting files to new system and model folders. """ temp_dir = mkdtemp(dir=self.temp_dir) new_system_dir = os.path.join(temp_dir, "system") os.mkdir(new_system_dir) new_model_dir = os.path.join(temp_dir, "model") os.mkdir(new_model_dir) self.log.info( "Processing summaries. Saving system files to {} and " "model files to {}.".format(new_system_dir, new_model_dir)) process_func(self._system_dir, new_system_dir) process_func(self._model_dir, new_model_dir) self._system_dir = new_system_dir self._model_dir = new_model_dir def __write_summaries(self): self.log.info("Writing summaries.") self.__process_summaries(self.convert_summaries_to_rouge_format) @staticmethod def __get_model_filenames_for_id(id, model_dir, model_filenames_pattern): pattern = re.compile(model_filenames_pattern.replace('#ID#', id)) model_filenames = [ f for f in os.listdir(model_dir) if pattern.match(f)] if not model_filenames: raise Exception( "Could not find any model summaries for the system" " summary with ID {}. Specified model filename pattern was: " "{}".format(id, model_filenames_pattern)) return model_filenames def __get_options(self, rouge_args=None): """ Get supplied command line arguments for ROUGE or use default ones. """ if self.args: options = self.args.split() elif rouge_args: options = rouge_args.split() else: options = [ '-e', self._data_dir, '-c', 95, # '-2', # '-1', # '-U', '-m', # '-v', '-r', 1000, '-n', 2, # '-w', 1.2, '-a', ] options = list(map(str, options)) options = self.__add_config_option(options) return options def __create_dir_property(self, dir_name, docstring): """ Generate getter and setter for a directory property. 
""" property_name = "{}_dir".format(dir_name) private_name = "_" + property_name setattr(self, private_name, None) def fget(self): return getattr(self, private_name) def fset(self, path): verify_dir(path, dir_name) setattr(self, private_name, path) p = property(fget=fget, fset=fset, doc=docstring) setattr(self.__class__, property_name, p) def __set_dir_properties(self): """ Automatically generate the properties for directories. """ directories = [ ("home", "The ROUGE home directory."), ("data", "The path of the ROUGE 'data' directory."), ("system", "Path of the directory containing system summaries."), ("model", "Path of the directory containing model summaries."), ] for (dirname, docstring) in directories: self.__create_dir_property(dirname, docstring) def __clean_rouge_args(self, rouge_args): """ Remove enclosing quotation marks, if any. """ if not rouge_args: return quot_mark_pattern = re.compile('"(.+)"') match = quot_mark_pattern.match(rouge_args) if match: cleaned_args = match.group(1) return cleaned_args else: return rouge_args def __add_config_option(self, options): return options + [self._config_file] def __get_config_path(self): if platform.system() == "Windows": parent_dir = os.getenv("APPDATA") config_dir_name = "pyrouge" elif os.name == "posix": parent_dir = os.path.expanduser("~") config_dir_name = ".pyrouge" else: parent_dir = os.path.dirname(__file__) config_dir_name = "" config_dir = os.path.join(parent_dir, config_dir_name) if not os.path.exists(config_dir): os.makedirs(config_dir) return os.path.join(config_dir, 'settings.ini') if __name__ == "__main__": import argparse from utils.argparsers import rouge_path_parser parser = argparse.ArgumentParser(parents=[rouge_path_parser]) args = parser.parse_args() rouge = Rouge155(args.rouge_home) rouge.save_home_dir()
EXA-1-master
exa/models/unilm-master/unilm-v1/src/cnndm/bs_pyrouge.py
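To illustrate output_to_dict above, a standalone sketch that applies the same regular expression to the example ROUGE output line quoted in its comment; the resulting keys follow the rouge_<n>_<measure> naming the method produces.

import re

# Same pattern as Rouge155.output_to_dict above.
pattern = re.compile(
    r"(\d+) (ROUGE-\S+) (Average_\w): (\d.\d+) "
    r"\(95%-conf.int. (\d.\d+) - (\d.\d+)\)")

line = "0 ROUGE-1 Average_R: 0.02632 (95%-conf.int. 0.02632 - 0.02632)"
sys_id, rouge_type, measure, result, conf_begin, conf_end = pattern.match(line).groups()

measure = {"Average_R": "recall", "Average_P": "precision", "Average_F": "f_score"}[measure]
key = "{}_{}".format(rouge_type.lower().replace("-", "_"), measure)
print({key: float(result),
       key + "_cb": float(conf_begin),
       key + "_ce": float(conf_end)})
# -> {'rouge_1_recall': 0.02632, 'rouge_1_recall_cb': 0.02632, 'rouge_1_recall_ce': 0.02632}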
from random import randint, shuffle from random import random as rand import numpy as np import torch import torch.utils.data def get_random_word(vocab_words): i = randint(0, len(vocab_words)-1) return vocab_words[i] def batch_list_to_batch_tensors(batch): batch_tensors = [] for x in zip(*batch): if x[0] is None: batch_tensors.append(None) elif isinstance(x[0], torch.Tensor): batch_tensors.append(torch.stack(x)) else: batch_tensors.append(torch.tensor(x, dtype=torch.long)) return batch_tensors class TrieNode(object): def __init__(self): self.children = {} self.is_leaf = False def try_get_children(self, key): if key not in self.children: self.children[key] = TrieNode() return self.children[key] class TrieTree(object): def __init__(self): self.root = TrieNode() def add(self, tokens): r = self.root for token in tokens: r = r.try_get_children(token) r.is_leaf = True def get_pieces(self, tokens, offset): pieces = [] r = self.root token_id = 0 last_valid = 0 match_count = 0 while last_valid < len(tokens): if token_id < len(tokens) and tokens[token_id] in r.children: r = r.children[tokens[token_id]] match_count += 1 if r.is_leaf: last_valid = token_id token_id += 1 else: pieces.append( list(range(token_id - match_count + offset, last_valid + 1 + offset))) last_valid += 1 token_id = last_valid r = self.root match_count = 0 return pieces def _get_word_split_index(tokens, st, end): split_idx = [] i = st while i < end: if (not tokens[i].startswith('##')) or (i == st): split_idx.append(i) i += 1 split_idx.append(end) return split_idx def _expand_whole_word(tokens, st, end): new_st, new_end = st, end while (new_st >= 0) and tokens[new_st].startswith('##'): new_st -= 1 while (new_end < len(tokens)) and tokens[new_end].startswith('##'): new_end += 1 return new_st, new_end class Pipeline(): """ Pre-process Pipeline Class : callable """ def __init__(self): super().__init__() self.skipgram_prb = None self.skipgram_size = None self.pre_whole_word = None self.mask_whole_word = None self.word_subsample_prb = None self.sp_prob = None self.pieces_dir = None self.vocab_words = None self.pieces_threshold = 10 self.trie = None self.call_count = 0 self.offline_mode = False self.skipgram_size_geo_list = None self.span_same_mask = False def init_skipgram_size_geo_list(self, p): if p > 0: g_list = [] t = p for _ in range(self.skipgram_size): g_list.append(t) t *= (1-p) s = sum(g_list) self.skipgram_size_geo_list = [x/s for x in g_list] def create_trie_tree(self, pieces_dir): print("sp_prob = {}".format(self.sp_prob)) print("pieces_threshold = {}".format(self.pieces_threshold)) if pieces_dir is not None: self.trie = TrieTree() pieces_files = [pieces_dir] for token in self.vocab_words: self.trie.add([token]) for piece_file in pieces_files: print("Load piece file: {}".format(piece_file)) with open(piece_file, mode='r', encoding='utf-8') as reader: for line in reader: parts = line.split('\t') if int(parts[-1]) < self.pieces_threshold: pass tokens = [] for part in parts[:-1]: tokens.extend(part.split(' ')) self.trie.add(tokens) def __call__(self, instance): raise NotImplementedError # pre_whole_word: tokenize to words before masking # post whole word (--mask_whole_word): expand to words after masking def get_masked_pos(self, tokens, n_pred, add_skipgram=False, mask_segment=None, protect_range=None): if self.pieces_dir is not None and self.trie is None: self.create_trie_tree(self.pieces_dir) if self.pre_whole_word: if self.trie is not None: pieces = self.trie.get_pieces(tokens, 0) new_pieces = [] for piece in pieces: if 
len(new_pieces) > 0 and tokens[piece[0]].startswith("##"): new_pieces[-1].extend(piece) else: new_pieces.append(piece) del pieces pieces = new_pieces pre_word_split = list(_[-1] for _ in pieces) pre_word_split.append(len(tokens)) else: pre_word_split = _get_word_split_index(tokens, 0, len(tokens)) index2piece = None else: pre_word_split = list(range(0, len(tokens)+1)) if self.trie is not None: pieces = self.trie.get_pieces(tokens, 0) index2piece = {} for piece in pieces: for index in piece: index2piece[index] = (piece[0], piece[-1]) else: index2piece = None span_list = list(zip(pre_word_split[:-1], pre_word_split[1:])) # candidate positions of masked tokens cand_pos = [] special_pos = set() if mask_segment: for i, sp in enumerate(span_list): sp_st, sp_end = sp if (sp_end-sp_st == 1) and tokens[sp_st].endswith('SEP]'): segment_index = i break for i, sp in enumerate(span_list): sp_st, sp_end = sp if (sp_end-sp_st == 1) and (tokens[sp_st].endswith('CLS]') or tokens[sp_st].endswith('SEP]')): special_pos.add(i) else: if mask_segment: if ((i < segment_index) and ('a' in mask_segment)) or ((i > segment_index) and ('b' in mask_segment)): cand_pos.append(i) else: cand_pos.append(i) shuffle(cand_pos) masked_pos = set() for i_span in cand_pos: if len(masked_pos) >= n_pred: break cand_st, cand_end = span_list[i_span] if len(masked_pos)+cand_end-cand_st > n_pred: continue if any(p in masked_pos for p in range(cand_st, cand_end)): continue n_span = 1 if index2piece is not None: p_start, p_end = index2piece[i_span] if p_start < p_end and (rand() < self.sp_prob): # n_span = p_end - p_start + 1 st_span, end_span = p_start, p_end + 1 else: st_span, end_span = i_span, i_span + 1 else: rand_skipgram_size = 0 # ngram if self.skipgram_size_geo_list: # sampling ngram size from geometric distribution rand_skipgram_size = np.random.choice( len(self.skipgram_size_geo_list), 1, p=self.skipgram_size_geo_list)[0] + 1 else: if add_skipgram and (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb): rand_skipgram_size = min( randint(2, self.skipgram_size), len(span_list)-i_span) for n in range(2, rand_skipgram_size+1): tail_st, tail_end = span_list[i_span+n-1] if (tail_end-tail_st == 1) and (tail_st in special_pos): break if len(masked_pos)+tail_end-cand_st > n_pred: break n_span = n st_span, end_span = i_span, i_span + n_span if self.mask_whole_word: # pre_whole_word==False: position index of span_list is the same as tokens st_span, end_span = _expand_whole_word( tokens, st_span, end_span) # subsampling according to frequency if self.word_subsample_prb: skip_pos = set() if self.pre_whole_word: w_span_list = span_list[st_span:end_span] else: split_idx = _get_word_split_index( tokens, st_span, end_span) w_span_list = list( zip(split_idx[:-1], split_idx[1:])) for i, sp in enumerate(w_span_list): sp_st, sp_end = sp if sp_end-sp_st == 1: w_cat = tokens[sp_st] else: w_cat = ''.join(tokens[sp_st:sp_end]) if (w_cat in self.word_subsample_prb) and (rand() < self.word_subsample_prb[w_cat]): for k in range(sp_st, sp_end): skip_pos.add(k) else: skip_pos = None for sp in range(st_span, end_span): for mp in range(span_list[sp][0], span_list[sp][1]): if not(skip_pos and (mp in skip_pos)) and (mp not in special_pos) and not(protect_range and (protect_range[0] <= mp < protect_range[1])): masked_pos.add(mp) if len(masked_pos) < n_pred: shuffle(cand_pos) for pos in cand_pos: if len(masked_pos) >= n_pred: break if pos not in masked_pos: masked_pos.add(pos) masked_pos = list(masked_pos) if len(masked_pos) > 
n_pred: # shuffle(masked_pos) masked_pos = masked_pos[:n_pred] return masked_pos def replace_masked_tokens(self, tokens, masked_pos): if self.span_same_mask: masked_pos = sorted(list(masked_pos)) prev_pos, prev_rand = None, None for pos in masked_pos: if self.span_same_mask and (pos-1 == prev_pos): t_rand = prev_rand else: t_rand = rand() if t_rand < 0.8: # 80% tokens[pos] = '[MASK]' elif t_rand < 0.9: # 10% tokens[pos] = get_random_word(self.vocab_words) prev_pos, prev_rand = pos, t_rand
EXA-1-master
exa/models/unilm-master/unilm-v1/src/biunilm/loader_utils.py
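A short sketch of how the TrieTree in loader_utils.py above groups token positions into multi-token pieces, assuming the module is importable as biunilm.loader_utils (as the other scripts in this repo do); the vocabulary and the "new york" phrase entry are made-up examples.

from biunilm.loader_utils import TrieTree  # defined above

trie = TrieTree()
# Every single token is added so unmatched words fall back to length-1
# pieces, mirroring how Pipeline.create_trie_tree seeds the trie with
# the vocabulary before adding multi-token phrases.
for token in ["i", "love", "new", "york", "city"]:
    trie.add([token])
# A multi-token piece (hypothetical phrase-list entry).
trie.add(["new", "york"])

tokens = ["i", "love", "new", "york", "city"]
print(trie.get_pieces(tokens, offset=0))
# -> [[0], [1], [2, 3], [4]]   ("new york" is grouped into one piece)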
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import argparse import math from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import DataLoader, RandomSampler from torch.utils.data.distributed import DistributedSampler import random import pickle from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer from pytorch_pretrained_bert.modeling import BertForSeq2SeqDecoder from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear from nn.data_parallel import DataParallelImbalance import biunilm.seq2seq_loader as seq2seq_loader logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def ascii_print(text): text = text.encode("ascii", "ignore") print(text) def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--model_recover_path", default=None, type=str, help="The file of fine-tuned pretraining model.") parser.add_argument("--max_seq_length", default=512, type=int, help="The maximum total input sequence length after WordPiece tokenization. \n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument('--ffn_type', default=0, type=int, help="0: default mlp; 1: W((Wx+b) elem_prod x);") parser.add_argument('--num_qkv', default=0, type=int, help="Number of different <Q,K,V>.") parser.add_argument('--seg_emb', action='store_true', help="Using segment embedding for self-attention.") # decoding parameters parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--amp', action='store_true', help="Whether to use amp for fp16") parser.add_argument("--input_file", type=str, help="Input file") parser.add_argument('--subset', type=int, default=0, help="Decode a subset of the input dataset.") parser.add_argument("--output_file", type=str, help="output file") parser.add_argument("--split", type=str, default="", help="Data split (train/val/test).") parser.add_argument('--tokenized_input', action='store_true', help="Whether the input is tokenized.") parser.add_argument('--seed', type=int, default=123, help="random seed for initialization") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument('--new_segment_ids', action='store_true', help="Use new segment ids for bi-uni-directional LM.") parser.add_argument('--new_pos_ids', action='store_true', help="Use new position ids for LMs.") parser.add_argument('--batch_size', type=int, default=4, help="Batch size for decoding.") parser.add_argument('--beam_size', type=int, default=1, help="Beam size for searching") parser.add_argument('--length_penalty', type=float, default=0, help="Length penalty for beam search") parser.add_argument('--forbid_duplicate_ngrams', action='store_true') 
parser.add_argument('--forbid_ignore_word', type=str, default=None, help="Ignore the word during forbid_duplicate_ngrams") parser.add_argument("--min_len", default=None, type=int) parser.add_argument('--need_score_traces', action='store_true') parser.add_argument('--ngram_size', type=int, default=3) parser.add_argument('--mode', default="s2s", choices=["s2s", "l2r", "both"]) parser.add_argument('--max_tgt_length', type=int, default=128, help="maximum length of target sequence") parser.add_argument('--s2s_special_token', action='store_true', help="New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.") parser.add_argument('--s2s_add_segment', action='store_true', help="Additional segmental for the encoder of S2S.") parser.add_argument('--s2s_share_segment', action='store_true', help="Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).") parser.add_argument('--pos_shift', action='store_true', help="Using position shift for fine-tuning.") parser.add_argument('--not_predict_token', type=str, default=None, help="Do not predict the tokens during decoding.") args = parser.parse_args() if args.need_score_traces and args.beam_size <= 1: raise ValueError( "Score trace is only available for beam search with beam size > 1.") if args.max_tgt_length >= args.max_seq_length - 2: raise ValueError("Maximum tgt length exceeds max seq length - 2.") device = torch.device( "cuda" if torch.cuda.is_available() else "cpu") n_gpu = torch.cuda.device_count() random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) tokenizer = BertTokenizer.from_pretrained( args.bert_model, do_lower_case=args.do_lower_case) tokenizer.max_len = args.max_seq_length pair_num_relation = 0 bi_uni_pipeline = [] bi_uni_pipeline.append(seq2seq_loader.Preprocess4Seq2seqDecoder(list(tokenizer.vocab.keys()), tokenizer.convert_tokens_to_ids, args.max_seq_length, max_tgt_length=args.max_tgt_length, new_segment_ids=args.new_segment_ids, mode="s2s", num_qkv=args.num_qkv, s2s_special_token=args.s2s_special_token, s2s_add_segment=args.s2s_add_segment, s2s_share_segment=args.s2s_share_segment, pos_shift=args.pos_shift)) amp_handle = None if args.fp16 and args.amp: from apex import amp amp_handle = amp.init(enable_caching=True) logger.info("enable fp16 with amp") # Prepare model cls_num_labels = 2 type_vocab_size = 6 + \ (1 if args.s2s_add_segment else 0) if args.new_segment_ids else 2 mask_word_id, eos_word_ids, sos_word_id = tokenizer.convert_tokens_to_ids( ["[MASK]", "[SEP]", "[S2S_SOS]"]) def _get_token_id_set(s): r = None if s: w_list = [] for w in s.split('|'): if w.startswith('[') and w.endswith(']'): w_list.append(w.upper()) else: w_list.append(w) r = set(tokenizer.convert_tokens_to_ids(w_list)) return r forbid_ignore_set = _get_token_id_set(args.forbid_ignore_word) not_predict_set = _get_token_id_set(args.not_predict_token) print(args.model_recover_path) for model_recover_path in glob.glob(args.model_recover_path.strip()): logger.info("***** Recover model: %s *****", model_recover_path) model_recover = torch.load(model_recover_path) model = BertForSeq2SeqDecoder.from_pretrained(args.bert_model, state_dict=model_recover, num_labels=cls_num_labels, num_rel=pair_num_relation, type_vocab_size=type_vocab_size, task_idx=3, mask_word_id=mask_word_id, search_beam_size=args.beam_size, length_penalty=args.length_penalty, eos_id=eos_word_ids, sos_id=sos_word_id, forbid_duplicate_ngrams=args.forbid_duplicate_ngrams, forbid_ignore_set=forbid_ignore_set, 
not_predict_set=not_predict_set, ngram_size=args.ngram_size, min_len=args.min_len, mode=args.mode, max_position_embeddings=args.max_seq_length, ffn_type=args.ffn_type, num_qkv=args.num_qkv, seg_emb=args.seg_emb, pos_shift=args.pos_shift) del model_recover if args.fp16: model.half() model.to(device) if n_gpu > 1: model = torch.nn.DataParallel(model) torch.cuda.empty_cache() model.eval() next_i = 0 max_src_length = args.max_seq_length - 2 - args.max_tgt_length with open(args.input_file, encoding="utf-8") as fin: input_lines = [x.strip() for x in fin.readlines()] if args.subset > 0: logger.info("Decoding subset: %d", args.subset) input_lines = input_lines[:args.subset] data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer input_lines = [data_tokenizer.tokenize( x)[:max_src_length] for x in input_lines] input_lines = sorted(list(enumerate(input_lines)), key=lambda x: -len(x[1])) output_lines = [""] * len(input_lines) score_trace_list = [None] * len(input_lines) total_batch = math.ceil(len(input_lines) / args.batch_size) with tqdm(total=total_batch) as pbar: while next_i < len(input_lines): _chunk = input_lines[next_i:next_i + args.batch_size] buf_id = [x[0] for x in _chunk] buf = [x[1] for x in _chunk] next_i += args.batch_size max_a_len = max([len(x) for x in buf]) instances = [] for instance in [(x, max_a_len) for x in buf]: for proc in bi_uni_pipeline: instances.append(proc(instance)) with torch.no_grad(): batch = seq2seq_loader.batch_list_to_batch_tensors( instances) batch = [ t.to(device) if t is not None else None for t in batch] input_ids, token_type_ids, position_ids, input_mask, mask_qkv, task_idx = batch traces = model(input_ids, token_type_ids, position_ids, input_mask, task_idx=task_idx, mask_qkv=mask_qkv) if args.beam_size > 1: traces = {k: v.tolist() for k, v in traces.items()} output_ids = traces['pred_seq'] else: output_ids = traces.tolist() for i in range(len(buf)): w_ids = output_ids[i] output_buf = tokenizer.convert_ids_to_tokens(w_ids) output_tokens = [] for t in output_buf: if t in ("[SEP]", "[PAD]"): break output_tokens.append(t) output_sequence = ' '.join(detokenize(output_tokens)) output_lines[buf_id[i]] = output_sequence if args.need_score_traces: score_trace_list[buf_id[i]] = { 'scores': traces['scores'][i], 'wids': traces['wids'][i], 'ptrs': traces['ptrs'][i]} pbar.update(1) if args.output_file: fn_out = args.output_file else: fn_out = model_recover_path+'.'+args.split with open(fn_out, "w", encoding="utf-8") as fout: for l in output_lines: fout.write(l) fout.write("\n") if args.need_score_traces: with open(fn_out + ".trace.pickle", "wb") as fout_trace: pickle.dump( {"version": 0.0, "num_samples": len(input_lines)}, fout_trace) for x in score_trace_list: pickle.dump(x, fout_trace) if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/unilm-v1/src/biunilm/decode_seq2seq.py
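The decoding loop in decode_seq2seq.py cuts each predicted id sequence at the first [SEP]/[PAD] and then merges '##'-prefixed WordPieces before writing the hypothesis. A minimal standalone sketch of that post-processing step (the function names here are illustrative, not imports from the repo):

def merge_wordpieces(tokens):
    # Join '##'-prefixed continuation pieces onto the previous token,
    # mirroring the detokenize() helper used by the decoding script.
    merged = []
    for tk in tokens:
        if tk.startswith('##') and merged:
            merged[-1] += tk[2:]
        else:
            merged.append(tk)
    return merged

def finalize_hypothesis(tokens):
    # Cut at the first end-of-sequence or padding token, then merge subwords.
    out = []
    for tk in tokens:
        if tk in ("[SEP]", "[PAD]"):
            break
        out.append(tk)
    return ' '.join(merge_wordpieces(out))

print(finalize_hypothesis(['un', '##believ', '##able', 'results', '[SEP]', '[PAD]']))  # "unbelievable results"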
EXA-1-master
exa/models/unilm-master/unilm-v1/src/biunilm/__init__.py
from random import randint, shuffle, choice from random import random as rand import math import torch from biunilm.loader_utils import get_random_word, batch_list_to_batch_tensors, Pipeline # Input file format : # 1. One sentence per line. These should ideally be actual sentences, # not entire paragraphs or arbitrary spans of text. (Because we use # the sentence boundaries for the "next sentence prediction" task). # 2. Blank lines between documents. Document boundaries are needed # so that the "next sentence prediction" task doesn't span between documents. def truncate_tokens_pair(tokens_a, tokens_b, max_len, max_len_a=0, max_len_b=0, trunc_seg=None, always_truncate_tail=False): num_truncated_a = [0, 0] num_truncated_b = [0, 0] while True: if len(tokens_a) + len(tokens_b) <= max_len: break if (max_len_a > 0) and len(tokens_a) > max_len_a: trunc_tokens = tokens_a num_truncated = num_truncated_a elif (max_len_b > 0) and len(tokens_b) > max_len_b: trunc_tokens = tokens_b num_truncated = num_truncated_b elif trunc_seg: # truncate the specified segment if trunc_seg == 'a': trunc_tokens = tokens_a num_truncated = num_truncated_a else: trunc_tokens = tokens_b num_truncated = num_truncated_b else: # truncate the longer segment if len(tokens_a) > len(tokens_b): trunc_tokens = tokens_a num_truncated = num_truncated_a else: trunc_tokens = tokens_b num_truncated = num_truncated_b # whether always truncate source sequences if (not always_truncate_tail) and (rand() < 0.5): del trunc_tokens[0] num_truncated[0] += 1 else: trunc_tokens.pop() num_truncated[1] += 1 return num_truncated_a, num_truncated_b class Seq2SeqDataset(torch.utils.data.Dataset): """ Load sentence pair (sequential or random order) from corpus """ def __init__(self, file_src, file_tgt, batch_size, tokenizer, max_len, file_oracle=None, short_sampling_prob=0.1, sent_reverse_order=False, bi_uni_pipeline=[]): super().__init__() self.tokenizer = tokenizer # tokenize function self.max_len = max_len # maximum length of tokens self.short_sampling_prob = short_sampling_prob self.bi_uni_pipeline = bi_uni_pipeline self.batch_size = batch_size self.sent_reverse_order = sent_reverse_order # read the file into memory self.ex_list = [] if file_oracle is None: with open(file_src, "r", encoding='utf-8') as f_src, open(file_tgt, "r", encoding='utf-8') as f_tgt: for src, tgt in zip(f_src, f_tgt): src_tk = tokenizer.tokenize(src.strip()) tgt_tk = tokenizer.tokenize(tgt.strip()) assert len(src_tk) > 0 assert len(tgt_tk) > 0 self.ex_list.append((src_tk, tgt_tk)) else: with open(file_src, "r", encoding='utf-8') as f_src, \ open(file_tgt, "r", encoding='utf-8') as f_tgt, \ open(file_oracle, "r", encoding='utf-8') as f_orc: for src, tgt, orc in zip(f_src, f_tgt, f_orc): src_tk = tokenizer.tokenize(src.strip()) tgt_tk = tokenizer.tokenize(tgt.strip()) s_st, labl = orc.split('\t') s_st = [int(x) for x in s_st.split()] labl = [int(x) for x in labl.split()] self.ex_list.append((src_tk, tgt_tk, s_st, labl)) print('Load {0} documents'.format(len(self.ex_list))) def __len__(self): return len(self.ex_list) def __getitem__(self, idx): instance = self.ex_list[idx] proc = choice(self.bi_uni_pipeline) instance = proc(instance) return instance def __iter__(self): # iterator to load data for __ in range(math.ceil(len(self.ex_list) / float(self.batch_size))): batch = [] for __ in range(self.batch_size): idx = randint(0, len(self.ex_list)-1) batch.append(self.__getitem__(idx)) # To Tensor yield batch_list_to_batch_tensors(batch) class Preprocess4Seq2seq(Pipeline): """ 
Pre-processing steps for pretraining transformer """ def __init__(self, max_pred, mask_prob, vocab_words, indexer, max_len=512, skipgram_prb=0, skipgram_size=0, block_mask=False, mask_whole_word=False, new_segment_ids=False, truncate_config={}, mask_source_words=False, mode="s2s", has_oracle=False, num_qkv=0, s2s_special_token=False, s2s_add_segment=False, s2s_share_segment=False, pos_shift=False): super().__init__() self.max_len = max_len self.max_pred = max_pred # max tokens of prediction self.mask_prob = mask_prob # masking probability self.vocab_words = vocab_words # vocabulary (sub)words self.indexer = indexer # function from token to token index self.max_len = max_len self._tril_matrix = torch.tril(torch.ones( (max_len, max_len), dtype=torch.long)) self.skipgram_prb = skipgram_prb self.skipgram_size = skipgram_size self.mask_whole_word = mask_whole_word self.new_segment_ids = new_segment_ids self.always_truncate_tail = truncate_config.get( 'always_truncate_tail', False) self.max_len_a = truncate_config.get('max_len_a', None) self.max_len_b = truncate_config.get('max_len_b', None) self.trunc_seg = truncate_config.get('trunc_seg', None) self.task_idx = 3 # relax projection layer for different tasks self.mask_source_words = mask_source_words assert mode in ("s2s", "l2r") self.mode = mode self.has_oracle = has_oracle self.num_qkv = num_qkv self.s2s_special_token = s2s_special_token self.s2s_add_segment = s2s_add_segment self.s2s_share_segment = s2s_share_segment self.pos_shift = pos_shift def __call__(self, instance): tokens_a, tokens_b = instance[:2] if self.pos_shift: tokens_b = ['[S2S_SOS]'] + tokens_b # -3 for special tokens [CLS], [SEP], [SEP] num_truncated_a, _ = truncate_tokens_pair(tokens_a, tokens_b, self.max_len - 3, max_len_a=self.max_len_a, max_len_b=self.max_len_b, trunc_seg=self.trunc_seg, always_truncate_tail=self.always_truncate_tail) # Add Special Tokens if self.s2s_special_token: tokens = ['[S2S_CLS]'] + tokens_a + \ ['[S2S_SEP]'] + tokens_b + ['[SEP]'] else: tokens = ['[CLS]'] + tokens_a + ['[SEP]'] + tokens_b + ['[SEP]'] if self.new_segment_ids: if self.mode == "s2s": if self.s2s_add_segment: if self.s2s_share_segment: segment_ids = [0] + [1] * \ (len(tokens_a)+1) + [5]*(len(tokens_b)+1) else: segment_ids = [4] + [6] * \ (len(tokens_a)+1) + [5]*(len(tokens_b)+1) else: segment_ids = [4] * (len(tokens_a)+2) + \ [5]*(len(tokens_b)+1) else: segment_ids = [2] * (len(tokens)) else: segment_ids = [0]*(len(tokens_a)+2) + [1]*(len(tokens_b)+1) if self.pos_shift: n_pred = min(self.max_pred, len(tokens_b)) masked_pos = [len(tokens_a)+2+i for i in range(len(tokens_b))] masked_weights = [1]*n_pred masked_ids = self.indexer(tokens_b[1:]+['[SEP]']) else: # For masked Language Models # the number of prediction is sometimes less than max_pred when sequence is short effective_length = len(tokens_b) if self.mask_source_words: effective_length += len(tokens_a) n_pred = min(self.max_pred, max( 1, int(round(effective_length*self.mask_prob)))) # candidate positions of masked tokens cand_pos = [] special_pos = set() for i, tk in enumerate(tokens): # only mask tokens_b (target sequence) # we will mask [SEP] as an ending symbol if (i >= len(tokens_a)+2) and (tk != '[CLS]'): cand_pos.append(i) elif self.mask_source_words and (i < len(tokens_a)+2) and (tk != '[CLS]') and (not tk.startswith('[SEP')): cand_pos.append(i) else: special_pos.add(i) shuffle(cand_pos) masked_pos = set() max_cand_pos = max(cand_pos) for pos in cand_pos: if len(masked_pos) >= n_pred: break if pos in masked_pos: continue 
def _expand_whole_word(st, end): new_st, new_end = st, end while (new_st >= 0) and tokens[new_st].startswith('##'): new_st -= 1 while (new_end < len(tokens)) and tokens[new_end].startswith('##'): new_end += 1 return new_st, new_end if (self.skipgram_prb > 0) and (self.skipgram_size >= 2) and (rand() < self.skipgram_prb): # ngram cur_skipgram_size = randint(2, self.skipgram_size) if self.mask_whole_word: st_pos, end_pos = _expand_whole_word( pos, pos + cur_skipgram_size) else: st_pos, end_pos = pos, pos + cur_skipgram_size else: # directly mask if self.mask_whole_word: st_pos, end_pos = _expand_whole_word(pos, pos + 1) else: st_pos, end_pos = pos, pos + 1 for mp in range(st_pos, end_pos): if (0 < mp <= max_cand_pos) and (mp not in special_pos): masked_pos.add(mp) else: break masked_pos = list(masked_pos) if len(masked_pos) > n_pred: shuffle(masked_pos) masked_pos = masked_pos[:n_pred] masked_tokens = [tokens[pos] for pos in masked_pos] for pos in masked_pos: if rand() < 0.8: # 80% tokens[pos] = '[MASK]' elif rand() < 0.5: # 10% tokens[pos] = get_random_word(self.vocab_words) # when n_pred < max_pred, we only calculate loss within n_pred masked_weights = [1]*len(masked_tokens) # Token Indexing masked_ids = self.indexer(masked_tokens) # Token Indexing input_ids = self.indexer(tokens) # Zero Padding n_pad = self.max_len - len(input_ids) input_ids.extend([0]*n_pad) segment_ids.extend([0]*n_pad) if self.num_qkv > 1: mask_qkv = [0]*(len(tokens_a)+2) + [1] * (len(tokens_b)+1) mask_qkv.extend([0]*n_pad) else: mask_qkv = None input_mask = torch.zeros(self.max_len, self.max_len, dtype=torch.long) if self.mode == "s2s": input_mask[:, :len(tokens_a)+2].fill_(1) second_st, second_end = len( tokens_a)+2, len(tokens_a)+len(tokens_b)+3 input_mask[second_st:second_end, second_st:second_end].copy_( self._tril_matrix[:second_end-second_st, :second_end-second_st]) else: st, end = 0, len(tokens_a) + len(tokens_b) + 3 input_mask[st:end, st:end].copy_(self._tril_matrix[:end, :end]) # Zero Padding for masked target if self.max_pred > n_pred: n_pad = self.max_pred - n_pred if masked_ids is not None: masked_ids.extend([0]*n_pad) if masked_pos is not None: masked_pos.extend([0]*n_pad) if masked_weights is not None: masked_weights.extend([0]*n_pad) oracle_pos = None oracle_weights = None oracle_labels = None if self.has_oracle: s_st, labls = instance[2:] oracle_pos = [] oracle_labels = [] for st, lb in zip(s_st, labls): st = st - num_truncated_a[0] if st > 0 and st < len(tokens_a): oracle_pos.append(st) oracle_labels.append(lb) oracle_pos = oracle_pos[:20] oracle_labels = oracle_labels[:20] oracle_weights = [1] * len(oracle_pos) if len(oracle_pos) < 20: x_pad = 20 - len(oracle_pos) oracle_pos.extend([0] * x_pad) oracle_labels.extend([0] * x_pad) oracle_weights.extend([0] * x_pad) return (input_ids, segment_ids, input_mask, mask_qkv, masked_ids, masked_pos, masked_weights, -1, self.task_idx, oracle_pos, oracle_weights, oracle_labels) return (input_ids, segment_ids, input_mask, mask_qkv, masked_ids, masked_pos, masked_weights, -1, self.task_idx) class Preprocess4Seq2seqDecoder(Pipeline): """ Pre-processing steps for pretraining transformer """ def __init__(self, vocab_words, indexer, max_len=512, max_tgt_length=128, new_segment_ids=False, mode="s2s", num_qkv=0, s2s_special_token=False, s2s_add_segment=False, s2s_share_segment=False, pos_shift=False): super().__init__() self.max_len = max_len self.vocab_words = vocab_words # vocabulary (sub)words self.indexer = indexer # function from token to token index self.max_len = 
max_len self._tril_matrix = torch.tril(torch.ones( (max_len, max_len), dtype=torch.long)) self.new_segment_ids = new_segment_ids self.task_idx = 3 # relax projection layer for different tasks assert mode in ("s2s", "l2r") self.mode = mode self.max_tgt_length = max_tgt_length self.num_qkv = num_qkv self.s2s_special_token = s2s_special_token self.s2s_add_segment = s2s_add_segment self.s2s_share_segment = s2s_share_segment self.pos_shift = pos_shift def __call__(self, instance): tokens_a, max_a_len = instance # Add Special Tokens if self.s2s_special_token: padded_tokens_a = ['[S2S_CLS]'] + tokens_a + ['[S2S_SEP]'] else: padded_tokens_a = ['[CLS]'] + tokens_a + ['[SEP]'] assert len(padded_tokens_a) <= max_a_len + 2 if max_a_len + 2 > len(padded_tokens_a): padded_tokens_a += ['[PAD]'] * \ (max_a_len + 2 - len(padded_tokens_a)) assert len(padded_tokens_a) == max_a_len + 2 max_len_in_batch = min(self.max_tgt_length + max_a_len + 2, self.max_len) tokens = padded_tokens_a if self.new_segment_ids: if self.mode == "s2s": _enc_seg1 = 0 if self.s2s_share_segment else 4 if self.s2s_add_segment: if self.s2s_share_segment: segment_ids = [ 0] + [1]*(len(padded_tokens_a)-1) + [5]*(max_len_in_batch - len(padded_tokens_a)) else: segment_ids = [ 4] + [6]*(len(padded_tokens_a)-1) + [5]*(max_len_in_batch - len(padded_tokens_a)) else: segment_ids = [4]*(len(padded_tokens_a)) + \ [5]*(max_len_in_batch - len(padded_tokens_a)) else: segment_ids = [2]*max_len_in_batch else: segment_ids = [0]*(len(padded_tokens_a)) \ + [1]*(max_len_in_batch - len(padded_tokens_a)) if self.num_qkv > 1: mask_qkv = [0]*(len(padded_tokens_a)) + [1] * \ (max_len_in_batch - len(padded_tokens_a)) else: mask_qkv = None position_ids = [] for i in range(len(tokens_a) + 2): position_ids.append(i) for i in range(len(tokens_a) + 2, max_a_len + 2): position_ids.append(0) for i in range(max_a_len + 2, max_len_in_batch): position_ids.append(i - (max_a_len + 2) + len(tokens_a) + 2) # Token Indexing input_ids = self.indexer(tokens) # Zero Padding input_mask = torch.zeros( max_len_in_batch, max_len_in_batch, dtype=torch.long) if self.mode == "s2s": input_mask[:, :len(tokens_a)+2].fill_(1) else: st, end = 0, len(tokens_a) + 2 input_mask[st:end, st:end].copy_( self._tril_matrix[:end, :end]) input_mask[end:, :len(tokens_a)+2].fill_(1) second_st, second_end = len(padded_tokens_a), max_len_in_batch input_mask[second_st:second_end, second_st:second_end].copy_( self._tril_matrix[:second_end-second_st, :second_end-second_st]) return (input_ids, segment_ids, position_ids, input_mask, mask_qkv, self.task_idx)
EXA-1-master
exa/models/unilm-master/unilm-v1/src/biunilm/seq2seq_loader.py
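Both pipelines in seq2seq_loader.py build the same partially causal attention mask: every position may attend to the full source segment, while target positions additionally get a lower-triangular (causal) view of the target segment. A self-contained sketch of that construction, assuming src_len/tgt_len already include the special tokens (the helper name is illustrative):

import torch

def make_s2s_mask(src_len, tgt_len, max_len):
    # src_len counts segment A plus its [CLS]/[SEP]; tgt_len counts segment B plus its [SEP].
    mask = torch.zeros(max_len, max_len, dtype=torch.long)
    mask[:, :src_len].fill_(1)  # every position sees the whole source segment
    tril = torch.tril(torch.ones(tgt_len, tgt_len, dtype=torch.long))
    # target positions additionally see earlier target positions (causal window)
    mask[src_len:src_len + tgt_len, src_len:src_len + tgt_len].copy_(tril)
    return mask

print(make_s2s_mask(src_len=4, tgt_len=3, max_len=8))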
import pickle import math import argparse import glob from pathlib import Path from tqdm import tqdm import unicodedata from pytorch_pretrained_bert.tokenization import BertTokenizer def read_traces_from_file(file_name): with open(file_name, "rb") as fin: meta = pickle.load(fin) num_samples = meta["num_samples"] samples = [] for _ in range(num_samples): samples.append(pickle.load(fin)) return samples def get_best_sequence(sample, eos_id, pad_id, length_penalty=None, alpha=None, expect=None, min_len=None): # if not any((length_penalty, alpha, expect, min_len)): # raise ValueError( # "You can only specify length penalty or alpha, but not both.") scores = sample["scores"] wids_list = sample["wids"] ptrs = sample["ptrs"] last_frame_id = len(scores) - 1 for i, wids in enumerate(wids_list): if all(wid in (eos_id, pad_id) for wid in wids): last_frame_id = i break while all(wid == pad_id for wid in wids_list[last_frame_id]): last_frame_id -= 1 max_score = -math.inf frame_id = -1 pos_in_frame = -1 for fid in range(last_frame_id + 1): for i, wid in enumerate(wids_list[fid]): if fid <= last_frame_id and scores[fid][i] >= 0: # skip paddings continue if (wid in (eos_id, pad_id)) or fid == last_frame_id: s = scores[fid][i] if length_penalty: if expect: s -= length_penalty * math.fabs(fid+1 - expect) else: s += length_penalty * (fid + 1) elif alpha: s = s / math.pow((5 + fid + 1) / 6.0, alpha) if s > max_score: # if (frame_id != -1) and min_len and (fid+1 < min_len): # continue max_score = s frame_id = fid pos_in_frame = i if frame_id == -1: seq = [] else: seq = [wids_list[frame_id][pos_in_frame]] for fid in range(frame_id, 0, -1): pos_in_frame = ptrs[fid][pos_in_frame] seq.append(wids_list[fid - 1][pos_in_frame]) seq.reverse() return seq def detokenize(tk_list): r_list = [] for tk in tk_list: if tk.startswith('##') and len(r_list) > 0: r_list[-1] = r_list[-1] + tk[2:] else: r_list.append(tk) return r_list def simple_postprocess(tk_list): # truncate duplicate punctuations while tk_list and len(tk_list) > 4 and len(tk_list[-1]) == 1 and unicodedata.category(tk_list[-1]).startswith('P') and all(it == tk_list[-1] for it in tk_list[-4:]): tk_list = tk_list[:-3] return tk_list def main(args): tokenizer = BertTokenizer.from_pretrained( args.bert_model, do_lower_case=args.do_lower_case) eos_id, pad_id = set(tokenizer.convert_tokens_to_ids(["[SEP]", "[PAD]"])) for input_file in tqdm(glob.glob(args.input)): if not Path(input_file+'.trace.pickle').exists(): continue print(input_file) samples = read_traces_from_file(input_file+'.trace.pickle') results = [] for s in samples: word_ids = get_best_sequence(s, eos_id, pad_id, alpha=args.alpha, length_penalty=args.length_penalty, expect=args.expect, min_len=args.min_len) tokens = tokenizer.convert_ids_to_tokens(word_ids) buf = [] for t in tokens: if t in ("[SEP]", "[PAD]"): break else: buf.append(t) results.append(" ".join(simple_postprocess(detokenize(buf)))) fn_out = input_file+'.' 
if args.length_penalty: fn_out += 'lenp'+str(args.length_penalty) if args.expect: fn_out += 'exp'+str(args.expect) if args.alpha: fn_out += 'alp'+str(args.alpha) if args.min_len: fn_out += 'minl'+str(args.min_len) with open(fn_out, "w", encoding="utf-8") as fout: for line in results: fout.write(line) fout.write("\n") if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("--input", type=str, help="Input file.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--alpha", default=None, type=float) parser.add_argument("--length_penalty", default=None, type=float) parser.add_argument("--expect", default=None, type=float, help="Expectation of target length.") parser.add_argument("--min_len", default=None, type=int) parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") args = parser.parse_args() main(args)
EXA-1-master
exa/models/unilm-master/unilm-v1/src/biunilm/gen_seq_from_trace.py
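get_best_sequence() in gen_seq_from_trace.py rescores each frame of the beam trace with either an additive length penalty or GNMT-style length normalization before backtracking through the pointer arrays. A small sketch of just the scoring rule, using the same conventions (fid is the zero-based frame index, so fid + 1 is the hypothesis length):

import math

def rescore(raw_score, fid, length_penalty=None, alpha=None, expect=None):
    if length_penalty:
        if expect:
            # penalize distance from an expected target length
            return raw_score - length_penalty * math.fabs(fid + 1 - expect)
        # otherwise reward longer hypotheses
        return raw_score + length_penalty * (fid + 1)
    if alpha:
        # GNMT-style length normalization
        return raw_score / math.pow((5 + fid + 1) / 6.0, alpha)
    return raw_score

print(rescore(-3.2, fid=9, alpha=0.6))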
"""BERT finetuning runner.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import logging import glob import math import json import argparse import random from pathlib import Path from tqdm import tqdm, trange import numpy as np import torch from torch.utils.data import RandomSampler from torch.utils.data.distributed import DistributedSampler from pytorch_pretrained_bert.tokenization import BertTokenizer, WhitespaceTokenizer from pytorch_pretrained_bert.modeling import BertForPreTrainingLossMask from pytorch_pretrained_bert.optimization import BertAdam, warmup_linear from nn.data_parallel import DataParallelImbalance import biunilm.seq2seq_loader as seq2seq_loader import torch.distributed as dist logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO) logger = logging.getLogger(__name__) def _get_max_epoch_model(output_dir): fn_model_list = glob.glob(os.path.join(output_dir, "model.*.bin")) fn_optim_list = glob.glob(os.path.join(output_dir, "optim.*.bin")) if (not fn_model_list) or (not fn_optim_list): return None both_set = set([int(Path(fn).stem.split('.')[-1]) for fn in fn_model_list] ) & set([int(Path(fn).stem.split('.')[-1]) for fn in fn_optim_list]) if both_set: return max(both_set) else: return None def main(): parser = argparse.ArgumentParser() # Required parameters parser.add_argument("--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task.") parser.add_argument("--src_file", default=None, type=str, help="The input data file name.") parser.add_argument("--tgt_file", default=None, type=str, help="The output data file name.") parser.add_argument("--bert_model", default=None, type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, " "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.") parser.add_argument("--config_path", default=None, type=str, help="Bert config file path.") parser.add_argument("--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.") parser.add_argument("--log_dir", default='', type=str, required=True, help="The output directory where the log will be written.") parser.add_argument("--model_recover_path", default=None, type=str, required=True, help="The file of fine-tuned pretraining model.") parser.add_argument("--optim_recover_path", default=None, type=str, help="The file of pretraining optimizer.") # Other parameters parser.add_argument("--max_seq_length", default=128, type=int, help="The maximum total input sequence length after WordPiece tokenization. 
\n" "Sequences longer than this will be truncated, and sequences shorter \n" "than this will be padded.") parser.add_argument("--do_train", action='store_true', help="Whether to run training.") parser.add_argument("--do_eval", action='store_true', help="Whether to run eval on the dev set.") parser.add_argument("--do_lower_case", action='store_true', help="Set this flag if you are using an uncased model.") parser.add_argument("--train_batch_size", default=32, type=int, help="Total batch size for training.") parser.add_argument("--eval_batch_size", default=64, type=int, help="Total batch size for eval.") parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument("--label_smoothing", default=0, type=float, help="The initial learning rate for Adam.") parser.add_argument("--weight_decay", default=0.01, type=float, help="The weight decay rate for Adam.") parser.add_argument("--finetune_decay", action='store_true', help="Weight decay to the original weights.") parser.add_argument("--num_train_epochs", default=3.0, type=float, help="Total number of training epochs to perform.") parser.add_argument("--warmup_proportion", default=0.1, type=float, help="Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10%% of training.") parser.add_argument("--hidden_dropout_prob", default=0.1, type=float, help="Dropout rate for hidden states.") parser.add_argument("--attention_probs_dropout_prob", default=0.1, type=float, help="Dropout rate for attention probabilities.") parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available") parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus") parser.add_argument('--seed', type=int, default=42, help="random seed for initialization") parser.add_argument('--gradient_accumulation_steps', type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.") parser.add_argument('--fp16', action='store_true', help="Whether to use 16-bit float precision instead of 32-bit") parser.add_argument('--fp32_embedding', action='store_true', help="Whether to use 32-bit float precision instead of 16-bit for embeddings") parser.add_argument('--loss_scale', type=float, default=0, help="Loss scaling to improve fp16 numeric stability. 
Only used when fp16 set to True.\n" "0 (default value): dynamic loss scaling.\n" "Positive power of 2: static loss scaling value.\n") parser.add_argument('--amp', action='store_true', help="Whether to use amp for fp16") parser.add_argument('--from_scratch', action='store_true', help="Initialize parameters with random values (i.e., training from scratch).") parser.add_argument('--new_segment_ids', action='store_true', help="Use new segment ids for bi-uni-directional LM.") parser.add_argument('--new_pos_ids', action='store_true', help="Use new position ids for LMs.") parser.add_argument('--tokenized_input', action='store_true', help="Whether the input is tokenized.") parser.add_argument('--max_len_a', type=int, default=0, help="Truncate_config: maximum length of segment A.") parser.add_argument('--max_len_b', type=int, default=0, help="Truncate_config: maximum length of segment B.") parser.add_argument('--trunc_seg', default='', help="Truncate_config: first truncate segment A/B (option: a, b).") parser.add_argument('--always_truncate_tail', action='store_true', help="Truncate_config: Whether we should always truncate tail.") parser.add_argument("--mask_prob", default=0.15, type=float, help="Number of prediction is sometimes less than max_pred when sequence is short.") parser.add_argument("--mask_prob_eos", default=0, type=float, help="Number of prediction is sometimes less than max_pred when sequence is short.") parser.add_argument('--max_pred', type=int, default=20, help="Max tokens of prediction.") parser.add_argument("--num_workers", default=0, type=int, help="Number of workers for the data loader.") parser.add_argument('--mask_source_words', action='store_true', help="Whether to mask source words for training") parser.add_argument('--skipgram_prb', type=float, default=0.0, help='prob of ngram mask') parser.add_argument('--skipgram_size', type=int, default=1, help='the max size of ngram mask') parser.add_argument('--mask_whole_word', action='store_true', help="Whether masking a whole word.") parser.add_argument('--do_l2r_training', action='store_true', help="Whether to do left to right training") parser.add_argument('--has_sentence_oracle', action='store_true', help="Whether to have sentence level oracle for training. 
" "Only useful for summary generation") parser.add_argument('--max_position_embeddings', type=int, default=None, help="max position embeddings") parser.add_argument('--relax_projection', action='store_true', help="Use different projection layers for tasks.") parser.add_argument('--ffn_type', default=0, type=int, help="0: default mlp; 1: W((Wx+b) elem_prod x);") parser.add_argument('--num_qkv', default=0, type=int, help="Number of different <Q,K,V>.") parser.add_argument('--seg_emb', action='store_true', help="Using segment embedding for self-attention.") parser.add_argument('--s2s_special_token', action='store_true', help="New special tokens ([S2S_SEP]/[S2S_CLS]) of S2S.") parser.add_argument('--s2s_add_segment', action='store_true', help="Additional segmental for the encoder of S2S.") parser.add_argument('--s2s_share_segment', action='store_true', help="Sharing segment embeddings for the encoder of S2S (used with --s2s_add_segment).") parser.add_argument('--pos_shift', action='store_true', help="Using position shift for fine-tuning.") args = parser.parse_args() assert Path(args.model_recover_path).exists( ), "--model_recover_path doesn't exist" args.output_dir = args.output_dir.replace( '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', '')) args.log_dir = args.log_dir.replace( '[PT_OUTPUT_DIR]', os.getenv('PT_OUTPUT_DIR', '')) os.makedirs(args.output_dir, exist_ok=True) os.makedirs(args.log_dir, exist_ok=True) json.dump(args.__dict__, open(os.path.join( args.output_dir, 'opt.json'), 'w'), sort_keys=True, indent=2) if args.local_rank == -1 or args.no_cuda: device = torch.device( "cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu") n_gpu = torch.cuda.device_count() else: torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) n_gpu = 1 # Initializes the distributed backend which will take care of sychronizing nodes/GPUs dist.init_process_group(backend='nccl') logger.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format( device, n_gpu, bool(args.local_rank != -1), args.fp16)) if args.gradient_accumulation_steps < 1: raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format( args.gradient_accumulation_steps)) args.train_batch_size = int( args.train_batch_size / args.gradient_accumulation_steps) random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) if n_gpu > 0: torch.cuda.manual_seed_all(args.seed) if not args.do_train and not args.do_eval: raise ValueError( "At least one of `do_train` or `do_eval` must be True.") if args.local_rank not in (-1, 0): # Make sure only the first process in distributed training will download model & vocab dist.barrier() tokenizer = BertTokenizer.from_pretrained( args.bert_model, do_lower_case=args.do_lower_case) if args.max_position_embeddings: tokenizer.max_len = args.max_position_embeddings data_tokenizer = WhitespaceTokenizer() if args.tokenized_input else tokenizer if args.local_rank == 0: dist.barrier() if args.do_train: print("Loading Train Dataset", args.data_dir) bi_uni_pipeline = [seq2seq_loader.Preprocess4Seq2seq(args.max_pred, args.mask_prob, list(tokenizer.vocab.keys( )), tokenizer.convert_tokens_to_ids, args.max_seq_length, new_segment_ids=args.new_segment_ids, truncate_config={'max_len_a': args.max_len_a, 'max_len_b': args.max_len_b, 'trunc_seg': args.trunc_seg, 'always_truncate_tail': args.always_truncate_tail}, mask_source_words=args.mask_source_words, skipgram_prb=args.skipgram_prb, skipgram_size=args.skipgram_size, 
mask_whole_word=args.mask_whole_word, mode="s2s", has_oracle=args.has_sentence_oracle, num_qkv=args.num_qkv, s2s_special_token=args.s2s_special_token, s2s_add_segment=args.s2s_add_segment, s2s_share_segment=args.s2s_share_segment, pos_shift=args.pos_shift)] file_oracle = None if args.has_sentence_oracle: file_oracle = os.path.join(args.data_dir, 'train.oracle') fn_src = os.path.join( args.data_dir, args.src_file if args.src_file else 'train.src') fn_tgt = os.path.join( args.data_dir, args.tgt_file if args.tgt_file else 'train.tgt') train_dataset = seq2seq_loader.Seq2SeqDataset( fn_src, fn_tgt, args.train_batch_size, data_tokenizer, args.max_seq_length, file_oracle=file_oracle, bi_uni_pipeline=bi_uni_pipeline) if args.local_rank == -1: train_sampler = RandomSampler(train_dataset, replacement=False) _batch_size = args.train_batch_size else: train_sampler = DistributedSampler(train_dataset) _batch_size = args.train_batch_size // dist.get_world_size() train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=_batch_size, sampler=train_sampler, num_workers=args.num_workers, collate_fn=seq2seq_loader.batch_list_to_batch_tensors, pin_memory=False) # note: args.train_batch_size has been changed to (/= args.gradient_accumulation_steps) # t_total = int(math.ceil(len(train_dataset.ex_list) / args.train_batch_size) t_total = int(len(train_dataloader) * args.num_train_epochs / args.gradient_accumulation_steps) amp_handle = None if args.fp16 and args.amp: from apex import amp amp_handle = amp.init(enable_caching=True) logger.info("enable fp16 with amp") # Prepare model recover_step = _get_max_epoch_model(args.output_dir) cls_num_labels = 2 type_vocab_size = 6 + \ (1 if args.s2s_add_segment else 0) if args.new_segment_ids else 2 num_sentlvl_labels = 2 if args.has_sentence_oracle else 0 relax_projection = 4 if args.relax_projection else 0 if args.local_rank not in (-1, 0): # Make sure only the first process in distributed training will download model & vocab dist.barrier() if (recover_step is None) and (args.model_recover_path is None): # if _state_dict == {}, the parameters are randomly initialized # if _state_dict == None, the parameters are initialized with bert-init _state_dict = {} if args.from_scratch else None model = BertForPreTrainingLossMask.from_pretrained( args.bert_model, state_dict=_state_dict, num_labels=cls_num_labels, num_rel=0, type_vocab_size=type_vocab_size, config_path=args.config_path, task_idx=3, num_sentlvl_labels=num_sentlvl_labels, max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing, fp32_embedding=args.fp32_embedding, relax_projection=relax_projection, new_pos_ids=args.new_pos_ids, ffn_type=args.ffn_type, hidden_dropout_prob=args.hidden_dropout_prob, attention_probs_dropout_prob=args.attention_probs_dropout_prob, num_qkv=args.num_qkv, seg_emb=args.seg_emb) global_step = 0 else: if recover_step: logger.info("***** Recover model: %d *****", recover_step) model_recover = torch.load(os.path.join( args.output_dir, "model.{0}.bin".format(recover_step)), map_location='cpu') # recover_step == number of epochs global_step = math.floor( recover_step * t_total / args.num_train_epochs) elif args.model_recover_path: logger.info("***** Recover model: %s *****", args.model_recover_path) model_recover = torch.load( args.model_recover_path, map_location='cpu') global_step = 0 model = BertForPreTrainingLossMask.from_pretrained( args.bert_model, state_dict=model_recover, num_labels=cls_num_labels, num_rel=0, type_vocab_size=type_vocab_size, 
config_path=args.config_path, task_idx=3, num_sentlvl_labels=num_sentlvl_labels, max_position_embeddings=args.max_position_embeddings, label_smoothing=args.label_smoothing, fp32_embedding=args.fp32_embedding, relax_projection=relax_projection, new_pos_ids=args.new_pos_ids, ffn_type=args.ffn_type, hidden_dropout_prob=args.hidden_dropout_prob, attention_probs_dropout_prob=args.attention_probs_dropout_prob, num_qkv=args.num_qkv, seg_emb=args.seg_emb) if args.local_rank == 0: dist.barrier() if args.fp16: model.half() if args.fp32_embedding: model.bert.embeddings.word_embeddings.float() model.bert.embeddings.position_embeddings.float() model.bert.embeddings.token_type_embeddings.float() model.to(device) if args.local_rank != -1: try: from torch.nn.parallel import DistributedDataParallel as DDP except ImportError: raise ImportError("DistributedDataParallel") model = DDP(model, device_ids=[ args.local_rank], output_device=args.local_rank, find_unused_parameters=True) elif n_gpu > 1: # model = torch.nn.DataParallel(model) model = DataParallelImbalance(model) # Prepare optimizer param_optimizer = list(model.named_parameters()) no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight'] optimizer_grouped_parameters = [ {'params': [p for n, p in param_optimizer if not any( nd in n for nd in no_decay)], 'weight_decay': 0.01}, {'params': [p for n, p in param_optimizer if any( nd in n for nd in no_decay)], 'weight_decay': 0.0} ] if args.fp16: try: # from apex.optimizers import FP16_Optimizer from pytorch_pretrained_bert.optimization_fp16 import FP16_Optimizer_State from apex.optimizers import FusedAdam except ImportError: raise ImportError( "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.") optimizer = FusedAdam(optimizer_grouped_parameters, lr=args.learning_rate, bias_correction=False, max_grad_norm=1.0) if args.loss_scale == 0: optimizer = FP16_Optimizer_State( optimizer, dynamic_loss_scale=True) else: optimizer = FP16_Optimizer_State( optimizer, static_loss_scale=args.loss_scale) else: optimizer = BertAdam(optimizer_grouped_parameters, lr=args.learning_rate, warmup=args.warmup_proportion, t_total=t_total) if recover_step: logger.info("***** Recover optimizer: %d *****", recover_step) optim_recover = torch.load(os.path.join( args.output_dir, "optim.{0}.bin".format(recover_step)), map_location='cpu') if hasattr(optim_recover, 'state_dict'): optim_recover = optim_recover.state_dict() optimizer.load_state_dict(optim_recover) if args.loss_scale == 0: logger.info("***** Recover optimizer: dynamic_loss_scale *****") optimizer.dynamic_loss_scale = True logger.info("***** CUDA.empty_cache() *****") torch.cuda.empty_cache() if args.do_train: logger.info("***** Running training *****") logger.info(" Batch size = %d", args.train_batch_size) logger.info(" Num steps = %d", t_total) model.train() if recover_step: start_epoch = recover_step+1 else: start_epoch = 1 for i_epoch in trange(start_epoch, int(args.num_train_epochs)+1, desc="Epoch", disable=args.local_rank not in (-1, 0)): if args.local_rank != -1: train_sampler.set_epoch(i_epoch) iter_bar = tqdm(train_dataloader, desc='Iter (loss=X.XXX)', disable=args.local_rank not in (-1, 0)) for step, batch in enumerate(iter_bar): batch = [ t.to(device) if t is not None else None for t in batch] if args.has_sentence_oracle: input_ids, segment_ids, input_mask, mask_qkv, lm_label_ids, masked_pos, masked_weights, is_next, task_idx, oracle_pos, oracle_weights, oracle_labels = batch else: input_ids, segment_ids, input_mask, 
mask_qkv, lm_label_ids, masked_pos, masked_weights, is_next, task_idx = batch oracle_pos, oracle_weights, oracle_labels = None, None, None loss_tuple = model(input_ids, segment_ids, input_mask, lm_label_ids, is_next, masked_pos=masked_pos, masked_weights=masked_weights, task_idx=task_idx, masked_pos_2=oracle_pos, masked_weights_2=oracle_weights, masked_labels_2=oracle_labels, mask_qkv=mask_qkv) masked_lm_loss, next_sentence_loss = loss_tuple if n_gpu > 1: # mean() to average on multi-gpu. # loss = loss.mean() masked_lm_loss = masked_lm_loss.mean() next_sentence_loss = next_sentence_loss.mean() loss = masked_lm_loss + next_sentence_loss # logging for each step (i.e., before normalization by args.gradient_accumulation_steps) iter_bar.set_description('Iter (loss=%5.3f)' % loss.item()) # ensure that accumlated gradients are normalized if args.gradient_accumulation_steps > 1: loss = loss / args.gradient_accumulation_steps if args.fp16: optimizer.backward(loss) if amp_handle: amp_handle._clear_cache() else: loss.backward() if (step + 1) % args.gradient_accumulation_steps == 0: lr_this_step = args.learning_rate * \ warmup_linear(global_step/t_total, args.warmup_proportion) if args.fp16: # modify learning rate with special warm up BERT uses for param_group in optimizer.param_groups: param_group['lr'] = lr_this_step optimizer.step() optimizer.zero_grad() global_step += 1 # Save a trained model if (args.local_rank == -1 or torch.distributed.get_rank() == 0): logger.info( "** ** * Saving fine-tuned model and optimizer ** ** * ") model_to_save = model.module if hasattr( model, 'module') else model # Only save the model it-self output_model_file = os.path.join( args.output_dir, "model.{0}.bin".format(i_epoch)) torch.save(model_to_save.state_dict(), output_model_file) output_optim_file = os.path.join( args.output_dir, "optim.{0}.bin".format(i_epoch)) torch.save(optimizer.state_dict(), output_optim_file) logger.info("***** CUDA.empty_cache() *****") torch.cuda.empty_cache() if __name__ == "__main__": main()
EXA-1-master
exa/models/unilm-master/unilm-v1/src/biunilm/run_seq2seq.py
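The fine-tuning loop in run_seq2seq.py divides the loss by gradient_accumulation_steps and only steps the optimizer every N micro-batches. The pattern in isolation, as a runnable toy sketch (the linear model and random batches are placeholders, not part of the repo):

import torch

model = torch.nn.Linear(8, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
accum_steps = 4                       # stands in for args.gradient_accumulation_steps
batches = [torch.randn(2, 8) for _ in range(8)]

for step, x in enumerate(batches):
    loss = model(x).pow(2).mean()     # stand-in for the masked-LM + next-sentence loss
    (loss / accum_steps).backward()   # scale so the accumulated grads match one large batch
    if (step + 1) % accum_steps == 0:
        optimizer.step()              # update only once every accum_steps micro-batches
        optimizer.zero_grad()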
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import subprocess import sys from setuptools import setup, find_packages, Extension from setuptools import Extension, find_packages, setup import site site.ENABLE_USER_SITE = True if sys.version_info < (3, 6): sys.exit("Sorry, Python >= 3.6 is required for fairseq.") def write_version_py(): with open(os.path.join("fairseq", "version.txt")) as f: version = f.read().strip() # append latest commit hash to version string try: sha = ( subprocess.check_output(["git", "rev-parse", "HEAD"]) .decode("ascii") .strip() ) version += "+" + sha[:7] except Exception: pass # write version info to fairseq/version.py with open(os.path.join("fairseq", "version.py"), "w") as f: f.write('__version__ = "{}"\n'.format(version)) return version version = write_version_py() with open("README.md") as f: readme = f.read() if sys.platform == "darwin": extra_compile_args = ["-stdlib=libc++", "-O3"] else: extra_compile_args = ["-std=c++11", "-O3"] class NumpyExtension(Extension): """Source: https://stackoverflow.com/a/54128391""" def __init__(self, *args, **kwargs): self.__include_dirs = [] super().__init__(*args, **kwargs) @property def include_dirs(self): import numpy return self.__include_dirs + [numpy.get_include()] @include_dirs.setter def include_dirs(self, dirs): self.__include_dirs = dirs extensions = [ Extension( "fairseq.libbleu", sources=[ "fairseq/clib/libbleu/libbleu.cpp", "fairseq/clib/libbleu/module.cpp", ], extra_compile_args=extra_compile_args, ), NumpyExtension( "fairseq.data.data_utils_fast", sources=["fairseq/data/data_utils_fast.pyx"], language="c++", extra_compile_args=extra_compile_args, ), NumpyExtension( "fairseq.data.token_block_utils_fast", sources=["fairseq/data/token_block_utils_fast.pyx"], language="c++", extra_compile_args=extra_compile_args, ), ] cmdclass = {} try: # torch is not available when generating docs from torch.utils import cpp_extension extensions.extend( [ cpp_extension.CppExtension( "fairseq.libbase", sources=[ "fairseq/clib/libbase/balanced_assignment.cpp", ], ) ] ) extensions.extend( [ cpp_extension.CppExtension( "fairseq.libnat", sources=[ "fairseq/clib/libnat/edit_dist.cpp", ], ), cpp_extension.CppExtension( "alignment_train_cpu_binding", sources=[ "examples/operators/alignment_train_cpu.cpp", ], ), ] ) if "CUDA_HOME" in os.environ: extensions.extend( [ cpp_extension.CppExtension( "fairseq.libnat_cuda", sources=[ "fairseq/clib/libnat_cuda/edit_dist.cu", "fairseq/clib/libnat_cuda/binding.cpp", ], ), cpp_extension.CppExtension( "fairseq.ngram_repeat_block_cuda", sources=[ "fairseq/clib/cuda/ngram_repeat_block_cuda.cpp", "fairseq/clib/cuda/ngram_repeat_block_cuda_kernel.cu", ], ), cpp_extension.CppExtension( "alignment_train_cuda_binding", sources=[ "examples/operators/alignment_train_kernel.cu", "examples/operators/alignment_train_cuda.cpp", ], ), ] ) cmdclass["build_ext"] = cpp_extension.BuildExtension except ImportError: pass if "READTHEDOCS" in os.environ: # don't build extensions when generating docs extensions = [] if "build_ext" in cmdclass: del cmdclass["build_ext"] # use CPU build of PyTorch dependency_links = [ "https://download.pytorch.org/whl/cpu/torch-1.7.0%2Bcpu-cp36-cp36m-linux_x86_64.whl" ] else: dependency_links = [] if "clean" in sys.argv[1:]: # Source: https://bit.ly/2NLVsgE print("deleting Cython files...") import subprocess subprocess.run( ["rm -f 
fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd"], shell=True, ) extra_packages = [] if os.path.exists(os.path.join("fairseq", "model_parallel", "megatron", "mpu")): extra_packages.append("fairseq.model_parallel.megatron.mpu") def do_setup(package_data): setup( name="fairseq", version=version, description="Facebook AI Research Sequence-to-Sequence Toolkit", url="https://github.com/pytorch/fairseq", classifiers=[ "Intended Audience :: Science/Research", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Topic :: Scientific/Engineering :: Artificial Intelligence", ], long_description=readme, long_description_content_type="text/markdown", setup_requires=[ "cython", 'numpy<1.20.0; python_version<"3.7"', 'numpy; python_version>="3.7"', "setuptools>=18.0", ], install_requires=[ "cffi", "cython", 'dataclasses; python_version<"3.7"', "hydra-core>=1.0.7,<1.1", "omegaconf<2.1", 'numpy<1.20.0; python_version<"3.7"', 'numpy; python_version>="3.7"', "regex", "sacrebleu>=1.4.12", "torch", "tqdm", "bitarray", "torchaudio>=0.8.0", ], dependency_links=dependency_links, packages=find_packages( exclude=[ "examples", "examples.*", "scripts", "scripts.*", "tests", "tests.*", ] ) + extra_packages, package_data=package_data, ext_modules=extensions, test_suite="tests", entry_points={ "console_scripts": [ "fairseq-eval-lm = fairseq_cli.eval_lm:cli_main", "fairseq-generate = fairseq_cli.generate:cli_main", "fairseq-hydra-train = fairseq_cli.hydra_train:cli_main", "fairseq-interactive = fairseq_cli.interactive:cli_main", "fairseq-preprocess = fairseq_cli.preprocess:cli_main", "fairseq-score = fairseq_cli.score:cli_main", "fairseq-train = fairseq_cli.train:cli_main", "fairseq-validate = fairseq_cli.validate:cli_main", ], }, cmdclass=cmdclass, zip_safe=False, ) def get_files(path, relative_to="fairseq"): all_files = [] for root, _dirs, files in os.walk(path, followlinks=True): root = os.path.relpath(root, relative_to) for file in files: if file.endswith(".pyc"): continue all_files.append(os.path.join(root, file)) return all_files if __name__ == "__main__": try: # symlink examples into fairseq package so package_data accepts them fairseq_examples = os.path.join("fairseq", "examples") if "build_ext" not in sys.argv[1:] and not os.path.exists(fairseq_examples): os.symlink(os.path.join("..", "examples"), fairseq_examples) package_data = { "fairseq": ( get_files(fairseq_examples) + get_files(os.path.join("fairseq", "config")) ) } do_setup(package_data) finally: if "build_ext" not in sys.argv[1:] and os.path.islink(fairseq_examples): os.unlink(fairseq_examples)
EXA-1-master
exa/models/unilm-master/edgelm/setup.py
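write_version_py() in this setup.py stamps the package version with the short git commit hash when a checkout is available. The same idea as a standalone helper (the function name and example version are illustrative):

import subprocess

def version_with_sha(base_version):
    # Append the short commit hash, as write_version_py() does; fall back to the
    # plain version string when git metadata is unavailable.
    try:
        sha = subprocess.check_output(["git", "rev-parse", "HEAD"]).decode("ascii").strip()
        return base_version + "+" + sha[:7]
    except Exception:
        return base_version

print(version_with_sha("1.0.0"))   # e.g. "1.0.0+4a7b2c1" inside a git checkout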
#!/usr/bin/env python3 -u
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Legacy entry point. Use fairseq_cli/train.py or fairseq-train instead.
"""

from fairseq_cli.train import cli_main

if __name__ == "__main__":
    cli_main()

EXA-1-master
exa/models/unilm-master/edgelm/train.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """isort:skip_file""" import functools import importlib dependencies = [ "dataclasses", "hydra", "numpy", "omegaconf", "regex", "requests", "torch", ] # Check for required dependencies and raise a RuntimeError if any are missing. missing_deps = [] for dep in dependencies: try: importlib.import_module(dep) except ImportError: # Hack: the hydra package is provided under the "hydra-core" name in # pypi. We don't want the user mistakenly calling `pip install hydra` # since that will install an unrelated package. if dep == "hydra": dep = "hydra-core" missing_deps.append(dep) if len(missing_deps) > 0: raise RuntimeError("Missing dependencies: {}".format(", ".join(missing_deps))) # only do fairseq imports after checking for dependencies from fairseq.hub_utils import ( # noqa; noqa BPEHubInterface as bpe, TokenizerHubInterface as tokenizer, ) from fairseq.models import MODEL_REGISTRY # noqa # torch.hub doesn't build Cython components, so if they are not found then try # to build them here try: import fairseq.data.token_block_utils_fast # noqa except ImportError: try: import cython # noqa import os from setuptools import sandbox sandbox.run_setup( os.path.join(os.path.dirname(__file__), "setup.py"), ["build_ext", "--inplace"], ) except ImportError: print( "Unable to build Cython components. Please make sure Cython is " "installed if the torch.hub model you are loading depends on it." ) # automatically expose models defined in FairseqModel::hub_models for _model_type, _cls in MODEL_REGISTRY.items(): for model_name in _cls.hub_models().keys(): globals()[model_name] = functools.partial( _cls.from_pretrained, model_name, )
EXA-1-master
exa/models/unilm-master/edgelm/hubconf.py
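hubconf.py exposes every key of <ModelClass>.hub_models() as a module-level callable, which is what torch.hub discovers. A hedged usage sketch (this downloads from the hub at run time; the model id below is only an example, list the available ids first):

import torch

# Each hub_models() key becomes an entry point; list whatever this hubconf exposes.
print(torch.hub.list('pytorch/fairseq'))

# Under the hood this calls <ModelClass>.from_pretrained(name).
# 'transformer.wmt16.en-de' is only an example id - pick one printed above.
model = torch.hub.load('pytorch/fairseq', 'transformer.wmt16.en-de')
model.eval()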
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import io import os import string import tempfile import unittest import torch from fairseq import tokenizer from fairseq.data import Dictionary class TestDictionary(unittest.TestCase): def test_finalize(self): txt = [ "A B C D", "B C D", "C D", "D", ] ref_ids1 = list( map( torch.IntTensor, [ [4, 5, 6, 7, 2], [5, 6, 7, 2], [6, 7, 2], [7, 2], ], ) ) ref_ids2 = list( map( torch.IntTensor, [ [7, 6, 5, 4, 2], [6, 5, 4, 2], [5, 4, 2], [4, 2], ], ) ) # build dictionary d = Dictionary() for line in txt: d.encode_line(line, add_if_not_exist=True) def get_ids(dictionary): ids = [] for line in txt: ids.append(dictionary.encode_line(line, add_if_not_exist=False)) return ids def assertMatch(ids, ref_ids): for toks, ref_toks in zip(ids, ref_ids): self.assertEqual(toks.size(), ref_toks.size()) self.assertEqual(0, (toks != ref_toks).sum().item()) ids = get_ids(d) assertMatch(ids, ref_ids1) # check finalized dictionary d.finalize() finalized_ids = get_ids(d) assertMatch(finalized_ids, ref_ids2) # write to disk and reload with tempfile.NamedTemporaryFile(mode="w") as tmp_dict: d.save(tmp_dict.name) d = Dictionary.load(tmp_dict.name) reload_ids = get_ids(d) assertMatch(reload_ids, ref_ids2) assertMatch(finalized_ids, reload_ids) def test_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999 #fairseq:overwrite\n" "<s> 999 #fairseq:overwrite\n" "</s> 999 #fairseq:overwrite\n" ", 999\n" "▁de 999\n" ) d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index("<pad>"), 1) self.assertEqual(d.index("foo"), 3) self.assertEqual(d.index("<unk>"), 4) self.assertEqual(d.index("<s>"), 5) self.assertEqual(d.index("</s>"), 6) self.assertEqual(d.index(","), 7) self.assertEqual(d.index("▁de"), 8) def test_no_overwrite(self): # for example, Camembert overwrites <unk>, <s> and </s> dict_file = io.StringIO( "<unk> 999\n" "<s> 999\n" "</s> 999\n" ", 999\n" "▁de 999\n" ) d = Dictionary() with self.assertRaisesRegex(RuntimeError, "Duplicate"): d.add_from_file(dict_file) def test_space(self): # for example, character models treat space as a symbol dict_file = io.StringIO(" 999\n" "a 999\n" "b 999\n") d = Dictionary() d.add_from_file(dict_file) self.assertEqual(d.index(" "), 4) self.assertEqual(d.index("a"), 5) self.assertEqual(d.index("b"), 6) def test_add_file_to_dict(self): counts = {} num_lines = 100 per_line = 10 with tempfile.TemporaryDirectory("test_sampling") as data_dir: filename = os.path.join(data_dir, "dummy.txt") with open(filename, "w", encoding="utf-8") as data: for c in string.ascii_letters: line = f"{c} " * per_line for _ in range(num_lines): data.write(f"{line}\n") counts[c] = per_line * num_lines per_line += 5 dict = Dictionary() Dictionary.add_file_to_dictionary( filename, dict, tokenizer.tokenize_line, 10 ) dict.finalize(threshold=0, nwords=-1, padding_factor=8) for c in string.ascii_letters: count = dict.get_count(dict.index(c)) self.assertEqual( counts[c], count, f"{c} count is {count} but should be {counts[c]}" ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_dictionary.py
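The expected id lists in test_finalize (new symbols starting at 4, every encoded line ending in 2) follow from fairseq's default Dictionary layout, which pre-registers the special symbols at the lowest indices and appends eos by default. A brief sketch, assuming the default constructor arguments:

from fairseq.data import Dictionary

d = Dictionary()   # pre-registers '<s>', '<pad>', '</s>', '<unk>' at indices 0..3
print(d.bos(), d.pad(), d.eos(), d.unk())              # 0 1 2 3
ids = d.encode_line("A B C D", add_if_not_exist=True)
print(ids.tolist())                                    # [4, 5, 6, 7, 2]: new words from 4, trailing eos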
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import functools import unittest from typing import Any, Dict, Sequence import fairseq import fairseq.options import fairseq.tasks import torch from tests.utils import dummy_dictionary VOCAB_SIZE = 100 @fairseq.tasks.register_task("fake_task") class FakeTask(fairseq.tasks.LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = dummy_dictionary(VOCAB_SIZE - 4) assert len(self.dictionary) == VOCAB_SIZE @property def source_dictionary(self): return self.dictionary @property def target_dictionary(self): return self.dictionary @functools.lru_cache() def get_toy_model( device: str, architecture: str = "roberta_enc_dec", **extra_args: Any, ): assert device in ("gpu", "cpu") kwargs = { "arch": architecture, # Use characteristics dimensions "encoder_layers": 3, "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "encoder_attention_heads": 4, "decoder_layers": 3, "decoder_embed_dim": 12, "decoder_ffn_embed_dim": 14, "decoder_attention_heads": 4, # Disable dropout so we have comparable tests. "dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, # required args "tokens_per_sample": 256, "data": "/tmp/test_roberta", } kwargs.update(extra_args) fake_task = FakeTask(kwargs) args = fairseq.options.get_args( task="online_backtranslation", mono_langs="en,ro", valid_lang_pairs="en-ro", **kwargs, ) torch.manual_seed(0) model = fake_task.build_model(args) if device == "gpu": model.cuda() return fake_task, model def mk_sample( lang: str, device: str, tok: Sequence[int] = None, batch_size: int = 2 ) -> Dict[str, Any]: assert device in ("gpu", "cpu") if not tok: if lang == "en": tok = [10, 11, 12, 13, 14, 15, 2] else: tok = [20, 21, 22, 23, 24, 25, 26, 27, 2] batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size) if device == "gpu": batch = batch.cuda() sample = { "net_input": { "src_tokens": batch, "prev_output_tokens": batch, "src_lengths": torch.tensor( [len(tok)] * batch_size, dtype=torch.long, device=batch.device ), }, "target": batch[:, 1:], } return sample def cpu_gpu(fn): def helper(self): fn(self, "cpu") if torch.cuda.is_available(): fn(self, "gpu") return helper def architectures(fn): def helper(self): for arch in ["roberta_enc_dec", "transformer"]: fn(self, arch) return helper class RobertaTest(unittest.TestCase): def assertTensorEqual(self, t1, t2, delta: float = 1e-6): self.assertEqual(t1.size(), t2.size(), "size mismatch") if delta == 0.0: self.assertEqual(t1.ne(t2).long().sum(), 0) else: self.assertEqual(((t2 - t1).abs() > delta).long().sum(), 0) def assertSharing(self, model, link_groups: Sequence[Sequence[str]]): ids = {} for group in link_groups: group_ids = {name: id(params(model, name)) for name in group} shared_id = group_ids[group[0]] self.assertEqual(group_ids, {name: shared_id for name in group}) self.assertNotIn(shared_id, ids) ids[shared_id] = group def test_roberta_shared_params(self): _, roberta = get_toy_model("cpu", architecture="roberta") self.assertSharing( roberta, [ [ "encoder.sentence_encoder.embed_tokens.weight", "encoder.lm_head.weight", ] ], ) _, roberta = get_toy_model( "cpu", architecture="roberta", untie_weights_roberta=True ) self.assertSharing( roberta, [ ["encoder.sentence_encoder.embed_tokens.weight"], ["encoder.lm_head.weight"], ], ) def test_roberta_enc_dec_shared_params(self): # 3 distinct embeddings _, 
enc_dec = get_toy_model("cpu", architecture="roberta_enc_dec") self.assertSharing( enc_dec, [ ["encoder.embed_tokens.weight"], ["decoder.embed_tokens.weight"], ["decoder.output_projection.weight"], ], ) # 2 distinct embeddings, one for encoder, one for decoder _, enc_dec = get_toy_model( "cpu", architecture="roberta_enc_dec", share_decoder_input_output_embed=True ) self.assertSharing( enc_dec, [ ["encoder.embed_tokens.weight"], [ "decoder.embed_tokens.weight", "decoder.output_projection.weight", ], ], ) # shared embeddings _, enc_dec = get_toy_model( "cpu", architecture="roberta_enc_dec", share_all_embeddings=True ) self.assertSharing( enc_dec, [ [ "encoder.embed_tokens.weight", "decoder.embed_tokens.weight", "decoder.output_projection.weight", ] ], ) def test_roberta_max_positions_is_correctly_set(self): device = "cpu" task, model = get_toy_model(device) max_pos = model.max_decoder_positions() self.assertEqual(max_pos, 256) self.assertEqual(max_pos, model.decoder.max_positions()) self.assertEqual(max_pos, model.encoder.max_positions()) self.assertEqual(max_pos, model.encoder.embed_positions.max_positions) sentence = [31 for _ in range(max_pos)] sample = mk_sample("en", device, sentence, batch_size=1) self.assertEqual(list(sample["net_input"]["src_lengths"]), [max_pos]) self.assertEqual(len(sample["net_input"]["src_tokens"][0]), max_pos) x, _ = model.forward(**sample["net_input"]) self.assertEqual(x.shape, (1, max_pos, VOCAB_SIZE)) @cpu_gpu def test_roberta_forward_backward(self, device: str): _, model = get_toy_model(device) sample = mk_sample("en", device) en_tokens = sample["net_input"]["src_tokens"] (bs, l) = en_tokens.shape # Forward logits, _ = model(**sample["net_input"]) self.assertEqual(logits.shape, (bs, l, VOCAB_SIZE)) # Backward loss = logits.sum() loss.backward() @cpu_gpu def test_roberta_forward_backward_bs1(self, device: str): _, model = get_toy_model(device) sample = mk_sample("en", device, batch_size=1) o, _ = model.forward(**sample["net_input"]) loss = o.sum() sample2 = mk_sample("ro", device, batch_size=1) o, _ = model.forward(**sample2["net_input"]) loss += o.sum() loss.backward() @cpu_gpu def test_roberta_batching(self, device: str): """ Checks that the batch of size 2 give twice the same results than the batch of size 1. """ _, model = get_toy_model(device) sample = mk_sample("en", device, batch_size=1) slen = sample["net_input"]["src_lengths"][0] sample2 = mk_sample("en", device, batch_size=2) with torch.no_grad(): z = model.encoder.forward( sample["net_input"]["src_tokens"], sample["net_input"]["src_lengths"] ) z = z["encoder_out"][-1] logits, _ = model.forward(**sample["net_input"]) z2 = model.encoder.forward( sample2["net_input"]["src_tokens"], sample["net_input"]["src_lengths"] ) z2 = z2["encoder_out"][-1] logits2, _ = model.forward(**sample2["net_input"]) self.assertEqual(z.shape, (slen, 1, 12)) self.assertEqual(z2.shape, (slen, 2, 12)) self.assertTensorEqual(logits2[0], logits2[1]) self.assertTensorEqual(logits[0], logits2[0]) @cpu_gpu def test_roberta_incremental_decoder(self, device: str): """ Checks that incremental decoding yields the same result than non incremental one. 
""" task, model = get_toy_model(device) en_sample = mk_sample("en", device) en_tokens = en_sample["net_input"]["src_tokens"] ro_sample = mk_sample("ro", device) ro_tokens = ro_sample["net_input"]["src_tokens"] en_enc = model.encoder.forward( en_tokens, src_lengths=en_sample["net_input"]["src_lengths"] ) (bs, tgt_len) = ro_tokens.shape # Decode without incremental state ro_dec, _ = model.decoder.forward(ro_tokens, encoder_out=en_enc) self.assertEqual(ro_dec.shape, (bs, tgt_len, VOCAB_SIZE)) self.assertTensorEqual(ro_dec[0], ro_dec[1]) # Decode with incremental state inc_state = {} ro_dec_inc = [] for l in range(tgt_len): ro, _ = model.decoder.forward( ro_tokens[:, : l + 1], encoder_out=en_enc, incremental_state=inc_state ) self.assertEqual(ro.shape, (bs, 1, VOCAB_SIZE)) ro_dec_inc.append(ro) for l in range(tgt_len): # Intra-batch self.assertTensorEqual(ro_dec_inc[l][0], ro_dec_inc[l][1]) # Incremental vs non-incremental self.assertTensorEqual(ro_dec_inc[l][:, 0], ro_dec[:, l]) def params(model, name): if "." not in name: return getattr(model, name) prefix, name = name.split(".", 1) return params(getattr(model, prefix), name)
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_roberta.py
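As a quick illustration of the params helper defined at the end of the file above (the weight-sharing assertions presumably resolve dotted parameter names this way), here is a minimal, self-contained sketch; the two-layer module is hypothetical and only torch is assumed:

import torch


def params(model, name):
    # Resolve a dotted name like "a.weight" by walking submodules recursively.
    if "." not in name:
        return getattr(model, name)
    prefix, name = name.split(".", 1)
    return params(getattr(model, prefix), name)


# Hypothetical module whose two layers share a single weight tensor.
net = torch.nn.ModuleDict({"a": torch.nn.Linear(4, 4), "b": torch.nn.Linear(4, 4)})
net["b"].weight = net["a"].weight
assert params(net, "a.weight") is params(net, "b.weight")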
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq import utils class TestUtils(unittest.TestCase): def test_convert_padding_direction(self): pad = 1 left_pad = torch.LongTensor( [ [2, 3, 4, 5, 6], [1, 7, 8, 9, 10], [1, 1, 1, 11, 12], ] ) right_pad = torch.LongTensor( [ [2, 3, 4, 5, 6], [7, 8, 9, 10, 1], [11, 12, 1, 1, 1], ] ) self.assertAlmostEqual( right_pad, utils.convert_padding_direction( left_pad, pad, left_to_right=True, ), ) self.assertAlmostEqual( left_pad, utils.convert_padding_direction( right_pad, pad, right_to_left=True, ), ) def test_make_positions(self): pad = 1 left_pad_input = torch.LongTensor( [ [9, 9, 9, 9, 9], [1, 9, 9, 9, 9], [1, 1, 1, 9, 9], ] ) left_pad_output = torch.LongTensor( [ [2, 3, 4, 5, 6], [1, 2, 3, 4, 5], [1, 1, 1, 2, 3], ] ) right_pad_input = torch.LongTensor( [ [9, 9, 9, 9, 9], [9, 9, 9, 9, 1], [9, 9, 1, 1, 1], ] ) right_pad_output = torch.LongTensor( [ [2, 3, 4, 5, 6], [2, 3, 4, 5, 1], [2, 3, 1, 1, 1], ] ) self.assertAlmostEqual( left_pad_output, utils.make_positions(left_pad_input, pad), ) self.assertAlmostEqual( right_pad_output, utils.make_positions(right_pad_input, pad), ) def test_clip_grad_norm_(self): params = torch.nn.Parameter(torch.zeros(5)).requires_grad_(False) grad_norm = utils.clip_grad_norm_(params, 1.0) self.assertTrue(torch.is_tensor(grad_norm)) self.assertEqual(grad_norm, 0.0) params = [torch.nn.Parameter(torch.zeros(5)) for i in range(3)] for p in params: p.grad = torch.full((5,), fill_value=2.0) grad_norm = utils.clip_grad_norm_(params, 1.0) exp_grad_norm = torch.full((15,), fill_value=2.0).norm() self.assertTrue(torch.is_tensor(grad_norm)) self.assertEqual(grad_norm, exp_grad_norm) grad_norm = utils.clip_grad_norm_(params, 1.0) self.assertAlmostEqual(grad_norm, torch.tensor(1.0)) def test_resolve_max_positions_with_tuple(self): resolved = utils.resolve_max_positions(None, (2000, 100, 2000), 12000) self.assertEqual(resolved, (2000, 100, 2000)) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess(utils.item((t1 - t2).abs().max()), 1e-4) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_utils.py
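For reference, the padding utilities exercised above can be called directly as below; a small sketch assuming fairseq is importable, with 9 standing in for an arbitrary non-pad token id:

import torch
from fairseq import utils

pad = 1
right_pad = torch.LongTensor([[9, 9, 9, 9], [9, 9, pad, pad]])

# Positions count up from pad + 1; padded slots keep the pad index.
print(utils.make_positions(right_pad, pad))
# tensor([[2, 3, 4, 5], [2, 3, 1, 1]])

# Move the padding from the right to the left of each row.
print(utils.convert_padding_direction(right_pad, pad, right_to_left=True))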
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from copy import deepcopy from dataclasses import dataclass from typing import Optional import torch from fairseq.models.ema import EMA class DummyModule(torch.nn.Module): def __init__(self) -> None: """LightningModule for testing purposes Args: epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum validation loss for testing purposes (zero based). If None this is ignored. Defaults to None. """ super().__init__() self.layer = torch.nn.Linear(in_features=32, out_features=2) self.another_layer = torch.nn.Linear(in_features=2, out_features=2) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.layer(x) return self.another_layer(x) @dataclass class EMAConfig(object): ema_decay: float = 0.99 ema_start_update: int = 0 ema_fp32: bool = False ema_seed_model: Optional[str] = None class TestEMAGPU(unittest.TestCase): def assertTorchAllClose(self, x, y, atol=1e-8, rtol=1e-5, msg=None): diff = x.float() - y.float() diff_norm = torch.norm(diff) other_norm = torch.norm(y.float()) if msg is None: msg = "|input - other| > {} + {} * |other|".format( atol, rtol ) self.assertLessEqual( diff_norm, atol + rtol * other_norm, msg=msg, ) def test_ema(self): model = DummyModule() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig() ema = EMA(model, config) # set decay ema._set_decay(config.ema_decay) self.assertEqual(ema.get_decay(), config.ema_decay) # get model self.assertEqual(ema.get_model(), ema.model) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # EMA step x = torch.randn(32) y = model(x) loss = y.sum() loss.backward() optimizer.step() ema.step(model) ema_state_dict = ema.get_model().state_dict() for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema_state_dict[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # Load EMA into model model2 = DummyModule() ema.reverse(model2) for key, param in model2.state_dict().items(): ema_param = ema_state_dict[key] self.assertTrue( torch.allclose(ema_param, param) ) def test_ema_fp32(self): model = DummyModule().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=True) ema = EMA(model, config) x = torch.randn(32) y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertIn(key, ema.fp32_params) # EMA update is done in fp32, and hence the EMA param must be # closer to the EMA update done in fp32 than in fp16. 
self.assertLessEqual( torch.norm( ema_param.float() - (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float() ), torch.norm( ema_param.float() - (config.ema_decay * prev_param + (1 - config.ema_decay) * param).float() ), ) self.assertTorchAllClose( ema_param, (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half(), ) def test_ema_fp16(self): model = DummyModule().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=False) ema = EMA(model, config) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) x = torch.randn(32) y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue # EMA update is done in fp16, and hence the EMA param must be # closer to the EMA update done in fp16 than in fp32. self.assertLessEqual( torch.norm( ema_param.float() - (config.ema_decay * prev_param + (1 - config.ema_decay) * param).float() ), torch.norm( ema_param.float() - (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float() ), ) self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_ema.py
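A minimal sketch of the EMA update loop the tests above exercise; the EMAConfig dataclass is re-declared here only because the original lives inside the test module, and the closing check mirrors the decay formula asserted above:

from copy import deepcopy
from dataclasses import dataclass
from typing import Optional

import torch
from fairseq.models.ema import EMA


@dataclass
class EMAConfig:  # mirrors the small config dataclass in the test above
    ema_decay: float = 0.99
    ema_start_update: int = 0
    ema_fp32: bool = False
    ema_seed_model: Optional[str] = None


model = torch.nn.Linear(8, 2)
ema = EMA(model, EMAConfig())
before = deepcopy(model.state_dict())

optimizer = torch.optim.SGD(model.parameters(), lr=0.01)
model(torch.randn(4, 8)).sum().backward()
optimizer.step()

# Fold the updated weights into the shadow copy:
# ema_param <- decay * ema_param + (1 - decay) * param
ema.step(model)
avg_weight = ema.get_model().state_dict()["weight"]
expected = 0.99 * before["weight"] + 0.01 * model.state_dict()["weight"]
assert torch.allclose(avg_weight, expected)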
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from fairseq.data import iterators class TestIterators(unittest.TestCase): def test_counting_iterator_index(self, ref=None, itr=None): # Test the indexing functionality of CountingIterator if ref is None: assert itr is None ref = list(range(10)) itr = iterators.CountingIterator(ref) else: assert len(ref) == 10 assert itr is not None self.assertTrue(itr.has_next()) self.assertEqual(itr.n, 0) self.assertEqual(next(itr), ref[0]) self.assertEqual(itr.n, 1) self.assertEqual(next(itr), ref[1]) self.assertEqual(itr.n, 2) itr.skip(3) self.assertEqual(itr.n, 5) self.assertEqual(next(itr), ref[5]) itr.skip(2) self.assertEqual(itr.n, 8) self.assertEqual(list(itr), [ref[8], ref[9]]) self.assertFalse(itr.has_next()) def test_counting_iterator_length_mismatch(self): ref = list(range(10)) # When the underlying iterable is longer than the CountingIterator, # the remaining items in the iterable should be ignored itr = iterators.CountingIterator(ref, total=8) self.assertEqual(list(itr), ref[:8]) # When the underlying iterable is shorter than the CountingIterator, # raise an IndexError when the underlying iterable is exhausted itr = iterators.CountingIterator(ref, total=12) self.assertRaises(IndexError, list, itr) def test_counting_iterator_take(self): # Test the "take" method of CountingIterator ref = list(range(10)) itr = iterators.CountingIterator(ref) itr.take(5) self.assertEqual(len(itr), len(list(iter(itr)))) self.assertEqual(len(itr), 5) itr = iterators.CountingIterator(ref) itr.take(5) self.assertEqual(next(itr), ref[0]) self.assertEqual(next(itr), ref[1]) itr.skip(2) self.assertEqual(next(itr), ref[4]) self.assertFalse(itr.has_next()) def test_grouped_iterator(self): # test correctness x = list(range(10)) itr = iterators.GroupedIterator(x, 1) self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]) itr = iterators.GroupedIterator(x, 4) self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]) itr = iterators.GroupedIterator(x, 5) self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) # test the GroupIterator also works correctly as a CountingIterator x = list(range(30)) ref = list(iterators.GroupedIterator(x, 3)) itr = iterators.GroupedIterator(x, 3) self.test_counting_iterator_index(ref, itr) def test_sharded_iterator(self): # test correctness x = list(range(10)) itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0) self.assertEqual(list(itr), x) itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0) self.assertEqual(list(itr), [0, 2, 4, 6, 8]) itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1) self.assertEqual(list(itr), [1, 3, 5, 7, 9]) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) self.assertEqual(list(itr), [0, 3, 6, 9]) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1) self.assertEqual(list(itr), [1, 4, 7, None]) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2) self.assertEqual(list(itr), [2, 5, 8, None]) # test CountingIterator functionality x = list(range(30)) ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0)) itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) self.test_counting_iterator_index(ref, itr) def test_counting_iterator_buffered_iterator_take(self): ref = list(range(10)) buffered_itr = iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr) 
itr.take(5) self.assertEqual(len(itr), len(list(iter(itr)))) self.assertEqual(len(itr), 5) buffered_itr = iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr) itr.take(5) self.assertEqual(len(buffered_itr), 5) self.assertEqual(len(list(iter(buffered_itr))), 5) buffered_itr = iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr) itr.take(5) self.assertEqual(next(itr), ref[0]) self.assertEqual(next(itr), ref[1]) itr.skip(2) self.assertEqual(next(itr), ref[4]) self.assertFalse(itr.has_next()) self.assertRaises(StopIteration, next, buffered_itr) ref = list(range(4, 10)) buffered_itr = iterators.BufferedIterator(2, ref) itr = iterators.CountingIterator(buffered_itr, start=4) itr.take(5) self.assertEqual(len(itr), 5) self.assertEqual(len(buffered_itr), 1) self.assertEqual(next(itr), ref[0]) self.assertFalse(itr.has_next()) self.assertRaises(StopIteration, next, buffered_itr) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_iterators.py
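The iterator wrappers covered above compose as follows; a short sketch assuming fairseq is importable:

from fairseq.data import iterators

ref = list(range(10))

itr = iterators.CountingIterator(ref)
itr.take(5)             # truncate to the first 5 elements
next(itr); itr.skip(2)  # consume one item, then skip two
print(itr.n, list(itr))  # -> 3 [3, 4]

print(list(iterators.GroupedIterator(ref, 4)))                         # [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]
print(list(iterators.ShardedIterator(ref, num_shards=2, shard_id=1)))  # [1, 3, 5, 7, 9]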
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from typing import Dict, List import tests.utils as test_utils import torch from fairseq import utils from fairseq.data import ( Dictionary, LanguagePairDataset, TransformEosDataset, data_utils, noising, ) class TestDataNoising(unittest.TestCase): def _get_test_data_with_bpe_cont_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with continuation markers as suffixes to denote non-end of word tokens. This is the standard BPE format used in fairseq's preprocessing. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he@@") vocab.add_symbol("llo") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("y@@") vocab.add_symbol("ou") vocab.add_symbol("n@@") vocab.add_symbol("ew") vocab.add_symbol("or@@") vocab.add_symbol("k") src_tokens = [ ["he@@", "llo", "n@@", "ew", "y@@", "or@@", "k"], ["how", "are", "y@@", "ou"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_bpe_end_marker(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: BPE vocab with end-of-word markers as suffixes to denote tokens at the end of a word. This is an alternative to fairseq's standard preprocessing framework and is not generally supported within fairseq. x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. """ vocab = Dictionary() vocab.add_symbol("he") vocab.add_symbol("llo_EOW") vocab.add_symbol("how_EOW") vocab.add_symbol("are_EOW") vocab.add_symbol("y") vocab.add_symbol("ou_EOW") vocab.add_symbol("n") vocab.add_symbol("ew_EOW") vocab.add_symbol("or") vocab.add_symbol("k_EOW") src_tokens = [ ["he", "llo_EOW", "n", "ew_EOW", "y", "or", "k_EOW"], ["how_EOW", "are_EOW", "y", "ou_EOW"], ] x, src_lengths = x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _get_test_data_with_word_vocab(self, append_eos=True): """ Args: append_eos: if True, each input sentence in the source tokens tensor will have an EOS appended to the end. Returns: vocabs: word vocab x: input tensor containing numberized source tokens, with EOS at the end if append_eos is true src_lengths: and source lengths. 
""" vocab = Dictionary() vocab.add_symbol("hello") vocab.add_symbol("how") vocab.add_symbol("are") vocab.add_symbol("you") vocab.add_symbol("new") vocab.add_symbol("york") src_tokens = [ ["hello", "new", "york", "you"], ["how", "are", "you", "new", "york"], ] x, src_lengths = self._convert_src_tokens_to_tensor( vocab=vocab, src_tokens=src_tokens, append_eos=append_eos ) return vocab, x, src_lengths def _convert_src_tokens_to_tensor( self, vocab: Dictionary, src_tokens: List[List[str]], append_eos: bool ): src_len = [len(x) for x in src_tokens] # If we have to append EOS, we include EOS in counting src length if append_eos: src_len = [length + 1 for length in src_len] x = torch.LongTensor(len(src_tokens), max(src_len)).fill_(vocab.pad()) for i in range(len(src_tokens)): for j in range(len(src_tokens[i])): x[i][j] = vocab.index(src_tokens[i][j]) if append_eos: x[i][j + 1] = vocab.eos() x = x.transpose(1, 0) return x, torch.LongTensor(src_len) def assert_eos_at_end(self, x, x_len, eos): """Asserts last token of every sentence in x is EOS """ for i in range(len(x_len)): self.assertEqual( x[x_len[i] - 1][i], eos, ( "Expected eos (token id {eos}) at the end of sentence {i} " "but got {other} instead" ).format(i=i, eos=eos, other=x[i][-1]), ) def assert_word_dropout_correct(self, x, x_noised, x_len, l_noised): # Expect only the first word (2 bpe tokens) of the first example # was dropped out self.assertEqual(x_len[0] - 2, l_noised[0]) for i in range(l_noised[0]): self.assertEqual(x_noised[i][0], x[i + 2][0]) def test_word_dropout_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def assert_word_blanking_correct(self, x, x_noised, x_len, l_noised, unk): # Expect only the first word (2 bpe tokens) of the first example # was blanked out self.assertEqual(x_len[0], l_noised[0]) for i in range(l_noised[0]): if i < 2: self.assertEqual(x_noised[i][0], unk) else: self.assertEqual(x_noised[i][0], x[i][0]) def test_word_blank_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def generate_unchanged_shuffle_map(self, length): return {i: i for i in range(length)} def assert_word_shuffle_matches_expected( self, x, x_len, max_shuffle_distance: int, vocab: Dictionary, expected_shufle_maps: List[Dict[int, int]], expect_eos_at_end: bool, bpe_end_marker=None, ): """ This verifies that with a given x, x_len, max_shuffle_distance, and vocab, we get the expected shuffle result. Args: x: Tensor of shape (T x B) = (sequence_length, batch_size) x_len: Tensor of length B = batch_size max_shuffle_distance: arg to pass to noising expected_shuffle_maps: List[mapping] where mapping is a Dict[old_index, new_index], mapping x's elements from their old positions in x to their new positions in x. expect_eos_at_end: if True, check the output to make sure there is an EOS at the end. bpe_end_marker: str denoting the BPE end token. 
If this is not None, we set the BPE cont token to None in the noising classes. """ bpe_cont_marker = None if bpe_end_marker is None: bpe_cont_marker = "@@" with data_utils.numpy_seed(1234): word_shuffle = noising.WordShuffle( vocab, bpe_cont_marker=bpe_cont_marker, bpe_end_marker=bpe_end_marker ) x_noised, l_noised = word_shuffle.noising( x, x_len, max_shuffle_distance=max_shuffle_distance ) # For every example, we have a different expected shuffle map. We check # that each example is shuffled as expected according to each # corresponding shuffle map. for i in range(len(expected_shufle_maps)): shuffle_map = expected_shufle_maps[i] for k, v in shuffle_map.items(): self.assertEqual(x[k][i], x_noised[v][i]) # Shuffling should not affect the length of each example for pre_shuffle_length, post_shuffle_length in zip(x_len, l_noised): self.assertEqual(pre_shuffle_length, post_shuffle_length) if expect_eos_at_end: self.assert_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_shuffle_with_eos(self): vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=True, ) def test_word_shuffle_with_eos_nonbpe(self): """The purpose of this is to test shuffling logic with word vocabs""" vocab, x, x_len = self._get_test_data_with_word_vocab(append_eos=True) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=True, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ {0: 0, 1: 1, 2: 3, 3: 2}, {0: 0, 1: 2, 2: 1, 3: 3, 4: 4}, ], expect_eos_at_end=True, ) def test_word_shuffle_without_eos(self): """Same result as word shuffle with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) # Assert word shuffle with max shuffle distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, ) def test_word_shuffle_without_eos_with_bpe_end_marker(self): """Same result as word shuffle without eos except using BPE end token""" vocab, x, x_len = self._get_test_data_with_bpe_end_marker(append_eos=False) # Assert word shuffle with max shuffle 
distance 0 causes input to be # unchanged self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, max_shuffle_distance=0, vocab=vocab, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(example_len) for example_len in x_len ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) # Assert word shuffle with max shuffle distance 3 matches our expected # shuffle order self.assert_word_shuffle_matches_expected( x=x, x_len=x_len, vocab=vocab, max_shuffle_distance=3, expected_shufle_maps=[ self.generate_unchanged_shuffle_map(x_len[0]), {0: 0, 1: 3, 2: 1, 3: 2}, ], expect_eos_at_end=False, bpe_end_marker="_EOW", ) def assert_no_eos_at_end(self, x, x_len, eos): """Asserts that the last token of each sentence in x is not EOS """ for i in range(len(x_len)): self.assertNotEqual( x[x_len[i] - 1][i], eos, "Expected no eos (token id {eos}) at the end of sentence {i}.".format( eos=eos, i=i ), ) def test_word_dropout_without_eos(self): """Same result as word dropout with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2) self.assert_word_dropout_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def test_word_blank_without_eos(self): """Same result as word blank with eos except no EOS at end""" vocab, x, x_len = self._get_test_data_with_bpe_cont_marker(append_eos=False) with data_utils.numpy_seed(1234): noising_gen = noising.WordDropout(vocab) x_noised, l_noised = noising_gen.noising(x, x_len, 0.2, vocab.unk()) self.assert_word_blanking_correct( x=x, x_noised=x_noised, x_len=x_len, l_noised=l_noised, unk=vocab.unk() ) self.assert_no_eos_at_end(x=x_noised, x_len=l_noised, eos=vocab.eos()) def _get_noising_dataset_batch( self, src_tokens_no_pad, src_dict, append_eos_to_tgt=False, ): """ Constructs a NoisingDataset and the corresponding ``LanguagePairDataset(NoisingDataset(src), src)``. If *append_eos_to_tgt* is True, wrap the source dataset in :class:`TransformEosDataset` to append EOS to the clean source when using it as the target. 
""" src_dataset = test_utils.TestDataset(data=src_tokens_no_pad) noising_dataset = noising.NoisingDataset( src_dataset=src_dataset, src_dict=src_dict, seed=1234, max_word_shuffle_distance=3, word_dropout_prob=0.2, word_blanking_prob=0.2, noising_class=noising.UnsupervisedMTNoising, ) tgt = src_dataset language_pair_dataset = LanguagePairDataset( src=noising_dataset, tgt=tgt, src_sizes=None, src_dict=src_dict ) language_pair_dataset = TransformEosDataset( language_pair_dataset, src_dict.eos(), append_eos_to_tgt=append_eos_to_tgt, ) dataloader = torch.utils.data.DataLoader( dataset=language_pair_dataset, batch_size=2, collate_fn=language_pair_dataset.collater, ) denoising_batch_result = next(iter(dataloader)) return denoising_batch_result def test_noising_dataset_with_eos(self): src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=True ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [pad, pad, pad, 6, 8, 9, 7, eos]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_noising_dataset_without_eos(self): """ Similar to test noising dataset with eos except that we have to set *append_eos_to_tgt* to ``True``. """ src_dict, src_tokens, _ = self._get_test_data_with_bpe_cont_marker( append_eos=False ) # Format data for src_dataset src_tokens = torch.t(src_tokens) src_tokens_no_pad = [] for src_sentence in src_tokens: src_tokens_no_pad.append( utils.strip_pad(tensor=src_sentence, pad=src_dict.pad()) ) denoising_batch_result = self._get_noising_dataset_batch( src_tokens_no_pad=src_tokens_no_pad, src_dict=src_dict, append_eos_to_tgt=True, ) eos, pad = src_dict.eos(), src_dict.pad() # Generated noisy source as source expected_src = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13], [pad, pad, pad, 6, 8, 9, 7]] ) # Original clean source as target (right-padded) expected_tgt = torch.LongTensor( [[4, 5, 10, 11, 8, 12, 13, eos], [6, 7, 8, 9, eos, pad, pad, pad]] ) generated_src = denoising_batch_result["net_input"]["src_tokens"] tgt_tokens = denoising_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_noising.py
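The noising modules tested above can also be driven directly on a (time, batch) tensor; a sketch under the same conventions as the tests (a BPE vocab with "@@" continuation markers, fairseq importable):

import torch
from fairseq.data import Dictionary, data_utils, noising

vocab = Dictionary()
for sym in ["he@@", "llo", "how", "are"]:  # "@@" marks a BPE continuation
    vocab.add_symbol(sym)

# One sentence laid out as (time, batch), matching the tests above.
sentence = ["he@@", "llo", "how", "are"]
x = torch.LongTensor([[vocab.index(s) for s in sentence] + [vocab.eos()]]).t()
x_len = torch.LongTensor([x.size(0)])

with data_utils.numpy_seed(1234):
    x_dropped, new_len = noising.WordDropout(vocab).noising(x, x_len, 0.2)
    shuffler = noising.WordShuffle(vocab, bpe_cont_marker="@@", bpe_end_marker=None)
    x_shuffled, _ = shuffler.noising(x, x_len, max_shuffle_distance=3)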
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import tempfile
import unittest

import torch
from fairseq.data.dictionary import Dictionary
from fairseq.models.lstm import LSTMModel
from fairseq.tasks.fairseq_task import LegacyFairseqTask

DEFAULT_TEST_VOCAB_SIZE = 100


class DummyTask(LegacyFairseqTask):
    def __init__(self, args):
        super().__init__(args)
        self.dictionary = get_dummy_dictionary()
        if getattr(self.args, "ctc", False):
            self.dictionary.add_symbol("<ctc_blank>")
        self.src_dict = self.dictionary
        self.tgt_dict = self.dictionary

    @property
    def source_dictionary(self):
        return self.src_dict

    @property
    def target_dictionary(self):
        return self.dictionary


def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE):
    dummy_dict = Dictionary()
    # add dummy symbol to satisfy vocab size
    for id, _ in enumerate(range(vocab_size)):
        dummy_dict.add_symbol("{}".format(id), 1000)
    return dummy_dict


def get_dummy_task_and_parser():
    """
    To build a fairseq model we need a dummy task and parser. This function
    creates both to facilitate model/criterion tests.

    Note: DummyTask (defined above) is used as the dummy task. Provide another
    helper if a different task is needed.
    """
    parser = argparse.ArgumentParser(
        description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS
    )
    DummyTask.add_args(parser)
    args = parser.parse_args([])
    task = DummyTask.setup_task(args)
    return task, parser


class TestJitLSTMModel(unittest.TestCase):
    def _test_save_and_load(self, scripted_module):
        with tempfile.NamedTemporaryFile() as f:
            scripted_module.save(f.name)
            torch.jit.load(f.name)

    def assertTensorEqual(self, t1, t2):
        t1 = t1[~torch.isnan(t1)]  # can cause size mismatch errors if there are NaNs
        t2 = t2[~torch.isnan(t2)]
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertEqual(t1.ne(t2).long().sum(), 0)

    def test_jit_and_export_lstm(self):
        task, parser = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ""
        model = LSTMModel.build_model(args, task)
        scripted_model = torch.jit.script(model)
        self._test_save_and_load(scripted_model)

    def test_assert_jit_vs_nonjit_(self):
        task, parser = get_dummy_task_and_parser()
        LSTMModel.add_args(parser)
        args = parser.parse_args([])
        args.criterion = ""
        model = LSTMModel.build_model(args, task)
        model.eval()
        scripted_model = torch.jit.script(model)
        scripted_model.eval()
        idx = len(task.source_dictionary)
        iter = 100
        # Inject random input and check output
        seq_len_tensor = torch.randint(1, 10, (iter,))
        num_samples_tensor = torch.randint(1, 10, (iter,))
        for i in range(iter):
            seq_len = seq_len_tensor[i]
            num_samples = num_samples_tensor[i]
            src_token = (torch.randint(0, idx, (num_samples, seq_len)),)
            src_lengths = torch.randint(1, seq_len + 1, (num_samples,))
            src_lengths, _ = torch.sort(src_lengths, descending=True)
            # Force the first sample to have seq_len
            src_lengths[0] = seq_len
            prev_output_token = (torch.randint(0, idx, (num_samples, 1)),)
            result = model(src_token[0], src_lengths, prev_output_token[0], None)
            scripted_result = scripted_model(
                src_token[0], src_lengths, prev_output_token[0], None
            )
            self.assertTensorEqual(result[0], scripted_result[0])
            self.assertTensorEqual(result[1], scripted_result[1])


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_lstm_jitable.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import torch from torch.cuda.amp import autocast, GradScaler from fairseq.optim import build_optimizer @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestGradientScalingAMP(unittest.TestCase): def setUp(self): self.x = torch.tensor([2.0]).cuda().half() weight = 3.0 bias = 5.0 self.error = 1.0 self.target = torch.tensor([self.x * weight + bias + self.error]).cuda() self.loss_fn = torch.nn.L1Loss() self.model = torch.nn.Linear(1, 1) self.model.weight.data = torch.tensor([[weight]]) self.model.bias.data = torch.tensor([bias]) self.model.cuda() self.params = list(self.model.parameters()) self.namespace_dls = argparse.Namespace( optimizer="adam", lr=[0.1], adam_betas="(0.9, 0.999)", adam_eps=1e-8, weight_decay=0.0, threshold_loss_scale=1, min_loss_scale=1e-4, ) self.scaler = GradScaler( init_scale=1, growth_interval=1, ) def run_iter(self, model, params, optimizer): optimizer.zero_grad() with autocast(): y = model(self.x) loss = self.loss_fn(y, self.target) self.scaler.scale(loss).backward() self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16)) self.scaler.unscale_(optimizer) grad_norm = optimizer.clip_grad_norm(0) self.assertAlmostEqual(grad_norm.item(), 2.2361, 4) self.scaler.step(optimizer) self.scaler.update() self.assertEqual( model.weight, torch.tensor( [[3.1]], device="cuda:0", requires_grad=True ), ) self.assertEqual( model.bias, torch.tensor( [5.1], device="cuda:0", requires_grad=True ), ) self.assertEqual(self.scaler.get_scale(), 2.0) def test_automatic_mixed_precision(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = build_optimizer(self.namespace_dls, params) self.run_iter(model, params, optimizer)
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_amp_optimizer.py
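The AMP pattern above, reduced to one step outside the test harness; a sketch that assumes a CUDA device and mirrors the optimizer namespace built in setUp:

import argparse

import torch
from torch.cuda.amp import GradScaler, autocast
from fairseq.optim import build_optimizer

model = torch.nn.Linear(1, 1).cuda()
cfg = argparse.Namespace(
    optimizer="adam", lr=[0.1], adam_betas="(0.9, 0.999)", adam_eps=1e-8,
    weight_decay=0.0, threshold_loss_scale=1, min_loss_scale=1e-4,
)
optimizer = build_optimizer(cfg, list(model.parameters()))
scaler = GradScaler(init_scale=1, growth_interval=1)

optimizer.zero_grad()
with autocast():
    loss = model(torch.tensor([[2.0]], device="cuda")).sum()
scaler.scale(loss).backward()
scaler.unscale_(optimizer)   # expose true gradients before measuring/clipping
optimizer.clip_grad_norm(0)  # 0 disables clipping but still returns the norm
scaler.step(optimizer)
scaler.update()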
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention class TestSparseMultiheadAttention(unittest.TestCase): def test_sparse_multihead_attention(self): attn_weights = torch.randn(1, 8, 8) bidirectional_sparse_mask = torch.tensor( [ [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], ] ) bidirectional_attention = SparseMultiheadAttention( 16, 1, stride=4, expressivity=1, is_bidirectional=True ) bidirectional_attention_sparse_mask = ( bidirectional_attention.buffered_sparse_mask(attn_weights, 8, 8) ) torch.all( torch.eq(bidirectional_attention_sparse_mask, bidirectional_sparse_mask) ) sparse_mask = torch.tensor( [ [ 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [ 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf"), float("-inf"), ], [0, 0, 0, 0, 0, float("-inf"), float("-inf"), float("-inf")], [ float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, float("-inf"), float("-inf"), ], [ float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, float("-inf"), ], [float("-inf"), float("-inf"), float("-inf"), 0, 0, 0, 0, 0], ] ) attention = SparseMultiheadAttention( 16, 1, stride=4, expressivity=1, is_bidirectional=False ) attention_sparse_mask = attention.buffered_sparse_mask(attn_weights, 8, 8) torch.all(torch.eq(attention_sparse_mask, sparse_mask)) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_sparse_multihead_attention.py
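The fixed sparsity pattern asserted above can be inspected directly; a short sketch assuming fairseq is importable (0 marks an allowed query/key pair, -inf a blocked one):

import torch
from fairseq.modules.sparse_multihead_attention import SparseMultiheadAttention

attn = SparseMultiheadAttention(16, 1, stride=4, expressivity=1, is_bidirectional=False)
attn_weights = torch.randn(1, 8, 8)
print(attn.buffered_sparse_mask(attn_weights, 8, 8))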
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import uuid from fairseq import metrics class TestMetrics(unittest.TestCase): def test_nesting(self): with metrics.aggregate() as a: metrics.log_scalar("loss", 1) with metrics.aggregate() as b: metrics.log_scalar("loss", 2) self.assertEqual(a.get_smoothed_values()["loss"], 1.5) self.assertEqual(b.get_smoothed_values()["loss"], 2) def test_new_root(self): with metrics.aggregate() as a: metrics.log_scalar("loss", 1) with metrics.aggregate(new_root=True) as b: metrics.log_scalar("loss", 2) self.assertEqual(a.get_smoothed_values()["loss"], 1) self.assertEqual(b.get_smoothed_values()["loss"], 2) def test_nested_new_root(self): with metrics.aggregate() as layer1: metrics.log_scalar("loss", 1) with metrics.aggregate(new_root=True) as layer2: metrics.log_scalar("loss", 2) with metrics.aggregate() as layer3: metrics.log_scalar("loss", 3) with metrics.aggregate(new_root=True) as layer4: metrics.log_scalar("loss", 4) metrics.log_scalar("loss", 1.5) self.assertEqual(layer4.get_smoothed_values()["loss"], 4) self.assertEqual(layer3.get_smoothed_values()["loss"], 3) self.assertEqual(layer2.get_smoothed_values()["loss"], 2.5) self.assertEqual(layer1.get_smoothed_values()["loss"], 1.25) def test_named(self): name = str(uuid.uuid4()) metrics.reset_meters(name) with metrics.aggregate(name): metrics.log_scalar("loss", 1) metrics.log_scalar("loss", 3) with metrics.aggregate(name): metrics.log_scalar("loss", 2) self.assertEqual(metrics.get_smoothed_values(name)["loss"], 1.5) def test_nested_duplicate_names(self): name = str(uuid.uuid4()) metrics.reset_meters(name) with metrics.aggregate(name): metrics.log_scalar("loss", 1) with metrics.aggregate() as other: with metrics.aggregate(name): metrics.log_scalar("loss", 2) metrics.log_scalar("loss", 6) self.assertEqual(metrics.get_smoothed_values(name)["loss"], 3) self.assertEqual(other.get_smoothed_values()["loss"], 2) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_metrics.py
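The nesting behaviour tested above in a few lines of usage; a sketch assuming fairseq is importable:

from fairseq import metrics

with metrics.aggregate() as outer:
    metrics.log_scalar("loss", 1)
    with metrics.aggregate() as inner:
        metrics.log_scalar("loss", 2)  # also counted by the outer context

print(outer.get_smoothed_values()["loss"])  # 1.5
print(inner.get_smoothed_values()["loss"])  # 2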
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import unittest from io import StringIO from unittest.mock import MagicMock, patch import torch from fairseq import checkpoint_utils, data from omegaconf import OmegaConf def mock_trainer(epoch, num_updates, iterations_in_epoch): trainer = MagicMock() trainer.load_checkpoint.return_value = { "train_iterator": { "epoch": epoch, "iterations_in_epoch": iterations_in_epoch, "shuffle": False, }, } trainer.get_num_updates.return_value = num_updates return trainer def mock_dict(): d = MagicMock() d.pad.return_value = 1 d.eos.return_value = 2 d.unk.return_value = 3 return d def get_trainer_and_epoch_itr(epoch, epoch_size, num_updates, iterations_in_epoch): tokens = torch.LongTensor(list(range(epoch_size))).view(1, -1) tokens_ds = data.TokenBlockDataset( tokens, sizes=[tokens.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) trainer = mock_trainer(epoch, num_updates, iterations_in_epoch) dataset = data.LanguagePairDataset( tokens_ds, tokens_ds.sizes, mock_dict(), shuffle=False ) epoch_itr = data.EpochBatchIterator( dataset=dataset, collate_fn=dataset.collater, batch_sampler=[[i] for i in range(epoch_size)], ) return trainer, epoch_itr def get_mock_cfg(finetune_from_model): cfg_mock = OmegaConf.create( { "checkpoint": { "save_dir": None, "optimizer_overrides": "{}", "reset_dataloader": False, "reset_meters": False, "reset_optimizer": False, "reset_lr_scheduler": False, "finetune_from_model": finetune_from_model, "model_parallel_size": 1, "restore_file": "checkpoint_last.pt", }, "common": { "model_parallel_size": 1, }, } ) return cfg_mock class TestLoadCheckpoint(unittest.TestCase): def setUp(self): self.cfg_mock = get_mock_cfg(None) self.patches = { "os.makedirs": MagicMock(), "os.path.join": MagicMock(), "os.path.isfile": MagicMock(return_value=True), "os.path.isabs": MagicMock(return_value=False), "fairseq.file_io.PathManager.exists": MagicMock(return_value=False), } self.applied_patches = [patch(p, d) for p, d in self.patches.items()] [p.start() for p in self.applied_patches] logging.disable(logging.CRITICAL) def tearDown(self): patch.stopall() logging.disable(logging.NOTSET) def test_load_partial_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 200, 50) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 2) self.assertEqual(epoch_itr.iterations_in_epoch, 50) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 50) self.assertEqual(epoch_itr.iterations_in_epoch, 51) for _ in range(150 - 52): next(itr) self.assertEqual(epoch_itr.iterations_in_epoch, 149) self.assertTrue(itr.has_next()) next(itr) self.assertFalse(itr.has_next()) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertTrue(itr.has_next()) self.assertEqual(epoch_itr.epoch, 3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) def test_load_full_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(2, 150, 300, 150) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) _, epoch_itr = 
checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 3) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0) def test_load_no_checkpoint(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) self.patches["os.path.isfile"].return_value = False _, epoch_itr = checkpoint_utils.load_checkpoint( self.cfg_mock.checkpoint, trainer ) itr = epoch_itr.next_epoch_itr(shuffle=False) self.assertEqual(epoch_itr.epoch, 1) self.assertEqual(epoch_itr.iterations_in_epoch, 0) self.assertEqual(next(itr)["net_input"]["src_tokens"][0].item(), 0) def test_finetune_from_model_args_conflict(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) for arg in [ "reset_optimizer", "reset_lr_scheduler", "reset_meters", "reset_dataloader", ]: with self.subTest(arg=arg): cfg_mock = get_mock_cfg("/temp/checkpoint_pretrained.pt") cfg_mock["checkpoint"][arg] = True with self.assertRaises(Exception) as context: _, _ = checkpoint_utils.load_checkpoint( cfg_mock.checkpoint, trainer ) self.assertTrue( "--finetune-from-model can not be set together with either --reset-optimizer" " or reset_lr_scheduler or reset_meters or reset_dataloader" in str(context.exception) ) def test_finetune_from_model(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" def mock_finetune_exist(path): if path == from_model_path: return True else: return False self.patches[ "fairseq.file_io.PathManager.exists" ].side_effect = mock_finetune_exist cfg_mock = get_mock_cfg(from_model_path) cfg_mock.checkpoint.restore_file = "checkpoint_last.pt" _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer) ( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, ) = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"] self.assertTrue(reset_optimizer) self.assertTrue(reset_lr_scheduler) self.assertTrue(reset_meters) def test_finetune_from_model_resume(self): with contextlib.redirect_stdout(StringIO()): trainer, epoch_itr = get_trainer_and_epoch_itr(1, 150, 0, 0) trainer.get_train_iterator = MagicMock(return_value=epoch_itr) from_model_path = "/temp/checkpoint_pretrained.pt" # launch second time # both restore_file=checkpoint_last.pt and finetune_from_model are set def mock_finetune_exist(path): if path == from_model_path or path.endswith("checkpoint_last.pt"): return True else: return False self.patches[ "fairseq.file_io.PathManager.exists" ].side_effect = mock_finetune_exist cfg_mock = get_mock_cfg(from_model_path) cfg_mock.checkpoint.restore_file = "checkpoint_last.pt" _, _ = checkpoint_utils.load_checkpoint(cfg_mock.checkpoint, trainer) ( checkpoint_path, reset_optimizer, reset_lr_scheduler, optimizer_overrides, ) = trainer.load_checkpoint.call_args[0] reset_meters = trainer.load_checkpoint.call_args[1]["reset_meters"] self.assertFalse(reset_optimizer) self.assertFalse(reset_lr_scheduler) self.assertFalse(reset_meters) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_train.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import unittest import tests.utils as test_utils import torch from fairseq.sequence_scorer import SequenceScorer class TestSequenceScorer(unittest.TestCase): def test_sequence_scorer(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) eos = d.eos() w1 = 4 w2 = 5 # construct dataloader data = [ { "source": torch.LongTensor([w1, w2, eos]), "target": torch.LongTensor([w1, w2, w1, eos]), }, { "source": torch.LongTensor([w2, eos]), "target": torch.LongTensor([w2, w1, eos]), }, { "source": torch.LongTensor([w2, eos]), "target": torch.LongTensor([w2, eos]), }, ] data_itr = test_utils.dummy_dataloader(data) # specify expected output probabilities args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 0.6, 0.4], # sentence 1 [0.0, unk, 0.4, 0.6], # sentence 2 [0.0, unk, 0.7, 0.3], # sentence 3 ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 0.2, 0.7], # sentence 1 [0.0, unk, 0.8, 0.2], # sentence 2 [0.7, unk, 0.1, 0.2], # sentence 3 ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 [0.10, unk, 0.50, 0.4], # sentence 1 [0.15, unk, 0.15, 0.7], # sentence 2 [0.00, unk, 0.00, 0.0], # sentence 3 ] ), # step 3: torch.FloatTensor( [ # eos w1 w2 [0.9, unk, 0.05, 0.05], # sentence 1 [0.0, unk, 0.00, 0.0], # sentence 2 [0.0, unk, 0.00, 0.0], # sentence 3 ] ), ] expected_scores = [ [0.6, 0.7, 0.5, 0.9], # sentence 1 [0.6, 0.8, 0.15], # sentence 2 [0.3, 0.7], # sentence 3 ] task = test_utils.TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) scorer = SequenceScorer(task.target_dictionary) for sample in data_itr: hypos = task.inference_step(scorer, [model], sample) for id, hypos_id in zip(sample["id"].tolist(), hypos): self.assertHypoTokens(hypos_id[0], data[id]["target"]) self.assertHypoScore(hypos_id[0], expected_scores[id]) def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_sequence_scorer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import unittest from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.models.transformer import TransformerModel from tests.test_sequence_generator import get_dummy_task_and_parser class TestInferenceDropout(unittest.TestCase): def setUp(self): self.task, self.parser = get_dummy_task_and_parser() TransformerModel.add_args(self.parser) self.args = self.parser.parse_args([]) self.args.encoder_layers = 2 self.args.decoder_layers = 1 logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_sets_inference_dropout_to_true(self): self.args.retain_dropout = True self.transformer_model = TransformerModel.build_model(self.args, self.task) cfg = convert_namespace_to_omegaconf(self.args) self.transformer_model.prepare_for_inference_(cfg) assert self.transformer_model.encoder.dropout_module.apply_during_inference assert self.transformer_model.decoder.dropout_module.apply_during_inference for layer in self.transformer_model.encoder.layers: assert layer.dropout_module.apply_during_inference def test_inference_dropout_false_by_default(self): self.transformer_model = TransformerModel.build_model(self.args, self.task) cfg = convert_namespace_to_omegaconf(self.args) self.transformer_model.prepare_for_inference_(cfg) assert not self.transformer_model.encoder.dropout_module.apply_during_inference assert not self.transformer_model.decoder.dropout_module.apply_during_inference for layer in self.transformer_model.encoder.layers: assert not layer.dropout_module.apply_during_inference for layer in self.transformer_model.decoder.layers: assert not layer.dropout_module.apply_during_inference def test_applies_training_mode(self): self.transformer_model = TransformerModel.build_model(self.args, self.task) assert self.transformer_model.encoder.dropout_module.training for layer in self.transformer_model.encoder.layers: assert layer.dropout_module.training self.transformer_model.eval() assert not self.transformer_model.decoder.dropout_module.training for layer in self.transformer_model.encoder.layers: assert not layer.dropout_module.training def test_retain_modules(self): self.args.retain_dropout = True self.args.retain_dropout_modules = [ "TransformerEncoder", "TransformerEncoderLayer", ] self.transformer_model = TransformerModel.build_model(self.args, self.task) cfg = convert_namespace_to_omegaconf(self.args) self.transformer_model.prepare_for_inference_(cfg) assert self.transformer_model.encoder.dropout_module.apply_during_inference assert not self.transformer_model.decoder.dropout_module.apply_during_inference for layer in self.transformer_model.decoder.layers: assert not layer.dropout_module.apply_during_inference
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_inference_dropout.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.modules.multihead_attention import MultiheadAttention class TestMultiheadAttention(unittest.TestCase): def test_append_prev_key_padding_mask(self): bsz = 1 src_len = 4 cases = [ # no padding mask (None, None, None), # current padding mask only ( torch.tensor([[1]]).bool(), None, torch.tensor([[0, 0, 0, 1]]).bool(), ), # previous padding mask only ( None, torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 0]]).bool(), ), # both padding masks ( torch.tensor([[1]]).bool(), torch.tensor([[0, 1, 0]]).bool(), torch.tensor([[0, 1, 0, 1]]).bool(), ), # prev_key_padding_mask already full ( torch.tensor([[0, 1, 0, 1]]).bool(), None, torch.tensor([[0, 1, 0, 1]]).bool(), ), # key_padding_mask already full ( None, torch.tensor([[0, 1, 0, 1]]).bool(), torch.tensor([[0, 1, 0, 1]]).bool(), ), ] for c in cases: key_padding_mask = MultiheadAttention._append_prev_key_padding_mask( c[0], c[1], batch_size=bsz, src_len=src_len, static_kv=False, ) if key_padding_mask is not None: self.assertTrue( torch.all(torch.eq(key_padding_mask, c[2])), f"Unexpected resultant key padding mask: {key_padding_mask}" f" given current: {c[0]} and previous: {c[1]}", ) self.assertEqual(key_padding_mask.size(0), bsz) self.assertEqual(key_padding_mask.size(1), src_len) else: self.assertIsNone(c[2]) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_multihead_attention.py
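A concrete instance of the mask-append behaviour verified above (the "both padding masks" case); note that _append_prev_key_padding_mask is a private static helper, so this is only for illustration:

import torch
from fairseq.modules.multihead_attention import MultiheadAttention

prev_mask = torch.tensor([[0, 1, 0]]).bool()  # 3 cached key positions, one padded
new_mask = torch.tensor([[1]]).bool()         # the newly appended key is padding

full_mask = MultiheadAttention._append_prev_key_padding_mask(
    new_mask, prev_mask, batch_size=1, src_len=4, static_kv=False
)
print(full_mask)  # tensor([[False,  True, False,  True]])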
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import numpy as np from fairseq.data.data_utils_fast import batch_by_size_fn from fairseq.data.data_utils_fast import batch_by_size_vec class TestBatchBySize(unittest.TestCase): @classmethod def batch_by_size_baseline( cls, indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ): """Simple, reliable and slow implementation of batch by size """ batches = [] start = 0 while start < len(indices): for end in range(start + 1, len(indices) + 1): max_val = max(num_tokens_vec[pos] for pos in range(start, end)) sent_count = end - start num_tokens = max_val * sent_count overflow = num_tokens > max_tokens > 0 or sent_count > max_sentences > 0 terminate = overflow or end == len(indices) if overflow: sent_count -= 1 if terminate: if sent_count > bsz_mult: sent_count = sent_count - sent_count % bsz_mult batches.append(indices[start : start + sent_count]) start = start + sent_count break return batches @classmethod def _get_error_message( cls, max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results ): return f"""Reference batch_by_size implementation should produce same output as the baseline method. Params: max_sentences={max_sentences}, max_tokens={max_tokens}, bsz_mult={bsz_mult}, num_tokens_vec={num_tokens_vec}, expected_batches={validation}, returned_batches={results}""" def _compare_results( self, indices_len, batch_by_size_impl, max_sentences, max_tokens, bsz_mult, num_tokens_vec, ): indices = np.array(list(range(indices_len))) validation = self.batch_by_size_baseline( indices, num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, bsz_mult=bsz_mult, ) results = batch_by_size_impl( indices, num_tokens_vec, max_tokens=max_tokens, max_sentences=max_sentences, bsz_mult=bsz_mult, ) error_msg = self._get_error_message( max_sentences, max_tokens, bsz_mult, num_tokens_vec, validation, results ) self.assertEqual(len(validation), len(results), error_msg) for first, second in zip(validation, results): self.assertTrue(np.array_equal(first, second), error_msg) def _run_compare_with_baseline_sweep(self, batch_by_size_impl): """Compare reference batch_by_size implementation with batch_by_size_baseline across a dense grid of hyperparam values""" MAX_MAX_TOKENS = 10 NUM_TOKENS_VECS_COUNT = 5 for indices_len in [10, 11]: # try odd and even len of indices for max_sentences in range(0, indices_len + 2): for max_tokens in range(0, MAX_MAX_TOKENS): for bsz_mult in range(1, max(MAX_MAX_TOKENS, indices_len) + 2): for _ in range(NUM_TOKENS_VECS_COUNT): num_tokens_vec = np.random.randint( 0, max_tokens + 1, size=indices_len ) self._compare_results( indices_len, batch_by_size_impl, max_sentences, max_tokens, bsz_mult, num_tokens_vec, ) class TestBatchBySizeVec(TestBatchBySize): def test_compare_with_baseline(self): self._run_compare_with_baseline_sweep(batch_by_size_vec) class TestBatchBySizeFn(TestBatchBySize): def test_compare_with_baseline(self): def batch_by_size_fn_wrapper( indices, num_tokens_vec, max_tokens, max_sentences, bsz_mult, ): def num_tokens_fn(idx): return num_tokens_vec[idx] return batch_by_size_fn( indices, num_tokens_fn, max_tokens, max_sentences, bsz_mult ) self._run_compare_with_baseline_sweep(batch_by_size_fn_wrapper) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_data_utils.py
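Called directly, the vectorized batcher behaves as below; a sketch with made-up token counts, assuming fairseq's compiled data utilities are available:

import numpy as np
from fairseq.data.data_utils_fast import batch_by_size_vec

indices = np.arange(6)
num_tokens_vec = np.array([3, 3, 4, 2, 2, 2])  # tokens per example

batches = batch_by_size_vec(
    indices, num_tokens_vec, max_tokens=8, max_sentences=0, bsz_mult=1
)
# Each batch is capped so that max(example length) * batch size <= max_tokens.
print([list(b) for b in batches])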
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import random import string import typing as tp import unittest from collections import Counter from tempfile import NamedTemporaryFile, TemporaryDirectory from fairseq.data import Dictionary, indexed_dataset from fairseq.data.huffman import ( HuffmanCodeBuilder, HuffmanCoder, HuffmanMMapIndexedDataset, HuffmanMMapIndexedDatasetBuilder, ) POPULATION = string.ascii_letters + string.digits def make_sentence() -> tp.List[str]: length = random.randint(10, 50) return random.choices( population=POPULATION, k=length, weights=range(1, len(POPULATION) + 1) ) def make_data(length=1000) -> tp.List[tp.List[str]]: return ( [make_sentence() for _ in range(0, length)] # add all the symbols at least once + [list(string.ascii_letters), list(string.digits)] ) def make_counts(data: tp.List[tp.List[str]]) -> Counter: return Counter([symbol for sentence in data for symbol in sentence]) def make_code_builder(data: tp.List[tp.List[str]]) -> HuffmanCodeBuilder: builder = HuffmanCodeBuilder() for sentence in data: builder.add_symbols(*sentence) return builder class TestCodeBuilder(unittest.TestCase): def test_code_builder_can_count(self): data = make_data() counts = make_counts(data) builder = make_code_builder(data) self.assertEqual(builder.symbols, counts) def test_code_builder_can_add(self): data = make_data() counts = make_counts(data) builder = make_code_builder(data) new_builder = builder + builder self.assertEqual(new_builder.symbols, counts + counts) def test_code_builder_can_io(self): data = make_data() builder = make_code_builder(data) with NamedTemporaryFile() as tmp_fp: builder.to_file(tmp_fp.name) other_builder = HuffmanCodeBuilder.from_file(tmp_fp.name) self.assertEqual(builder.symbols, other_builder.symbols) class TestCoder(unittest.TestCase): def test_coder_can_io(self): data = make_data() builder = make_code_builder(data) coder = builder.build_code() with NamedTemporaryFile() as tmp_fp: coder.to_file(tmp_fp.name) other_coder = HuffmanCoder.from_file(tmp_fp.name) self.assertEqual(coder, other_coder) def test_coder_can_encode_decode(self): data = make_data() builder = make_code_builder(data) coder = builder.build_code() encoded = [coder.encode(sentence) for sentence in data] decoded = [[n.symbol for n in coder.decode(enc)] for enc in encoded] self.assertEqual(decoded, data) unseen_data = make_data() unseen_encoded = [coder.encode(sentence) for sentence in unseen_data] unseen_decoded = [ [n.symbol for n in coder.decode(enc)] for enc in unseen_encoded ] self.assertEqual(unseen_decoded, unseen_data) def build_dataset(prefix, data, coder): with HuffmanMMapIndexedDatasetBuilder(prefix, coder) as builder: for sentence in data: builder.add_item(sentence) def sizes(data): return [len(sentence) for sentence in data] class TestHuffmanDataset(unittest.TestCase): def test_huffman_can_encode_decode(self): data = make_data() builder = make_code_builder(data) coder = builder.build_code() with TemporaryDirectory() as dirname: prefix = os.path.join(dirname, "test1") build_dataset(prefix, data, coder) dataset = HuffmanMMapIndexedDataset(prefix) self.assertEqual(len(dataset), len(data)) decoded = [list(dataset.get_symbols(i)) for i in range(0, len(dataset))] self.assertEqual(decoded, data) data_sizes = [i.item() for i in dataset.sizes] self.assertEqual(data_sizes, sizes(data)) def test_huffman_compresses(self): data = make_data() 
builder = make_code_builder(data) coder = builder.build_code() with TemporaryDirectory() as dirname: prefix = os.path.join(dirname, "huffman") build_dataset(prefix, data, coder) prefix_mmap = os.path.join(dirname, "mmap") mmap_builder = indexed_dataset.make_builder( indexed_dataset.data_file_path(prefix_mmap), "mmap", vocab_size=len(POPULATION), ) dictionary = Dictionary() for c in POPULATION: dictionary.add_symbol(c) dictionary.finalize() for sentence in data: mmap_builder.add_item(dictionary.encode_line(" ".join(sentence))) mmap_builder.finalize(indexed_dataset.index_file_path(prefix_mmap)) huff_size = os.stat(indexed_dataset.data_file_path(prefix)).st_size mmap_size = os.stat(indexed_dataset.data_file_path(prefix_mmap)).st_size self.assertLess(huff_size, mmap_size) def test_huffman_can_append(self): data1 = make_data() builder = make_code_builder(data1) coder = builder.build_code() with TemporaryDirectory() as dirname: prefix1 = os.path.join(dirname, "test1") build_dataset(prefix1, data1, coder) data2 = make_data() prefix2 = os.path.join(dirname, "test2") build_dataset(prefix2, data2, coder) prefix3 = os.path.join(dirname, "test3") with HuffmanMMapIndexedDatasetBuilder(prefix3, coder) as builder: builder.append(prefix1) builder.append(prefix2) dataset = HuffmanMMapIndexedDataset(prefix3) self.assertEqual(len(dataset), len(data1) + len(data2)) decoded1 = [list(dataset.get_symbols(i)) for i in range(0, len(data1))] self.assertEqual(decoded1, data1) decoded2 = [ list(dataset.get_symbols(i)) for i in range(len(data1), len(dataset)) ] self.assertEqual(decoded2, data2) data_sizes = [i.item() for i in dataset.sizes] self.assertEqual(data_sizes[: len(data1)], sizes(data1)) self.assertEqual(data_sizes[len(data1) : len(dataset)], sizes(data2)) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_huffman.py
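The round-trip that test_huffman.py exercises can be restated in a few lines. This is a hedged sketch that relies only on the fairseq.data.huffman calls visible in the file above (HuffmanCodeBuilder.add_symbols, build_code, HuffmanCoder.encode/decode); the sample sentences are invented for illustration.

from fairseq.data.huffman import HuffmanCodeBuilder

# toy corpus; every symbol the coder will ever encode must appear here
sentences = [list("hello"), list("world")]

builder = HuffmanCodeBuilder()
for sentence in sentences:
    builder.add_symbols(*sentence)      # accumulate symbol frequencies
coder = builder.build_code()            # derive the Huffman code table

encoded = coder.encode(sentences[0])    # compress one sentence
decoded = [node.symbol for node in coder.decode(encoded)]
assert decoded == sentences[0]          # lossless round-trip, as the tests assert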
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import os import tempfile import unittest from io import StringIO from unittest.mock import patch from fairseq import checkpoint_utils from omegaconf import OmegaConf from tests.utils import ( create_dummy_data, preprocess_translation_data, train_translation_model, ) class TestCheckpointUtils(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) @contextlib.contextmanager def _train_transformer(self, seed, extra_args=None): if extra_args is None: extra_args = [] with tempfile.TemporaryDirectory(f"_train_transformer_seed{seed}") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "3", "--decoder-layers", "3", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--seed", str(seed), ] + extra_args, ) yield os.path.join(data_dir, "checkpoint_last.pt") def test_load_model_ensemble_and_task(self): # with contextlib.redirect_stdout(StringIO()): with self._train_transformer(seed=123) as model1: with self._train_transformer(seed=456) as model2: ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task( filenames=[model1, model2] ) self.assertEqual(len(ensemble), 2) # after Transformer has been migrated to Hydra, this will probably # become cfg.common.seed self.assertEqual(ensemble[0].args.seed, 123) self.assertEqual(ensemble[1].args.seed, 456) # the task from the first model should be returned self.assertTrue("seed123" in task.cfg.data) # last cfg is saved self.assertEqual(cfg.common.seed, 456) def test_prune_state_dict(self): with contextlib.redirect_stdout(StringIO()): extra_args = ["--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01"] with self._train_transformer(seed=1, extra_args=extra_args) as model: ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task( filenames=[model], arg_overrides={ "encoder_layers_to_keep": "0,2", "decoder_layers_to_keep": "1", }, ) self.assertEqual(len(ensemble), 1) self.assertEqual(len(ensemble[0].encoder.layers), 2) self.assertEqual(len(ensemble[0].decoder.layers), 1) def test_torch_persistent_save_async(self): state_dict = {} filename = "async_checkpoint.pt" with patch(f"{checkpoint_utils.__name__}.PathManager.opena") as mock_opena: with patch(f"{checkpoint_utils.__name__}._torch_persistent_save") as mock_save: checkpoint_utils.torch_persistent_save( state_dict, filename, async_write=True ) mock_opena.assert_called_with(filename, "wb") mock_save.assert_called() if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_checkpoint_utils.py
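For reference, the pruning path covered by test_prune_state_dict above reduces to a single call. The sketch below is illustrative only: the checkpoint path is a placeholder, and it presumes a transformer checkpoint trained with layerdrop, exactly as the test sets up.

from fairseq import checkpoint_utils

ensemble, cfg, task = checkpoint_utils.load_model_ensemble_and_task(
    filenames=["/path/to/checkpoint_last.pt"],   # placeholder path
    arg_overrides={
        "encoder_layers_to_keep": "0,2",         # keep encoder layers 0 and 2
        "decoder_layers_to_keep": "1",           # keep decoder layer 1
    },
)
model = ensemble[0]
print(len(model.encoder.layers), len(model.decoder.layers))  # 2 and 1 in the test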
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from unittest import mock class TestIOPath(unittest.TestCase): def test_no_iopath(self): from .test_reproducibility import TestReproducibility with mock.patch.dict("sys.modules", {"iopath": None}): # reuse reproducibility tests, which are e2e tests that should cover # most checkpoint related functionality TestReproducibility._test_reproducibility(self, "test_reproducibility") def test_no_supports_rename(self): from .test_reproducibility import TestReproducibility with mock.patch("fairseq.file_io.PathManager.supports_rename") as mock_fn: mock_fn.return_value = False TestReproducibility._test_reproducibility(self, "test_reproducibility") if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_iopath.py
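test_iopath.py relies on a small standard-library trick, patching sys.modules so an optional dependency looks uninstalled, which is easy to miss when the file is read flattened. Below is a self-contained sketch of just that trick; the iopath module name is taken from the test, and nothing from fairseq is required.

import unittest
from unittest import mock

class TestMissingDependency(unittest.TestCase):
    def test_import_fails_when_module_hidden(self):
        # mapping a module name to None makes any later import raise ImportError
        with mock.patch.dict("sys.modules", {"iopath": None}):
            with self.assertRaises(ImportError):
                import iopath  # noqa: F401

if __name__ == "__main__":
    unittest.main()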
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import unittest import numpy as np from fairseq.data import ListDataset, ResamplingDataset class TestResamplingDataset(unittest.TestCase): def setUp(self): self.strings = ["ab", "c", "def", "ghij"] self.weights = [4.0, 2.0, 7.0, 1.5] self.size_ratio = 2 self.dataset = ListDataset( self.strings, np.array([len(s) for s in self.strings]) ) def _test_common(self, resampling_dataset, iters): assert len(self.dataset) == len(self.strings) == len(self.weights) assert len(resampling_dataset) == self.size_ratio * len(self.strings) results = {"ordered_by_size": True, "max_distribution_diff": 0.0} totalfreqs = 0 freqs = collections.defaultdict(int) for epoch_num in range(iters): resampling_dataset.set_epoch(epoch_num) indices = resampling_dataset.ordered_indices() assert len(indices) == len(resampling_dataset) prev_size = -1 for i in indices: cur_size = resampling_dataset.size(i) # Make sure indices map to same sequences within an epoch assert resampling_dataset[i] == resampling_dataset[i] # Make sure length of sequence is correct assert cur_size == len(resampling_dataset[i]) freqs[resampling_dataset[i]] += 1 totalfreqs += 1 if prev_size > cur_size: results["ordered_by_size"] = False prev_size = cur_size assert set(freqs.keys()) == set(self.strings) for s, weight in zip(self.strings, self.weights): freq = freqs[s] / totalfreqs expected_freq = weight / sum(self.weights) results["max_distribution_diff"] = max( results["max_distribution_diff"], abs(expected_freq - freq) ) return results def test_resampling_dataset_batch_by_size_false(self): resampling_dataset = ResamplingDataset( self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=False, seed=0, ) results = self._test_common(resampling_dataset, iters=1000) # For batch_by_size = False, the batches should be returned in # arbitrary order of size. assert not results["ordered_by_size"] # Allow tolerance in distribution error of 2%. assert results["max_distribution_diff"] < 0.02 def test_resampling_dataset_batch_by_size_true(self): resampling_dataset = ResamplingDataset( self.dataset, self.weights, size_ratio=self.size_ratio, batch_by_size=True, seed=0, ) results = self._test_common(resampling_dataset, iters=1000) # For batch_by_size = True, the batches should be returned in # increasing order of size. assert results["ordered_by_size"] # Allow tolerance in distribution error of 2%. assert results["max_distribution_diff"] < 0.02 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_resampling_dataset.py
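The usage pattern behind TestResamplingDataset is compact enough to restate. This sketch assumes only the constructor arguments and methods the test itself calls (ListDataset, ResamplingDataset, set_epoch, ordered_indices); the toy strings and weights mirror its setUp.

import numpy as np
from fairseq.data import ListDataset, ResamplingDataset

strings = ["ab", "c", "def", "ghij"]
base = ListDataset(strings, np.array([len(s) for s in strings]))

resampled = ResamplingDataset(
    base,
    [4.0, 2.0, 7.0, 1.5],   # per-example sampling weights
    size_ratio=2,           # resampled epoch is twice the base size
    batch_by_size=False,
    seed=0,
)

resampled.set_epoch(0)                  # draw a fresh sample for this epoch
for idx in resampled.ordered_indices():
    item = resampled[idx]               # over many epochs, frequencies follow the weights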
import os import shutil import tempfile import unittest from fairseq import options from fairseq.dataclass.utils import convert_namespace_to_omegaconf from fairseq.data.data_utils import raise_if_valid_subsets_unintentionally_ignored from .utils import create_dummy_data, preprocess_lm_data, train_language_model def make_lm_config( data_dir=None, extra_flags=None, task="language_modeling", arch="transformer_lm_gpt2_tiny", ): task_args = [task] if data_dir is not None: task_args += [data_dir] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", *task_args, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", ] + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) return cfg def write_empty_file(path): with open(path, "w"): pass assert os.path.exists(path) class TestValidSubsetsErrors(unittest.TestCase): """Test various filesystem, clarg combinations and ensure that error raising happens as expected""" def _test_case(self, paths, extra_flags): with tempfile.TemporaryDirectory() as data_dir: [ write_empty_file(os.path.join(data_dir, f"{p}.bin")) for p in paths + ["train"] ] cfg = make_lm_config(data_dir, extra_flags=extra_flags) raise_if_valid_subsets_unintentionally_ignored(cfg) def test_default_raises(self): with self.assertRaises(ValueError): self._test_case(["valid", "valid1"], []) with self.assertRaises(ValueError): self._test_case( ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] ) def partially_specified_valid_subsets(self): with self.assertRaises(ValueError): self._test_case( ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1"] ) # Fix with ignore unused self._test_case( ["valid", "valid1", "valid2"], ["--valid-subset", "valid,valid1", "--ignore-unused-valid-subsets"], ) def test_legal_configs(self): self._test_case(["valid"], []) self._test_case(["valid", "valid1"], ["--ignore-unused-valid-subsets"]) self._test_case(["valid", "valid1"], ["--combine-val"]) self._test_case(["valid", "valid1"], ["--valid-subset", "valid,valid1"]) self._test_case(["valid", "valid1"], ["--valid-subset", "valid1"]) self._test_case( ["valid", "valid1"], ["--combine-val", "--ignore-unused-valid-subsets"] ) self._test_case( ["valid1"], ["--valid-subset", "valid1"] ) # valid.bin doesn't need to be ignored. 
def test_disable_validation(self): self._test_case([], ["--disable-validation"]) self._test_case(["valid", "valid1"], ["--disable-validation"]) def test_dummy_task(self): cfg = make_lm_config(task="dummy_lm") raise_if_valid_subsets_unintentionally_ignored(cfg) def test_masked_dummy_task(self): cfg = make_lm_config(task="dummy_masked_lm") raise_if_valid_subsets_unintentionally_ignored(cfg) class TestCombineValidSubsets(unittest.TestCase): def _train(self, extra_flags): with self.assertLogs() as logs: with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir, num_examples=20) preprocess_lm_data(data_dir) shutil.copyfile(f"{data_dir}/valid.bin", f"{data_dir}/valid1.bin") shutil.copyfile(f"{data_dir}/valid.idx", f"{data_dir}/valid1.idx") train_language_model( data_dir, "transformer_lm", ["--max-update", "0", "--log-format", "json"] + extra_flags, run_validation=False, ) return [x.message for x in logs.records] def test_combined(self): flags = ["--combine-valid-subsets"] logs = self._train(flags) assert any(["valid1" in x for x in logs]) # loaded 100 examples from valid1 assert not any(["valid1_ppl" in x for x in logs]) # metrics are combined def test_subsets(self): flags = ["--valid-subset", "valid,valid1"] logs = self._train(flags) assert any(["valid_ppl" in x for x in logs]) # loaded 100 examples from valid1 assert any(["valid1_ppl" in x for x in logs]) # metrics are combined
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_valid_subset_checks.py
EXA-1-master
exa/models/unilm-master/edgelm/tests/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import tests.utils as test_utils import torch from fairseq.data import ( BacktranslationDataset, LanguagePairDataset, TransformEosDataset, ) from fairseq.sequence_generator import SequenceGenerator class TestBacktranslationDataset(unittest.TestCase): def setUp(self): ( self.tgt_dict, self.w1, self.w2, self.src_tokens, self.src_lengths, self.model, ) = test_utils.sequence_generator_setup() dummy_src_samples = self.src_tokens self.tgt_dataset = test_utils.TestDataset(data=dummy_src_samples) self.cuda = torch.cuda.is_available() def _backtranslation_dataset_helper( self, remove_eos_from_input_src, remove_eos_from_output_src, ): tgt_dataset = LanguagePairDataset( src=self.tgt_dataset, src_sizes=self.tgt_dataset.sizes, src_dict=self.tgt_dict, tgt=None, tgt_sizes=None, tgt_dict=None, ) generator = SequenceGenerator( [self.model], tgt_dict=self.tgt_dict, max_len_a=0, max_len_b=200, beam_size=2, unk_penalty=0, ) backtranslation_dataset = BacktranslationDataset( tgt_dataset=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # remove eos from the input src remove_eos_from_src=remove_eos_from_input_src, ), src_dict=self.tgt_dict, backtranslation_fn=( lambda sample: generator.generate([self.model], sample) ), output_collater=TransformEosDataset( dataset=tgt_dataset, eos=self.tgt_dict.eos(), # if we remove eos from the input src, then we need to add it # back to the output tgt append_eos_to_tgt=remove_eos_from_input_src, remove_eos_from_src=remove_eos_from_output_src, ).collater, cuda=self.cuda, ) dataloader = torch.utils.data.DataLoader( backtranslation_dataset, batch_size=2, collate_fn=backtranslation_dataset.collater, ) backtranslation_batch_result = next(iter(dataloader)) eos, pad, w1, w2 = self.tgt_dict.eos(), self.tgt_dict.pad(), self.w1, self.w2 # Note that we sort by src_lengths and add left padding, so actually # ids will look like: [1, 0] expected_src = torch.LongTensor([[w1, w2, w1, eos], [pad, pad, w1, eos]]) if remove_eos_from_output_src: expected_src = expected_src[:, :-1] expected_tgt = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) generated_src = backtranslation_batch_result["net_input"]["src_tokens"] tgt_tokens = backtranslation_batch_result["target"] self.assertTensorEqual(expected_src, generated_src) self.assertTensorEqual(expected_tgt, tgt_tokens) def test_backtranslation_dataset_no_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=True, ) def test_backtranslation_dataset_with_eos_in_output_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=False, remove_eos_from_output_src=False, ) def test_backtranslation_dataset_no_eos_in_input_src(self): self._backtranslation_dataset_helper( remove_eos_from_input_src=True, remove_eos_from_output_src=False, ) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_backtranslation_dataset.py
import contextlib import unittest import tempfile from io import StringIO import numpy as np from tests.utils import create_dummy_data, preprocess_lm_data, train_language_model try: from pyarrow import plasma from fairseq.data.plasma_utils import PlasmaView, PlasmaStore PYARROW_AVAILABLE = True except ImportError: PYARROW_AVAILABLE = False dummy_path = "dummy" @unittest.skipUnless(PYARROW_AVAILABLE, "") class TestPlasmaView(unittest.TestCase): def setUp(self) -> None: self.tmp_file = tempfile.NamedTemporaryFile() # noqa: P201 self.path = self.tmp_file.name self.server = PlasmaStore.start(path=self.path, nbytes=10000) self.client = plasma.connect(self.path, num_retries=10) def tearDown(self) -> None: self.client.disconnect() self.tmp_file.close() self.server.kill() def test_two_servers_do_not_share_object_id_space(self): data_server_1 = np.array([0, 1]) data_server_2 = np.array([2, 3]) server_2_path = self.path with tempfile.NamedTemporaryFile() as server_1_path: server = PlasmaStore.start(path=server_1_path.name, nbytes=10000) arr1 = PlasmaView( data_server_1, dummy_path, 1, plasma_path=server_1_path.name ) assert len(arr1.client.list()) == 1 assert (arr1.array == data_server_1).all() arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=server_2_path) assert (arr2.array == data_server_2).all() assert (arr1.array == data_server_1).all() server.kill() def test_hash_collision(self): data_server_1 = np.array([0, 1]) data_server_2 = np.array([2, 3]) arr1 = PlasmaView(data_server_1, dummy_path, 1, plasma_path=self.path) assert len(arr1.client.list()) == 1 arr2 = PlasmaView(data_server_2, dummy_path, 1, plasma_path=self.path) assert len(arr1.client.list()) == 1 assert len(arr2.client.list()) == 1 assert (arr2.array == data_server_1).all() # New hash key based on tuples arr3 = PlasmaView( data_server_2, dummy_path, (1, 12312312312, None), plasma_path=self.path ) assert ( len(arr2.client.list()) == 2 ), "No new object was created by using a novel hash key" assert ( arr3.object_id in arr2.client.list() ), "No new object was created by using a novel hash key" assert ( arr3.object_id in arr3.client.list() ), "No new object was created by using a novel hash key" del arr3, arr2, arr1 @staticmethod def _assert_view_equal(pv1, pv2): np.testing.assert_array_equal(pv1.array, pv2.array) def test_putting_same_array_twice(self): data = np.array([4, 4, 4]) arr1 = PlasmaView(data, dummy_path, 1, plasma_path=self.path) assert len(self.client.list()) == 1 arr1b = PlasmaView( data, dummy_path, 1, plasma_path=self.path ) # should not change contents of store arr1c = PlasmaView( None, dummy_path, 1, plasma_path=self.path ) # should not change contents of store assert len(self.client.list()) == 1 self._assert_view_equal(arr1, arr1b) self._assert_view_equal(arr1, arr1c) PlasmaView( data, dummy_path, 2, plasma_path=self.path ) # new object id, adds new entry assert len(self.client.list()) == 2 new_client = plasma.connect(self.path) assert len(new_client.list()) == 2 # new client can access same objects assert isinstance(arr1.object_id, plasma.ObjectID) del arr1b del arr1c def test_plasma_store_full_raises(self): with tempfile.NamedTemporaryFile() as new_path: server = PlasmaStore.start(path=new_path.name, nbytes=10000) with self.assertRaises(plasma.PlasmaStoreFull): # 2000 floats is more than 2000 bytes PlasmaView( np.random.rand(10000, 1), dummy_path, 1, plasma_path=new_path.name ) server.kill() def test_object_id_overflow(self): PlasmaView.get_object_id("", 2 ** 21) def test_training_lm_plasma(self): with 
contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", ["--use-plasma-view", "--plasma-path", self.path], run_validation=True, )
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_plasma_utils.py
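The plasma tests above depend on a running object store, a setup that is easy to lose in the flattened text. A hedged sketch of the minimal lifecycle, using only calls that appear in the file (PlasmaStore.start, PlasmaView, kill) and assuming pyarrow with its plasma module is installed:

import tempfile
import numpy as np
from fairseq.data.plasma_utils import PlasmaStore, PlasmaView

with tempfile.NamedTemporaryFile() as sock:
    server = PlasmaStore.start(path=sock.name, nbytes=10000)  # local plasma server
    try:
        data = np.array([0, 1, 2])
        # "dummy" and 1 mimic the test's placeholder path and hash key
        view = PlasmaView(data, "dummy", 1, plasma_path=sock.name)
        assert (view.array == data).all()  # the array is read back from the store
    finally:
        server.kill()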
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import json import os import random import sys import tempfile import unittest from io import StringIO from typing import List, Dict import torch from fairseq import options from fairseq_cli import eval_lm, train from tests.utils import ( create_dummy_data, generate_main, preprocess_lm_data, preprocess_summarization_data, preprocess_translation_data, create_laser_data_and_config_json, train_translation_model, train_language_model, ) try: import transformers # noqa has_hf_transformers = True except ImportError: has_hf_transformers = False class TestTranslation(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main(data_dir) def test_raw(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_raw") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--dataset-impl", "raw"]) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--dataset-impl", "raw"] ) generate_main(data_dir, ["--dataset-impl", "raw"]) def test_update_freq(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_update_freq") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--update-freq", "3"] ) generate_main(data_dir) def test_max_positions(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_max_positions") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) with self.assertRaises(Exception) as context: train_translation_model( data_dir, "fconv_iwslt_de_en", ["--max-target-positions", "5"], ) self.assertTrue( "skip this example with --skip-invalid-size-inputs-valid-test" in str(context.exception) ) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--max-target-positions", "5", "--skip-invalid-size-inputs-valid-test", ], ) with self.assertRaises(Exception) as context: generate_main(data_dir) generate_main(data_dir, ["--skip-invalid-size-inputs-valid-test"]) def test_generation(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_sampling") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en") generate_main( data_dir, [ "--sampling", "--temperature", "2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topk", "3", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--sampling", "--sampling-topp", "0.2", "--beam", "2", "--nbest", "2", ], ) generate_main( data_dir, [ "--diversity-rate", "0.5", "--beam", "6", ], ) with self.assertRaises(ValueError): generate_main( data_dir, [ "--diverse-beam-groups", "4", "--match-source-len", ], ) generate_main(data_dir, ["--prefix-size", "2"]) generate_main(data_dir, ["--retain-dropout"]) def test_eval_bleu(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_eval_bleu") as data_dir: 
create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--eval-bleu", "--eval-bleu-print-samples", "--eval-bleu-remove-bpe", "--eval-bleu-detok", "space", "--eval-bleu-args", '{"beam": 4, "min_len": 10}', ], ) def test_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm_wiseman_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", ], ) generate_main(data_dir) def test_lstm_bidirectional(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_bidirectional") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lstm", [ "--encoder-layers", "2", "--encoder-bidirectional", "--encoder-hidden-size", "16", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--decoder-layers", "2", ], ) generate_main(data_dir) def test_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], run_validation=True, ) generate_main(data_dir) def test_multilingual_transformer(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_multilingual_transformer_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch="multilingual_transformer", task="multilingual_translation", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "multilingual_translation", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) @unittest.skipIf( sys.platform.lower() == "darwin", "skip latent depth test on MacOS" ) def test_multilingual_translation_latent_depth(self): # test with latent depth in encoder, decoder, or both encoder_latent_layer = [[], ["--encoder-latent-layer"]] decoder_latent_layer = [[], ["--decoder-latent-layer"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_latent_layer)): for j in range(len(decoder_latent_layer)): if i == 0 and j == 0: continue enc_ll_flag = encoder_latent_layer[i] dec_ll_flag = decoder_latent_layer[j] with tempfile.TemporaryDirectory( f"test_multilingual_translation_latent_depth_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) 
train_translation_model( data_dir, arch="latent_multilingual_transformer", task="multilingual_translation_latent_depth", extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--share-encoders", "--share-decoders", "--sparsity-weight", "0.1", ] + enc_ll_flag + dec_ll_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", ] + enc_ll_flag + dec_ll_flag, ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/latent_depth/latent_depth_src", "--task", "multilingual_translation_latent_depth", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ll_flag + dec_ll_flag, ) def test_translation_multi_simple_epoch(self): # test with all combinations of encoder/decoder lang tokens encoder_langtok_flags = [ [], ["--encoder-langtok", "src"], ["--encoder-langtok", "tgt"], ] decoder_langtok_flags = [[], ["--decoder-langtok"]] with contextlib.redirect_stdout(StringIO()): for i in range(len(encoder_langtok_flags)): for j in range(len(decoder_langtok_flags)): enc_ltok_flag = encoder_langtok_flags[i] dec_ltok_flag = decoder_langtok_flags[j] with tempfile.TemporaryDirectory( f"test_translation_multi_simple_epoch_{i}_{j}" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data( data_dir, extra_flags=["--joined-dictionary"] ) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out,out-in"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out,out-in", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_no_vepoch(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_dicts(self): # test with all combinations of encoder/decoder lang tokens with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: 
create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_translation_multi_simple_epoch_src_tgt_dict_spec(self): # test the specification of explicit --src-dict and --tgt-dict with contextlib.redirect_stdout(StringIO()): enc_ltok_flag = ["--encoder-langtok", "src"] dec_ltok_flag = ["--decoder-langtok"] with tempfile.TemporaryDirectory( "test_translation_multi_simple_epoch_dict" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, extra_flags=[]) train_translation_model( data_dir, arch="transformer", task="translation_multi_simple_epoch", extra_flags=[ "--source-dict", f"{data_dir}/dict.in.txt", "--target-dict", f"{data_dir}/dict.out.txt", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--sampling-method", "temperature", "--sampling-temperature", "1.5", "--virtual-epoch-size", "1000", ] + enc_ltok_flag + dec_ltok_flag, lang_flags=["--lang-pairs", "in-out"], run_validation=True, extra_valid_flags=enc_ltok_flag + dec_ltok_flag, ) generate_main( data_dir, extra_flags=[ "--task", "translation_multi_simple_epoch", "--lang-pairs", "in-out", "--source-lang", "in", "--target-lang", "out", ] + enc_ltok_flag + dec_ltok_flag, ) def test_transformer_cross_self_attention(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_cross_self_attention" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-embed-dim", "8", "--no-cross-attention", "--cross-self-attention", ], run_validation=True, ) generate_main(data_dir, extra_flags=[]) def test_transformer_pointer_generator(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_pointer_generator" ) as data_dir: create_dummy_data(data_dir) preprocess_summarization_data(data_dir) train_translation_model( data_dir, "transformer_pointer_generator", extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--alignment-layer", "-1", "--alignment-heads", "1", "--source-position-markers", "0", ], run_validation=True, extra_valid_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) generate_main( data_dir, extra_flags=[ "--user-dir", "examples/pointer_generator/pointer_generator_src", ], ) def test_lightconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lightconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ 
"--encoder-conv-type", "lightweight", "--decoder-conv-type", "lightweight", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_dynamicconv(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_dynamicconv") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "lightconv_iwslt_de_en", [ "--encoder-conv-type", "dynamic", "--decoder-conv-type", "dynamic", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main(data_dir) def test_cmlm_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_cmlm_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "cmlm_transformer", [ "--apply-bert-init", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--pred-length-offset", "--length-loss-factor", "0.1", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "0", "--iter-decode-eos-penalty", "0", "--print-step", ], ) # def test_nat_crf_transformer(self): # with contextlib.redirect_stdout(StringIO()): # with tempfile.TemporaryDirectory('test_nat_crf_transformer') as data_dir: # create_dummy_data(data_dir) # preprocess_translation_data(data_dir, ['--joined-dictionary']) # train_translation_model(data_dir, 'nacrf_transformer', [ # '--apply-bert-init', '--criterion', # 'nat_loss', '--noise', 'full_mask', '--pred-length-offset', # '--length-loss-factor', '0.1', # '--word-ins-loss-factor', '0.5', # '--crf-lowrank-approx', '1', # '--crf-beam-approx', '1' # ], task='translation_lev') # generate_main(data_dir, [ # '--task', 'translation_lev', # '--iter-decode-max-iter', '0', # '--iter-decode-eos-penalty', '0', # '--print-step', # ]) def test_iterative_nonautoregressive_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_iterative_nonautoregressive_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "iterative_nonautoregressive_transformer", [ "--apply-bert-init", "--src-embedding-copy", "--criterion", "nat_loss", "--noise", "full_mask", "--stochastic-approx", "--dae-ratio", "0.5", "--train-step", "3", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_insertion_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_insertion_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, 
"insertion_transformer", [ "--apply-bert-init", "--criterion", "nat_loss", "--noise", "random_mask", ], task="translation_lev", ) generate_main( data_dir, [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ], ) def test_mixture_of_experts(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_moe") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", ], ) generate_main( data_dir, [ "--task", "translation_moe", "--user-dir", "examples/translation_moe/translation_moe_src", "--method", "hMoElp", "--mean-pool-gating-network", "--num-experts", "3", "--gen-expert", "0", ], ) def test_alignment(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", ], run_validation=True, ) generate_main(data_dir) def test_laser_lstm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_laser_lstm") as data_dir: laser_config_file = create_laser_data_and_config_json(data_dir) train_translation_model( laser_config_file.name, "laser_lstm", [ "--user-dir", "examples/laser/laser_src", "--weighting-alpha", "0.3", "--encoder-bidirectional", "--encoder-hidden-size", "512", "--encoder-layers", "5", "--decoder-layers", "1", "--encoder-embed-dim", "320", "--decoder-embed-dim", "320", "--decoder-lang-embed-dim", "32", "--save-dir", data_dir, "--disable-validation", ], task="laser", lang_flags=[], ) def test_laser_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_laser_transformer") as data_dir: laser_config_file = create_laser_data_and_config_json(data_dir) train_translation_model( laser_config_file.name, "laser_transformer", [ "--user-dir", "examples/laser/laser_src", "--weighting-alpha", "0.3", "--encoder-embed-dim", "320", "--decoder-embed-dim", "320", "--decoder-lang-embed-dim", "32", "--save-dir", data_dir, "--disable-validation", ], task="laser", lang_flags=[], ) def test_alignment_full_context(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_alignment") as data_dir: create_dummy_data(data_dir, alignment=True) preprocess_translation_data(data_dir, ["--align-suffix", "align"]) train_translation_model( data_dir, "transformer_align", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--load-alignments", "--alignment-layer", "1", "--criterion", "label_smoothed_cross_entropy_with_alignment", "--full-context-alignment", ], run_validation=True, ) generate_main(data_dir) def test_transformer_layerdrop(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_layerdrop") as data_dir: create_dummy_data(data_dir) 
preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "3", "--decoder-layers", "3", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--encoder-layerdrop", "0.01", "--decoder-layerdrop", "0.01", ], ) generate_main(data_dir) generate_main( data_dir, [ "--model-overrides", "{'encoder_layers_to_keep':'0,2','decoder_layers_to_keep':'1'}", ], ) class TestStories(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_self_att_wp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_self_att_wp") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) config = [ "--encoder-layers", "[(128, 3)] * 2", "--decoder-layers", "[(128, 3)] * 2", "--decoder-attention", "True", "--encoder-attention", "False", "--gated-attention", "True", "--self-attention", "True", "--project-input", "True", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--decoder-out-embed-dim", "8", "--multihead-self-attention-nheads", "2", ] train_translation_model(data_dir, "fconv_self_att_wp", config) generate_main(data_dir) # fusion model os.rename( os.path.join(data_dir, "checkpoint_last.pt"), os.path.join(data_dir, "pretrained.pt"), ) config.extend( [ "--pretrained", "True", "--pretrained-checkpoint", os.path.join(data_dir, "pretrained.pt"), "--save-dir", os.path.join(data_dir, "fusion_model"), ] ) train_translation_model(data_dir, "fconv_self_att_wp", config) class TestLanguageModeling(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "fconv_lm", [ "--decoder-layers", "[(850, 3)] * 2 + [(1024,4)]", "--decoder-embed-dim", "280", "--optimizer", "nag", "--lr", "0.1", ], ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_transformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", ["--add-bos-token", '--nval', '1'], run_validation=True, ) eval_lm_main(data_dir) eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_normformer_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", ["--add-bos-token", '--nval', '1', '--scale-fc', '--scale-heads', '--scale-attn', '--scale-fc'], run_validation=True, ) eval_lm_main(data_dir) eval_lm_main(data_dir, extra_flags=["--context-window", "25"]) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_transformer_lm_with_adaptive_softmax(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_transformer_lm_with_adaptive_softmax" ) as data_dir: create_dummy_data(data_dir) 
preprocess_lm_data(data_dir) train_language_model( data_dir, "transformer_lm", [ "--add-bos-token", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", ], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lightconv_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lightconv_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lightconv_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) def test_lstm_lm_residuals(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_lstm_lm_residuals") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_language_model( data_dir, "lstm_lm", ["--add-bos-token", "--residuals"], run_validation=True, ) eval_lm_main(data_dir) generate_main( data_dir, [ "--task", "language_modeling", "--sample-break-mode", "eos", "--tokens-per-sample", "500", ], ) @unittest.skipIf(not has_hf_transformers, "skip test if transformers is missing") def test_transformer_xl_bptt_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer_xl_bptt_lm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) task_flags = [ "--user-dir", "examples/truncated_bptt", "--task", "truncated_bptt_lm", "--batch-size", "2", "--tokens-per-sample", "50", ] train_language_model( data_dir=data_dir, arch="transformer_xl", extra_flags=task_flags + [ "--n-layer", "2", ], task="truncated_bptt_lm", run_validation=True, extra_valid_flags=task_flags, ) eval_lm_main(data_dir, extra_flags=task_flags) # Train with activation offloading train_language_model( data_dir=data_dir, arch="transformer_xl", extra_flags=task_flags + [ "--n-layer", "2", "--offload-activations", ], task="truncated_bptt_lm", run_validation=True, extra_valid_flags=task_flags, ) class TestMaskedLanguageModel(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_legacy_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_legacy_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model(data_dir, "masked_lm") def test_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "roberta_base", extra_flags=["--encoder-layers", "2"] ) def test_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) 
preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head(data_dir, "roberta_base", num_classes=num_classes) def test_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=["--regression-target"], ) def test_linformer_roberta_masked_lm(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_masked_lm( data_dir, "linformer_roberta_base", extra_flags=[ "--user-dir", "examples/linformer/linformer_src", "--encoder-layers", "2", ], ) def test_linformer_roberta_sentence_prediction(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_linformer_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=["--user-dir", "examples/linformer/linformer_src"], ) def test_linformer_roberta_regression_single(self): num_classes = 1 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_linformer_roberta_regression_single" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def test_linformer_roberta_regression_multiple(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_linformer_roberta_regression_multiple" ) as data_dir: create_dummy_roberta_head_data( data_dir, num_classes=num_classes, regression=True ) preprocess_lm_data(os.path.join(data_dir, "input0")) train_roberta_head( data_dir, "linformer_roberta_base", num_classes=num_classes, extra_flags=[ "--regression-target", "--user-dir", "examples/linformer/linformer_src", ], ) def _test_pretrained_masked_lm_for_translation(self, learned_pos_emb, encoder_only): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_mlm") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) train_legacy_masked_language_model( data_dir, arch="masked_lm", extra_args=("--encoder-learned-pos",) if learned_pos_emb else (), ) with tempfile.TemporaryDirectory( "test_mlm_translation" ) as translation_dir: create_dummy_data(translation_dir) preprocess_translation_data( translation_dir, extra_flags=["--joined-dictionary"] ) # Train transformer 
with data_dir/checkpoint_last.pt train_translation_model( translation_dir, arch="transformer_from_pretrained_xlm", extra_flags=[ "--decoder-layers", "1", "--decoder-embed-dim", "32", "--decoder-attention-heads", "1", "--decoder-ffn-embed-dim", "32", "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", "--pretrained-xlm-checkpoint", "{}/checkpoint_last.pt".format(data_dir), "--activation-fn", "gelu", "--max-source-positions", "500", "--max-target-positions", "500", ] + ( ["--encoder-learned-pos", "--decoder-learned-pos"] if learned_pos_emb else [] ) + (["--init-encoder-only"] if encoder_only else []), task="translation_from_pretrained_xlm", ) def test_pretrained_masked_lm_for_translation_learned_pos_emb(self): self._test_pretrained_masked_lm_for_translation(True, False) def test_pretrained_masked_lm_for_translation_sinusoidal_pos_emb(self): self._test_pretrained_masked_lm_for_translation(False, False) def test_pretrained_masked_lm_for_translation_encoder_only(self): self._test_pretrained_masked_lm_for_translation(True, True) def test_r4f_roberta(self): num_classes = 3 with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_r4f_roberta_head") as data_dir: create_dummy_roberta_head_data(data_dir, num_classes=num_classes) preprocess_lm_data(os.path.join(data_dir, "input0")) preprocess_lm_data(os.path.join(data_dir, "label")) train_roberta_head( data_dir, "roberta_base", num_classes=num_classes, extra_flags=[ "--user-dir", "examples/rxf/rxf_src", "--criterion", "sentence_prediction_r3f", "--spectral-norm-classification-head", ], ) def train_legacy_masked_language_model(data_dir, arch, extra_args=()): train_parser = options.get_training_parser() # TODO: langs should be in and out right? 
train_args = options.parse_args_and_arch( train_parser, [ "--task", "cross_lingual_lm", data_dir, "--arch", arch, # Optimizer args "--optimizer", "adam", "--lr-scheduler", "reduce_lr_on_plateau", "--lr-shrink", "0.5", "--lr", "0.0001", "--stop-min-lr", "1e-09", # dropout, attention args "--dropout", "0.1", "--attention-dropout", "0.1", # MLM args "--criterion", "legacy_masked_lm_loss", "--masked-lm-only", "--monolingual-langs", "in,out", "--num-segment", "5", # Transformer args: use a small transformer model for fast training "--encoder-layers", "1", "--encoder-embed-dim", "32", "--encoder-attention-heads", "1", "--encoder-ffn-embed-dim", "32", # Other training args "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--dataset-impl", "raw", "--num-workers", "0", ] + list(extra_args), ) train.main(train_args) class TestOptimizers(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_optimizers(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_optimizers") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) optimizers = ["adafactor", "adam", "nag", "adagrad", "sgd", "adadelta"] last_checkpoint = os.path.join(data_dir, "checkpoint_last.pt") for optimizer in optimizers: if os.path.exists(last_checkpoint): os.remove(last_checkpoint) train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", optimizer, ], ) generate_main(data_dir) def read_last_log_entry( logs: List[logging.LogRecord], logger_name: str ) -> Dict[str, float]: for x in reversed(logs): if x.name == logger_name: return json.loads(x.message) raise ValueError(f"No entries from {logger_name} found in captured logs") class TestActivationCheckpointing(unittest.TestCase): base_flags = [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "8", "--decoder-embed-dim", "8", "--restore-file", "x.pt", "--log-format", "json", "--log-interval", "1", "--max-update", "2", ] def _train(self, data_dir, extra_flags): with self.assertLogs() as logs: train_translation_model( data_dir, "transformer_iwslt_de_en", self.base_flags + extra_flags, run_validation=True, extra_valid_flags=["--log-format", "json"], ) return logs.records def test_activation_offloading_does_not_change_metrics(self): """Neither ----checkpoint-activations nor --offload-activations should change loss""" with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: with self.assertLogs(): create_dummy_data(data_dir, num_examples=20) preprocess_translation_data(data_dir) offload_logs = self._train(data_dir, ["--offload-activations"]) baseline_logs = self._train(data_dir, []) assert len(baseline_logs) == len(offload_logs) baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") offload_valid_stats = read_last_log_entry(offload_logs, "valid") baseline_train_stats = read_last_log_entry(baseline_logs, "train") offload_train_stats = read_last_log_entry(offload_logs, "train") assert ( baseline_train_stats["train_loss"] == offload_train_stats["train_loss"] ) assert ( baseline_valid_stats["valid_loss"] == offload_valid_stats["valid_loss"] ) def 
test_activation_checkpointing_does_not_change_metrics(self): """--checkpoint-activations should not change loss""" with tempfile.TemporaryDirectory("test_transformer_with_act_cpt") as data_dir: with self.assertLogs(): create_dummy_data(data_dir, num_examples=20) preprocess_translation_data(data_dir) ckpt_logs = self._train(data_dir, ["--checkpoint-activations"]) baseline_logs = self._train(data_dir, []) assert len(baseline_logs) == len(ckpt_logs) baseline_train_stats = read_last_log_entry(baseline_logs, "train") ckpt_train_stats = read_last_log_entry(ckpt_logs, "train") assert baseline_train_stats["train_loss"] == ckpt_train_stats["train_loss"] baseline_valid_stats = read_last_log_entry(baseline_logs, "valid") ckpt_valid_stats = read_last_log_entry(ckpt_logs, "valid") assert baseline_valid_stats["valid_loss"] == ckpt_valid_stats["valid_loss"] def create_dummy_roberta_head_data( data_dir, num_examples=100, maxlen=10, num_classes=2, regression=False ): input_dir = "input0" def _create_dummy_data(filename): random_data = torch.rand(num_examples * maxlen) input_data = 97 + torch.floor(26 * random_data).int() if regression: output_data = torch.rand((num_examples, num_classes)) else: output_data = 1 + torch.floor(num_classes * torch.rand(num_examples)).int() with open(os.path.join(data_dir, input_dir, filename + ".out"), "w") as f_in: label_filename = filename + ".label" if regression else filename + ".out" with open(os.path.join(data_dir, "label", label_filename), "w") as f_out: offset = 0 for i in range(num_examples): # write example input ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, input_data[offset : offset + ex_len])) print(ex_str, file=f_in) # write example label if regression: class_str = " ".join(map(str, output_data[i].numpy())) print(class_str, file=f_out) else: class_str = "class{}".format(output_data[i]) print(class_str, file=f_out) offset += ex_len os.mkdir(os.path.join(data_dir, input_dir)) os.mkdir(os.path.join(data_dir, "label")) _create_dummy_data("train") _create_dummy_data("valid") _create_dummy_data("test") def train_masked_lm(data_dir, arch, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "masked_lm", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "masked_lm", "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def train_roberta_head(data_dir, arch, num_classes=2, extra_flags=None): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "sentence_prediction", data_dir, "--arch", arch, "--encoder-layers", "2", "--num-classes", str(num_classes), "--optimizer", "adam", "--lr", "0.0001", "--criterion", "sentence_prediction", "--max-tokens", "500", "--max-positions", "500", "--batch-size", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) def eval_lm_main(data_dir, extra_flags=None): eval_lm_parser = options.get_eval_lm_parser() eval_lm_args = options.parse_args_and_arch( eval_lm_parser, [ data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) eval_lm.main(eval_lm_args) if __name__ == 
"__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_binaries.py
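The TestActivationCheckpointing cases in test_binaries.py above assert that recomputing activations leaves the training and validation losses untouched. A minimal standalone sketch of that property using torch.utils.checkpoint (plain PyTorch, an illustration rather than the fairseq trainer):

import torch
from torch.utils.checkpoint import checkpoint

torch.manual_seed(0)
layer = torch.nn.Sequential(
    torch.nn.Linear(8, 8), torch.nn.ReLU(), torch.nn.Linear(8, 8)
)
x = torch.randn(4, 8, requires_grad=True)

plain_loss = layer(x).sum()
plain_loss.backward()
plain_grad = x.grad.clone()

x.grad = None
ckpt_loss = checkpoint(layer, x).sum()   # activations are freed and recomputed on backward
ckpt_loss.backward()

assert torch.allclose(plain_loss, ckpt_loss)   # same loss with and without checkpointing
assert torch.allclose(plain_grad, x.grad)      # same input gradient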
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import sys import unittest import torch from fairseq.token_generation_constraints import * def tensorize(constraints: List[List[int]]) -> torch.Tensor: return [torch.tensor(x) for x in constraints] class TestHelperRoutines(unittest.TestCase): def setUp(self): self.examples = [ ([[]], torch.tensor([[0]])), ([[], []], torch.tensor([[0], [0]])), ([[torch.tensor([1, 2])], []], torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]])), ( [ [ torch.tensor([3, 1, 2]), torch.tensor([3]), torch.tensor([4, 5, 6, 7]), ], [], [torch.tensor([1, 8, 9, 10, 1, 4, 11, 12])], ], torch.tensor( [ [3, 3, 1, 2, 0, 3, 0, 4, 5, 6, 7, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 8, 9, 10, 1, 4, 11, 12, 0, 0, 0], ] ), ), ] def test_packing(self): """Ensures the list of lists of tensors gets packed correctly.""" for batch_constraints, expected_tensor in self.examples: packed = pack_constraints(batch_constraints) assert torch.equal(packed, expected_tensor) class TestUnorderedConstraintState(unittest.TestCase): def setUp(self): # Tuples of (contraint set, expected printed graph, token counts per node) self.examples = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), "([None].False#6 ([1].True#4 ([2].False#1 [3].True#1) [3].True#1 [4].True#1) ([4].False#2 ([5].True#2 ([6].False#1 [7].True#1))))", {1: 4, 2: 1, 3: 2, 4: 3, 5: 2, 6: 1, 7: 1}, ), ([], "[None].False#0", {}), (tensorize([[0]]), "([None].False#1 [0].True#1)", {0: 1}), ( tensorize([[100000, 1, 2, 3, 4, 5]]), "([None].False#1 ([100000].False#1 ([1].False#1 ([2].False#1 ([3].False#1 ([4].False#1 [5].True#1))))))", {100000: 1, 1: 1, 2: 1, 3: 1, 4: 1, 5: 1}, ), ( tensorize([[1, 2], [1, 2]]), "([None].False#2 ([1].False#2 [2].True#2))", {1: 2, 2: 2}, ), ( tensorize([[1, 2], [3, 4]]), "([None].False#2 ([1].False#1 [2].True#1) ([3].False#1 [4].True#1))", {1: 1, 2: 1, 3: 1, 4: 1}, ), ] self.sequences = [ ( self.examples[0][0], [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 2, 94], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], [1, 3, 999, 1, 4], {"bank": 4, "num_completed": 2, "finished": False, "is_root": False}, ), ( self.examples[0][0], [1, 3, 999, 1, 4, 999], {"bank": 4, "num_completed": 2, "finished": False, "is_root": True}, ), ( self.examples[0][0], [4, 5, 6, 8], {"bank": 2, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[0][0], # Tricky, because in last three, goes down [1->4] branch, could miss [1] and [4->5] # [[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]], [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( self.examples[0][0], [1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": True}, ), ( tensorize([[1], [2, 3]]), # Should not be able to get credit for entering 1 a second time [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": True}, ), ( self.examples[4][0], [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( self.examples[4][0], [1, 2, 1, 2, 1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ( self.examples[5][0], [1, 2, 3, 
4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": True}, ), ] def test_graphs(self): """ Test whether unordered graph systems are created correctly. """ for example in self.examples: constraints, expected, gold_counts = example c = ConstraintNode.create(constraints) assert ( ConstraintNode.print_graph(c) == expected ), f"got {ConstraintNode.print_graph(c)}, expected {expected}" assert ( c.token_counts() == gold_counts ), f"{c} got {c.token_counts()} wanted {gold_counts}" def test_next_tokens(self): """ Tests that the set of next tokens is correct. """ for example in self.examples: constraints, expected, gold_counts = example root = ConstraintNode.create(constraints) root_tokens = set(root.children.keys()) for sequence in constraints: state = UnorderedConstraintState(root) for token in sequence: all_tokens = root_tokens.union(state.node.children.keys()) assert ( all_tokens == state.next_tokens() ), f"ALL {all_tokens} NEXT {state.next_tokens()}" state = state.advance(token) def test_sequences(self): for constraints, tokens, expected in self.sequences: state = UnorderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" class TestOrderedConstraintState(unittest.TestCase): def setUp(self): self.sequences = [ ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2], {"bank": 2, "num_completed": 0, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 94], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 3, 999, 1, 4], {"bank": 0, "num_completed": 0, "finished": False, "is_root": True}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 999, 999], {"bank": 3, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 77, 1, 3, 1], {"bank": 6, "num_completed": 2, "finished": False, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 3, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1, 2, 3], [1, 3], [1, 4], [4, 5, 6, 7], [1], [4, 5]]), [1, 2, 999, 1, 2, 3, 999, 1, 3, 1, 4, 4, 5, 6, 7, 1, 4, 5, 117], {"bank": 14, "num_completed": 6, "finished": True, "is_root": False}, ), ( tensorize([[1], [2, 3]]), [1, 1], {"bank": 1, "num_completed": 1, "finished": False, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [1, 2]]), [1, 2, 1, 2, 1], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ( tensorize([[1, 2], [3, 4]]), [1, 2, 3, 4, 5], {"bank": 4, "num_completed": 2, "finished": True, "is_root": False}, ), ] def test_sequences(self): for i, (constraints, tokens, expected) in enumerate(self.sequences): state = OrderedConstraintState.create(pack_constraints([constraints])[0]) for token in tokens: state = state.advance(token) result = {} for attr in expected.keys(): result[attr] = getattr(state, attr) 
assert ( result == expected ), f"TEST({tokens}) GOT: {result} WANTED: {expected}" if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_constraints.py
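The expected tensors in TestHelperRoutines above imply a simple packed layout for the constraint batches. A small sketch of that layout, inferred from the test fixtures rather than copied from fairseq's pack_constraints:

import torch

def pack_constraints_sketch(batch):
    # Each row: number of constraints, then each constraint's tokens followed
    # by a 0 separator, right-padded with zeros to the widest row in the batch.
    rows = []
    for constraints in batch:
        row = [len(constraints)]
        for c in constraints:
            row.extend(int(t) for t in c)
            row.append(0)
        rows.append(row)
    width = max(len(r) for r in rows)
    return torch.tensor([r + [0] * (width - len(r)) for r in rows], dtype=torch.long)

packed = pack_constraints_sketch([[torch.tensor([1, 2])], []])
assert torch.equal(packed, torch.tensor([[1, 1, 2, 0], [0, 0, 0, 0]]))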
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import unittest import tests.utils as test_utils import torch from fairseq.criterions.cross_entropy import CrossEntropyCriterion from fairseq.criterions.label_smoothed_cross_entropy import ( LabelSmoothedCrossEntropyCriterion, ) class TestLabelSmoothing(unittest.TestCase): def setUp(self): # build dictionary self.d = test_utils.dummy_dictionary(3) vocab = len(self.d) self.assertEqual(vocab, 4 + 3) # 4 special + 3 tokens self.assertEqual(self.d.pad(), 1) self.assertEqual(self.d.eos(), 2) self.assertEqual(self.d.unk(), 3) pad, eos, unk, w1, w2, w3 = 1, 2, 3, 4, 5, 6 # noqa: F841 # build dataset self.data = [ # the first batch item has padding { "source": torch.LongTensor([w1, eos]), "target": torch.LongTensor([w1, eos]), }, { "source": torch.LongTensor([w1, eos]), "target": torch.LongTensor([w1, w1, eos]), }, ] self.sample = next(test_utils.dummy_dataloader(self.data)) # build model self.args = argparse.Namespace() self.args.sentence_avg = False self.args.report_accuracy = False self.args.probs = ( torch.FloatTensor( [ # pad eos unk w1 w2 w3 [0.05, 0.05, 0.1, 0.05, 0.3, 0.4, 0.05], [0.05, 0.10, 0.2, 0.05, 0.2, 0.3, 0.10], [0.05, 0.15, 0.3, 0.05, 0.1, 0.2, 0.15], ] ) .unsqueeze(0) .expand(2, 3, 7) ) # add batch dimension self.task = test_utils.TestTranslationTask.setup_task(self.args, self.d, self.d) self.model = self.task.build_model(self.args) def test_nll_loss(self): self.args.label_smoothing = 0.1 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( self.args, self.task ) nll_loss, nll_sample_size, nll_logging_output = nll_crit( self.model, self.sample ) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( self.model, self.sample ) self.assertLess(abs(nll_loss - nll_logging_output["loss"]), 1e-6) self.assertLess(abs(nll_loss - smooth_logging_output["nll_loss"]), 1e-6) def test_padding(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample) def get_one_no_padding(idx): # create a new sample with just a single batch item so that there's # no padding sample1 = next(test_utils.dummy_dataloader([self.data[idx]])) args1 = copy.copy(self.args) args1.probs = args1.probs[idx, :, :].unsqueeze(0) model1 = self.task.build_model(args1) loss1, _, _ = crit(model1, sample1) return loss1 loss1 = get_one_no_padding(0) loss2 = get_one_no_padding(1) self.assertAlmostEqual(loss, loss1 + loss2) def test_reduction(self): self.args.label_smoothing = 0.1 crit = LabelSmoothedCrossEntropyCriterion.build_criterion(self.args, self.task) loss, _, logging_output = crit(self.model, self.sample, reduce=True) unreduced_loss, _, _ = crit(self.model, self.sample, reduce=False) self.assertAlmostEqual(loss, unreduced_loss.sum()) def test_zero_eps(self): self.args.label_smoothing = 0.0 nll_crit = CrossEntropyCriterion.build_criterion(self.args, self.task) smooth_crit = LabelSmoothedCrossEntropyCriterion.build_criterion( self.args, self.task ) nll_loss, nll_sample_size, nll_logging_output = nll_crit( self.model, self.sample ) smooth_loss, smooth_sample_size, smooth_logging_output = smooth_crit( self.model, self.sample ) self.assertAlmostEqual(nll_loss, smooth_loss) def assertAlmostEqual(self, t1, t2): 
self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-6) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_label_smoothing.py
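The assertions in test_label_smoothing.py above hinge on the smoothed criterion reducing to plain NLL when the smoothing weight is zero. A textbook-style sketch of label-smoothed loss in raw PyTorch (not necessarily fairseq's exact variant):

import torch
import torch.nn.functional as F

torch.manual_seed(0)
logits = torch.randn(3, 7)                  # 3 positions, vocabulary of 7
target = torch.tensor([4, 5, 6])
eps = 0.1

lprobs = F.log_softmax(logits, dim=-1)
nll = -lprobs.gather(1, target.unsqueeze(1)).squeeze(1)   # per-position NLL
uniform = -lprobs.mean(dim=-1)                            # cost of a uniform target
smoothed = (1.0 - eps) * nll + eps * uniform              # eps=0 recovers plain NLL

assert torch.allclose(nll, F.cross_entropy(logits, target, reduction="none"))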
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import json import os import random import sys from io import StringIO import torch import torch.nn.functional as F from fairseq import options, utils from fairseq.data import Dictionary from fairseq.data.language_pair_dataset import collate from fairseq.models import ( FairseqEncoder, FairseqEncoderDecoderModel, FairseqIncrementalDecoder, ) from fairseq.models.fairseq_encoder import EncoderOut from fairseq.tasks import LegacyFairseqTask from fairseq_cli import generate, interactive, preprocess, train, validate import fairseq.distributed.utils as distributed_utils from fairseq.dataclass.utils import convert_namespace_to_omegaconf def dummy_dictionary(vocab_size, prefix="token_"): d = Dictionary() for i in range(vocab_size): token = prefix + str(i) d.add_symbol(token) d.finalize(padding_factor=1) # don't add extra padding symbols return d def dummy_dataloader( samples, padding_idx=1, eos_idx=2, batch_size=None, ): if batch_size is None: batch_size = len(samples) # add any missing data to samples for i, sample in enumerate(samples): if "id" not in sample: sample["id"] = i # create dataloader dataset = TestDataset(samples) dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, collate_fn=(lambda samples: collate(samples, padding_idx, eos_idx)), ) return iter(dataloader) def sequence_generator_setup(): # construct dummy dictionary d = dummy_dictionary(vocab_size=2) eos = d.eos() w1 = 4 w2 = 5 # construct source data src_tokens = torch.LongTensor([[w1, w2, eos], [w1, w2, eos]]) src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [1.0, unk, 0.0, 0.0], # w1: 0.9 (emit: w1 <eos>: 0.9*1.0) [0.0, unk, 0.9, 0.1], # w2: 0.1 # sentence 2: [0.25, unk, 0.35, 0.4], # w1: 0.7 (don't emit: w1 <eos>: 0.7*0.25) [0.00, unk, 0.10, 0.9], # w2: 0.3 ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [0.0, unk, 0.1, 0.9], # w2 w1: 0.1*0.9 [ 0.6, unk, 0.2, 0.2, ], # w2 w2: 0.1*0.1 (emit: w2 w2 <eos>: 0.1*0.1*0.6) # sentence 2: [ 0.60, unk, 0.4, 0.00, ], # w1 w2: 0.7*0.4 (emit: w1 w2 <eos>: 0.7*0.4*0.6) [0.01, unk, 0.0, 0.99], # w2 w2: 0.3*0.9 ] ), # step 3: torch.FloatTensor( [ # eos w1 w2 prefix # sentence 1: [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w2: 0.1*0.9*0.9 (emit: w2 w1 w2 <eos>: 0.1*0.9*0.9*1.0) [ 1.0, unk, 0.0, 0.0, ], # w2 w1 w1: 0.1*0.9*0.1 (emit: w2 w1 w1 <eos>: 0.1*0.9*0.1*1.0) # sentence 2: [ 0.1, unk, 0.5, 0.4, ], # w2 w2 w2: 0.3*0.9*0.99 (emit: w2 w2 w2 <eos>: 0.3*0.9*0.99*0.1) [ 1.0, unk, 0.0, 0.0, ], # w1 w2 w1: 0.7*0.4*0.4 (emit: w1 w2 w1 <eos>: 0.7*0.4*0.4*1.0) ] ), ] task = TestTranslationTask.setup_task(args, d, d) model = task.build_model(args) tgt_dict = task.target_dictionary return tgt_dict, w1, w2, src_tokens, src_lengths, model def create_dummy_data(data_dir, num_examples=100, maxlen=20, alignment=False): def _create_dummy_data(filename): data = torch.rand(num_examples * maxlen) data = 97 + torch.floor(26 * data).int() with open(os.path.join(data_dir, filename), "w") as h: offset = 0 for _ in range(num_examples): ex_len = random.randint(1, maxlen) ex_str = " ".join(map(chr, 
data[offset : offset + ex_len])) print(ex_str, file=h) offset += ex_len def _create_dummy_alignment_data(filename_src, filename_tgt, filename): with open(os.path.join(data_dir, filename_src), "r") as src_f, open( os.path.join(data_dir, filename_tgt), "r" ) as tgt_f, open(os.path.join(data_dir, filename), "w") as h: for src, tgt in zip(src_f, tgt_f): src_len = len(src.split()) tgt_len = len(tgt.split()) avg_len = (src_len + tgt_len) // 2 num_alignments = random.randint(avg_len // 2, 2 * avg_len) src_indices = torch.floor(torch.rand(num_alignments) * src_len).int() tgt_indices = torch.floor(torch.rand(num_alignments) * tgt_len).int() ex_str = " ".join( [ "{}-{}".format(src, tgt) for src, tgt in zip(src_indices, tgt_indices) ] ) print(ex_str, file=h) _create_dummy_data("train.in") _create_dummy_data("train.out") _create_dummy_data("valid.in") _create_dummy_data("valid.out") _create_dummy_data("test.in") _create_dummy_data("test.out") if alignment: _create_dummy_alignment_data("train.in", "train.out", "train.align") _create_dummy_alignment_data("valid.in", "valid.out", "valid.align") _create_dummy_alignment_data("test.in", "test.out", "test.align") def preprocess_lm_data(data_dir): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--only-source", "--trainpref", os.path.join(data_dir, "train.out"), "--validpref", os.path.join(data_dir, "valid.out"), "--testpref", os.path.join(data_dir, "test.out"), "--destdir", data_dir, ] ) preprocess.main(preprocess_args) def preprocess_translation_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def preprocess_summarization_data(data_dir, extra_flags=None): preprocess_parser = options.get_preprocessing_parser() preprocess_args = preprocess_parser.parse_args( [ "--source-lang", "in", "--target-lang", "out", "--trainpref", os.path.join(data_dir, "train"), "--validpref", os.path.join(data_dir, "valid"), "--testpref", os.path.join(data_dir, "test"), "--thresholdtgt", "0", "--thresholdsrc", "0", "--joined-dictionary", "--destdir", data_dir, ] + (extra_flags or []), ) preprocess.main(preprocess_args) def create_laser_data_and_config_json(data_dir): src_langs = ["de", "fr", "ru", "tr", "zh"] tgt_langs = ["en", "es"] config_json = {} config_train_json = [] src_vocab = None tgt_vocab = None for src_lang in src_langs: for tgt_lang in tgt_langs: langpair_folder = f"{src_lang}-{tgt_lang}" langpair_path = os.path.join(data_dir, langpair_folder) os.mkdir(langpair_path) create_dummy_data(langpair_path) preprocess_translation_data(langpair_path, ["--dataset-impl", "cached"]) src_vocab = os.path.join(langpair_path, "dict.in.txt") tgt_vocab = os.path.join(langpair_path, "dict.out.txt") config_train_json.append( { "id": 0 if tgt_lang == "en" else 1, "src": os.path.join(langpair_path, "train.in-out.in"), "tgt": os.path.join(langpair_path, "train.in-out.out"), } ) config_json["src_vocab"] = src_vocab config_json["tgt_vocab"] = tgt_vocab config_json["train"] = config_train_json with open(os.path.join(data_dir, "laserconfig.json"), "w") as config_file: json.dump(config_json, config_file) return config_file def 
train_translation_model( data_dir, arch, extra_flags=None, task="translation", run_validation=False, lang_flags=None, extra_valid_flags=None, world_size=1, ): if lang_flags is None: lang_flags = [ "--source-lang", "in", "--target-lang", "out", ] train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--save-dir", data_dir, "--arch", arch, "--optimizer", "nag", "--lr", "0.05", "--max-tokens", "500", "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", str(world_size), "--num-workers", "0", ] + lang_flags + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) distributed_utils.call_main(cfg, train.main) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + lang_flags + (extra_valid_flags or []), ) validate.main(validate_args) def generate_main(data_dir, extra_flags=None, path=None): if extra_flags is None: extra_flags = [ "--print-alignment", ] if path is None: path = os.path.join(data_dir, "checkpoint_last.pt") generate_parser = options.get_generation_parser() generate_args = options.parse_args_and_arch( generate_parser, [ data_dir, "--path", path, "--beam", "3", "--batch-size", "64", "--max-len-b", "5", "--gen-subset", "valid", "--no-progress-bar", "--num-workers", "0", ] + (extra_flags or []), ) # evaluate model in batch mode generate.main(generate_args) # evaluate model interactively generate_args.buffer_size = 0 generate_args.input = "-" generate_args.batch_size = None orig_stdin = sys.stdin sys.stdin = StringIO("h e l l o\n") interactive.main(generate_args) sys.stdin = orig_stdin class TestDataset(torch.utils.data.Dataset): def __init__(self, data): super().__init__() self.data = data self.sizes = None def __getitem__(self, index): return self.data[index] def __len__(self): return len(self.data) class TestTranslationTask(LegacyFairseqTask): def __init__(self, args, src_dict, tgt_dict, model): super().__init__(args) self.src_dict = src_dict self.tgt_dict = tgt_dict self.model = model @classmethod def setup_task(cls, args, src_dict=None, tgt_dict=None, model=None): return cls(args, src_dict, tgt_dict, model) def build_model(self, args): return TestModel.build_model(args, self) @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.tgt_dict class TestModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class 
TestIncrementalDecoder(FairseqIncrementalDecoder): def __init__(self, args, dictionary): super().__init__(dictionary) assert hasattr(args, "beam_probs") or hasattr(args, "probs") args.max_decoder_positions = getattr(args, "max_decoder_positions", 100) self.args = args def forward(self, prev_output_tokens, encoder_out=None, incremental_state=None): if incremental_state is not None: prev_output_tokens = prev_output_tokens[:, -1:] bbsz = prev_output_tokens.size(0) vocab = len(self.dictionary) src_len = encoder_out.encoder_out.size(1) tgt_len = prev_output_tokens.size(1) # determine number of steps if incremental_state is not None: # cache step number step = utils.get_incremental_state(self, incremental_state, "step") if step is None: step = 0 utils.set_incremental_state(self, incremental_state, "step", step + 1) steps = [step] else: steps = list(range(tgt_len)) # define output in terms of raw probs if hasattr(self.args, "probs"): assert ( self.args.probs.dim() == 3 ), "expected probs to have size bsz*steps*vocab" probs = self.args.probs.index_select(1, torch.LongTensor(steps)) else: probs = torch.FloatTensor(bbsz, len(steps), vocab).zero_() for i, step in enumerate(steps): # args.beam_probs gives the probability for every vocab element, # starting with eos, then unknown, and then the rest of the vocab if step < len(self.args.beam_probs): probs[:, i, self.dictionary.eos() :] = self.args.beam_probs[step] else: probs[:, i, self.dictionary.eos()] = 1.0 # random attention attn = torch.rand(bbsz, tgt_len, src_len) dev = prev_output_tokens.device return probs.to(dev), {"attn": [attn.to(dev)]} def get_normalized_probs(self, net_output, log_probs, _): # the decoder returns probabilities directly probs = net_output[0] if log_probs: return probs.log() else: return probs def max_positions(self): return self.args.max_decoder_positions class TestReshapingEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): b_sz, t_sz = src_tokens.shape padding_needed = t_sz % 2 x = src_tokens if padding_needed > 0: padding_needed = 2 - padding_needed x = F.pad(x, (0, padding_needed)) return EncoderOut( encoder_out=x.view(b_sz, -1, 2), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) class TestReshapingModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestReshapingEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) class TestAdditionalInputEncoder(FairseqEncoder): def __init__(self, args, dictionary): super().__init__(dictionary) self.args = args def forward(self, src_tokens, src_lengths=None, **kwargs): assert "fancy_other_input" in kwargs assert kwargs["fancy_other_input"] is not None return EncoderOut( encoder_out=src_tokens, encoder_padding_mask=None, encoder_embedding=None, encoder_states=None, src_tokens=None, src_lengths=None, ) def reorder_encoder_out(self, encoder_out, new_order): return EncoderOut( encoder_out=encoder_out.encoder_out.index_select(0, new_order), encoder_padding_mask=None, encoder_embedding=None, 
encoder_states=None, src_tokens=None, src_lengths=None, ) class TestAdditionalInputModel(FairseqEncoderDecoderModel): def __init__(self, encoder, decoder): super().__init__(encoder, decoder) @classmethod def build_model(cls, args, task): encoder = TestAdditionalInputEncoder(args, task.source_dictionary) decoder = TestIncrementalDecoder(args, task.target_dictionary) return cls(encoder, decoder) def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) decoder_out = self.decoder( prev_output_tokens, encoder_out=encoder_out, **kwargs ) return decoder_out def train_language_model( data_dir, arch, extra_flags=None, run_validation=False, extra_valid_flags=None, task="language_modeling", world_size=1, ): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", task, data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", str(world_size), "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) cfg = convert_namespace_to_omegaconf(train_args) distributed_utils.call_main(cfg, train.main) if run_validation: # test validation validate_parser = options.get_validation_parser() validate_args = options.parse_args_and_arch( validate_parser, [ "--task", task, data_dir, "--path", os.path.join(data_dir, "checkpoint_last.pt"), "--valid-subset", "valid", "--max-tokens", "500", "--no-progress-bar", "--num-workers", "0", ] + (extra_valid_flags or []), ) validate.main(validate_args)
EXA-1-master
exa/models/unilm-master/edgelm/tests/utils.py
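TestIncrementalDecoder in tests/utils.py above caches a step counter and processes only the newest target token once an incremental_state is supplied. A stripped-down sketch of that pattern with a plain dict standing in for fairseq's incremental-state helpers:

def decode_step(prev_output_tokens, incremental_state=None):
    if incremental_state is not None:
        prev_output_tokens = prev_output_tokens[-1:]                 # only the newest token
        incremental_state["step"] = incremental_state.get("step", 0) + 1
    # stand-in for real logits: one output per token actually processed
    return [t * 10 for t in prev_output_tokens], incremental_state

state = {}
out, state = decode_step([7], state)
out, state = decode_step([7, 3], state)
assert out == [30] and state["step"] == 2   # O(1) work per generation step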
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import logging import unittest from typing import Sequence from fairseq.data import LanguagePairDataset, ListDataset, RoundRobinZipDatasets from tests.test_train import mock_dict def lang_pair_dataset(lengths: Sequence[int]) -> LanguagePairDataset: tokens = [[i] * l for i, l in enumerate(lengths)] return LanguagePairDataset(ListDataset(tokens), lengths, mock_dict()) def sample(id: int, length: int): return {"id": id, "source": [id] * length, "target": None} class TestDataset(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_round_robin_zip_datasets(self): long_dataset = lang_pair_dataset([10, 9, 8, 11]) short_dataset = lang_pair_dataset([11, 9]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length dataset.ordered_indices() assert dataset.longest_dataset is long_dataset self.assertEqual(dict(dataset[0]), {"a": sample(2, 8), "b": sample(1, 9)}) # The item 2 of dataset 'a' is with item (2 % 2 = 0) of dataset 'b' self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 9)}) def test_round_robin_zip_datasets_filtered(self): long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) short_dataset = lang_pair_dataset([11, 20, 9, 1000]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length idx = dataset.ordered_indices() idx, _ = dataset.filter_indices_by_size(idx, {"a": 19, "b": 900}) self.assertEqual(list(idx), [0, 1, 2, 3, 4]) self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(1, 20)}) self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(0, 11)}) def test_round_robin_zip_datasets_filtered_with_tuple(self): long_dataset = lang_pair_dataset([10, 20, 8, 11, 1000, 7, 12]) short_dataset = lang_pair_dataset([11, 20, 9, 1000]) dataset = RoundRobinZipDatasets({"a": long_dataset, "b": short_dataset}) # Dataset is now sorted by sentence length idx = dataset.ordered_indices() idx, _ = dataset.filter_indices_by_size(idx, 19) self.assertEqual(list(idx), [0, 1, 2, 3, 4]) self.assertEqual(dict(dataset[0]), {"a": sample(5, 7), "b": sample(2, 9)}) self.assertEqual(dict(dataset[2]), {"a": sample(0, 10), "b": sample(2, 9)}) self.assertEqual(dict(dataset[4]), {"a": sample(6, 12), "b": sample(2, 9)})
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_dataset.py
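The RoundRobinZipDatasets tests above pair every index of the longest dataset with an item from each shorter dataset. A simplified sketch of that pairing rule (an illustration only; the fairseq class additionally sorts and filters indices by length):

long_ds = ["a0", "a1", "a2", "a3"]
short_ds = ["b0", "b1"]

def round_robin_item(i):
    # Shorter datasets are indexed modulo their own length, so every item of
    # the longest dataset is matched with something from each member.
    return {"a": long_ds[i], "b": short_ds[i % len(short_ds)]}

assert round_robin_item(2) == {"a": "a2", "b": "b0"}
assert round_robin_item(3) == {"a": "a3", "b": "b1"}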
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch import torch.nn as nn from fairseq.modules import ConvTBC class TestConvTBC(unittest.TestCase): def test_convtbc(self): # ksz, in_channels, out_channels conv_tbc = ConvTBC(4, 5, kernel_size=3, padding=1) # out_channels, in_channels, ksz conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1) conv_tbc.weight.data.copy_(conv1d.weight.data.transpose(0, 2)) conv_tbc.bias.data.copy_(conv1d.bias.data) input_tbc = torch.randn(7, 2, 4, requires_grad=True) input1d = input_tbc.data.transpose(0, 1).transpose(1, 2) input1d.requires_grad = True output_tbc = conv_tbc(input_tbc) output1d = conv1d(input1d) self.assertAlmostEqual( output_tbc.data.transpose(0, 1).transpose(1, 2), output1d.data ) grad_tbc = torch.randn(output_tbc.size()) grad1d = grad_tbc.transpose(0, 1).transpose(1, 2).contiguous() output_tbc.backward(grad_tbc) output1d.backward(grad1d) self.assertAlmostEqual( conv_tbc.weight.grad.data.transpose(0, 2), conv1d.weight.grad.data ) self.assertAlmostEqual(conv_tbc.bias.grad.data, conv1d.bias.grad.data) self.assertAlmostEqual( input_tbc.grad.data.transpose(0, 1).transpose(1, 2), input1d.grad.data ) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_convtbc.py
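The ConvTBC test above maps tensors between the (time, batch, channel) layout and nn.Conv1d's (batch, channel, time) layout via transposes. A standalone sketch of just that layout conversion:

import torch
import torch.nn as nn

conv1d = nn.Conv1d(4, 5, kernel_size=3, padding=1)
x_tbc = torch.randn(7, 2, 4)                      # (time, batch, channels)
x_bct = x_tbc.transpose(0, 1).transpose(1, 2)     # -> (batch, channels, time) for Conv1d
y_bct = conv1d(x_bct)
y_tbc = y_bct.transpose(1, 2).transpose(0, 1)     # back to (time, batch, channels)
assert y_tbc.shape == (7, 2, 5)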
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import OrderedDict import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.multi_corpus_dataset import MultiCorpusDataset from tests.test_train import mock_dict class TestMultiCorpusDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([i for i in range(1, 5000, 2)]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([i for i in range(0, 5000, 2)]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def _test_sample_helper( self, distribution, ): m = MultiCorpusDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), distribution=distribution, seed=0, sort_indices=True, ) m.set_epoch(1) indices = m.ordered_indices() count_sample_from_first_dataset = 0 items = set() for i in indices: item = m[i]["source"].item() if item % 2 == 1: count_sample_from_first_dataset += 1 items.add(item) sample_from_first_ds_percentage = ( 1.0 * count_sample_from_first_dataset / len(indices) ) self.assertLess( abs(sample_from_first_ds_percentage - distribution[0]), 0.01, ) self.assertEqual( len(items), int(min(len(self.dataset_1), len(indices) * distribution[0]) + min(len(self.dataset_1), len(indices) * distribution[1])) ) print(distribution) def test_multi_corpus_dataset(self): for distribution in [[0.5, 0.5], [0.1, 0.9], [0.9, 0.1], [0.0, 1.0]]: self._test_sample_helper(distribution=distribution)
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_multi_corpus_dataset.py
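The MultiCorpusDataset test above checks that the share of samples drawn from each corpus tracks the requested distribution. A toy sketch of that property using plain random draws (not the fairseq dataset class):

import random

random.seed(0)
distribution = [0.1, 0.9]
# 0 means "drew from the first corpus", 1 means "drew from the second"
draws = [0 if random.random() < distribution[0] else 1 for _ in range(10000)]
share_first = draws.count(0) / len(draws)
assert abs(share_first - distribution[0]) < 0.05   # empirical share near the target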
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import copy import logging import unittest import torch from fairseq.optim.fp16_optimizer import FP16Optimizer, MemoryEfficientFP16Optimizer from omegaconf import OmegaConf @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestGradientScaling(unittest.TestCase): def setUp(self): self.x = torch.tensor([2.0]).cuda().half() weight = 3.0 bias = 5.0 self.error = 1.0 self.target = torch.tensor([self.x * weight + bias + self.error]).cuda().half() self.loss_fn = torch.nn.L1Loss() self.model = torch.nn.Linear(1, 1) self.model.weight.data = torch.tensor([[weight]]) self.model.bias.data = torch.tensor([bias]) self.model.cuda().half() self.params = list(self.model.parameters()) self.cfg_dls = OmegaConf.create( { "optimization": { "lr": [0.1], }, "optimizer": { "_name": "adam", "lr": [0.1], "adam_betas": "(0.9, 0.999)", "adam_eps": 1e-8, "weight_decay": 0.0, }, "common": { "fp16_init_scale": 1, "fp16_scale_window": 1, "fp16_scale_tolerance": 1, "threshold_loss_scale": 1, "min_loss_scale": 1e-4, "tpu": False, }, } ) logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def run_iter(self, model, params, optimizer): optimizer.zero_grad() y = model(self.x) loss = self.loss_fn(y, self.target) optimizer.backward(loss) self.assertEqual(loss, torch.tensor(1.0, device="cuda:0", dtype=torch.float16)) grad_norm = optimizer.clip_grad_norm(0) self.assertAlmostEqual(grad_norm.item(), 2.2361, 4) optimizer.step() self.assertEqual( model.weight, torch.tensor( [[3.0996]], device="cuda:0", dtype=torch.float16, requires_grad=True ), ) self.assertEqual( model.bias, torch.tensor( [5.1016], device="cuda:0", dtype=torch.float16, requires_grad=True ), ) self.assertEqual(optimizer.scaler.loss_scale, 2.0) def test_mixed_precision(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = FP16Optimizer.build_optimizer(self.cfg_dls, params) self.run_iter(model, params, optimizer) self.assertTrue( all( torch.all( fp32_params.eq( torch.tensor( [3.1000, 5.1000], device="cuda:0", requires_grad=True ) ) ) for fp32_params in optimizer.fp32_params.values() ) ) def test_memory_efficient(self): model = copy.deepcopy(self.model) params = list(model.parameters()) optimizer = MemoryEfficientFP16Optimizer.build_optimizer(self.cfg_dls, params) self.run_iter(model, params, optimizer) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_fp16_optimizer.py
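The FP16Optimizer test above exercises dynamic loss scaling. A generic sketch of the scaling arithmetic (an assumption about the idea, not fairseq's exact schedule): the loss is multiplied by a scale before backward so small fp16 gradients stay representable, gradients are divided by the scale before the update, and the scale shrinks when an overflow is detected.

import torch

scale = 128.0
w = torch.tensor([3.0], requires_grad=True)
loss = (w * 2.0 - 11.0).abs().sum()        # |2w - 11|, so dloss/dw = -2 at w = 3
(scale * loss).backward()                  # backprop the scaled loss
unscaled_grad = w.grad / scale             # divide the scale back out before stepping
assert torch.allclose(unscaled_grad, torch.tensor([-2.0]))

def update_scale(scale, found_overflow):
    # crude dynamic adjustment: halve on overflow, otherwise allow growth
    return scale / 2.0 if found_overflow else scale * 2.0

assert update_scale(scale, True) == 64.0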
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import logging import unittest import torch from fairseq.optim.adam import FairseqAdam from fairseq.optim.fp16_optimizer import MemoryEfficientFP16Optimizer from omegaconf import OmegaConf @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestMemoryEfficientFP16(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_load_state_dict(self): # define simple FP16 model model = torch.nn.Linear(5, 5).cuda().half() params = list(model.parameters()) # initialize memory efficient FP16 optimizer # with pseudo DictConfigs optimizer = FairseqAdam( cfg=OmegaConf.create( vars( argparse.Namespace( adam_betas="(0.9, 0.999)", adam_eps=1e-8, weight_decay=0.0, lr=[0.00001], ) ) ), params=params, ) me_optimizer = MemoryEfficientFP16Optimizer( cfg=OmegaConf.create( { "common": vars( argparse.Namespace( fp16_init_scale=1, fp16_scale_window=1, fp16_scale_tolerance=1, threshold_loss_scale=1, min_loss_scale=1e-4, ) ) } ), params=params, optimizer=optimizer, ) # optimizer state is created in the first step loss = model(torch.rand(5).cuda().half()).sum() me_optimizer.backward(loss) me_optimizer.step() # reload state state = me_optimizer.state_dict() me_optimizer.load_state_dict(state) for k, v in me_optimizer.optimizer.state.items(): self.assertTrue(k.dtype == torch.float16) for v_i in v.values(): if torch.is_tensor(v_i): self.assertTrue(v_i.dtype == torch.float32) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_memory_efficient_fp16.py
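test_memory_efficient_fp16.py above asserts that parameters stay in fp16 while the optimizer's statistics are fp32. A hand-rolled sketch of that mixed-dtype bookkeeping (generic, not the fairseq classes):

import torch

param = torch.zeros(4, dtype=torch.float16)       # model weight stays fp16
exp_avg = torch.zeros(4, dtype=torch.float32)     # optimizer state kept in fp32
grad = torch.full((4,), 1e-4, dtype=torch.float16)

exp_avg.mul_(0.9).add_(grad.float(), alpha=0.1)   # accumulate statistics in fp32
param.add_((-0.01 * exp_avg).half())              # cast only when writing back

assert param.dtype == torch.float16 and exp_avg.dtype == torch.float32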
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import tests.utils as test_utils import torch from fairseq.data import TokenBlockDataset class TestTokenBlockDataset(unittest.TestCase): def _build_dataset(self, data, **kwargs): sizes = [len(x) for x in data] underlying_ds = test_utils.TestDataset(data) return TokenBlockDataset(underlying_ds, sizes, **kwargs) def test_eos_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [1]) self.assertEqual(ds[2].tolist(), [8, 7, 6, 1]) data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=None, pad=0, eos=1, break_mode="eos") self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1]) self.assertEqual(ds[2].tolist(), [1]) def test_block_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset(data, block_size=3, pad=0, eos=1, break_mode="none") self.assertEqual(ds[0].tolist(), [5, 4, 3]) self.assertEqual(ds[1].tolist(), [2, 1, 8]) self.assertEqual(ds[2].tolist(), [7, 6, 1]) self.assertEqual(ds[3].tolist(), [9, 1]) def test_complete_break_mode(self): data = [ torch.tensor([5, 4, 3, 2, 1], dtype=torch.long), torch.tensor([8, 7, 6, 1], dtype=torch.long), torch.tensor([9, 1], dtype=torch.long), ] ds = self._build_dataset( data, block_size=6, pad=0, eos=1, break_mode="complete" ) self.assertEqual(ds[0].tolist(), [5, 4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [8, 7, 6, 1, 9, 1]) data = [ torch.tensor([4, 3, 2, 1], dtype=torch.long), torch.tensor([5, 1], dtype=torch.long), torch.tensor([1], dtype=torch.long), torch.tensor([6, 1], dtype=torch.long), ] ds = self._build_dataset( data, block_size=3, pad=0, eos=1, break_mode="complete" ) self.assertEqual(ds[0].tolist(), [4, 3, 2, 1]) self.assertEqual(ds[1].tolist(), [5, 1, 1]) self.assertEqual(ds[2].tolist(), [6, 1]) def test_4billion_tokens(self): """Regression test for numpy type promotion issue https://github.com/numpy/numpy/issues/5745""" data = [torch.tensor(list(range(10000)), dtype=torch.long)] * 430000 ds = self._build_dataset( data, block_size=6, pad=0, eos=1, break_mode="complete" ) ds[-1] # __getitem__ works start, end = ds.slice_indices[-1] assert end > 4294967295 # data must be sufficiently large to overflow uint32 assert not isinstance( end + 1, float ) # this would also raise, since np.uint64(1) + 1 => 2.0 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_token_block_dataset.py
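The break_mode="none" case in test_token_block_dataset.py above slices the concatenated token stream into fixed-size blocks regardless of sentence boundaries. A simplified sketch reproducing those expected blocks (not TokenBlockDataset itself):

import itertools

def blocks_none(sentences, block_size):
    # Flatten all sentences into one stream, then cut fixed-size blocks;
    # the final block may be shorter.
    stream = list(itertools.chain.from_iterable(sentences))
    return [stream[i:i + block_size] for i in range(0, len(stream), block_size)]

data = [[5, 4, 3, 2, 1], [8, 7, 6, 1], [9, 1]]
assert blocks_none(data, 3) == [[5, 4, 3], [2, 1, 8], [7, 6, 1], [9, 1]]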
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import math import numpy as np import tests.utils as test_utils import torch from fairseq import search from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.sequence_generator import EnsembleModel, SequenceGenerator from fairseq.ngram_repeat_block import NGramRepeatBlock from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), n=1000) return dummy_dict def get_dummy_task_and_parser(): """ to build a fariseq model, we need some dummy parse and task. This function is used to create dummy task and parser to faciliate model/criterion test Note: we use FbSpeechRecognitionTask as the dummy task. You may want to use other task by providing another function """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser class TestJitSequenceGeneratorBase(unittest.TestCase): def setUp(self): self.task, self.parser = get_dummy_task_and_parser() eos = self.task.tgt_dict.eos() src_tokens = torch.randint(3, 50, (2, 10)).long() src_tokens = torch.cat((src_tokens, torch.LongTensor([[eos], [eos]])), -1) src_lengths = torch.LongTensor([2, 10]) self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } TransformerModel.add_args(self.parser) args = self.parser.parse_args([]) args.encoder_layers = 2 args.decoder_layers = 1 self.transformer_model = TransformerModel.build_model(args, self.task) def assertOutputEqual(self, hypo, pos_probs): pos_scores = torch.FloatTensor(pos_probs).log() self.assertTensorSizeEqual(hypo["positional_scores"], pos_scores) self.assertTensorSizeEqual(pos_scores.numel(), hypo["tokens"].numel()) def assertTensorSizeEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) def assertHypoEqual(self, h1, h2): "Check two hypos are equal" self.assertTensorEqual(h1["tokens"], h2["tokens"]) self.assertAlmostEqual(h1["positional_scores"], h2["positional_scores"]) self.assertLess(abs(h1["score"] - h2["score"]), 1e-6) self.assertAlmostEqual(h1["attention"], h2["attention"]) def _test_save_and_load(self, scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) JIT_MSG = "Targeting OSS scriptability for the 1.6 release" @unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG) 
class TestJitSequenceGenerator(TestJitSequenceGeneratorBase): def test_export_transformer(self): model = self.transformer_model torch.jit.script(model) def test_ensemble_sequence_generator(self): model = self.transformer_model generator = SequenceGenerator( [model], self.task.tgt_dict, beam_size=2, no_repeat_ngram_size=2, max_len_b=10, ) scripted_model = torch.jit.script(generator) self._test_save_and_load(scripted_model) def test_export_ensemble_model(self): model = self.transformer_model ensemble_models = EnsembleModel([model]) torch.jit.script(ensemble_models) class TestExportSearch(unittest.TestCase): def setUp(self): task, _ = get_dummy_task_and_parser() self.tgt_dict = task.tgt_dict self.min_top1_prob = 0.4 def test_export_diverse_bs(self): search_strategy = search.DiverseBeamSearch( self.tgt_dict, num_groups=2, diversity_strength=0.0 ) torch.jit.script(search_strategy) def test_export_sampling(self): low_sampling_topp = self.min_top1_prob / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=low_sampling_topp ) torch.jit.script(search_strategy) def test_export_diverse_siblings_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) torch.jit.script(search_strategy) class TestSequenceGeneratorBase(unittest.TestCase): def assertHypoTokens(self, hypo, tokens): self.assertTensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def assertHypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-4) def assertTensorEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertEqual(t1.ne(t2).long().sum(), 0) class TestSequenceGenerator(TestSequenceGeneratorBase): def setUp(self): ( self.tgt_dict, self.w1, self.w2, src_tokens, src_lengths, self.model, ) = test_utils.sequence_generator_setup() self.sample = { "net_input": {"src_tokens": src_tokens, "src_lengths": src_lengths} } def test_with_normalization(self): generator = SequenceGenerator([self.model], self.tgt_dict, beam_size=2) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6]) def test_without_normalization(self): # Sentence 1: unchanged from the normalized case # Sentence 2: beams swap order generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, normalize_scores=False ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0], normalized=False) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) 
self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], normalized=False) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], normalized=False) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], normalized=False) def test_with_lenpen_favoring_short_hypos(self): lenpen = 0.6 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) def test_with_lenpen_favoring_long_hypos(self): lenpen = 5.0 generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, len_penalty=lenpen ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w2, w1, w2, eos]) self.assertHypoScore(hypos[0][0], [0.1, 0.9, 0.9, 1.0], lenpen=lenpen) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 1.0], lenpen=lenpen) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, w1, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.4, 1.0], lenpen=lenpen) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.6], lenpen=lenpen) def test_maxlen(self): generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, max_len_b=2 ) hypos = generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w2, w2, eos]) self.assertHypoScore(hypos[0][1], [0.1, 0.1, 0.6]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.6]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w2, w2, eos]) self.assertHypoScore(hypos[1][1], [0.3, 0.9, 0.01]) def test_encoder_with_different_output_len(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task( args, self.tgt_dict, self.tgt_dict ) reshaping_model = test_utils.TestReshapingModel.build_model(args, task) generator = SequenceGenerator( [reshaping_model], self.tgt_dict, beam_size=2, max_len_b=2 ) hypos = generator.forward(self.sample) for sent in [0, 1]: for beam in [0, 1]: assert hypos[sent][beam]["attention"] is not None def test_generation_with_additional_input(self): args = self.model.encoder.args task = test_utils.TestTranslationTask.setup_task( args, self.tgt_dict, self.tgt_dict ) add_input_model = test_utils.TestAdditionalInputModel.build_model(args, task) generator = SequenceGenerator([add_input_model], self.tgt_dict, beam_size=2) sample = self.sample.copy() sample["net_input"]["fancy_other_input"] = sample["net_input"]["src_tokens"] hypos = 
generator.forward(self.sample) eos, w1, w2 = self.tgt_dict.eos(), self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 1.0]) @unittest.skipUnless(torch.cuda.is_available(), "") class TestRepeatNgramBlocking(TestSequenceGeneratorBase): @classmethod def setUpClass(cls): ( cls.tgt_dict, cls.w1, cls.w2, src_tokens, src_lengths, cls.model, ) = test_utils.sequence_generator_setup() return cls def test_finds_repetitive_tokens(self): bsz, vocab_size, beam_size, step = 2, 4, 1, 3 generated_tok = torch.tensor( [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda" ) lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda") desired_result = lprobs.new_tensor( [[0.0, 0.0, -math.inf, 0.0], [0.0, 0.0, 0.0, -math.inf]] ) cuda_ext_result, baseline_result = self._compare_cuda_ext_to_default_implem( bsz, beam_size, generated_tok, lprobs, step, 2 ) self.assertTensorEqual(cuda_ext_result, desired_result) self.assertTensorEqual(baseline_result, desired_result) @unittest.skipIf(torch.__version__ < "1.6.0", JIT_MSG) def test_jit_no_extension(self): bsz, vocab_size, beam_size, step = 2, 4, 1, 3 generated_tok = torch.tensor( [[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long, device="cuda" ) lprobs = torch.zeros((beam_size * bsz, vocab_size), device="cuda") blocker = NGramRepeatBlock(2, use_extension=False) base_result = blocker(generated_tok, lprobs.clone(), bsz, beam_size, step) scripted_blocker = torch.jit.script(blocker) jit_result = scripted_blocker( generated_tok, lprobs.clone(), bsz, beam_size, step ) self.assertTensorEqual(base_result, jit_result) def test_ngram_blocking_same_as_default_implem(self): """Test that cuda extension returns same things as default impl in many settings.""" vocab_size = 4 step = 6 for _ in range(2): block_param = np.random.choice([1, 2, 3, 4]) batch_size = np.random.randint(1, 8) beam_size = np.random.choice([1, 2, 4, 8]) lprobs = torch.zeros((beam_size * batch_size, vocab_size), device="cuda") generated_tok = torch.tensor( np.random.randint( 0, vocab_size, size=(batch_size * beam_size, step + 1) ), device="cuda", dtype=torch.long, ) self._compare_cuda_ext_to_default_implem( batch_size, beam_size, generated_tok, lprobs, step, block_param, ) def _compare_cuda_ext_to_default_implem( self, bsz, beam_size, generated_tok, lprobs, step, block_param ): """Assert that cuda extension and default implem return the same thing.""" blocker = NGramRepeatBlock(block_param) assert blocker.use_extension, "Extension not compiled" cuda_ext_result = blocker( generated_tok, lprobs.clone(), bsz, beam_size, step, ) blocker.use_extension = False baseline_result = blocker( generated_tok, lprobs.clone(), bsz, beam_size, step, ) self.assertTensorEqual(cuda_ext_result, baseline_result) blocker.use_extension = True return cuda_ext_result, baseline_result class TestDiverseBeamSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor( [ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ] ) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.9, 0.1], # beam 1 [0.0, unk, 0.9, 0.1], # beam 2 # sentence 2: [0.0, unk, 0.7, 0.3], [0.0, unk, 0.7, 0.3], ] 
), # step 1: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [0.0, unk, 0.6, 0.4], [0.0, unk, 0.6, 0.4], # sentence 2: [0.25, unk, 0.35, 0.4], [0.25, unk, 0.35, 0.4], ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 # sentence 1: [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], # sentence 2: [0.9, unk, 0.1, 0.0], [0.9, unk, 0.1, 0.0], ] ), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_diverse_beam_search(self): search_strategy = search.DiverseBeamSearch( self.tgt_dict, num_groups=2, diversity_strength=0.0 ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy, ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.6, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w2, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.4, 0.9]) class TestDiverseSiblingsSearch(TestDiverseBeamSearch): def assertHypoScore( self, hypo, pos_probs, sibling_rank, diversity_rate, normalized=True, lenpen=1.0 ): pos_scores = torch.FloatTensor(pos_probs).log() pos_scores.sub_(torch.Tensor(sibling_rank) * diversity_rate) self.assertAlmostEqual(hypo["positional_scores"], pos_scores) self.assertEqual(pos_scores.numel(), hypo["tokens"].numel()) score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen self.assertLess(abs(score - hypo["score"]), 1e-6) def test_diverse_beam_search(self): search_strategy = search.DiverseSiblingsSearch( self.tgt_dict, diversity_rate=0.5 ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [0.9, 0.6, 1.0], [0, 1, 1], 0.5) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w2, eos]) self.assertHypoScore(hypos[0][1], [0.9, 0.4, 1.0], [0, 2, 1], 0.5) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w2, eos]) self.assertHypoScore(hypos[1][0], [0.7, 0.4, 0.9], [0, 1, 1], 0.5) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [0.7, 0.35, 0.9], [0, 2, 1], 0.5) class TestPrefixBeamSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary vocab_size = 10 d = test_utils.dummy_dictionary(vocab_size=vocab_size) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 self.beam_size = 3 # construct prefix data self.tokens = torch.LongTensor( [ [self.w1, self.w2, self.eos], ] ) self.token_lengths = torch.LongTensor([2]) args = argparse.Namespace() unk = 0.0 args.beam_probs = [ # prefix step 0: torch.FloatTensor( [ # eos [0.0, unk] + [1.0 / vocab_size] * vocab_size # beam 1 ] * self.beam_size ), ] * vocab_size task = test_utils.TestTranslationTask.setup_task(args, d, d) 
self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_prefix_beam_search(self): search_strategy = search.BeamSearch(self.tgt_dict) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=self.beam_size, search_strategy=search_strategy, ) sample = { "net_input": { "src_tokens": self.tokens, "src_lengths": self.token_lengths, } } # make sure test sample doesn't break any assertion generator.forward(sample, prefix_tokens=self.tokens[:, :-1]) class TestTopPSamplingSearch(TestSequenceGeneratorBase): def setUp(self): # construct dummy dictionary d = test_utils.dummy_dictionary(vocab_size=2) self.assertEqual(d.pad(), 1) self.assertEqual(d.eos(), 2) self.assertEqual(d.unk(), 3) self.eos = d.eos() self.w1 = 4 self.w2 = 5 # construct source data self.src_tokens = torch.LongTensor( [ [self.w1, self.w2, self.eos], [self.w1, self.w2, self.eos], ] ) self.src_lengths = torch.LongTensor([2, 2]) args = argparse.Namespace() unk = 0.0 # The minimal probability of top 2 tokens. self.min_top2_prob = 0.75 # The minimal probability of the top 1 token. self.min_top1_prob = 0.4 w1_prob = self.min_top1_prob w2_prob = self.min_top2_prob - self.min_top1_prob eos_prob = 1 - self.min_top2_prob args.beam_probs = [ # step 0: torch.FloatTensor( [ # eos w1 w2 [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], [0.0, unk, 1.0, 0.0], ] ), # step 1: torch.FloatTensor( [ # eos w1 w2 [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], [eos_prob, unk, w1_prob, w2_prob], ] ), # step 2: torch.FloatTensor( [ # eos w1 w2 [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], [1.0, unk, 0.0, 0.0], ] ), ] task = test_utils.TestTranslationTask.setup_task(args, d, d) self.model = task.build_model(args) self.tgt_dict = task.target_dictionary def test_topp_sampling_search_low_prob(self): # Given a prob low enough to top-P sampling, we expect only the top # 1 token to be sampled, which always results in the same output. low_sampling_topp = self.min_top1_prob / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=low_sampling_topp ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1 = self.eos, self.w1 # sentence 1, beam 1 self.assertHypoTokens(hypos[0][0], [w1, w1, eos]) self.assertHypoScore(hypos[0][0], [1.0, 0.4, 1.0]) # sentence 1, beam 2 self.assertHypoTokens(hypos[0][1], [w1, w1, eos]) self.assertHypoScore(hypos[0][1], [1.0, 0.4, 1.0]) # sentence 2, beam 1 self.assertHypoTokens(hypos[1][0], [w1, w1, eos]) self.assertHypoScore(hypos[1][0], [1.0, 0.4, 1.0]) # sentence 2, beam 2 self.assertHypoTokens(hypos[1][1], [w1, w1, eos]) self.assertHypoScore(hypos[1][1], [1.0, 0.4, 1.0]) def test_topp_sampling_search_high_prob(self): # Given a prob high enough to top-P sampling, any of the top 2 # tokens could be sampled. This can cause different outputs. 
high_sampling_topp = (self.min_top1_prob + self.min_top2_prob) / 2.0 search_strategy = search.Sampling( self.tgt_dict, sampling_topp=high_sampling_topp ) generator = SequenceGenerator( [self.model], self.tgt_dict, beam_size=2, search_strategy=search_strategy ) sample = { "net_input": { "src_tokens": self.src_tokens, "src_lengths": self.src_lengths, } } hypos = generator.forward(sample) eos, w1, w2 = self.eos, self.w1, self.w2 # sentence 1, beam 1 self.assertTrue( self.hypoTokens(hypos[0][0], [w1, w1, eos]) or self.hypoTokens(hypos[0][0], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[0][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][0], [1.0, 0.35, 1.0]) ) # sentence 1, beam 2 self.assertTrue( self.hypoTokens(hypos[0][1], [w1, w1, eos]) or self.hypoTokens(hypos[0][1], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[0][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[0][1], [1.0, 0.35, 1.0]) ) # sentence 2, beam 1 self.assertTrue( self.hypoTokens(hypos[1][0], [w1, w1, eos]) or self.hypoTokens(hypos[1][0], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[1][0], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][0], [1.0, 0.35, 1.0]) ) # sentence 2, beam 2 self.assertTrue( self.hypoTokens(hypos[1][1], [w1, w1, eos]) or self.hypoTokens(hypos[1][1], [w1, w2, eos]) ) self.assertTrue( self.hypoScore(hypos[1][1], [1.0, 0.4, 1.0]) or self.hypoScore(hypos[1][1], [1.0, 0.35, 1.0]) ) def hypoTokens(self, hypo, tokens): return self.tensorEqual(hypo["tokens"], torch.LongTensor(tokens)) def hypoScore(self, hypo, pos_probs, normalized=True, lenpen=1.0): pos_scores = torch.FloatTensor(pos_probs).log() if not self.almostEqual(hypo["positional_scores"], pos_scores): return False if pos_scores.numel() != hypo["tokens"].numel(): return False score = pos_scores.sum() if normalized: score /= pos_scores.numel() ** lenpen return abs(score - hypo["score"]) < 1e-6 def almostEqual(self, t1, t2): return t1.size() == t2.size() and (t1 - t2).abs().max() < 1e-4 def tensorEqual(self, t1, t2): return t1.size() == t2.size() and t1.ne(t2).long().sum() == 0 if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_sequence_generator.py
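The repeat-blocking tests above compare a CUDA extension against a default implementation. The following is an illustrative sketch only, assuming the usual "no repeat n-gram" semantics; it is not the fairseq NGramRepeatBlock API.

# Illustrative sketch only: a pure-PyTorch version of "no repeat n-gram" masking,
# not the fairseq NGramRepeatBlock extension or its baseline implementation.
import math

import torch


def block_repeated_ngrams(
    tokens: torch.Tensor, lprobs: torch.Tensor, step: int, ngram_size: int
) -> torch.Tensor:
    """Return a copy of `lprobs` with -inf for tokens that would repeat an n-gram.

    tokens: (batch * beam, step + 1) tokens generated so far
    lprobs: (batch * beam, vocab) log-probabilities for the next position
    """
    lprobs = lprobs.clone()
    if step + 2 - ngram_size < 0:
        return lprobs  # not enough context to form an n-gram yet
    for row in range(tokens.size(0)):
        seq = tokens[row, : step + 1].tolist()
        prefix = tuple(seq[step + 2 - ngram_size : step + 1])  # last n-1 tokens
        for i in range(len(seq) - ngram_size + 1):
            if tuple(seq[i : i + ngram_size - 1]) == prefix:
                lprobs[row, seq[i + ngram_size - 1]] = -math.inf
    return lprobs


# Mirrors the bigram case from test_finds_repetitive_tokens (on CPU).
tokens = torch.tensor([[2, 2, 2, 2], [3, 3, 3, 3]], dtype=torch.long)
masked = block_repeated_ngrams(tokens, torch.zeros(2, 4), step=3, ngram_size=2)
assert masked[0, 2].item() == -math.inf
assert masked[1, 3].item() == -math.inf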
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import shutil import tempfile import unittest from typing import Optional class TestFileChunker(unittest.TestCase): _tmpdir: Optional[str] = None _tmpfile: Optional[str] = None _line_content = "Hello, World\n" _num_bytes = None _num_lines = 200 _num_splits = 20 @classmethod def setUpClass(cls) -> None: cls._num_bytes = len(cls._line_content.encode("utf-8")) cls._tmpdir = tempfile.mkdtemp() with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f: cls._tmpfile = f.name for _i in range(cls._num_lines): f.write(cls._line_content) f.flush() @classmethod def tearDownClass(cls) -> None: # Cleanup temp working dir. if cls._tmpdir is not None: shutil.rmtree(cls._tmpdir) # type: ignore def test_find_offsets(self): from fairseq.file_chunker_utils import find_offsets offsets = find_offsets(self._tmpfile, self._num_splits) self.assertEqual(len(offsets), self._num_splits + 1) (zero, *real_offsets, last) = offsets self.assertEqual(zero, 0) for i, o in enumerate(real_offsets): self.assertEqual( o, self._num_bytes + ((i + 1) * self._num_bytes * self._num_lines / self._num_splits), ) self.assertEqual(last, self._num_bytes * self._num_lines) def test_readchunks(self): from fairseq.file_chunker_utils import Chunker, find_offsets offsets = find_offsets(self._tmpfile, self._num_splits) for start, end in zip(offsets, offsets[1:]): with Chunker(self._tmpfile, start, end) as lines: all_lines = list(lines) num_lines = self._num_lines / self._num_splits self.assertAlmostEqual( len(all_lines), num_lines, delta=1 ) # because we split on the bites, we might end up with one more/less line in a chunk self.assertListEqual( all_lines, [self._line_content for _ in range(len(all_lines))] )
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_file_chunker_utils.py
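The chunker tests rely on splitting a file into byte ranges that begin on line boundaries. Below is a standard-library sketch of that idea, assuming the same general contract as find_offsets/Chunker; it is not the fairseq implementation and may place boundaries slightly differently.

# Illustrative sketch only (standard library, not fairseq.file_chunker_utils).
import os
from typing import List


def find_line_offsets(path: str, num_chunks: int) -> List[int]:
    """Return num_chunks + 1 byte offsets splitting `path` on line boundaries."""
    size = os.path.getsize(path)
    offsets = [0]
    with open(path, "rb") as f:
        for i in range(1, num_chunks):
            f.seek(i * size // num_chunks)
            f.readline()  # skip forward to the next full line
            offsets.append(min(f.tell(), size))
    offsets.append(size)
    return offsets


def read_chunk(path: str, start: int, end: int) -> List[str]:
    """Read the lines that start in the byte range [start, end)."""
    lines = []
    with open(path, "rb") as f:
        f.seek(start)
        while f.tell() < end:
            line = f.readline()
            if not line:
                break
            lines.append(line.decode("utf-8"))
    return lines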
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from argparse import ArgumentParser from dataclasses import dataclass, field from fairseq.dataclass import FairseqDataclass from fairseq.dataclass.utils import gen_parser_from_dataclass @dataclass class A(FairseqDataclass): data: str = field(default="test", metadata={"help": "the data input"}) num_layers: int = field(default=200, metadata={"help": "more layers is better?"}) @dataclass class B(FairseqDataclass): bar: A = field(default=A()) foo: int = field(default=0, metadata={"help": "not a bar"}) @dataclass class D(FairseqDataclass): arch: A = field(default=A()) foo: int = field(default=0, metadata={"help": "not a bar"}) @dataclass class C(FairseqDataclass): data: str = field(default="test", metadata={"help": "root level data input"}) encoder: D = field(default=D()) decoder: A = field(default=A()) lr: int = field(default=0, metadata={"help": "learning rate"}) class TestDataclassUtils(unittest.TestCase): def test_argparse_convert_basic(self): parser = ArgumentParser() gen_parser_from_dataclass(parser, A(), True) args = parser.parse_args(["--num-layers", '10', "the/data/path"]) self.assertEqual(args.num_layers, 10) self.assertEqual(args.data, "the/data/path") def test_argparse_recursive(self): parser = ArgumentParser() gen_parser_from_dataclass(parser, B(), True) args = parser.parse_args(["--num-layers", "10", "--foo", "10", "the/data/path"]) self.assertEqual(args.num_layers, 10) self.assertEqual(args.foo, 10) self.assertEqual(args.data, "the/data/path") def test_argparse_recursive_prefixing(self): self.maxDiff = None parser = ArgumentParser() gen_parser_from_dataclass(parser, C(), True, "") args = parser.parse_args( [ "--encoder-arch-data", "ENCODER_ARCH_DATA", "--encoder-arch-num-layers", "10", "--encoder-foo", "10", "--decoder-data", "DECODER_DATA", "--decoder-num-layers", "10", "--lr", "10", "the/data/path", ] ) self.assertEqual(args.encoder_arch_data, "ENCODER_ARCH_DATA") self.assertEqual(args.encoder_arch_num_layers, 10) self.assertEqual(args.encoder_foo, 10) self.assertEqual(args.decoder_data, "DECODER_DATA") self.assertEqual(args.decoder_num_layers, 10) self.assertEqual(args.lr, 10) self.assertEqual(args.data, "the/data/path") if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_dataclass_utils.py
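A minimal sketch of the flag-generation idea these tests exercise: one --kebab-case option per dataclass field, with help text taken from metadata. This is an assumed, simplified stand-in for gen_parser_from_dataclass, which additionally recurses into nested configs and adds prefixes such as --encoder-arch-num-layers.

# Illustrative sketch only; not the fairseq gen_parser_from_dataclass helper.
from argparse import ArgumentParser
from dataclasses import dataclass, field, fields


@dataclass
class ExampleConfig:
    data: str = field(default="test", metadata={"help": "the data input"})
    num_layers: int = field(default=200, metadata={"help": "more layers is better?"})


def add_dataclass_args(parser: ArgumentParser, cfg) -> None:
    """Register one --kebab-case flag per field, taking help text from metadata."""
    for f in fields(cfg):
        parser.add_argument(
            "--" + f.name.replace("_", "-"),
            type=f.type,
            default=f.default,
            help=f.metadata.get("help", ""),
        )


parser = ArgumentParser()
add_dataclass_args(parser, ExampleConfig())
args = parser.parse_args(["--num-layers", "10"])
assert args.num_layers == 10
assert args.data == "test"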
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.concat_dataset import ConcatDataset from tests.test_train import mock_dict class TestConcatDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([1]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([2]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def test_concat_dataset_basics(self): d = ConcatDataset([self.dataset_1, self.dataset_2]) assert len(d) == 2 assert d[0]["source"][0] == 1 assert d[1]["source"][0] == 2 d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[1, 2]) assert len(d) == 3 assert d[0]["source"][0] == 1 assert d[1]["source"][0] == 2 assert d[2]["source"][0] == 2 d = ConcatDataset([self.dataset_1, self.dataset_2], sample_ratios=[2, 1]) assert len(d) == 3 assert d[0]["source"][0] == 1 assert d[1]["source"][0] == 1 assert d[2]["source"][0] == 2
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_concat_dataset.py
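The sample_ratios behaviour checked above amounts to upsampling each dataset's index range before concatenation. The helper names below are hypothetical; this is only a sketch of the index arithmetic, not the fairseq ConcatDataset.

# Illustrative sketch only: map a global index to (dataset, local index) when
# each dataset is repeated `sample_ratio` times.
import bisect
from typing import List, Sequence, Tuple


def build_concat_boundaries(sizes: Sequence[int], sample_ratios: Sequence[int]) -> List[int]:
    """Cumulative end offsets for datasets upsampled by integer ratios."""
    boundaries, total = [], 0
    for n, r in zip(sizes, sample_ratios):
        total += n * r
        boundaries.append(total)
    return boundaries


def locate(boundaries: List[int], sizes: Sequence[int], idx: int) -> Tuple[int, int]:
    """Return (dataset_id, local_index); upsampled datasets wrap around."""
    ds = bisect.bisect_right(boundaries, idx)
    offset = boundaries[ds - 1] if ds > 0 else 0
    return ds, (idx - offset) % sizes[ds]


# Mirrors sample_ratios=[1, 2] with two single-item datasets from the test above.
boundaries = build_concat_boundaries(sizes=[1, 1], sample_ratios=[1, 2])
assert locate(boundaries, [1, 1], 0) == (0, 0)
assert locate(boundaries, [1, 1], 1) == (1, 0)
assert locate(boundaries, [1, 1], 2) == (1, 0)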
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import tempfile import unittest from pathlib import Path from typing import Any, Dict, Sequence import fairseq.data.indexed_dataset as indexed_dataset import fairseq.options import fairseq.tasks.online_backtranslation as obt import torch from tests import utils def mk_sample(tokens: Sequence[int], batch_size: int = 2) -> Dict[str, Any]: batch = torch.stack([torch.tensor(tokens, dtype=torch.long)] * batch_size) sample = { "net_input": { "src_tokens": batch, "prev_output_tokens": batch, "src_lengths": torch.tensor([len(tokens)] * batch_size, dtype=torch.long), }, "target": batch[:, 1:], } return sample def mk_dataset(num_samples: int, max_len: int, output: Path): output.parent.mkdir(exist_ok=True) idx = indexed_dataset.IndexedDatasetBuilder(str(output)) data = torch.randint(5, 100, (num_samples, max_len)) lengths = torch.randint(3, max_len, (num_samples,)) for d, l in zip(data, lengths): d[0] = 0 idx.add_item(d[:l]) idx.finalize(output.with_suffix(".idx")) assert output.exists() assert output.with_suffix(".idx").exists() class OnlineBacktranslationTest(unittest.TestCase): tmp_dir = Path(tempfile.mkdtemp(suffix="OnlineBacktranslationTest")) @classmethod def obt_task( cls, languages: Sequence[str], data: Path = None, language_mapping: str = None ): dict_path = cls.tmp_dir / "dict.txt" if not dict_path.exists(): dictionary = utils.dummy_dictionary(100) dictionary.save(str(dict_path)) if data is not None: (data / "dict.txt").write_text(dict_path.read_text()) else: data = cls.tmp_dir assert len(languages) >= 2 kwargs = { "arch": "transformer", # --max-sentences=1 for better predictability of batches "max_sentences": 1, # Use characteristics dimensions "encoder_layers": 3, "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "encoder_attention_heads": 4, "decoder_layers": 3, "decoder_embed_dim": 12, "decoder_output_dim": 12, "decoder_ffn_embed_dim": 14, "decoder_attention_heads": 4, # Disable dropout so we have comparable tests. 
"dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, } args = fairseq.options.get_args( data, task="online_backtranslation", mono_langs=",".join(languages), valid_lang_pairs=f"{languages[0]}-{languages[1]}", tokens_per_sample=256, language_mapping=language_mapping, **kwargs, ) task = obt.OnlineBackTranslationTask.setup_task(args) # we need to build the model to have the correct dictionary model = task.build_model(task.args) return task, model def tmp_path(self, test_case: str) -> Path: return Path(tempfile.mkdtemp(test_case, dir=self.tmp_dir)) def test_lang_tokens(self): task, model = self.obt_task(["en", "ro", "zh"]) assert obt._lang_token("en") in task.dictionary assert obt._lang_token("ro") in task.dictionary assert obt._lang_token("zh") in task.dictionary en_bos = obt._lang_token_index(task.common_dict, "en") assert "en" == task.common_dict[en_bos].strip("_") zh_bos = obt._lang_token_index(task.common_dict, "zh") assert "zh" == task.common_dict[zh_bos].strip("_") zh_sample = mk_sample([zh_bos, 16, 14, 12, 10]) # we expect to receive the bos token for translation assert task.get_bos_token_from_sample(zh_sample) == en_bos def test_backtranslate_sample(self): task, model = self.obt_task(["en", "ro", "zh"]) en_bos = obt._lang_token_index(task.common_dict, "en") zh_bos = obt._lang_token_index(task.common_dict, "zh") sample = mk_sample([zh_bos, 16, 14, 12, 10]) task.backtranslate_sample(sample, "zh", "en") target_zh = list(sample["target"][0]) assert target_zh == [16, 14, 12, 10] # original zh sentence generated_en = sample["net_input"]["src_tokens"][0] assert generated_en[0] == en_bos def test_train_dataset(self): data = self.tmp_path("test_train_dataset") mk_dataset(20, 10, data / "en" / "train.bin") mk_dataset(10, 10, data / "zh" / "train.bin") task, model = self.obt_task(["en", "zh"], data) task.load_dataset("train") en_bos = obt._lang_token_index(task.common_dict, "en") zh_bos = obt._lang_token_index(task.common_dict, "zh") train = task.datasets["train"] train.ordered_indices() train.prefetch([0, 19]) sample_0 = train[0] sample_19 = train[19] self.assertEqual( set(sample_0.keys()), {"en-BT", "en-DENOISE", "zh-BT", "zh-DENOISE"} ) for sample in (sample_0, sample_19): self.assertEqual(sample["en-BT"]["source"][0], en_bos) # bt target isn't ready to look at. self.assertEqual(sample["en-DENOISE"]["source"][0], en_bos) # TODO What could we check on the target side ? for i in range(10): # Zh dataset is shorter, and is wrapped around En dataset. train.prefetch([i, i + 10]) self.assertEqual( list(train[i]["zh-DENOISE"]["source"]), list(train[i + 10]["zh-DENOISE"]["source"]), ) self.assertEqual(train[i]["zh-DENOISE"]["source"][0].item(), zh_bos) # Sorted by increasing len self.assertLess( len(sample_0["en-BT"]["source"]), len(sample_19["en-BT"]["source"]) ) def test_valid_dataset(self): data = self.tmp_path("test_valid_dataset") mk_dataset(10, 21, data / "valid.en-zh.en.bin") mk_dataset(10, 21, data / "valid.en-zh.zh.bin") task, model = self.obt_task(["en", "zh"], data) valid = task.load_dataset("valid") en_bos = obt._lang_token_index(task.common_dict, "en") assert valid is not None valid.prefetch(range(10)) sample_0 = valid[0] sample_9 = valid[9] self.assertEqual(sample_0["id"], 0) self.assertEqual(sample_9["id"], 9) self.assertEqual(sample_0["source"][0], en_bos) self.assertEqual(sample_9["source"][0], en_bos) # TODO: could we test the target side ? 
def assertFnMatch(self, fn, values): for x, y in values.items(): fn_x = fn(x) self.assertEqual(fn_x, y, f"Fn has wrong value: fn({x}) = {fn_x} != {y}") def test_piecewise_linear_fn(self): self.assertFnMatch( obt.PiecewiseLinearFn.from_string("1.0"), {0: 1, 100: 1, 500: 1, 1000: 1} ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:1,1000:0"), {0: 1, 500: 0.5, 1000: 0, 2000: 0}, ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:0,1000:1"), {0: 0, 500: 0.5, 1000: 1, 2000: 1}, ) self.assertFnMatch( obt.PiecewiseLinearFn.from_string("0:0,1000:1,2000:0"), {0: 0, 500: 0.5, 1000: 1, 1500: 0.5, 2000: 0, 3000: 0}, )
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_online_backtranslation.py
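The PiecewiseLinearFn assertions above describe linear interpolation between (step, value) knots, held constant outside the knot range. A standalone re-implementation of that assumed behaviour (not the fairseq class) follows.

# Illustrative sketch only; semantics inferred from the assertions above.
from typing import List, Tuple


def parse_piecewise(spec: str) -> List[Tuple[int, float]]:
    """Parse '0:1,1000:0' into sorted (step, value) knots; a bare number is constant."""
    if ":" not in spec:
        return [(0, float(spec))]
    knots = [(int(s), float(v)) for s, v in (p.split(":") for p in spec.split(","))]
    return sorted(knots)


def piecewise_value(knots: List[Tuple[int, float]], step: int) -> float:
    """Linearly interpolate between knots; clamp to the end values outside the range."""
    if step <= knots[0][0]:
        return knots[0][1]
    if step >= knots[-1][0]:
        return knots[-1][1]
    for (x0, y0), (x1, y1) in zip(knots, knots[1:]):
        if x0 <= step <= x1:
            return y0 + (y1 - y0) * (step - x0) / (x1 - x0)
    raise AssertionError("unreachable")


knots = parse_piecewise("0:1,1000:0")
assert piecewise_value(knots, 500) == 0.5
assert piecewise_value(knots, 2000) == 0.0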
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import MonolingualDataset from fairseq.tasks.language_modeling import LanguageModelingTask, LanguageModelingConfig from tests import utils as test_utils class TestLMContextWindow(unittest.TestCase): def test_eval_dataloader(self): dictionary = test_utils.dummy_dictionary(10) assert len(dictionary) == 14 # 4 extra special symbols assert dictionary.pad() == 1 dataset = test_utils.TestDataset([ torch.tensor([4, 5, 6, 7], dtype=torch.long), torch.tensor([8, 9, 10, 11], dtype=torch.long), torch.tensor([12, 13], dtype=torch.long), ]) dataset = MonolingualDataset(dataset, sizes=[4, 4, 2], src_vocab=dictionary) config = LanguageModelingConfig(tokens_per_sample=4) task = LanguageModelingTask(config, dictionary) eval_dataloader = task.eval_lm_dataloader( dataset=dataset, batch_size=1, context_window=2, ) batch = next(eval_dataloader) assert batch["net_input"]["src_tokens"][0].tolist() == [4, 5, 6, 7, 1, 1] assert batch["target"][0].tolist() == [4, 5, 6, 7, 1, 1] batch = next(eval_dataloader) assert batch["net_input"]["src_tokens"][0].tolist() == [6, 7, 8, 9, 10, 11] assert batch["target"][0].tolist() == [1, 1, 8, 9, 10, 11] batch = next(eval_dataloader) assert batch["net_input"]["src_tokens"][0].tolist() == [10, 11, 12, 13] assert batch["target"][0].tolist() == [1, 1, 12, 13] if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_lm_context_window.py
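The context-window batches asserted above prepend tokens from the previous sample to the input while padding the matching target positions, so the extra context never contributes to the loss. A small standalone sketch of that step (hypothetical helper, not the fairseq dataloader):

# Illustrative sketch only; pad index 1 matches the dictionary used in the test.
import torch

PAD = 1


def add_context_window(prev_tokens: torch.Tensor, cur_tokens: torch.Tensor, window: int):
    """Prepend up to `window` tokens of left context and pad the matching targets."""
    context = prev_tokens[-window:] if window > 0 else prev_tokens[:0]
    src = torch.cat([context, cur_tokens])
    tgt = torch.cat(
        [torch.full((context.numel(),), PAD, dtype=torch.long), cur_tokens]
    )
    return src, tgt


prev = torch.tensor([4, 5, 6, 7])
cur = torch.tensor([8, 9, 10, 11])
src, tgt = add_context_window(prev, cur, window=2)
assert src.tolist() == [6, 7, 8, 9, 10, 11]
assert tgt.tolist() == [1, 1, 8, 9, 10, 11]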
# This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os import shutil import sys import tempfile import unittest from typing import Optional from unittest.mock import MagicMock class TestFileIO(unittest.TestCase): _tmpdir: Optional[str] = None _tmpfile: Optional[str] = None _tmpfile_contents = "Hello, World" @classmethod def setUpClass(cls) -> None: cls._tmpdir = tempfile.mkdtemp() with open(os.path.join(cls._tmpdir, "test.txt"), "w") as f: cls._tmpfile = f.name f.write(cls._tmpfile_contents) f.flush() @classmethod def tearDownClass(cls) -> None: # Cleanup temp working dir. if cls._tmpdir is not None: shutil.rmtree(cls._tmpdir) # type: ignore def test_file_io(self): from fairseq.file_io import PathManager with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f: s = f.read() self.assertEqual(s, self._tmpfile_contents) def test_file_io_oss(self): # Mock iopath to simulate oss environment. sys.modules["iopath"] = MagicMock() from fairseq.file_io import PathManager with PathManager.open(os.path.join(self._tmpdir, "test.txt"), "r") as f: s = f.read() self.assertEqual(s, self._tmpfile_contents) def test_file_io_async(self): # ioPath `PathManager` is initialized after the first `opena` call. try: from fairseq.file_io import IOPathManager, PathManager _asyncfile = os.path.join(self._tmpdir, "async.txt") f = PathManager.opena(_asyncfile, "wb") f.close() finally: self.assertTrue(PathManager.async_close())
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_file_io.py
import argparse import unittest from typing import Any, Dict, Sequence import torch from fairseq.models import transformer from tests.test_roberta import FakeTask def mk_sample(tok: Sequence[int] = None, batch_size: int = 2) -> Dict[str, Any]: if not tok: tok = [10, 11, 12, 13, 14, 15, 2] batch = torch.stack([torch.tensor(tok, dtype=torch.long)] * batch_size) sample = { "net_input": { "src_tokens": batch, "prev_output_tokens": batch, "src_lengths": torch.tensor( [len(tok)] * batch_size, dtype=torch.long, device=batch.device ), }, "target": batch[:, 1:], } return sample def mk_transformer(**extra_args: Any): overrides = { # Use characteristics dimensions "encoder_embed_dim": 12, "encoder_ffn_embed_dim": 14, "decoder_embed_dim": 12, "decoder_ffn_embed_dim": 14, # Disable dropout so we have comparable tests. "dropout": 0, "attention_dropout": 0, "activation_dropout": 0, "encoder_layerdrop": 0, } overrides.update(extra_args) # Overrides the defaults from the parser args = argparse.Namespace(**overrides) transformer.tiny_architecture(args) torch.manual_seed(0) task = FakeTask(args) return transformer.TransformerModel.build_model(args, task) class TransformerTestCase(unittest.TestCase): def test_forward_backward(self): model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=12) sample = mk_sample() o, _ = model.forward(**sample["net_input"]) loss = o.sum() loss.backward() def test_different_encoder_decoder_embed_dim(self): model = mk_transformer(encoder_embed_dim=12, decoder_embed_dim=16) sample = mk_sample() o, _ = model.forward(**sample["net_input"]) loss = o.sum() loss.backward()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_transformer.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import json import os import tempfile import unittest from io import StringIO import torch from . import test_binaries class TestReproducibility(unittest.TestCase): def _test_reproducibility( self, name, extra_flags=None, delta=0.0001, resume_checkpoint="checkpoint1.pt", max_epoch=3, ): def get_last_log_stats_containing_string(log_records, search_string): for log_record in logs.records[::-1]: if isinstance(log_record.msg, str) and search_string in log_record.msg: return json.loads(log_record.msg) if extra_flags is None: extra_flags = [] with tempfile.TemporaryDirectory(name) as data_dir: with self.assertLogs() as logs: test_binaries.create_dummy_data(data_dir) test_binaries.preprocess_translation_data(data_dir) # train epochs 1 and 2 together with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_log = get_last_log_stats_containing_string(logs.records, "train_loss") valid_log = get_last_log_stats_containing_string(logs.records, "valid_loss") # train epoch 2, resuming from previous checkpoint 1 os.rename( os.path.join(data_dir, resume_checkpoint), os.path.join(data_dir, "checkpoint_last.pt"), ) with self.assertLogs() as logs: test_binaries.train_translation_model( data_dir, "fconv_iwslt_de_en", [ "--dropout", "0.0", "--log-format", "json", "--log-interval", "1", "--max-epoch", str(max_epoch), ] + extra_flags, ) train_res_log = get_last_log_stats_containing_string( logs.records, "train_loss" ) valid_res_log = get_last_log_stats_containing_string( logs.records, "valid_loss" ) for k in ["train_loss", "train_ppl", "train_num_updates", "train_gnorm"]: self.assertAlmostEqual( float(train_log[k]), float(train_res_log[k]), delta=delta ) for k in [ "valid_loss", "valid_ppl", "valid_num_updates", "valid_best_loss", ]: self.assertAlmostEqual( float(valid_log[k]), float(valid_res_log[k]), delta=delta ) def test_reproducibility(self): self._test_reproducibility("test_reproducibility") @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_fp16(self): self._test_reproducibility( "test_reproducibility_fp16", [ "--fp16", "--fp16-init-scale", "4096", ], delta=0.011, ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_memory_efficient_fp16(self): self._test_reproducibility( "test_reproducibility_memory_efficient_fp16", [ "--memory-efficient-fp16", "--fp16-init-scale", "4096", ], ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_reproducibility_amp(self): self._test_reproducibility( "test_reproducibility_amp", [ "--amp", "--fp16-init-scale", "4096", ], delta=0.011, ) def test_mid_epoch_reproducibility(self): self._test_reproducibility( "test_mid_epoch_reproducibility", ["--save-interval-updates", "3"], resume_checkpoint="checkpoint_1_3.pt", max_epoch=1, ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_reproducibility.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from fairseq.data import Dictionary from fairseq.modules import CharacterTokenEmbedder class TestCharacterTokenEmbedder(unittest.TestCase): def test_character_token_embedder(self): vocab = Dictionary() vocab.add_symbol("hello") vocab.add_symbol("there") embedder = CharacterTokenEmbedder( vocab, [(2, 16), (4, 32), (8, 64), (16, 2)], 64, 5, 2 ) test_sents = [["hello", "unk", "there"], ["there"], ["hello", "there"]] max_len = max(len(s) for s in test_sents) input = torch.LongTensor(len(test_sents), max_len + 2).fill_(vocab.pad()) for i in range(len(test_sents)): input[i][0] = vocab.eos() for j in range(len(test_sents[i])): input[i][j + 1] = vocab.index(test_sents[i][j]) input[i][j + 2] = vocab.eos() embs = embedder(input) assert embs.size() == (len(test_sents), max_len + 2, 5) self.assertAlmostEqual(embs[0][0], embs[1][0]) self.assertAlmostEqual(embs[0][0], embs[0][-1]) self.assertAlmostEqual(embs[0][1], embs[2][1]) self.assertAlmostEqual(embs[0][3], embs[1][1]) embs.sum().backward() assert embedder.char_embeddings.weight.grad is not None def assertAlmostEqual(self, t1, t2): self.assertEqual(t1.size(), t2.size(), "size mismatch") self.assertLess((t1 - t2).abs().max(), 1e-6) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_character_token_embedder.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import collections import os import shutil import tempfile import unittest import numpy as np import torch from scripts.average_checkpoints import average_checkpoints from torch import nn class ModelWithSharedParameter(nn.Module): def __init__(self): super(ModelWithSharedParameter, self).__init__() self.embedding = nn.Embedding(1000, 200) self.FC1 = nn.Linear(200, 200) self.FC2 = nn.Linear(200, 200) # tie weight in FC2 to FC1 self.FC2.weight = nn.Parameter(self.FC1.weight) self.FC2.bias = nn.Parameter(self.FC1.bias) self.relu = nn.ReLU() def forward(self, input): return self.FC2(self.ReLU(self.FC1(input))) + self.FC1(input) class TestAverageCheckpoints(unittest.TestCase): def test_average_checkpoints(self): params_0 = collections.OrderedDict( [ ("a", torch.DoubleTensor([100.0])), ("b", torch.FloatTensor([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])), ("c", torch.IntTensor([7, 8, 9])), ] ) params_1 = collections.OrderedDict( [ ("a", torch.DoubleTensor([1.0])), ("b", torch.FloatTensor([[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]])), ("c", torch.IntTensor([2, 2, 2])), ] ) params_avg = collections.OrderedDict( [ ("a", torch.DoubleTensor([50.5])), ("b", torch.FloatTensor([[1.0, 1.5, 2.0], [2.5, 3.0, 3.5]])), # We expect truncation for integer division ("c", torch.IntTensor([4, 5, 5])), ] ) fd_0, path_0 = tempfile.mkstemp() fd_1, path_1 = tempfile.mkstemp() torch.save(collections.OrderedDict([("model", params_0)]), path_0) torch.save(collections.OrderedDict([("model", params_1)]), path_1) output = average_checkpoints([path_0, path_1])["model"] os.close(fd_0) os.remove(path_0) os.close(fd_1) os.remove(path_1) for (k_expected, v_expected), (k_out, v_out) in zip( params_avg.items(), output.items() ): self.assertEqual( k_expected, k_out, "Key mismatch - expected {} but found {}. " "(Expected list of keys: {} vs actual list of keys: {})".format( k_expected, k_out, params_avg.keys(), output.keys() ), ) np.testing.assert_allclose( v_expected.numpy(), v_out.numpy(), err_msg="Tensor value mismatch for key {}".format(k_expected), ) def test_average_checkpoints_with_shared_parameters(self): def _construct_model_with_shared_parameters(path, value): m = ModelWithSharedParameter() nn.init.constant_(m.FC1.weight, value) torch.save({"model": m.state_dict()}, path) return m tmpdir = tempfile.mkdtemp() paths = [] path = os.path.join(tmpdir, "m1.pt") m1 = _construct_model_with_shared_parameters(path, 1.0) paths.append(path) path = os.path.join(tmpdir, "m2.pt") m2 = _construct_model_with_shared_parameters(path, 2.0) paths.append(path) path = os.path.join(tmpdir, "m3.pt") m3 = _construct_model_with_shared_parameters(path, 3.0) paths.append(path) new_model = average_checkpoints(paths) self.assertTrue( torch.equal( new_model["model"]["embedding.weight"], (m1.embedding.weight + m2.embedding.weight + m3.embedding.weight) / 3.0, ) ) self.assertTrue( torch.equal( new_model["model"]["FC1.weight"], (m1.FC1.weight + m2.FC1.weight + m3.FC1.weight) / 3.0, ) ) self.assertTrue( torch.equal( new_model["model"]["FC2.weight"], (m1.FC2.weight + m2.FC2.weight + m3.FC2.weight) / 3.0, ) ) shutil.rmtree(tmpdir) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_average_checkpoints.py
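The averaging behaviour being tested reduces to an element-wise mean over state dicts, with integer tensors truncated by integer division. A standalone sketch of that computation (not the scripts/average_checkpoints.py implementation):

# Illustrative sketch only: average state dicts with identical keys.
import collections

import torch


def average_state_dicts(state_dicts):
    n = len(state_dicts)
    avg = collections.OrderedDict()
    for key in state_dicts[0]:
        summed = state_dicts[0][key].clone()
        for sd in state_dicts[1:]:
            summed = summed + sd[key]
        # Integer tensors truncate, as the expected values in the test assume.
        avg[key] = summed / n if summed.is_floating_point() else summed // n
    return avg


out = average_state_dicts(
    [
        {"a": torch.DoubleTensor([100.0]), "c": torch.IntTensor([7, 8, 9])},
        {"a": torch.DoubleTensor([1.0]), "c": torch.IntTensor([2, 2, 2])},
    ]
)
assert torch.allclose(out["a"], torch.DoubleTensor([50.5]))
assert out["c"].tolist() == [4, 5, 5]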
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import tempfile import unittest import torch from fairseq.data.dictionary import Dictionary from fairseq.models.transformer import TransformerModel from fairseq.modules import multihead_attention, sinusoidal_positional_embedding from fairseq.tasks.fairseq_task import LegacyFairseqTask DEFAULT_TEST_VOCAB_SIZE = 100 class DummyTask(LegacyFairseqTask): def __init__(self, args): super().__init__(args) self.dictionary = get_dummy_dictionary() if getattr(self.args, "ctc", False): self.dictionary.add_symbol("<ctc_blank>") self.src_dict = self.dictionary self.tgt_dict = self.dictionary @property def source_dictionary(self): return self.src_dict @property def target_dictionary(self): return self.dictionary def get_dummy_dictionary(vocab_size=DEFAULT_TEST_VOCAB_SIZE): dummy_dict = Dictionary() # add dummy symbol to satisfy vocab size for id, _ in enumerate(range(vocab_size)): dummy_dict.add_symbol("{}".format(id), 1000) return dummy_dict def get_dummy_task_and_parser(): """ Return a dummy task and argument parser, which can be used to create a model/criterion. """ parser = argparse.ArgumentParser( description="test_dummy_s2s_task", argument_default=argparse.SUPPRESS ) DummyTask.add_args(parser) args = parser.parse_args([]) task = DummyTask.setup_task(args) return task, parser def _test_save_and_load(scripted_module): with tempfile.NamedTemporaryFile() as f: scripted_module.save(f.name) torch.jit.load(f.name) class TestExportModels(unittest.TestCase): def test_export_multihead_attention(self): module = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) scripted = torch.jit.script(module) _test_save_and_load(scripted) def test_incremental_state_multihead_attention(self): module1 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module1 = torch.jit.script(module1) module2 = multihead_attention.MultiheadAttention(embed_dim=8, num_heads=2) module2 = torch.jit.script(module2) state = {} state = module1.set_incremental_state(state, "key", {"a": torch.tensor([1])}) state = module2.set_incremental_state(state, "key", {"a": torch.tensor([2])}) v1 = module1.get_incremental_state(state, "key")["a"] v2 = module2.get_incremental_state(state, "key")["a"] self.assertEqual(v1, 1) self.assertEqual(v2, 2) def test_positional_embedding(self): module = sinusoidal_positional_embedding.SinusoidalPositionalEmbedding( embedding_dim=8, padding_idx=1 ) scripted = torch.jit.script(module) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) @unittest.skipIf( torch.__version__ < "1.6.0", "Targeting OSS scriptability for the 1.6 release" ) def test_export_transformer_no_token_pos_emb(self): task, parser = get_dummy_task_and_parser() TransformerModel.add_args(parser) args = parser.parse_args([]) args.no_token_positional_embeddings = True model = TransformerModel.build_model(args, task) scripted = torch.jit.script(model) _test_save_and_load(scripted) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_export.py
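The export tests all boil down to a TorchScript round trip: script the module, save it, reload it, and get the same outputs. A minimal standalone version of that round trip on a plain nn.Sequential (not a fairseq model):

# Illustrative sketch only: the script/save/load pattern used by _test_save_and_load.
import tempfile

import torch
from torch import nn

module = nn.Sequential(nn.Linear(8, 8), nn.ReLU(), nn.Linear(8, 2))
scripted = torch.jit.script(module)

with tempfile.NamedTemporaryFile() as f:
    scripted.save(f.name)
    reloaded = torch.jit.load(f.name)

x = torch.rand(4, 8)
assert torch.allclose(scripted(x), reloaded(x))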
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch import torch.nn as nn from fairseq.modules.checkpoint_activations import checkpoint_wrapper from torch.utils.checkpoint import checkpoint class Model(nn.Module): def __init__( self, use_pytorch_checkpoint=False, use_fairseq_checkpoint=False, **kwargs ): super().__init__() torch.manual_seed(0) self.use_pytorch_checkpoint = use_pytorch_checkpoint self.ffn = nn.Sequential( nn.Linear(32, 128), # add a Dropout layer to test RNG save/restore nn.Dropout(p=0.5), nn.Linear(128, 32), ) if use_fairseq_checkpoint: self.ffn = checkpoint_wrapper(self.ffn, **kwargs) self.out = nn.Linear(32, 1) def forward(self, x): if self.use_pytorch_checkpoint: x = checkpoint(self.ffn, x) else: x = self.ffn(x) return self.out(x) class TestActivationCheckpointing(unittest.TestCase): def _test_checkpoint_wrapper(self, device, log_memory_usage=False): def get_loss_and_gnorm(model): torch.manual_seed(1) input = torch.rand(2, 16, 32).requires_grad_(True).to(device) model.zero_grad() loss = model(input).sum() loss.backward() gnorm = torch.norm( torch.stack([torch.norm(p.grad.detach()) for p in model.parameters()]) ) return {"loss": loss, "gnorm": gnorm} model = Model().to(device) no_cpt = get_loss_and_gnorm(model) model = Model(use_pytorch_checkpoint=True).to(device) pyt_cpt = get_loss_and_gnorm(model) torch.testing.assert_allclose(no_cpt["loss"], pyt_cpt["loss"]) torch.testing.assert_allclose(no_cpt["gnorm"], pyt_cpt["gnorm"]) model = Model(use_fairseq_checkpoint=True).to(device) fairseq_cpt = get_loss_and_gnorm(model) torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt["loss"]) torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt["gnorm"]) model = Model(use_fairseq_checkpoint=True, offload_to_cpu=True).to(device) fairseq_cpt_offload = get_loss_and_gnorm(model) torch.testing.assert_allclose(no_cpt["loss"], fairseq_cpt_offload["loss"]) torch.testing.assert_allclose(no_cpt["gnorm"], fairseq_cpt_offload["gnorm"]) def test_checkpoint_wrapper_cpu(self): self._test_checkpoint_wrapper(device=torch.device("cpu")) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_checkpoint_wrapper_cuda(self): self._test_checkpoint_wrapper(device=torch.device("cuda")) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_activation_checkpointing.py
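The property these tests verify is that activation checkpointing (recomputing activations in the backward pass instead of storing them) leaves both the loss and the gradients unchanged. A minimal standalone demonstration with torch.utils.checkpoint, not using the fairseq checkpoint_wrapper:

# Illustrative sketch only: checkpointed vs. regular backward give identical results
# for a deterministic module.
import torch
import torch.nn as nn
from torch.utils.checkpoint import checkpoint

torch.manual_seed(0)
ffn = nn.Sequential(nn.Linear(32, 128), nn.ReLU(), nn.Linear(128, 32))
x = torch.rand(2, 16, 32, requires_grad=True)

loss_ref = ffn(x).sum()
loss_ref.backward()
grad_ref = x.grad.clone()

x.grad = None
loss_ckpt = checkpoint(ffn, x).sum()  # activations inside ffn are recomputed in backward
loss_ckpt.backward()

assert torch.allclose(loss_ref, loss_ckpt)
assert torch.allclose(grad_ref, x.grad)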
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from collections import OrderedDict import numpy as np import torch from fairseq.data import LanguagePairDataset, TokenBlockDataset from fairseq.data.multi_corpus_sampled_dataset import MultiCorpusSampledDataset from tests.test_train import mock_dict class TestMultiCorpusSampledDataset(unittest.TestCase): def setUp(self): d = mock_dict() tokens_1 = torch.LongTensor([1]).view(1, -1) tokens_ds1 = TokenBlockDataset( tokens_1, sizes=[tokens_1.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_1 = LanguagePairDataset( tokens_ds1, tokens_ds1.sizes, d, shuffle=False ) tokens_2 = torch.LongTensor([2]).view(1, -1) tokens_ds2 = TokenBlockDataset( tokens_2, sizes=[tokens_2.size(-1)], block_size=1, pad=0, eos=1, include_targets=False, ) self.dataset_2 = LanguagePairDataset( tokens_ds2, tokens_ds2.sizes, d, shuffle=False ) def _test_sample_helper( self, expected_sample_from_first_ds_percentage, num_samples=1000, sampling_func=None, ): # To make sure test is not flaky np.random.seed(0) if sampling_func is None: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), ) else: m = MultiCorpusSampledDataset( OrderedDict({0: self.dataset_1, 1: self.dataset_2}), sampling_func=sampling_func, ) m.ordered_indices() count_sample_from_first_dataset = 0 for _ in range(num_samples): if m.collater([m[0], m[1]])["net_input"]["src_tokens"][0] == 1: count_sample_from_first_dataset += 1 sample_from_first_ds_percentage = ( 1.0 * count_sample_from_first_dataset / num_samples ) self.assertLess( abs( sample_from_first_ds_percentage - expected_sample_from_first_ds_percentage ), 0.01, ) def test_multi_corpus_sampled_dataset_uniform_sample(self): self._test_sample_helper(expected_sample_from_first_ds_percentage=0.5) def test_multi_corpus_sampled_dataset_weighted_sample(self): def naive_weighted_sample(weights): def f(l): v = np.random.random() agg = 0 for i, weight in enumerate(weights): agg += weight if agg > v: return i return f self._test_sample_helper( expected_sample_from_first_ds_percentage=0.9, sampling_func=naive_weighted_sample(weights=[0.9, 0.1]), )
EXA-1-master
exa/models/unilm-master/edgelm/tests/test_multi_corpus_sampled_dataset.py
EXA-1-master
exa/models/unilm-master/edgelm/tests/gpu/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest from copy import deepcopy from dataclasses import dataclass from typing import Optional import torch from fairseq.models.ema import EMA class DummyModule(torch.nn.Module): def __init__(self) -> None: """LightningModule for testing purposes Args: epoch_min_loss_override (int, optional): Pass in an epoch that will be set to the minimum validation loss for testing purposes (zero based). If None this is ignored. Defaults to None. """ super().__init__() self.layer = torch.nn.Linear(in_features=32, out_features=2) self.another_layer = torch.nn.Linear(in_features=2, out_features=2) def forward(self, x: torch.Tensor) -> torch.Tensor: x = self.layer(x) return self.another_layer(x) @dataclass class EMAConfig(object): ema_decay: float = 0.99 ema_start_update: int = 0 ema_fp32: bool = False ema_seed_model: Optional[str] = None @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestEMAGPU(unittest.TestCase): def assertTorchAllClose(self, x, y, atol=1e-8, rtol=1e-5, msg=None): diff = x.float() - y.float() diff_norm = torch.norm(diff) other_norm = torch.norm(y.float()) if msg is None: msg = "|input - other| > {} + {} * |other|".format( atol, rtol ) self.assertLessEqual( diff_norm, atol + rtol * other_norm, msg=msg, ) def test_ema(self): model = DummyModule().cuda() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig() ema = EMA(model, config) # set decay ema._set_decay(config.ema_decay) self.assertEqual(ema.get_decay(), config.ema_decay) # get model self.assertEqual(ema.get_model(), ema.model) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # EMA step x = torch.randn(32).cuda() y = model(x) loss = y.sum() loss.backward() optimizer.step() ema.step(model) ema_state_dict = ema.get_model().state_dict() for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema_state_dict[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) # Load EMA into model model2 = DummyModule().cuda() ema.reverse(model2) for key, param in model2.state_dict().items(): ema_param = ema_state_dict[key] self.assertTrue( torch.allclose(ema_param, param) ) def test_ema_fp32(self): model = DummyModule().cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=True) ema = EMA(model, config) x = torch.randn(32).cuda() y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue self.assertIn(key, ema.fp32_params) # EMA update is done in fp32, and hence the EMA param must be # closer to the EMA update done in fp32 than in fp16. 
self.assertLessEqual( torch.norm( ema_param.float() - (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float() ), torch.norm( ema_param.float() - (config.ema_decay * prev_param + (1 - config.ema_decay) * param).float() ), ) self.assertTorchAllClose( ema_param, (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half(), ) def test_ema_fp16(self): model = DummyModule().cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=0.01) state = deepcopy(model.state_dict()) config = EMAConfig(ema_fp32=False) ema = EMA(model, config) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) x = torch.randn(32).cuda() y = model(x.half()) loss = y.sum() loss.backward() optimizer.step() ema.step(model) for key, param in model.state_dict().items(): prev_param = state[key] ema_param = ema.get_model().state_dict()[key] if "version" in key: # Do not decay a model.version pytorch param continue # EMA update is done in fp16, and hence the EMA param must be # closer to the EMA update done in fp16 than in fp32. self.assertLessEqual( torch.norm( ema_param.float() - (config.ema_decay * prev_param + (1 - config.ema_decay) * param).float() ), torch.norm( ema_param.float() - (config.ema_decay * prev_param.float() + (1 - config.ema_decay) * param.float()).half().float() ), ) self.assertTorchAllClose( ema_param, config.ema_decay * prev_param + (1 - config.ema_decay) * param, ) # Since fp32 params is not used, it should be of size 0 self.assertEqual(len(ema.fp32_params), 0) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/gpu/test_ema_gpu.py
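The EMA assertions above check the update rule ema = decay * ema + (1 - decay) * param applied per parameter. A standalone sketch of such an update (hypothetical helper on CPU, not the fairseq EMA class with its fp32/fp16 handling):

# Illustrative sketch only: in-place exponential moving average of model parameters.
import copy

import torch
import torch.nn as nn


@torch.no_grad()
def ema_update(ema_model: nn.Module, model: nn.Module, decay: float) -> None:
    ema_state = ema_model.state_dict()
    for name, param in model.state_dict().items():
        if not param.is_floating_point():
            ema_state[name].copy_(param)  # non-float entries are copied, not decayed
            continue
        ema_state[name].mul_(decay).add_(param, alpha=1.0 - decay)


model = nn.Linear(4, 2)
ema_model = copy.deepcopy(model)
model.weight.data.add_(1.0)  # stand-in for an optimizer step changing the weights
ema_update(ema_model, model, decay=0.99)
expected = 0.99 * (model.weight.data - 1.0) + 0.01 * model.weight.data
assert torch.allclose(ema_model.weight.data, expected)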
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import contextlib import logging import json import os import tempfile import unittest from io import StringIO import torch from fairseq import options from fairseq_cli import train from tests.utils import ( create_dummy_data, generate_main, preprocess_lm_data, preprocess_translation_data, train_translation_model, ) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestTranslationGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_fp16_multigpu(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fp16") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--fp16", "--log-file", log], world_size=min(torch.cuda.device_count(), 2), ) generate_main(data_dir) assert os.path.exists(log) @staticmethod def parse_logs(logfile): logs = [] for ln in open(logfile, "r").readlines(): try: logs.append(json.loads(ln)) except json.JSONDecodeError: continue return logs def test_resume_training_fsdp(self): self._test_resume_training(["--ddp-backend", "fully_sharded"]) def test_resume_training_fsdp_sharded_state(self): self._test_resume_training(["--ddp-backend", "fully_sharded", "--use-sharded-state"]) def test_resume_training_noc10d(self): self._test_resume_training([]) def _test_resume_training(self, extra_clargs, arch="fconv_iwslt_de_en"): flags = [ "--fp16", "--log-format", "json", "--max-update", "10", "--save-interval-updates", "2", "--log-interval", "1", ] + extra_clargs world_size = min(torch.cuda.device_count(), 2) with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fp16") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, arch, flags + ["--log-file", log], world_size=world_size, ) log2 = os.path.join(data_dir, "resume.log") restore_file = os.path.join(data_dir, "checkpoint_1_2.pt") train_translation_model( data_dir, arch, flags + ["--log-file", log2, "--restore-file", restore_file], world_size=world_size, ) l1 = self.parse_logs(log) l2 = self.parse_logs(log2) assert int(l2[0]["num_updates"]) == 3, f"{l1}\n\n {l2}" for k in [ "train_loss", "train_num_updates", "train_ppl", "train_gnorm", ]: from_scratch, resumed = l1[-1][k], l2[-1][k] assert ( from_scratch == resumed ), f"difference at {k} {from_scratch} != {resumed}" def test_memory_efficient_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_memory_efficient_fp16") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--memory-efficient-fp16"] ) generate_main(data_dir) def test_transformer_fp16(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "64", "--decoder-embed-dim", "64", "--fp16", ], run_validation=True, ) generate_main(data_dir) @unittest.skipIf(not 
torch.cuda.is_available(), "test requires a GPU") def test_amp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_amp") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model(data_dir, "fconv_iwslt_de_en", ["--amp"]) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_transformer_amp(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_transformer") as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir) train_translation_model( data_dir, "transformer_iwslt_de_en", [ "--encoder-layers", "2", "--decoder-layers", "2", "--encoder-embed-dim", "64", "--decoder-embed-dim", "64", "--amp", ], run_validation=True, ) generate_main(data_dir) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") def test_levenshtein_transformer(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory( "test_levenshtein_transformer" ) as data_dir: create_dummy_data(data_dir) preprocess_translation_data(data_dir, ["--joined-dictionary"]) train_translation_model( data_dir, "levenshtein_transformer", [ "--apply-bert-init", "--early-exit", "6,6,6", "--criterion", "nat_loss", ], task="translation_lev", ) gen_config = [ "--task", "translation_lev", "--iter-decode-max-iter", "9", "--iter-decode-eos-penalty", "0", "--print-step", ] # non-ensemble generation generate_main(data_dir, gen_config) # ensemble generation generate_main( data_dir, gen_config, path=os.pathsep.join( [ os.path.join(data_dir, "checkpoint_last.pt"), os.path.join(data_dir, "checkpoint_last.pt"), ] ), ) def test_fsdp_checkpoint_generate(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fsdp_sharded") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) world_size = min(torch.cuda.device_count(), 2) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--log-file", log, "--ddp-backend", "fully_sharded"], world_size=world_size, ) generate_main(data_dir) assert os.path.exists(log) def test_fsdp_sharded_checkpoint_generate(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_fsdp_sharded") as data_dir: log = os.path.join(data_dir, "train.log") create_dummy_data(data_dir) preprocess_translation_data(data_dir) world_size = min(torch.cuda.device_count(), 2) train_translation_model( data_dir, "fconv_iwslt_de_en", ["--log-file", log, "--ddp-backend", "fully_sharded", "--use-sharded-state"], world_size=world_size, ) generate_main(data_dir, ["--checkpoint-shard-count", str(world_size)]) assert os.path.exists(log) def _quantize_language_model(data_dir, arch, extra_flags=None, run_validation=False): train_parser = options.get_training_parser() train_args = options.parse_args_and_arch( train_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-epoch", "1", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", ] + (extra_flags or []), ) train.main(train_args) # try scalar quantization scalar_quant_train_parser = options.get_training_parser() scalar_quant_train_args = options.parse_args_and_arch( scalar_quant_train_parser, [ 
"--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "500", "--tokens-per-sample", "500", "--save-dir", data_dir, "--max-update", "3", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", "--quant-noise-scalar", "0.5", ] + (extra_flags or []), ) train.main(scalar_quant_train_args) # try iterative PQ quantization quantize_parser = options.get_training_parser() quantize_args = options.parse_args_and_arch( quantize_parser, [ "--task", "language_modeling", data_dir, "--arch", arch, "--optimizer", "adam", "--lr", "0.0001", "--criterion", "adaptive_loss", "--adaptive-softmax-cutoff", "5,10,15", "--max-tokens", "50", "--tokens-per-sample", "50", "--max-update", "6", "--no-progress-bar", "--distributed-world-size", "1", "--ddp-backend", "no_c10d", "--num-workers", "0", "--restore-file", os.path.join(data_dir, "checkpoint_last.pt"), "--reset-optimizer", "--quantization-config-path", os.path.join( os.path.dirname(__file__), "transformer_quantization_config.yaml" ), ] + (extra_flags or []), ) train.main(quantize_args) @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestQuantization(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_quantization(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_quantization") as data_dir: create_dummy_data(data_dir) preprocess_lm_data(data_dir) # tests both scalar and iterative PQ quantization _quantize_language_model(data_dir, "transformer_lm") @unittest.skipIf(not torch.cuda.is_available(), "test requires a GPU") class TestOptimizersGPU(unittest.TestCase): def setUp(self): logging.disable(logging.CRITICAL) def tearDown(self): logging.disable(logging.NOTSET) def test_flat_grads(self): with contextlib.redirect_stdout(StringIO()): with tempfile.TemporaryDirectory("test_flat_grads") as data_dir: # Use just a bit of data and tiny model to keep this test runtime reasonable create_dummy_data(data_dir, num_examples=10, maxlen=5) preprocess_translation_data(data_dir) with self.assertRaises(RuntimeError): # adafactor isn't compatible with flat grads, which # are used by default with --fp16 train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", ], ) # but it should pass once we set --fp16-no-flatten-grads train_translation_model( data_dir, "lstm", [ "--required-batch-size-multiple", "1", "--encoder-layers", "1", "--encoder-hidden-size", "32", "--decoder-layers", "1", "--optimizer", "adafactor", "--fp16", "--fp16-no-flatten-grads", ], ) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/gpu/test_binaries_gpu.py
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import unittest import torch from torch import nn from fairseq.distributed import ModuleProxyWrapper from .utils import objects_are_equal class MockDDPWrapper(nn.Module): """A simple wrapper with an interface similar to DistributedDataParallel.""" def __init__(self, module): super().__init__() self.module = module def forward(self, x): return self.module(x) class Model(nn.Module): def __init__(self): super().__init__() self.linear = nn.Linear(5, 10) self.xyz = "hello" def forward(self, x): return self.linear(x) def get_xyz(self): return self.xyz class TestModuleProxyWrapper(unittest.TestCase): def _get_module(self): module = Model() wrapped_module = MockDDPWrapper(module) wrapped_module = ModuleProxyWrapper(wrapped_module) return wrapped_module, module def test_getattr_forwarding(self): wrapped_module, module = self._get_module() assert module.xyz == "hello" assert module.get_xyz() == "hello" assert wrapped_module.xyz == "hello" wrapped_module.xyz = "world" assert wrapped_module.xyz == "world" assert module.get_xyz() == "hello" def test_state_dict(self): wrapped_module, module = self._get_module() assert objects_are_equal(wrapped_module.state_dict(), module.state_dict()) def test_load_state_dict(self): wrapped_module, module = self._get_module() wrapped_module.load_state_dict(module.state_dict()) input = torch.rand(4, 5) torch.testing.assert_allclose(wrapped_module(input), module(input)) def test_forward(self): wrapped_module, module = self._get_module() input = torch.rand(4, 5) torch.testing.assert_allclose(wrapped_module(input), module(input)) if __name__ == "__main__": unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/distributed/test_module_proxy_wrapper.py
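The attribute forwarding tested above can be reproduced with a small __getattr__ override that falls back to the wrapped module. This is a generic, simplified sketch, not the fairseq ModuleProxyWrapper (which also unwraps a DDP-style .module attribute and forwards state_dict/load_state_dict).

# Illustrative sketch only: forward attribute lookups that miss on the wrapper
# to the wrapped module.
import torch
from torch import nn


class ProxyWrapper(nn.Module):
    def __init__(self, module: nn.Module):
        super().__init__()
        self.module = module

    def forward(self, *args, **kwargs):
        return self.module(*args, **kwargs)

    def __getattr__(self, name):
        try:
            # parameters, buffers and submodules registered on the wrapper itself
            return super().__getattr__(name)
        except AttributeError:
            return getattr(self.module, name)


inner = nn.Linear(5, 10)
inner.xyz = "hello"
wrapped = ProxyWrapper(inner)
assert wrapped.xyz == "hello"
assert wrapped(torch.rand(4, 5)).shape == (4, 10)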
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import functools
import random
import unittest
from multiprocessing import Manager

import torch
import torch.nn as nn
from fairseq import optim
from fairseq.distributed import utils as distributed_utils
from omegaconf import OmegaConf


class Model(nn.Module):
    def __init__(self, input_size, output_size):
        super(Model, self).__init__()
        self.fc = nn.Linear(input_size, output_size)

    def forward(self, input):
        output = self.fc(input)
        return output


def setup_model_loss_criterion(cfg, args, rank, is_cuda):
    """
    setup model, criterion and optimizer based on input args
    """
    args.distributed_rank = rank
    cfg.distributed_training.distributed_rank = args.distributed_rank
    if cfg.distributed_training.distributed_world_size > 1:
        distributed_utils.distributed_init(cfg)
    torch.manual_seed(1)
    model = Model(args.input_size, args.nb_classes)
    loss_fn = nn.CrossEntropyLoss()
    if is_cuda:
        model = model.cuda()
        loss_fn = loss_fn.cuda()

    optimizer = optim.sgd.SGD(args, model.parameters())
    optimizer = optim.FairseqBMUF(
        cfg=cfg.bmuf,
        optimizer=optimizer,
    )

    return model, loss_fn, optimizer


def train_step(input, target, model, loss_fn, optimizer, **unused):
    """Do forward, backward and parameter update."""
    model.train()
    output = model(input)
    loss = loss_fn(output, target)
    optimizer.backward(loss)
    optimizer.step()


def single_gpu_training(cfg, args, rank, iterations, shared_results):

    is_cuda = torch.cuda.is_available()
    if is_cuda:
        torch.cuda.set_device(rank)

    model, loss_fn, optimizer = setup_model_loss_criterion(cfg, args, rank, is_cuda)

    for _ in range(iterations):
        input = torch.randn(1, args.input_size)
        target = torch.empty(args.batch_size, dtype=torch.long).random_(args.nb_classes)
        if is_cuda:
            input = input.cuda()
            target = target.cuda()
        train_step(input, target, model, loss_fn, optimizer)

    results = []
    for param in model.parameters():
        if len(results) == 0:
            results = param.flatten().cpu().data
        else:
            results = torch.cat((results, param.flatten().cpu().data), 0)

    shared_results[rank] = results


def setup_args():
    args = argparse.Namespace()
    args.global_sync_iter = 20
    args.block_momentum = 0.875
    args.block_lr = 0.5
    args.input_size = 5
    args.nb_classes = 2
    args.batch_size = 1
    args.lr = [1e-3]
    args.momentum = 0
    args.weight_decay = 0
    args.warmup_iterations = 0
    args.use_nbm = True
    args.average_sync = True
    args.global_sync_iter = 1
    args.model_parallel_size = 1
    args.distributed_backend = "gloo"

    args.distributed_world_size = 2
    port = random.randint(10000, 20000)
    args.distributed_init_method = "tcp://localhost:{port}".format(port=port)
    args.distributed_init_host = "localhost"
    args.distributed_port = port + 1
    args.local_world_size = args.distributed_world_size

    cfg = OmegaConf.create()
    cfg.optimization = OmegaConf.create()
    cfg.common = OmegaConf.create()
    cfg.distributed_training = OmegaConf.create()
    cfg.dataset = OmegaConf.create()
    cfg.bmuf = OmegaConf.create()
    cfg.optimizer = OmegaConf.create()

    cfg.bmuf.global_sync_iter = args.global_sync_iter
    cfg.bmuf.block_momentum = args.block_momentum
    cfg.bmuf.block_lr = args.block_lr
    cfg.dataset.batch_size = args.batch_size
    cfg.optimization.lr = args.lr
    cfg.optimizer.momentum = args.momentum
    cfg.optimizer.weight_decay = args.weight_decay
    cfg.bmuf.warmup_iterations = args.warmup_iterations
    cfg.bmuf.use_nbm = args.use_nbm
    cfg.bmuf.average_sync = args.average_sync
    cfg.common.model_parallel_size = args.model_parallel_size
    cfg.distributed_training.distributed_backend = args.distributed_backend
    cfg.distributed_training.distributed_world_size = args.distributed_world_size
    cfg.bmuf.distributed_world_size = args.distributed_world_size
    cfg.distributed_training.distributed_init_method = args.distributed_init_method
    cfg.distributed_training.distributed_port = args.distributed_port

    return cfg, args


@unittest.skipIf(torch.cuda.device_count() < 2, "test requires 2 GPUs")
class TestBMUF(unittest.TestCase):
    def bmuf_process(self, cfg, args, iterations):
        results = Manager().dict()
        torch.multiprocessing.spawn(
            fn=functools.partial(single_gpu_training, cfg, args),
            args=(iterations, results),
            nprocs=args.distributed_world_size,
            join=True,
        )
        return results

    def test_bmuf_sync(self):
        # Train model for 1 iteration and do bmuf sync without doing warmup
        cfg, args = setup_args()
        iterations = 1
        results = self.bmuf_process(cfg, args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync(self):
        # Train model for 20 iterations and do warmup sync without doing bmuf sync
        cfg, args = setup_args()
        args.warmup_iterations = 20
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_warmup_sync_bmuf_sync(self):
        # Train model for 25 iterations and do warmup sync after 20 iterations
        # and bmuf sync after 25 iterations
        cfg, args = setup_args()
        args.warmup_iterations = 20
        args.global_sync_iter = 5
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        cfg.bmuf.global_sync_iter = args.global_sync_iter
        iterations = 25
        results = self.bmuf_process(cfg, args, iterations)
        # Make sure params in both machines are same
        assert len(results) == 2
        self.assertAlmostEqual(results[0], results[1])

    def test_single_gpu_bmuf(self):
        # Train model for 5 iterations and use GPU 1
        cfg, args = setup_args()
        args.distributed_world_size = 1
        args.warmup_iterations = 5
        cfg.distributed_training.distributed_world_size = args.distributed_world_size
        cfg.bmuf.distributed_world_size = args.distributed_world_size
        cfg.bmuf.warmup_iterations = args.warmup_iterations
        iterations = 20
        results = self.bmuf_process(cfg, args, iterations)
        assert len(results) == 1

    def assertAlmostEqual(self, t1, t2):
        self.assertEqual(t1.size(), t2.size(), "size mismatch")
        self.assertLess((t1 - t2).abs().max(), 1e-4)


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/distributed/test_bmuf.py
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import functools
import sys
import unittest

import torch

from fairseq.distributed import utils as dist_utils

from .utils import objects_are_equal, spawn_and_init


class DistributedTest(unittest.TestCase):
    def setUp(self):
        if not torch.cuda.is_available():
            raise unittest.SkipTest("CUDA not available, skipping test")
        if sys.platform == "win32":
            raise unittest.SkipTest("NCCL doesn't support Windows, skipping test")
        if torch.cuda.device_count() < 2:
            raise unittest.SkipTest("distributed tests require 2+ GPUs, skipping")


class TestBroadcastObject(DistributedTest):
    def test_str(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object, "hello world"
            ),
            world_size=2,
        )

    def test_tensor(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object,
                torch.rand(5),
            ),
            world_size=2,
        )

    def test_complex(self):
        spawn_and_init(
            functools.partial(
                TestBroadcastObject._test_broadcast_object,
                {
                    "a": "1",
                    "b": [2, torch.rand(2, 3), 3],
                    "c": (torch.rand(2, 3), 4),
                    "d": {5, torch.rand(5)},
                    "e": torch.rand(5),
                    "f": torch.rand(5).int().cuda(),
                },
            ),
            world_size=2,
        )

    @staticmethod
    def _test_broadcast_object(ref_obj, rank, group):
        obj = dist_utils.broadcast_object(
            ref_obj if rank == 0 else None, src_rank=0, group=group
        )
        assert objects_are_equal(ref_obj, obj)


class TestAllGatherList(DistributedTest):
    def test_str_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                "hello world",
            ),
            world_size=2,
        )

    def test_tensor_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                torch.rand(5),
            ),
            world_size=2,
        )

    def test_complex_equality(self):
        spawn_and_init(
            functools.partial(
                TestAllGatherList._test_all_gather_list_equality,
                {
                    "a": "1",
                    "b": [2, torch.rand(2, 3), 3],
                    "c": (torch.rand(2, 3), 4),
                    "d": {5, torch.rand(5)},
                    "e": torch.rand(5),
                    "f": torch.rand(5).int(),
                },
            ),
            world_size=2,
        )

    @staticmethod
    def _test_all_gather_list_equality(ref_obj, rank, group):
        objs = dist_utils.all_gather_list(ref_obj, group)
        for obj in objs:
            assert objects_are_equal(ref_obj, obj)

    def test_rank_tensor(self):
        spawn_and_init(
            TestAllGatherList._test_all_gather_list_rank_tensor, world_size=2
        )

    @staticmethod
    def _test_all_gather_list_rank_tensor(rank, group):
        obj = torch.tensor([rank])
        objs = dist_utils.all_gather_list(obj, group)
        for i, obj in enumerate(objs):
            assert obj.item() == i


if __name__ == "__main__":
    unittest.main()
EXA-1-master
exa/models/unilm-master/edgelm/tests/distributed/test_utils.py