Dataset columns: python_code (string, lengths 0-679k), repo_name (string, lengths 9-41), file_path (string, lengths 6-149)
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import os

import torch
import torchvision
import torch.multiprocessing as mp

from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.segmentation_mask import SegmentationMask


class COCODataset(torchvision.datasets.coco.CocoDetection):
    def __init__(
        self, ann_file, root, remove_images_without_annotations, transforms=None
    ):
        super(COCODataset, self).__init__(root, ann_file)
        # sort indices for reproducible results
        self.ids = sorted(self.ids)

        # filter images without detection annotations
        if remove_images_without_annotations:
            self.ids = [
                img_id
                for img_id in self.ids
                if len(self.coco.getAnnIds(imgIds=img_id, iscrowd=None)) > 0
            ]

        self.json_category_id_to_contiguous_id = {
            v: i + 1 for i, v in enumerate(self.coco.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }
        self.id_to_img_map = {k: v for k, v in enumerate(self.ids)}
        self._transforms = transforms

    def build_target(self, anno, img_size, pin_memory=False):
        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.tensor(boxes, dtype=torch.float32, pin_memory=pin_memory).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img_size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes, dtype=torch.float32, pin_memory=pin_memory)
        target.add_field("labels", classes)

        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img_size, pin_memory=pin_memory)
        target.add_field("masks", masks)

        target = target.clip_to_image(remove_empty=True)
        return target

    def __getitem__(self, idx):
        img = torchvision.io.read_image(self.get_raw_img_info(idx), torchvision.io.image.ImageReadMode.RGB)
        target = self.get_target(idx)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target, idx

    def get_img_info(self, index):
        img_id = self.id_to_img_map[index]
        img_data = self.coco.imgs[img_id]
        return img_data

    def get_raw_img_info(self, index):
        img_id = self.ids[index]
        path = self.coco.loadImgs(img_id)[0]['file_name']
        return os.path.join(self.root, path)

    def get_target(self, index, pin_memory=False):
        img_id = self.ids[index]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        anno = self.coco.loadAnns(ann_ids)
        img_size = (self.coco.imgs[img_id]["width"], self.coco.imgs[img_id]["height"])
        return self.build_target(anno, img_size, pin_memory=pin_memory)


class HybridDataLoader(object):
    def __init__(self, cfg, is_train, batch_size, batch_sampler, dataset, collator, transforms, size_divisible):
        assert dataset._transforms is None, "dataset._transforms must be None when hybrid dataloader is selected"
        self.batch_size = batch_size
        self.length = len(batch_sampler)
        self.batch_sampler = iter(batch_sampler)
        self.dataset = dataset
        self.transforms = transforms
        self.size_divisible = size_divisible

    def __iter__(self):
        return self

    def __len__(self):
        return self.length

    def __next__(self):
        images, targets, idxs = [], [], []
        for idx in next(self.batch_sampler):
            raw_image = torchvision.io.read_image(
                self.dataset.get_raw_img_info(idx),
                torchvision.io.image.ImageReadMode.RGB
            ).pin_memory().to(device='cuda', non_blocking=True)
            raw_target = self.dataset.get_target(idx, pin_memory=True)
            image, target = self.transforms(raw_image, raw_target)
            images.append(image)
            targets.append(target)
            idxs.append(idx)
        images = to_image_list(images, self.size_divisible)
        return images, targets, idxs
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/coco.py
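A minimal usage sketch for the dataset class above, assuming a COCO-style annotation file and image root on disk (the paths are placeholders, not taken from the repository):

    from maskrcnn_benchmark.data.datasets.coco import COCODataset

    # transforms must stay None when the hybrid (GPU-side) data loader is used,
    # since HybridDataLoader applies its own transforms after loading raw images.
    dataset = COCODataset(
        ann_file="annotations/instances_train2017.json",  # placeholder path
        root="train2017",                                  # placeholder path
        remove_images_without_annotations=True,
        transforms=None,
    )
    image, target, idx = dataset[0]  # uint8 image tensor, BoxList target, sample index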
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import bisect

from torch.utils.data.dataset import ConcatDataset as _ConcatDataset


class ConcatDataset(_ConcatDataset):
    """
    Same as torch.utils.data.dataset.ConcatDataset, but exposes an extra
    method for querying the sizes of the image
    """

    def get_idxs(self, idx):
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        if dataset_idx == 0:
            sample_idx = idx
        else:
            sample_idx = idx - self.cumulative_sizes[dataset_idx - 1]
        return dataset_idx, sample_idx

    def get_img_info(self, idx):
        dataset_idx, sample_idx = self.get_idxs(idx)
        return self.datasets[dataset_idx].get_img_info(sample_idx)
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/concat_dataset.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .coco import COCODataset
from .voc import PascalVOCDataset
from .concat_dataset import ConcatDataset

__all__ = ["COCODataset", "ConcatDataset", "PascalVOCDataset"]
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
"""
Simple dataset class that wraps a list of path names
"""

from PIL import Image

from maskrcnn_benchmark.structures.bounding_box import BoxList


class ListDataset(object):
    def __init__(self, image_lists, transforms=None):
        self.image_lists = image_lists
        self.transforms = transforms

    def __getitem__(self, item):
        img = Image.open(self.image_lists[item]).convert("RGB")

        # dummy target
        w, h = img.size
        target = BoxList([[0, 0, w, h]], img.size, mode="xyxy")

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

    def __len__(self):
        return len(self.image_lists)

    def get_img_info(self, item):
        """
        Return the image dimensions for the image, without
        loading and pre-processing it
        """
        pass
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/list_dataset.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. import os import torch import torch.utils.data from PIL import Image import sys if sys.version_info[0] == 2: import xml.etree.cElementTree as ET else: import xml.etree.ElementTree as ET from maskrcnn_benchmark.structures.bounding_box import BoxList class PascalVOCDataset(torch.utils.data.Dataset): CLASSES = ( "__background__ ", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor", ) def __init__(self, data_dir, split, use_difficult=False, transforms=None): self.root = data_dir self.image_set = split self.keep_difficult = use_difficult self.transforms = transforms self._annopath = os.path.join(self.root, "Annotations", "%s.xml") self._imgpath = os.path.join(self.root, "JPEGImages", "%s.jpg") self._imgsetpath = os.path.join(self.root, "ImageSets", "Main", "%s.txt") with open(self._imgsetpath % self.image_set) as f: self.ids = f.readlines() self.ids = [x.strip("\n") for x in self.ids] self.id_to_img_map = {k: v for k, v in enumerate(self.ids)} cls = PascalVOCDataset.CLASSES self.class_to_ind = dict(zip(cls, range(len(cls)))) def __getitem__(self, index): img_id = self.ids[index] img = Image.open(self._imgpath % img_id).convert("RGB") target = self.get_groundtruth(index) target = target.clip_to_image(remove_empty=True) if self.transforms is not None: img, target = self.transforms(img, target) return img, target, index def __len__(self): return len(self.ids) def get_groundtruth(self, index): img_id = self.ids[index] anno = ET.parse(self._annopath % img_id).getroot() anno = self._preprocess_annotation(anno) height, width = anno["im_info"] target = BoxList(anno["boxes"], (width, height), mode="xyxy") target.add_field("labels", anno["labels"]) target.add_field("difficult", anno["difficult"]) return target def _preprocess_annotation(self, target): boxes = [] gt_classes = [] difficult_boxes = [] TO_REMOVE = 1 for obj in target.iter("object"): difficult = int(obj.find("difficult").text) == 1 if not self.keep_difficult and difficult: continue name = obj.find("name").text.lower().strip() bb = obj.find("bndbox") # Make pixel indexes 0-based # Refer to "https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/pascal_voc.py#L208-L211" box = [ bb.find("xmin").text, bb.find("ymin").text, bb.find("xmax").text, bb.find("ymax").text, ] bndbox = tuple( map(lambda x: x - TO_REMOVE, list(map(int, box))) ) boxes.append(bndbox) gt_classes.append(self.class_to_ind[name]) difficult_boxes.append(difficult) size = target.find("size") im_info = tuple(map(int, (size.find("height").text, size.find("width").text))) res = { "boxes": torch.tensor(boxes, dtype=torch.float32), "labels": torch.tensor(gt_classes), "difficult": torch.tensor(difficult_boxes), "im_info": im_info, } return res def get_img_info(self, index): img_id = self.ids[index] anno = ET.parse(self._annopath % img_id).getroot() size = anno.find("size") im_info = tuple(map(int, (size.find("height").text, size.find("width").text))) return {"height": im_info[0], "width": im_info[1]} def map_class_id_to_class_name(self, class_id): return PascalVOCDataset.CLASSES[class_id]
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/voc.py
from maskrcnn_benchmark.data import datasets

from .coco import coco_evaluation
from .voc import voc_evaluation


def evaluate(dataset, predictions, output_folder, **kwargs):
    """evaluate dataset using different methods based on dataset type.

    Args:
        dataset: Dataset object
        predictions(list[BoxList]): each item in the list represents the
            prediction results for one image.
        output_folder: output folder, to save evaluation files or results.
        **kwargs: other args.
    Returns:
        evaluation result
    """
    args = dict(
        dataset=dataset, predictions=predictions, output_folder=output_folder, **kwargs
    )
    if isinstance(dataset, datasets.COCODataset):
        return coco_evaluation(**args)
    elif isinstance(dataset, datasets.PascalVOCDataset):
        return voc_evaluation(**args)
    else:
        dataset_name = dataset.__class__.__name__
        raise NotImplementedError("Unsupported dataset type {}.".format(dataset_name))
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/__init__.py
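A short sketch of calling the dispatcher above for a COCO-style dataset; the extra keyword arguments are the ones coco_evaluation forwards to do_coco_evaluation, and coco_dataset, predictions and the output folder are placeholders:

    from maskrcnn_benchmark.data.datasets.evaluation import evaluate

    results = evaluate(
        dataset=coco_dataset,            # a COCODataset instance
        predictions=predictions,         # list[BoxList], one per image
        output_folder="inference/coco",  # placeholder
        box_only=False,
        iou_types=("bbox", "segm"),
        expected_results=(),
        expected_results_sigma_tol=4,
    )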
from .coco_eval import do_coco_evaluation


def coco_evaluation(
    dataset,
    predictions,
    output_folder,
    box_only,
    iou_types,
    expected_results,
    expected_results_sigma_tol,
):
    return do_coco_evaluation(
        dataset=dataset,
        predictions=predictions,
        box_only=box_only,
        output_folder=output_folder,
        iou_types=iou_types,
        expected_results=expected_results,
        expected_results_sigma_tol=expected_results_sigma_tol,
    )
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/coco/__init__.py
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved. import logging import tempfile import os import torch from collections import OrderedDict from tqdm import tqdm from maskrcnn_benchmark.modeling.roi_heads.mask_head.inference import Masker from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou def do_coco_evaluation( dataset, predictions, box_only, output_folder, iou_types, expected_results, expected_results_sigma_tol, ): logger = logging.getLogger("maskrcnn_benchmark.inference") if box_only: logger.info("Evaluating bbox proposals") areas = {"all": "", "small": "s", "medium": "m", "large": "l"} res = COCOResults("box_proposal") for limit in [100, 1000]: for area, suffix in areas.items(): stats = evaluate_box_proposals( predictions, dataset, area=area, limit=limit ) key = "AR{}@{:d}".format(suffix, limit) res.results["box_proposal"][key] = stats["ar"].item() logger.info(res) check_expected_results(res, expected_results, expected_results_sigma_tol) if output_folder: torch.save(res, os.path.join(output_folder, "box_proposals.pth")) return logger.info("Preparing results for COCO format") coco_results = {} if "bbox" in iou_types: logger.info("Preparing bbox results") coco_results["bbox"] = prepare_for_coco_detection(predictions, dataset) if "segm" in iou_types: logger.info("Preparing segm results") coco_results["segm"] = prepare_for_coco_segmentation(predictions, dataset) results = COCOResults(*iou_types) logger.info("Evaluating predictions") dataset.coco.createIndex(use_ext=True) for iou_type in iou_types: with tempfile.NamedTemporaryFile() as f: file_path = f.name if output_folder: file_path = os.path.join(output_folder, iou_type + ".json") res = evaluate_predictions_on_coco( dataset.coco, coco_results[iou_type], file_path, iou_type ) results.update(res) logger.info(results) check_expected_results(results, expected_results, expected_results_sigma_tol) if output_folder: torch.save(results, os.path.join(output_folder, "coco_results.pth")) return results, coco_results def prepare_for_coco_detection(predictions, dataset): # assert isinstance(dataset, COCODataset) coco_results = [] for image_id, prediction in enumerate(predictions): original_id = dataset.id_to_img_map[image_id] if len(prediction) == 0: continue # TODO replace with get_img_info? image_width = dataset.coco.imgs[original_id]["width"] image_height = dataset.coco.imgs[original_id]["height"] prediction = prediction.resize((image_width, image_height)) prediction = prediction.convert("xywh") boxes = prediction.bbox.tolist() scores = prediction.get_field("scores").tolist() labels = prediction.get_field("labels").tolist() mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels] coco_results.extend( [ { "image_id": original_id, "category_id": mapped_labels[k], "bbox": box, "score": scores[k], } for k, box in enumerate(boxes) ] ) return coco_results def prepare_for_coco_segmentation(predictions, dataset): import pycocotools.mask as mask_util import numpy as np masker = Masker(threshold=0.5, padding=1) # assert isinstance(dataset, COCODataset) coco_results = [] for image_id, prediction in tqdm(enumerate(predictions)): original_id = dataset.id_to_img_map[image_id] if len(prediction) == 0: continue # TODO replace with get_img_info? 
image_width = dataset.coco.imgs[original_id]["width"] image_height = dataset.coco.imgs[original_id]["height"] prediction = prediction.resize((image_width, image_height)) masks = prediction.get_field("mask") # t = time.time() # Masker is necessary only if masks haven't been already resized. if list(masks.shape[-2:]) != [image_height, image_width]: masks = masker(masks.expand(1, -1, -1, -1, -1), prediction) masks = masks[0] # logger.info('Time mask: {}'.format(time.time() - t)) # prediction = prediction.convert('xywh') # boxes = prediction.bbox.tolist() scores = prediction.get_field("scores").tolist() labels = prediction.get_field("labels").tolist() # rles = prediction.get_field('mask') rles = [ mask_util.encode(np.array(mask[0, :, :, np.newaxis], order="F"))[0] for mask in masks ] for rle in rles: rle["counts"] = rle["counts"].decode("utf-8") mapped_labels = [dataset.contiguous_category_id_to_json_id[i] for i in labels] coco_results.extend( [ { "image_id": original_id, "category_id": mapped_labels[k], "segmentation": rle, "score": scores[k], } for k, rle in enumerate(rles) ] ) return coco_results # inspired from Detectron def evaluate_box_proposals( predictions, dataset, thresholds=None, area="all", limit=None ): """Evaluate detection proposal recall metrics. This function is a much faster alternative to the official COCO API recall evaluation code. However, it produces slightly different results. """ # Record max overlap value for each gt box # Return vector of overlap values areas = { "all": 0, "small": 1, "medium": 2, "large": 3, "96-128": 4, "128-256": 5, "256-512": 6, "512-inf": 7, } area_ranges = [ [0 ** 2, 1e5 ** 2], # all [0 ** 2, 32 ** 2], # small [32 ** 2, 96 ** 2], # medium [96 ** 2, 1e5 ** 2], # large [96 ** 2, 128 ** 2], # 96-128 [128 ** 2, 256 ** 2], # 128-256 [256 ** 2, 512 ** 2], # 256-512 [512 ** 2, 1e5 ** 2], ] # 512-inf assert area in areas, "Unknown area range: {}".format(area) area_range = area_ranges[areas[area]] gt_overlaps = [] num_pos = 0 for image_id, prediction in enumerate(predictions): original_id = dataset.id_to_img_map[image_id] # TODO replace with get_img_info? 
image_width = dataset.coco.imgs[original_id]["width"] image_height = dataset.coco.imgs[original_id]["height"] prediction = prediction.resize((image_width, image_height)) # sort predictions in descending order # TODO maybe remove this and make it explicit in the documentation inds = prediction.get_field("objectness").sort(descending=True)[1] prediction = prediction[inds] ann_ids = dataset.coco.getAnnIds(imgIds=original_id) anno = dataset.coco.loadAnns(ann_ids) gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0] gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert( "xyxy" ) gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0]) if len(gt_boxes) == 0: continue valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1]) gt_boxes = gt_boxes[valid_gt_inds] num_pos += len(gt_boxes) if len(gt_boxes) == 0: continue if len(prediction) == 0: continue if limit is not None and len(prediction) > limit: prediction = prediction[:limit] overlaps = boxlist_iou(prediction, gt_boxes) _gt_overlaps = torch.zeros(len(gt_boxes)) for j in range(min(len(prediction), len(gt_boxes))): # find which proposal box maximally covers each gt box # and get the iou amount of coverage for each gt box max_overlaps, argmax_overlaps = overlaps.max(dim=0) # find which gt box is 'best' covered (i.e. 'best' = most iou) gt_ovr, gt_ind = max_overlaps.max(dim=0) assert gt_ovr >= 0 # find the proposal box that covers the best covered gt box box_ind = argmax_overlaps[gt_ind] # record the iou coverage of this gt box _gt_overlaps[j] = overlaps[box_ind, gt_ind] assert _gt_overlaps[j] == gt_ovr # mark the proposal box and the gt box as used overlaps[box_ind, :] = -1 overlaps[:, gt_ind] = -1 # append recorded iou coverage level gt_overlaps.append(_gt_overlaps) gt_overlaps = torch.cat(gt_overlaps, dim=0) gt_overlaps, _ = torch.sort(gt_overlaps) if thresholds is None: step = 0.05 thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32) recalls = torch.zeros_like(thresholds) # compute recall for each iou threshold for i, t in enumerate(thresholds): recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos) # ar = 2 * np.trapz(recalls, thresholds) ar = recalls.mean() return { "ar": ar, "recalls": recalls, "thresholds": thresholds, "gt_overlaps": gt_overlaps, "num_pos": num_pos, } def evaluate_predictions_on_coco( coco_gt, coco_results, json_result_file, iou_type="bbox" ): import json with open(json_result_file, "w") as f: json.dump(coco_results, f) from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval coco_dt = coco_gt.loadRes(str(json_result_file), use_ext=True) if coco_results else COCO() # coco_dt = coco_gt.loadRes(coco_results) coco_eval = COCOeval(coco_gt, coco_dt, iou_type, use_ext=True) coco_eval.evaluate() coco_eval.accumulate() coco_eval.summarize() return coco_eval class COCOResults(object): METRICS = { "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"], "box_proposal": [ "AR@100", "ARs@100", "ARm@100", "ARl@100", "AR@1000", "ARs@1000", "ARm@1000", "ARl@1000", ], "keypoint": ["AP", "AP50", "AP75", "APm", "APl"], } def __init__(self, *iou_types): allowed_types = ("box_proposal", "bbox", "segm") assert all(iou_type in allowed_types for iou_type in iou_types) results = OrderedDict() for iou_type in iou_types: results[iou_type] = OrderedDict( [(metric, -1) for metric in 
COCOResults.METRICS[iou_type]] ) self.results = results def update(self, coco_eval): if coco_eval is None: return from pycocotools.cocoeval import COCOeval assert isinstance(coco_eval, COCOeval) s = coco_eval.stats iou_type = coco_eval.params.iouType res = self.results[iou_type] metrics = COCOResults.METRICS[iou_type] for idx, metric in enumerate(metrics): res[metric] = s[idx] def __repr__(self): # TODO make it pretty return repr(self.results) def check_expected_results(results, expected_results, sigma_tol): if not expected_results: return logger = logging.getLogger("maskrcnn_benchmark.inference") for task, metric, (mean, std) in expected_results: actual_val = results.results[task][metric] lo = mean - sigma_tol * std hi = mean + sigma_tol * std ok = (lo < actual_val) and (actual_val < hi) msg = ( "{} > {} sanity check (actual vs. expected): " "{:.3f} vs. mean={:.4f}, std={:.4}, range=({:.4f}, {:.4f})" ).format(task, metric, actual_val, mean, std, lo, hi) if not ok: msg = "FAIL: " + msg logger.error(msg) else: msg = "PASS: " + msg logger.info(msg)
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/coco/coco_eval.py
import logging

from .voc_eval import do_voc_evaluation


def voc_evaluation(dataset, predictions, output_folder, box_only, **_):
    logger = logging.getLogger("maskrcnn_benchmark.inference")
    if box_only:
        logger.warning("voc evaluation doesn't support box_only, ignored.")
    logger.info("performing voc evaluation, ignored iou_types.")
    return do_voc_evaluation(
        dataset=dataset,
        predictions=predictions,
        output_folder=output_folder,
        logger=logger,
    )
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/__init__.py
# A modification version from chainercv repository. # (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py) from __future__ import division import os from collections import defaultdict import numpy as np from maskrcnn_benchmark.structures.bounding_box import BoxList from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou def do_voc_evaluation(dataset, predictions, output_folder, logger): # TODO need to make the use_07_metric format available # for the user to choose pred_boxlists = [] gt_boxlists = [] for image_id, prediction in enumerate(predictions): img_info = dataset.get_img_info(image_id) if len(prediction) == 0: continue image_width = img_info["width"] image_height = img_info["height"] prediction = prediction.resize((image_width, image_height)) pred_boxlists.append(prediction) gt_boxlist = dataset.get_groundtruth(image_id) gt_boxlists.append(gt_boxlist) result = eval_detection_voc( pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=0.5, use_07_metric=True, ) result_str = "mAP: {:.4f}\n".format(result["map"]) for i, ap in enumerate(result["ap"]): if i == 0: # skip background continue result_str += "{:<16}: {:.4f}\n".format( dataset.map_class_id_to_class_name(i), ap ) logger.info(result_str) if output_folder: with open(os.path.join(output_folder, "result.txt"), "w") as fid: fid.write(result_str) return result def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False): """Evaluate on voc dataset. Args: pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields. gt_boxlists(list[BoxList]): ground truth boxlist, has labels field. iou_thresh: iou thresh use_07_metric: boolean Returns: dict represents the results """ assert len(gt_boxlists) == len( pred_boxlists ), "Length of gt and pred lists need to be same." prec, rec = calc_detection_voc_prec_rec( pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh ) ap = calc_detection_voc_ap(prec, rec, use_07_metric=use_07_metric) return {"ap": ap, "map": np.nanmean(ap)} def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5): """Calculate precision and recall based on evaluation code of PASCAL VOC. This function calculates precision and recall of predicted bounding boxes obtained from a dataset which has :math:`N` images. The code is based on the evaluation code used in PASCAL VOC Challenge. """ n_pos = defaultdict(int) score = defaultdict(list) match = defaultdict(list) for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists): pred_bbox = pred_boxlist.bbox.numpy() pred_label = pred_boxlist.get_field("labels").numpy() pred_score = pred_boxlist.get_field("scores").numpy() gt_bbox = gt_boxlist.bbox.numpy() gt_label = gt_boxlist.get_field("labels").numpy() gt_difficult = gt_boxlist.get_field("difficult").numpy() for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)): pred_mask_l = pred_label == l pred_bbox_l = pred_bbox[pred_mask_l] pred_score_l = pred_score[pred_mask_l] # sort by score order = pred_score_l.argsort()[::-1] pred_bbox_l = pred_bbox_l[order] pred_score_l = pred_score_l[order] gt_mask_l = gt_label == l gt_bbox_l = gt_bbox[gt_mask_l] gt_difficult_l = gt_difficult[gt_mask_l] n_pos[l] += np.logical_not(gt_difficult_l).sum() score[l].extend(pred_score_l) if len(pred_bbox_l) == 0: continue if len(gt_bbox_l) == 0: match[l].extend((0,) * pred_bbox_l.shape[0]) continue # VOC evaluation follows integer typed bounding boxes. 
pred_bbox_l = pred_bbox_l.copy() pred_bbox_l[:, 2:] += 1 gt_bbox_l = gt_bbox_l.copy() gt_bbox_l[:, 2:] += 1 iou = boxlist_iou( BoxList(pred_bbox_l, gt_boxlist.size), BoxList(gt_bbox_l, gt_boxlist.size), ).numpy() gt_index = iou.argmax(axis=1) # set -1 if there is no matching ground truth gt_index[iou.max(axis=1) < iou_thresh] = -1 del iou selec = np.zeros(gt_bbox_l.shape[0], dtype=bool) for gt_idx in gt_index: if gt_idx >= 0: if gt_difficult_l[gt_idx]: match[l].append(-1) else: if not selec[gt_idx]: match[l].append(1) else: match[l].append(0) selec[gt_idx] = True else: match[l].append(0) n_fg_class = max(n_pos.keys()) + 1 prec = [None] * n_fg_class rec = [None] * n_fg_class for l in n_pos.keys(): score_l = np.array(score[l]) match_l = np.array(match[l], dtype=np.int8) order = score_l.argsort()[::-1] match_l = match_l[order] tp = np.cumsum(match_l == 1) fp = np.cumsum(match_l == 0) # If an element of fp + tp is 0, # the corresponding element of prec[l] is nan. prec[l] = tp / (fp + tp) # If n_pos[l] is 0, rec[l] is None. if n_pos[l] > 0: rec[l] = tp / n_pos[l] return prec, rec def calc_detection_voc_ap(prec, rec, use_07_metric=False): """Calculate average precisions based on evaluation code of PASCAL VOC. This function calculates average precisions from given precisions and recalls. The code is based on the evaluation code used in PASCAL VOC Challenge. Args: prec (list of numpy.array): A list of arrays. :obj:`prec[l]` indicates precision for class :math:`l`. If :obj:`prec[l]` is :obj:`None`, this function returns :obj:`numpy.nan` for class :math:`l`. rec (list of numpy.array): A list of arrays. :obj:`rec[l]` indicates recall for class :math:`l`. If :obj:`rec[l]` is :obj:`None`, this function returns :obj:`numpy.nan` for class :math:`l`. use_07_metric (bool): Whether to use PASCAL VOC 2007 evaluation metric for calculating average precision. The default value is :obj:`False`. Returns: ~numpy.ndarray: This function returns an array of average precisions. The :math:`l`-th value corresponds to the average precision for class :math:`l`. If :obj:`prec[l]` or :obj:`rec[l]` is :obj:`None`, the corresponding value is set to :obj:`numpy.nan`. """ n_fg_class = len(prec) ap = np.empty(n_fg_class) for l in range(n_fg_class): if prec[l] is None or rec[l] is None: ap[l] = np.nan continue if use_07_metric: # 11 point metric ap[l] = 0 for t in np.arange(0.0, 1.1, 0.1): if np.sum(rec[l] >= t) == 0: p = 0 else: p = np.max(np.nan_to_num(prec[l])[rec[l] >= t]) ap[l] += p / 11 else: # correct AP calculation # first append sentinel values at the end mpre = np.concatenate(([0], np.nan_to_num(prec[l]), [0])) mrec = np.concatenate(([0], rec[l], [1])) mpre = np.maximum.accumulate(mpre[::-1])[::-1] # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap[l] = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) return ap
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from . import transforms as T


def build_transforms(cfg, is_train=True):
    if is_train:
        min_size = cfg.INPUT.MIN_SIZE_TRAIN
        max_size = cfg.INPUT.MAX_SIZE_TRAIN
        flip_prob = 0.5  # cfg.INPUT.FLIP_PROB_TRAIN
    else:
        min_size = cfg.INPUT.MIN_SIZE_TEST
        max_size = cfg.INPUT.MAX_SIZE_TEST
        flip_prob = 0

    to_bgr255 = cfg.INPUT.TO_BGR255
    normalize_transform = T.Normalize(
        mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD, to_bgr255=to_bgr255
    )

    transform = T.Compose(
        [
            T.Resize(min_size, max_size),
            T.RandomHorizontalFlip(flip_prob),
            T.ToTensor(),
            normalize_transform,
        ]
    )
    return transform
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/transforms/build.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
import random

import torch
import torchvision
from torchvision.transforms import functional as F


class Compose(object):
    def __init__(self, transforms):
        self.transforms = transforms

    def __call__(self, image, target):
        for t in self.transforms:
            image, target = t(image, target)
        return image, target

    def __repr__(self):
        format_string = self.__class__.__name__ + "("
        for t in self.transforms:
            format_string += "\n"
            format_string += "    {0}".format(t)
        format_string += "\n)"
        return format_string


class Resize(object):
    def __init__(self, min_size, max_size):
        self.min_size = min_size
        self.max_size = max_size

    # modified from torchvision to add support for max size
    def get_size(self, image_size):
        w, h = image_size
        size = self.min_size
        max_size = self.max_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(round(max_size * min_original_size / max_original_size))

        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (oh, ow)

    def __call__(self, image, target):
        if isinstance(image, torch.Tensor):
            image_size = image.shape[-2:]
            image_size = (image_size[1], image_size[0])
        else:
            image_size = image.size
        size = self.get_size(image_size)
        image = F.resize(image, size)
        if isinstance(image, torch.Tensor):
            image_size = image.shape[-2:]
            image_size = (image_size[1], image_size[0])
        else:
            image_size = image.size
        target = target.resize(image_size)
        return image, target


class RandomHorizontalFlip(object):
    def __init__(self, prob=0.5):
        self.prob = prob

    def __call__(self, image, target):
        if random.random() < self.prob:
            image = F.hflip(image)
            target = target.transpose(0)
        return image, target


class ToTensor(object):
    def __call__(self, image, target):
        if isinstance(image, torch.Tensor):
            return F.convert_image_dtype(image, dtype=torch.float32), target
        else:
            return F.to_tensor(image), target


class Normalize(object):
    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255

    def __call__(self, image, target):
        if self.to_bgr255:
            image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image, target
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/transforms/transforms.py
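A brief sketch of composing the transforms defined above by hand (build_transforms does the same from the config); the mean/std values are illustrative rather than read from a config, and pil_image / boxlist_target stand for an input image and its BoxList target:

    from maskrcnn_benchmark.data import transforms as T

    transform = T.Compose(
        [
            T.Resize(min_size=800, max_size=1333),
            T.RandomHorizontalFlip(prob=0.5),
            T.ToTensor(),
            T.Normalize(mean=[102.9801, 115.9465, 122.7717], std=[1.0, 1.0, 1.0], to_bgr255=True),
        ]
    )
    image, target = transform(pil_image, boxlist_target)  # PIL image (or tensor) plus BoxList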
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .transforms import Compose
from .transforms import Resize
from .transforms import RandomHorizontalFlip
from .transforms import ToTensor
from .transforms import Normalize

from .build import build_transforms
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/transforms/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from .distributed import DistributedSampler
from .grouped_batch_sampler import GroupedBatchSampler
from .iteration_based_batch_sampler import IterationBasedBatchSampler

__all__ = ["DistributedSampler", "GroupedBatchSampler", "IterationBasedBatchSampler"]
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/samplers/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. import itertools import torch from torch.utils.data.sampler import BatchSampler from torch.utils.data.sampler import Sampler class GroupedBatchSampler(BatchSampler): """ Wraps another sampler to yield a mini-batch of indices. It enforces that elements from the same group should appear in groups of batch_size. It also tries to provide mini-batches which follows an ordering which is as close as possible to the ordering from the original sampler. Arguments: sampler (Sampler): Base sampler. batch_size (int): Size of mini-batch. drop_uneven (bool): If ``True``, the sampler will drop the batches whose size is less than ``batch_size`` """ def __init__(self, sampler, group_ids, batch_size, drop_uneven=False): if not isinstance(sampler, Sampler): raise ValueError( "sampler should be an instance of " "torch.utils.data.Sampler, but got sampler={}".format(sampler) ) self.sampler = sampler self.group_ids = torch.as_tensor(group_ids) assert self.group_ids.dim() == 1 self.batch_size = batch_size self.drop_uneven = drop_uneven self.groups = torch.unique(self.group_ids).sort(0)[0] self._can_reuse_batches = False def _prepare_batches(self): dataset_size = len(self.group_ids) # get the sampled indices from the sampler sampled_ids = torch.as_tensor(list(self.sampler)) # potentially not all elements of the dataset were sampled # by the sampler (e.g., DistributedSampler). # construct a tensor which contains -1 if the element was # not sampled, and a non-negative number indicating the # order where the element was sampled. # for example. if sampled_ids = [3, 1] and dataset_size = 5, # the order is [-1, 1, -1, 0, -1] order = torch.full((dataset_size,), -1, dtype=torch.int64) order[sampled_ids] = torch.arange(len(sampled_ids)) # get a mask with the elements that were sampled mask = order >= 0 # find the elements that belong to each individual cluster clusters = [(self.group_ids == i) & mask for i in self.groups] # get relative order of the elements inside each cluster # that follows the order from the sampler relative_order = [order[cluster] for cluster in clusters] # with the relative order, find the absolute order in the # sampled space permutation_ids = [s[s.sort()[1]] for s in relative_order] # permute each cluster so that they follow the order from # the sampler permuted_clusters = [sampled_ids[idx] for idx in permutation_ids] # splits each cluster in batch_size, and merge as a list of tensors splits = [c.split(self.batch_size) for c in permuted_clusters] merged = tuple(itertools.chain.from_iterable(splits)) # now each batch internally has the right order, but # they are grouped by clusters. Find the permutation between # different batches that brings them as close as possible to # the order that we have in the sampler. 
For that, we will consider the # ordering as coming from the first element of each batch, and sort # correspondingly first_element_of_batch = [t[0].item() for t in merged] # get and inverse mapping from sampled indices and the position where # they occur (as returned by the sampler) inv_sampled_ids_map = {v: k for k, v in enumerate(sampled_ids.tolist())} # from the first element in each batch, get a relative ordering first_index_of_batch = torch.as_tensor( [inv_sampled_ids_map[s] for s in first_element_of_batch] ) # permute the batches so that they approximately follow the order # from the sampler permutation_order = first_index_of_batch.sort(0)[1].tolist() # finally, permute the batches batches = [merged[i].tolist() for i in permutation_order] if self.drop_uneven: kept = [] for batch in batches: if len(batch) == self.batch_size: kept.append(batch) batches = kept return batches def __iter__(self): if self._can_reuse_batches: batches = self._batches self._can_reuse_batches = False else: batches = self._prepare_batches() self._batches = batches return iter(batches) def __len__(self): if not hasattr(self, "_batches"): self._batches = self._prepare_batches() self._can_reuse_batches = True return len(self._batches)
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/samplers/grouped_batch_sampler.py
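A compact sketch of driving the sampler above, assuming one group id per dataset element (in this codebase the groups typically encode aspect-ratio buckets; the ids below are illustrative):

    from torch.utils.data.sampler import RandomSampler
    from maskrcnn_benchmark.data.samplers import GroupedBatchSampler

    sampler = RandomSampler(dataset)            # `dataset` is any map-style dataset
    group_ids = [0, 1, 1, 0, 1, 0]              # illustrative; len(group_ids) == len(dataset)
    batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2, drop_uneven=False)
    for batch in batch_sampler:
        pass  # each `batch` is a list of indices drawn from a single group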
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Code is copy-pasted exactly as in torch.utils.data.distributed.
# FIXME remove this once c10d fixes the bug it has
import math

import torch
import torch.distributed as dist
from torch.utils.data.sampler import Sampler


class DistributedSampler(Sampler):
    """Sampler that restricts data loading to a subset of the dataset.
    It is especially useful in conjunction with
    :class:`torch.nn.parallel.DistributedDataParallel`. In such case, each
    process can pass a DistributedSampler instance as a DataLoader sampler,
    and load a subset of the original dataset that is exclusive to it.
    .. note::
        Dataset is assumed to be of constant size.
    Arguments:
        dataset: Dataset used for sampling.
        num_replicas (optional): Number of processes participating in
            distributed training.
        rank (optional): Rank of the current process within num_replicas.
    """

    def __init__(self, dataset, num_replicas=None, rank=None, shuffle=True):
        if num_replicas is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            num_replicas = dist.get_world_size()
        if rank is None:
            if not dist.is_available():
                raise RuntimeError("Requires distributed package to be available")
            rank = dist.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        self.epoch = 0
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
        self.shuffle = shuffle

    def __iter__(self):
        if self.shuffle:
            # deterministically shuffle based on epoch
            g = torch.Generator()
            g.manual_seed(self.epoch)
            indices = torch.randperm(len(self.dataset), generator=g).tolist()
        else:
            indices = torch.arange(len(self.dataset)).tolist()

        # add extra samples to make it evenly divisible
        indices += indices[: (self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # subsample
        offset = self.num_samples * self.rank
        indices = indices[offset : offset + self.num_samples]
        assert len(indices) == self.num_samples

        return iter(indices)

    def __len__(self):
        return self.num_samples

    def set_epoch(self, epoch):
        self.epoch = epoch
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/samplers/distributed.py
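The sampler above relies on set_epoch to vary its deterministic shuffle between epochs; a minimal sketch of that contract, assuming world_size, rank and num_epochs are defined by the surrounding training script:

    from maskrcnn_benchmark.data.samplers import DistributedSampler

    sampler = DistributedSampler(dataset, num_replicas=world_size, rank=rank, shuffle=True)
    for epoch in range(num_epochs):
        sampler.set_epoch(epoch)  # re-seeds the shuffle for this epoch
        for idx in sampler:
            pass                  # each rank iterates only its own slice of the indices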
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
from torch.utils.data.sampler import BatchSampler


class IterationBasedBatchSampler(BatchSampler):
    """
    Wraps a BatchSampler, resampling from it until
    a specified number of iterations have been sampled
    """

    def __init__(self, batch_sampler, num_iterations, start_iter=0):
        self.batch_sampler = batch_sampler
        self.num_iterations = num_iterations
        self.start_iter = start_iter

    def __iter__(self):
        iteration = self.start_iter
        while iteration <= self.num_iterations:
            # if the underlying sampler has a set_epoch method, like
            # DistributedSampler, used for making each process see
            # a different split of the dataset, then set it
            if hasattr(self.batch_sampler.sampler, "set_epoch"):
                self.batch_sampler.sampler.set_epoch(iteration)
            for batch in self.batch_sampler:
                iteration += 1
                if iteration > self.num_iterations:
                    break
                yield batch

    def __len__(self):
        return self.num_iterations
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/data/samplers/iteration_based_batch_sampler.py
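How the three samplers in this package compose in a training data loader; a sketch with illustrative sizes (the real values come from the solver config):

    from maskrcnn_benchmark.data.samplers import (
        DistributedSampler,
        GroupedBatchSampler,
        IterationBasedBatchSampler,
    )

    sampler = DistributedSampler(dataset, num_replicas=8, rank=0)
    batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)
    batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations=90000, start_iter=0)
    # len(batch_sampler) == 90000; iteration restarts the wrapped batch sampler as needed
    # and calls set_epoch on the inner sampler so each pass is shuffled differently.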
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
import torch
import os

from maskrcnn_benchmark.data.build import make_data_loader
from maskrcnn_benchmark.engine.inference import inference
from maskrcnn_benchmark.utils.miscellaneous import mkdir
from maskrcnn_benchmark.utils.comm import synchronize


def test(cfg, model, distributed, dllogger):
    if distributed:
        model = model.module
    torch.cuda.empty_cache()  # TODO check if it helps
    iou_types = ("bbox",)
    if cfg.MODEL.MASK_ON:
        iou_types = iou_types + ("segm",)
    output_folders = [None] * len(cfg.DATASETS.TEST)
    dataset_names = cfg.DATASETS.TEST
    if cfg.OUTPUT_DIR:
        for idx, dataset_name in enumerate(dataset_names):
            output_folder = os.path.join(cfg.OUTPUT_DIR, "inference", dataset_name)
            mkdir(output_folder)
            output_folders[idx] = output_folder
    data_loaders_val = make_data_loader(cfg, is_train=False, is_distributed=distributed)
    results = []
    for output_folder, dataset_name, data_loader_val in zip(output_folders, dataset_names, data_loaders_val):
        result = inference(
            model,
            data_loader_val,
            dataset_name=dataset_name,
            iou_types=iou_types,
            box_only=cfg.MODEL.RPN_ONLY,
            device=cfg.MODEL.DEVICE,
            expected_results=cfg.TEST.EXPECTED_RESULTS,
            expected_results_sigma_tol=cfg.TEST.EXPECTED_RESULTS_SIGMA_TOL,
            output_folder=output_folder,
            dllogger=dllogger,
        )
        synchronize()
        results.append(result)
    return results
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/tester.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/__init__.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import datetime import logging import time import os import torch from tqdm import tqdm from maskrcnn_benchmark.data.datasets.evaluation import evaluate from ..utils.comm import is_main_process, all_gather, synchronize, synchronized_timestamp def compute_on_dataset(model, data_loader, device, steps=-1): model.eval() results_dict = {} latency = [] cpu_device = torch.device("cpu") for i, batch in enumerate(tqdm(data_loader)): #Break earlier for inference on partial dataset if steps > -1 and i >= steps: break images, targets, image_ids = batch images = images.to(device) with torch.no_grad(): batch_start = time.perf_counter() output = model(images) latency.append(time.perf_counter() - batch_start) output = [o.to(cpu_device) for o in output] results_dict.update( {img_id: result for img_id, result in zip(image_ids, output)} ) return results_dict, latency def _accumulate_predictions_from_multiple_gpus(predictions_per_gpu): all_predictions = all_gather(predictions_per_gpu) if not is_main_process(): return # merge the list of dicts predictions = {} for p in all_predictions: predictions.update(p) # convert a dict where the key is the index in a list image_ids = list(sorted(predictions.keys())) if len(image_ids) != image_ids[-1] + 1: logger = logging.getLogger("maskrcnn_benchmark.inference") logger.warning( "Number of images that were gathered from multiple processes is not " "a contiguous set. Some images might be missing from the evaluation" ) # convert to a list predictions = [predictions[i] for i in image_ids] return predictions def inference( model, data_loader, dataset_name, iou_types=("bbox",), box_only=False, device="cuda", expected_results=(), expected_results_sigma_tol=4, output_folder=None, skip_eval=False, dllogger=None, steps=-1, profile=False, ): # convert to a torch.device for efficiency device = torch.device(device) num_devices = ( torch.distributed.get_world_size() if torch.distributed.is_initialized() else 1 ) dataset = data_loader.dataset dllogger.log(step="PARAMETER", data={"eval_dataset_name": dataset_name, "eval_num_samples":len(dataset)}) start_time = synchronized_timestamp() with torch.autograd.profiler.emit_nvtx(enabled=profile): predictions, latency = compute_on_dataset(model, data_loader, device, steps=steps) # wait for all processes to complete before measuring the time synchronize() total_time = time.time() - start_time latency_avg = sum(latency) / len(latency) latency.sort() def _latency_avg(n): return sum(latency[:n]) / n latency_90 = _latency_avg(int(len(latency)*0.9)) latency_95 = _latency_avg(int(len(latency)*0.95)) latency_99 = _latency_avg(int(len(latency)*0.99)) len_dataset = len(dataset) if steps is -1 else steps total_time_str = str(datetime.timedelta(seconds=total_time)) dllogger.log(step=tuple(), data={"e2e_infer_time": total_time, "inference_perf_fps": len_dataset / total_time}) stats = {'latency_avg' : latency_avg, 'latency_90': latency_90, 'latency_95' : latency_95, 'latency_99': latency_99,} dllogger.log(step=tuple(), data=stats) logger = logging.getLogger("maskrcnn_benchmark.inference") logger.info( "Total inference time: {} ({} s / img per device, on {} devices)".format( total_time_str, total_time * num_devices / len_dataset, num_devices ) ) predictions = _accumulate_predictions_from_multiple_gpus(predictions) if not is_main_process(): return if output_folder: torch.save(predictions, os.path.join(output_folder, 
"predictions.pth")) if skip_eval: dllogger.log(step="PARAMETER", data={"skip_eval":True, "predictions_saved_path":os.path.join(output_folder, "predictions.pth")}) return extra_args = dict( box_only=box_only, iou_types=iou_types, expected_results=expected_results, expected_results_sigma_tol=expected_results_sigma_tol, ) return evaluate(dataset=dataset, predictions=predictions, output_folder=output_folder, **extra_args)
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/inference.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. # Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. import datetime import logging import time import torch import torch.distributed as dist from maskrcnn_benchmark.utils.comm import get_world_size, synchronized_timestamp from maskrcnn_benchmark.utils.metric_logger import MetricLogger def reduce_loss_dict(loss_dict): """ Reduce the loss dictionary from all processes so that process with rank 0 has the averaged results. Returns a dict with the same fields as loss_dict, after reduction. """ world_size = get_world_size() if world_size < 2: return loss_dict with torch.no_grad(): loss_names = [] all_losses = [] for k in sorted(loss_dict.keys()): loss_names.append(k) all_losses.append(loss_dict[k]) all_losses = torch.stack(all_losses, dim=0) dist.reduce(all_losses, dst=0) if dist.get_rank() == 0: # only main process gets accumulated, so only divide by # world_size in this case all_losses /= world_size reduced_losses = {k: v for k, v in zip(loss_names, all_losses)} return reduced_losses class Prefetcher: def __init__(self, data_loader, device): self.data_loader = iter(data_loader) self.device = device self.images = None self.targets = None self.loader_stream = torch.cuda.Stream() self.done = False def __iter__(self): return self def prefetch(self): try: with torch.cuda.stream(self.loader_stream): self.images, self.targets, _ = next(self.data_loader) self.images = self.images.to(self.device) self.targets = [target.to(self.device, non_blocking=True) for target in self.targets] except StopIteration: self.images, self.targets = None, None self.done = True def __next__(self): torch.cuda.current_stream().wait_stream(self.loader_stream) if self.images is None and not self.done: self.prefetch() if self.done: raise StopIteration() else: images, targets = self.images, self.targets self.images, self.targets = None, None return images, targets def do_train( model, data_loader, optimizer, scheduler, checkpointer, device, checkpoint_period, arguments, use_amp, cfg, dllogger, per_iter_end_callback_fn=None, nhwc=False ): dllogger.log(step="PARAMETER", data={"train_start": True}) meters = MetricLogger(delimiter=" ") max_iter = len(data_loader) prefetcher = Prefetcher(data_loader, device) start_iter = arguments["iteration"] model.train() start_training_time = synchronized_timestamp() end = start_training_time if use_amp: scaler = torch.cuda.amp.GradScaler(init_scale=8192.0) for iteration, (images, targets) in enumerate(prefetcher, start_iter): data_time = time.time() - end iteration = iteration + 1 arguments["iteration"] = iteration images = images.to(device) if nhwc: images = images.to_nhwc() model = model.to(memory_format=torch.channels_last) targets = [target.to(device) for target in targets] if use_amp: with torch.cuda.amp.autocast(): loss_dict = model(images, targets) else: loss_dict = model(images, targets) losses = sum(loss for loss in loss_dict.values()) # reduce losses over all GPUs for logging purposes loss_dict_reduced = reduce_loss_dict(loss_dict) losses_reduced = sum(loss for loss in loss_dict_reduced.values()) meters.update(loss=losses_reduced, **loss_dict_reduced) # Note: If mixed precision is not used, this ends up doing nothing # Otherwise apply loss scaling for mixed-precision recipe if use_amp: scaler.scale(losses).backward() else: losses.backward() def _take_step(): if use_amp: scaler.step(optimizer) scaler.update() else: optimizer.step() scheduler.step() optimizer.zero_grad() if not cfg.SOLVER.ACCUMULATE_GRAD: 
_take_step() else: if (iteration + 1) % cfg.SOLVER.ACCUMULATE_STEPS == 0: for param in model.parameters(): if param.grad is not None: param.grad.data.div_(cfg.SOLVER.ACCUMULATE_STEPS) _take_step() batch_time = time.time() - end end = time.time() meters.update(time=batch_time, data=data_time) eta_seconds = meters.time.global_avg * (max_iter - iteration) eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) if iteration % 20 == 0 or iteration == max_iter: log_data = {"eta":eta_string, "learning_rate":optimizer.param_groups[0]["lr"], "memory": torch.cuda.max_memory_allocated() / 1024.0 / 1024.0 } log_data.update(meters.get_dict()) dllogger.log(step=(iteration,), data=log_data) if cfg.SAVE_CHECKPOINT: if iteration % checkpoint_period == 0: checkpointer.save("model_{:07d}".format(iteration), **arguments) if iteration == max_iter: checkpointer.save("model_final", **arguments) # per-epoch work (testing) if per_iter_end_callback_fn is not None: early_exit = per_iter_end_callback_fn(iteration=iteration) if early_exit: break total_training_time = synchronized_timestamp() - start_training_time total_time_str = str(datetime.timedelta(seconds=total_training_time)) dllogger.log(step=tuple(), data={"e2e_train_time": total_training_time, "train_perf_fps": max_iter * cfg.SOLVER.IMS_PER_BATCH / total_training_time}) logger = logging.getLogger("maskrcnn_benchmark.trainer") logger.info( "Total training time: {} ({:.4f} s / it)".format( total_time_str, total_training_time / (max_iter) ) )
DeepLearningExamples-master
PyTorch/Segmentation/MaskRCNN/pytorch/maskrcnn_benchmark/engine/trainer.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import time
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser

from data_preprocessing.preprocessor import Preprocessor
from utils.utils import get_task_code

parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--data", type=str, default="/data", help="Path to data directory")
parser.add_argument("--results", type=str, default="/data", help="Path for saving results directory")
parser.add_argument(
    "--exec_mode",
    type=str,
    default="training",
    choices=["training", "val", "test"],
    help="Mode for data preprocessing",
)
parser.add_argument("--ohe", action="store_true", help="Add one-hot-encoding for foreground voxels (voxels > 0)")
parser.add_argument("--verbose", action="store_true")
parser.add_argument("--task", type=str, help="Number of task to be run. MSD uses numbers 01-10")
parser.add_argument("--dim", type=int, default=3, choices=[2, 3], help="Data dimension to prepare")
parser.add_argument("--n_jobs", type=int, default=-1, help="Number of parallel jobs for data preprocessing")

if __name__ == "__main__":
    args = parser.parse_args()
    start = time.time()
    Preprocessor(args).run()
    task_code = get_task_code(args)
    path = os.path.join(args.data, task_code)
    if args.exec_mode == "test":
        path = os.path.join(path, "test")
    end = time.time()
    print(f"Pre-processing time: {(end - start):.2f}")
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/preprocess.py
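For reference, a typical invocation of the preprocessing script above (the task number and paths are placeholders; the flags are the ones defined by its argument parser):

    python preprocess.py --task 01 --dim 3 --data /data --results /data --exec_mode training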
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
from subprocess import call

from data_preprocessing.configs import task

parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--task", type=str, required=True, help="Task to download")
parser.add_argument("--results", type=str, default="/data", help="Directory for data storage")

if __name__ == "__main__":
    args = parser.parse_args()
    tar_file = task[args.task] + ".tar"
    file_path = os.path.join(args.results, tar_file)
    call(f"aws s3 cp s3://msd-for-monai-eu/{tar_file} --no-sign-request {args.results}", shell=True)
    call(f"tar -xf {file_path} -C {args.results}", shell=True)
    call(f"rm -rf {file_path}", shell=True)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/download.py
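A matching invocation of the download script (the task id is a placeholder; valid ids come from data_preprocessing.configs):

    python download.py --task 01 --results /data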
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import glob
import os
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser

import nibabel
import numpy as np
from tqdm import tqdm

parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument("--preds", type=str, required=True, help="Path to predictions")
parser.add_argument("--lbls", type=str, required=True, help="Path to labels")


def get_stats(pred, targ, class_idx):
    tp = np.logical_and(pred == class_idx, targ == class_idx).sum()
    fn = np.logical_and(pred != class_idx, targ == class_idx).sum()
    fp = np.logical_and(pred == class_idx, targ != class_idx).sum()
    return tp, fn, fp


if __name__ == "__main__":
    args = parser.parse_args()
    y_pred = sorted(glob.glob(os.path.join(args.preds, "*.npy")))
    y_true = [os.path.join(args.lbls, os.path.basename(pred).replace("npy", "nii.gz")) for pred in y_pred]
    assert len(y_pred) > 0
    n_class = np.load(y_pred[0]).shape[0] - 1
    dice = [[] for _ in range(n_class)]
    for pr, lb in tqdm(zip(y_pred, y_true), total=len(y_pred)):
        prd = np.transpose(np.argmax(np.load(pr), axis=0), (2, 1, 0))
        lbl = nibabel.load(lb).get_fdata().astype(np.uint8)
        for i in range(1, n_class + 1):
            counts = np.count_nonzero(lbl == i) + np.count_nonzero(prd == i)
            if counts == 0:  # no foreground class
                dice[i - 1].append(1)
            else:
                tp, fn, fp = get_stats(prd, lbl, i)
                denum = 2 * tp + fp + fn
                dice[i - 1].append(2 * tp / denum if denum != 0 else 0)
    dice_score = np.mean(np.array(dice), axis=-1)
    dice_cls = " ".join([f"L{i+1} {round(dice_score[i], 4)}" for i, dice in enumerate(dice_score)])
    print(f"mean dice: {round(np.mean(dice_score), 4)} - {dice_cls}")
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/evaluate.py
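A tiny worked example of the per-class Dice statistic computed above, using 2-D arrays for brevity (the script itself operates on 3-D volumes):

    import numpy as np

    pred = np.array([[1, 1, 0],
                     [0, 1, 0]])
    targ = np.array([[1, 0, 0],
                     [0, 1, 1]])
    tp = np.logical_and(pred == 1, targ == 1).sum()  # 2
    fn = np.logical_and(pred != 1, targ == 1).sum()  # 1
    fp = np.logical_and(pred == 1, targ != 1).sum()  # 1
    dice = 2 * tp / (2 * tp + fp + fn)               # 4 / 6, about 0.667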
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import torch from data_loading.data_module import DataModule from nnunet.nn_unet import NNUnet from pytorch_lightning import Trainer, seed_everything from pytorch_lightning.callbacks import ModelCheckpoint, ModelSummary, RichProgressBar from pytorch_lightning.plugins.io import AsyncCheckpointIO from pytorch_lightning.strategies import DDPStrategy from utils.args import get_main_args from utils.logger import LoggingCallback from utils.utils import make_empty_dir, set_cuda_devices, set_granularity, verify_ckpt_path torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True def get_trainer(args, callbacks): return Trainer( logger=False, default_root_dir=args.results, benchmark=True, deterministic=False, max_epochs=args.epochs, precision=16 if args.amp else 32, gradient_clip_val=args.gradient_clip_val, enable_checkpointing=args.save_ckpt, callbacks=callbacks, num_sanity_val_steps=0, accelerator="gpu", devices=args.gpus, num_nodes=args.nodes, plugins=[AsyncCheckpointIO()], strategy=DDPStrategy( find_unused_parameters=False, static_graph=True, gradient_as_bucket_view=True, ), limit_train_batches=1.0 if args.train_batches == 0 else args.train_batches, limit_val_batches=1.0 if args.test_batches == 0 else args.test_batches, limit_test_batches=1.0 if args.test_batches == 0 else args.test_batches, ) def main(): args = get_main_args() set_granularity() set_cuda_devices(args) if args.seed is not None: seed_everything(args.seed) data_module = DataModule(args) data_module.setup() ckpt_path = verify_ckpt_path(args) if ckpt_path is not None: model = NNUnet.load_from_checkpoint(ckpt_path, strict=False, args=args) else: model = NNUnet(args) callbacks = [RichProgressBar(), ModelSummary(max_depth=2)] if args.benchmark: batch_size = args.batch_size if args.exec_mode == "train" else args.val_batch_size filnename = args.logname if args.logname is not None else "perf.json" callbacks.append( LoggingCallback( log_dir=args.results, filnename=filnename, global_batch_size=batch_size * args.gpus * args.nodes, mode=args.exec_mode, warmup=args.warmup, dim=args.dim, ) ) elif args.exec_mode == "train": if args.save_ckpt: callbacks.append( ModelCheckpoint( dirpath=f"{args.ckpt_store_dir}/checkpoints", filename="{epoch}-{dice:.2f}", monitor="dice", mode="max", save_last=True, ) ) trainer = get_trainer(args, callbacks) if args.benchmark: if args.exec_mode == "train": trainer.fit(model, train_dataloaders=data_module.train_dataloader()) else: # warmup trainer.test(model, dataloaders=data_module.test_dataloader(), verbose=False) # benchmark run model.start_benchmark = 1 trainer.test(model, dataloaders=data_module.test_dataloader(), verbose=False) elif args.exec_mode == "train": trainer.fit(model, datamodule=data_module) elif args.exec_mode == "evaluate": trainer.validate(model, dataloaders=data_module.val_dataloader()) elif args.exec_mode == "predict": if args.save_preds: ckpt_name = 
"_".join(args.ckpt_path.split("/")[-1].split(".")[:-1]) dir_name = f"predictions_{ckpt_name}" dir_name += f"_task={model.args.task}_fold={model.args.fold}" if args.tta: dir_name += "_tta" save_dir = os.path.join(args.results, dir_name) model.save_dir = save_dir make_empty_dir(save_dir) model.args = args trainer.test(model, dataloaders=data_module.test_dataloader()) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/main.py
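The predict branch of main.py above derives the predictions directory name from the checkpoint file name, task, fold, and TTA flag. A minimal, self-contained sketch of that naming logic, using a hypothetical checkpoint path and argument values (not taken from the repository):

import os

# Hypothetical inputs; the real values come from the parsed nnU-Net arguments.
ckpt_path = "/results/checkpoints/epoch=99-dice=0.91.ckpt"
results, task, fold, tta = "/results", "01", 0, True

# Strip the directory and the ".ckpt" extension; dots inside the name become underscores.
ckpt_name = "_".join(ckpt_path.split("/")[-1].split(".")[:-1])
dir_name = f"predictions_{ckpt_name}"
dir_name += f"_task={task}_fold={fold}"
if tta:
    dir_name += "_tta"
save_dir = os.path.join(results, dir_name)
print(save_dir)  # /results/predictions_epoch=99-dice=0_91_task=01_fold=0_tta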
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import os import numpy as np from pytorch_lightning import LightningDataModule from sklearn.model_selection import KFold from utils.utils import get_config_file, get_task_code, print0 from data_loading.dali_loader import fetch_dali_loader class DataModule(LightningDataModule): def __init__(self, args): super().__init__() self.args = args self.data_path = get_data_path(args) self.kfold = get_kfold_splitter(args.nfolds) self.kwargs = { "dim": self.args.dim, "seed": self.args.seed, "gpus": self.args.gpus, "nvol": self.args.nvol, "layout": self.args.layout, "overlap": self.args.overlap, "benchmark": self.args.benchmark, "num_workers": self.args.num_workers, "oversampling": self.args.oversampling, "test_batches": self.args.test_batches, "train_batches": self.args.train_batches, "invert_resampled_y": self.args.invert_resampled_y, "patch_size": get_config_file(self.args)["patch_size"], } self.train_imgs, self.train_lbls, self.val_imgs, self.val_lbls, self.test_imgs = ([],) * 5 def setup(self, stage=None): meta = load_data(self.data_path, "*_meta.npy") orig_lbl = load_data(self.data_path, "*_orig_lbl.npy") imgs, lbls = load_data(self.data_path, "*_x.npy"), load_data(self.data_path, "*_y.npy") self.test_imgs, test_meta = get_test_fnames(self.args, self.data_path, meta) if self.args.exec_mode != "predict" or self.args.benchmark: train_idx, val_idx = list(self.kfold.split(imgs))[self.args.fold] orig_lbl, meta = get_split(orig_lbl, val_idx), get_split(meta, val_idx) self.kwargs.update({"orig_lbl": orig_lbl, "meta": meta}) self.train_imgs, self.train_lbls = get_split(imgs, train_idx), get_split(lbls, train_idx) self.val_imgs, self.val_lbls = get_split(imgs, val_idx), get_split(lbls, val_idx) if self.args.gpus > 1: rank = int(os.getenv("LOCAL_RANK", "0")) self.val_imgs = self.val_imgs[rank :: self.args.gpus] self.val_lbls = self.val_lbls[rank :: self.args.gpus] else: self.kwargs.update({"meta": test_meta}) print0(f"{len(self.train_imgs)} training, {len(self.val_imgs)} validation, {len(self.test_imgs)} test examples") def train_dataloader(self): return fetch_dali_loader(self.train_imgs, self.train_lbls, self.args.batch_size, "train", **self.kwargs) def val_dataloader(self): return fetch_dali_loader(self.val_imgs, self.val_lbls, 1, "eval", **self.kwargs) def test_dataloader(self): if self.kwargs["benchmark"]: return fetch_dali_loader(self.train_imgs, self.train_lbls, self.args.val_batch_size, "test", **self.kwargs) return fetch_dali_loader(self.test_imgs, None, 1, "test", **self.kwargs) def get_split(data, idx): return list(np.array(data)[idx]) def load_data(path, files_pattern, non_empty=True): data = sorted(glob.glob(os.path.join(path, files_pattern))) if non_empty: assert len(data) > 0, f"No data found in {path} with pattern {files_pattern}" return data def get_kfold_splitter(nfolds): return KFold(n_splits=nfolds, shuffle=True, random_state=12345) def get_test_fnames(args, data_path, 
meta=None): kfold = get_kfold_splitter(args.nfolds) test_imgs = load_data(data_path, "*_x.npy", non_empty=False) if args.exec_mode == "predict" and "val" in data_path: _, val_idx = list(kfold.split(test_imgs))[args.fold] test_imgs = sorted(get_split(test_imgs, val_idx)) if meta is not None: meta = sorted(get_split(meta, val_idx)) return test_imgs, meta def get_data_path(args): if args.data != "/data": return args.data data_path = os.path.join(args.data, get_task_code(args)) if args.exec_mode == "predict" and not args.benchmark: data_path = os.path.join(data_path, "test") return data_path
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/data_loading/data_module.py
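A small illustration of the fold selection used by DataModule above: the seeded KFold splitter always yields the same train/validation indices for a given file list, and the validation files are then sharded manually across ranks. The file names below are made up stand-ins for the preprocessed *_x.npy volumes.

import numpy as np
from sklearn.model_selection import KFold

def get_split(data, idx):
    return list(np.array(data)[idx])

# Made-up volume names standing in for the real preprocessed files.
imgs = [f"case_{i:02d}_x.npy" for i in range(10)]

kfold = KFold(n_splits=5, shuffle=True, random_state=12345)  # same seed as get_kfold_splitter
train_idx, val_idx = list(kfold.split(imgs))[0]  # fold 0

train_imgs = get_split(imgs, train_idx)
val_imgs = get_split(imgs, val_idx)
print(len(train_imgs), len(val_imgs))  # 8 2

# Manual sharding of the validation set across 2 GPUs, as done when args.gpus > 1.
gpus = 2
for rank in range(gpus):
    print(rank, val_imgs[rank::gpus])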
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import os import numpy as np import nvidia.dali.fn as fn import nvidia.dali.math as math import nvidia.dali.ops as ops import nvidia.dali.types as types from nvidia.dali.pipeline import Pipeline from nvidia.dali.plugin.pytorch import DALIGenericIterator def random_augmentation(probability, augmented, original): condition = fn.cast(fn.random.coin_flip(probability=probability), dtype=types.DALIDataType.BOOL) neg_condition = condition ^ True return condition * augmented + neg_condition * original class GenericPipeline(Pipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id) self.kwargs = kwargs self.dim = kwargs["dim"] self.device = device_id self.layout = kwargs["layout"] self.patch_size = kwargs["patch_size"] self.load_to_gpu = kwargs["load_to_gpu"] self.input_x = self.get_reader(kwargs["imgs"]) self.input_y = self.get_reader(kwargs["lbls"]) if kwargs["lbls"] is not None else None self.cdhw2dhwc = ops.Transpose(device="gpu", perm=[1, 2, 3, 0]) def get_reader(self, data): return ops.readers.Numpy( files=data, device="cpu", read_ahead=True, dont_use_mmap=True, pad_last_batch=True, shard_id=self.device, seed=self.kwargs["seed"], num_shards=self.kwargs["gpus"], shuffle_after_epoch=self.kwargs["shuffle"], ) def load_data(self): img = self.input_x(name="ReaderX") if self.load_to_gpu: img = img.gpu() img = fn.reshape(img, layout="CDHW") if self.input_y is not None: lbl = self.input_y(name="ReaderY") if self.load_to_gpu: lbl = lbl.gpu() lbl = fn.reshape(lbl, layout="CDHW") return img, lbl return img def make_dhwc_layout(self, img, lbl): img, lbl = self.cdhw2dhwc(img), self.cdhw2dhwc(lbl) return img, lbl def crop(self, data): return fn.crop(data, crop=self.patch_size, out_of_bounds_policy="pad") def crop_fn(self, img, lbl): img, lbl = self.crop(img), self.crop(lbl) return img, lbl def transpose_fn(self, img, lbl): img, lbl = fn.transpose(img, perm=(1, 0, 2, 3)), fn.transpose(lbl, perm=(1, 0, 2, 3)) return img, lbl class TrainPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) self.oversampling = kwargs["oversampling"] self.crop_shape = types.Constant(np.array(self.patch_size), dtype=types.INT64) self.crop_shape_float = types.Constant(np.array(self.patch_size), dtype=types.FLOAT) @staticmethod def slice_fn(img): return fn.slice(img, 1, 3, axes=[0]) def resize(self, data, interp_type): return fn.resize(data, interp_type=interp_type, size=self.crop_shape_float) def biased_crop_fn(self, img, label): roi_start, roi_end = fn.segmentation.random_object_bbox( label, device="cpu", background=0, format="start_end", cache_objects=True, foreground_prob=self.oversampling, ) anchor = fn.roi_random_crop(label, roi_start=roi_start, roi_end=roi_end, crop_shape=[1, *self.patch_size]) anchor = fn.slice(anchor, 1, 3, axes=[0]) # 
drop channels from anchor img, label = fn.slice( [img, label], anchor, self.crop_shape, axis_names="DHW", out_of_bounds_policy="pad", device="cpu" ) return img.gpu(), label.gpu() def zoom_fn(self, img, lbl): scale = random_augmentation(0.15, fn.random.uniform(range=(0.7, 1.0)), 1.0) d, h, w = [scale * x for x in self.patch_size] if self.dim == 2: d = self.patch_size[0] img, lbl = fn.crop(img, crop_h=h, crop_w=w, crop_d=d), fn.crop(lbl, crop_h=h, crop_w=w, crop_d=d) img, lbl = self.resize(img, types.DALIInterpType.INTERP_CUBIC), self.resize(lbl, types.DALIInterpType.INTERP_NN) return img, lbl def noise_fn(self, img): img_noised = img + fn.random.normal(img, stddev=fn.random.uniform(range=(0.0, 0.33))) return random_augmentation(0.15, img_noised, img) def blur_fn(self, img): img_blurred = fn.gaussian_blur(img, sigma=fn.random.uniform(range=(0.5, 1.5))) return random_augmentation(0.15, img_blurred, img) def brightness_fn(self, img): brightness_scale = random_augmentation(0.15, fn.random.uniform(range=(0.7, 1.3)), 1.0) return img * brightness_scale def contrast_fn(self, img): scale = random_augmentation(0.15, fn.random.uniform(range=(0.65, 1.5)), 1.0) return math.clamp(img * scale, fn.reductions.min(img), fn.reductions.max(img)) def flips_fn(self, img, lbl): kwargs = { "horizontal": fn.random.coin_flip(probability=0.5), "vertical": fn.random.coin_flip(probability=0.5), } if self.dim == 3: kwargs.update({"depthwise": fn.random.coin_flip(probability=0.5)}) return fn.flip(img, **kwargs), fn.flip(lbl, **kwargs) def define_graph(self): img, lbl = self.load_data() img, lbl = self.biased_crop_fn(img, lbl) img, lbl = self.zoom_fn(img, lbl) img, lbl = self.flips_fn(img, lbl) img = self.noise_fn(img) img = self.blur_fn(img) img = self.brightness_fn(img) img = self.contrast_fn(img) if self.dim == 2: img, lbl = self.transpose_fn(img, lbl) if self.layout == "NDHWC" and self.dim == 3: img, lbl = self.make_dhwc_layout(img, lbl) return img, lbl class EvalPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) self.invert_resampled_y = kwargs["invert_resampled_y"] if self.invert_resampled_y: self.input_meta = self.get_reader(kwargs["meta"]) self.input_orig_y = self.get_reader(kwargs["orig_lbl"]) def define_graph(self): img, lbl = self.load_data() if self.invert_resampled_y: meta = self.input_meta(name="ReaderM") orig_lbl = self.input_orig_y(name="ReaderO") return img, lbl, meta, orig_lbl if self.layout == "NDHWC" and self.dim == 3: img, lbl = self.make_dhwc_layout(img, lbl) return img, lbl class TritonPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) def define_graph(self): img, lbl = self.load_data() img, lbl = self.crop_fn(img, lbl) return img, lbl class TestPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) self.input_meta = self.get_reader(kwargs["meta"]) def define_graph(self): img = self.load_data() meta = self.input_meta(name="ReaderM") return img, meta class BenchmarkPipeline(GenericPipeline): def __init__(self, batch_size, num_threads, device_id, **kwargs): super().__init__(batch_size, num_threads, device_id, **kwargs) def define_graph(self): img, lbl = self.load_data() img, lbl = self.crop_fn(img, lbl) if self.dim == 2: img, lbl = self.transpose_fn(img, lbl) if self.layout == "NDHWC" and 
self.dim == 3: img, lbl = self.make_dhwc_layout(img, lbl) return img, lbl PIPELINES = { "train": TrainPipeline, "eval": EvalPipeline, "test": TestPipeline, "benchmark": BenchmarkPipeline, "triton": TritonPipeline, } class LightningWrapper(DALIGenericIterator): def __init__(self, pipe, **kwargs): super().__init__(pipe, **kwargs) def __next__(self): out = super().__next__()[0] return out def fetch_dali_loader(imgs, lbls, batch_size, mode, **kwargs): assert len(imgs) > 0, "Empty list of images!" if lbls is not None: assert len(imgs) == len(lbls), f"Number of images ({len(imgs)}) not matching number of labels ({len(lbls)})" if kwargs["benchmark"]: # Just to make sure the number of examples is large enough for benchmark run. batches = kwargs["test_batches"] if mode == "test" else kwargs["train_batches"] examples = batches * batch_size * kwargs["gpus"] imgs = list(itertools.chain(*(100 * [imgs])))[:examples] lbls = list(itertools.chain(*(100 * [lbls])))[:examples] mode = "benchmark" pipeline = PIPELINES[mode] shuffle = True if mode == "train" else False dynamic_shape = True if mode in ["eval", "test"] else False load_to_gpu = True if mode in ["eval", "test", "benchmark"] else False pipe_kwargs = {"imgs": imgs, "lbls": lbls, "load_to_gpu": load_to_gpu, "shuffle": shuffle, **kwargs} output_map = ["image", "meta"] if mode == "test" else ["image", "label"] if kwargs["dim"] == 2 and mode in ["train", "benchmark"]: batch_size_2d = batch_size // kwargs["nvol"] if mode == "train" else batch_size batch_size = kwargs["nvol"] if mode == "train" else 1 pipe_kwargs.update({"patch_size": [batch_size_2d] + kwargs["patch_size"]}) rank = int(os.getenv("LOCAL_RANK", "0")) if mode == "eval": # We sharded the data for evaluation manually. rank = 0 pipe_kwargs["gpus"] = 1 pipe = pipeline(batch_size, kwargs["num_workers"], rank, **pipe_kwargs) return LightningWrapper( pipe, auto_reset=True, reader_name="ReaderX", output_map=output_map, dynamic_shape=dynamic_shape, )
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/data_loading/dali_loader.py
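The random_augmentation helper above blends the augmented and original DALI tensors with a per-sample coin flip; the arithmetic reduces to condition * augmented + (1 - condition) * original. A NumPy analogue of that selection (not DALI code, just the same idea on arrays):

import numpy as np

rng = np.random.default_rng(0)

def random_augmentation(probability, augmented, original):
    # Coin flip: True keeps the augmented tensor, False keeps the original.
    condition = rng.random() < probability
    return condition * augmented + (not condition) * original

img = np.ones((2, 2), dtype=np.float32)
noised = img + rng.normal(scale=0.33, size=img.shape).astype(np.float32)

# With probability 0.15 the noised image is returned, otherwise the input is untouched.
out = random_augmentation(0.15, noised, img)
print(np.array_equal(out, img) or np.array_equal(out, noised))  # True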
from typing import Any, Dict, List, Optional

import numpy as np

from triton.deployment_toolkit.core import BaseMetricsCalculator


class MetricsCalculator(BaseMetricsCalculator):
    def calc(
        self,
        *,
        ids: List[Any],
        x: Optional[Dict[str, np.ndarray]],
        y_real: Optional[Dict[str, np.ndarray]],
        y_pred: Dict[str, np.ndarray],
    ) -> Dict[str, float]:
        y_pred = y_pred["OUTPUT__0"]
        y_true = y_real["OUTPUT__0"]
        n_examples = y_pred.shape[0]
        nclass = max(np.max(y_pred), np.max(y_true))
        dice = np.zeros((nclass,))
        for i in range(n_examples):
            for c in range(nclass):
                if not (y_true[i] == c + 1).any():  # no foreground voxels for class c + 1 in the reference
                    dice[c] += 1 if not (y_pred[i] == c + 1).any() else 0
                    continue
                true_pos, false_neg, false_pos = self.get_stats(y_pred[i], y_true[i], c + 1)
                denom = 2 * true_pos + false_neg + false_pos
                dice[c] += 2 * true_pos / denom if denom != 0 else 0.0
        dice /= n_examples
        dice = np.mean(dice)
        return {"dice": dice}

    @staticmethod
    def get_stats(pred, targ, class_idx):
        true_pos = np.logical_and(pred == class_idx, targ == class_idx).sum()
        false_neg = np.logical_and(pred != class_idx, targ == class_idx).sum()
        false_pos = np.logical_and(pred == class_idx, targ != class_idx).sum()
        return true_pos, false_neg, false_pos
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/metrics.py
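A worked example of the per-class Dice statistics computed by MetricsCalculator above, on a tiny 1-D "segmentation" with a single foreground class (toy arrays, not real network output):

import numpy as np

def get_stats(pred, targ, class_idx):
    true_pos = np.logical_and(pred == class_idx, targ == class_idx).sum()
    false_neg = np.logical_and(pred != class_idx, targ == class_idx).sum()
    false_pos = np.logical_and(pred == class_idx, targ != class_idx).sum()
    return true_pos, false_neg, false_pos

targ = np.array([0, 1, 1, 1, 0])  # reference labels, class 1 is foreground
pred = np.array([0, 1, 1, 0, 1])  # predicted labels

tp, fn, fp = get_stats(pred, targ, class_idx=1)
dice = 2 * tp / (2 * tp + fn + fp)  # 2*2 / (4 + 1 + 1)
print(tp, fn, fp, round(dice, 3))   # 2 1 1 0.667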
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" Using `calculate_metrics.py` script, you can obtain model accuracy/error metrics using defined `MetricsCalculator` class. Data provided to `MetricsCalculator` are obtained from npz dump files stored in directory pointed by `--dump-dir` argument. Above files are prepared by `run_inference_on_fw.py` and `run_inference_on_triton.py` scripts. Output data is stored in csv file pointed by `--csv` argument. Example call: ```shell script python ./triton/calculate_metrics.py \ --dump-dir /results/dump_triton \ --csv /results/accuracy_results.csv \ --metrics metrics.py \ --metric-class-param1 value ``` """ import argparse import csv import logging import string from pathlib import Path import numpy as np # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import BaseMetricsCalculator, load_from_file from .deployment_toolkit.dump import pad_except_batch_axis LOGGER = logging.getLogger("calculate_metrics") TOTAL_COLUMN_NAME = "_total_" def get_data(dump_dir, prefix): """Loads and concatenates dump files for given prefix (ex. 
inputs, outputs, labels, ids)""" dump_dir = Path(dump_dir) npz_files = sorted(dump_dir.glob(f"{prefix}*.npz")) data = None if npz_files: # assume that all npz files with given prefix contain same set of names names = list(np.load(npz_files[0].as_posix()).keys()) # calculate target shape target_shape = { name: tuple(np.max([np.load(npz_file.as_posix())[name].shape for npz_file in npz_files], axis=0)) for name in names } # pad and concatenate data data = { name: np.concatenate( [pad_except_batch_axis(np.load(npz_file.as_posix())[name], target_shape[name]) for npz_file in npz_files] ) for name in names } return data def main(): logging.basicConfig(level=logging.INFO) parser = argparse.ArgumentParser(description="Run models with given dataloader", allow_abbrev=False) parser.add_argument("--metrics", help=f"Path to python module containing metrics calculator", required=True) parser.add_argument("--csv", help="Path to csv file", required=True) parser.add_argument("--dump-dir", help="Path to directory with dumped outputs (and labels)", required=True) args, *_ = parser.parse_known_args() MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") ArgParserGenerator(MetricsCalculator).update_argparser(parser) args = parser.parse_args() LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") MetricsCalculator = load_from_file(args.metrics, "metrics", "MetricsCalculator") metrics_calculator: BaseMetricsCalculator = ArgParserGenerator(MetricsCalculator).from_args(args) ids = get_data(args.dump_dir, "ids")["ids"] x = get_data(args.dump_dir, "inputs") y_true = get_data(args.dump_dir, "labels") y_pred = get_data(args.dump_dir, "outputs") common_keys = list({k for k in (y_true or [])} & {k for k in (y_pred or [])}) for key in common_keys: if y_true[key].shape != y_pred[key].shape: LOGGER.warning( f"Model predictions and labels shall have equal shapes. " f"y_pred[{key}].shape={y_pred[key].shape} != " f"y_true[{key}].shape={y_true[key].shape}" ) metrics = metrics_calculator.calc(ids=ids, x=x, y_pred=y_pred, y_real=y_true) metrics = {TOTAL_COLUMN_NAME: len(ids), **metrics} metric_names_with_space = [name for name in metrics if any([c in string.whitespace for c in name])] if metric_names_with_space: raise ValueError(f"Metric names shall have no spaces; Incorrect names: {', '.join(metric_names_with_space)}") csv_path = Path(args.csv) csv_path.parent.mkdir(parents=True, exist_ok=True) with csv_path.open("w") as csv_file: writer = csv.DictWriter(csv_file, fieldnames=list(metrics.keys())) writer.writeheader() writer.writerow(metrics) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/calculate_metrics.py
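get_data above pads every dumped array to a common per-name target shape before concatenating along the batch axis. pad_except_batch_axis comes from the deployment toolkit and is not shown in this section, so the sketch below uses a simple NumPy stand-in with the same intent; it is an assumption, not the toolkit implementation.

import numpy as np

def pad_except_batch_axis(data, target_shape):
    # Zero-pad every axis except the first (batch) one up to target_shape.
    pad = [(0, 0)] + [(0, t - s) for s, t in zip(data.shape[1:], target_shape[1:])]
    return np.pad(data, pad)

# Two "dump files" whose batches have different spatial sizes.
chunks = [np.ones((2, 3)), np.ones((1, 5))]

# Target shape: maximum extent per axis across all chunks, as in get_data.
target_shape = tuple(np.max([c.shape for c in chunks], axis=0))
merged = np.concatenate([pad_except_batch_axis(c, target_shape) for c in chunks])
print(merged.shape)  # (3, 5)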
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from data_preprocessing.preprocessor import Preprocessor from utils.utils import get_task_code parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) parser.add_argument("--data", type=str, default="/data", help="Path to data directory") parser.add_argument("--results", type=str, default="/data", help="Path for saving results directory") parser.add_argument( "--exec_mode", type=str, default="training", choices=["training", "val", "test"], help="Mode for data preprocessing", ) parser.add_argument("--dilation", action="store_true", help="Perform morphological label dilation") parser.add_argument("--task", type=str, help="Number of task to be run. MSD uses numbers 01-10") parser.add_argument("--dim", type=int, default=3, choices=[2, 3], help="Data dimension to prepare") parser.add_argument("--n_jobs", type=int, default=-1, help="Number of parallel jobs for data preprocessing") if __name__ == "__main__": args = parser.parse_args() start = time.time() Preprocessor(args).run() task_code = get_task_code(args) path = os.path.join(args.data, task_code) if args.exec_mode == "test": path = os.path.join(path, "test") end = time.time() print(f"Preprocessing time: {(end - start):.2f}")
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/preprocess.py
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model deployed on Triton, you can use `run_inference_on_triton.py` script. It sends a request with data obtained from pointed data loader and dumps received data into npz files. Those files are stored in directory pointed by `--output-dir` argument. Currently, the client communicates with the Triton server asynchronously using GRPC protocol. Example call: ```shell script python ./triton/run_inference_on_triton.py \ --server-url localhost:8001 \ --model-name ResNet50 \ --model-version 1 \ --dump-labels \ --output-dir /results/dump_triton ``` """ import argparse import functools import logging import queue import threading import time from pathlib import Path from typing import Optional from tqdm import tqdm # pytype: disable=import-error try: from tritonclient import utils as client_utils # noqa: F401 from tritonclient.grpc import ( InferenceServerClient, InferInput, InferRequestedOutput, ) except ImportError: import tritongrpcclient as grpc_client from tritongrpcclient import ( InferenceServerClient, InferInput, InferRequestedOutput, ) # pytype: enable=import-error # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import DATALOADER_FN_NAME, load_from_file from .deployment_toolkit.dump import NpzWriter LOGGER = logging.getLogger("run_inference_on_triton") class AsyncGRPCTritonRunner: DEFAULT_MAX_RESP_WAIT_S = 120 DEFAULT_MAX_UNRESP_REQS = 128 DEFAULT_MAX_FINISH_WAIT_S = 900 # 15min def __init__( self, server_url: str, model_name: str, model_version: str, *, dataloader, verbose=False, resp_wait_s: Optional[float] = None, max_unresponded_reqs: Optional[int] = None, ): self._server_url = server_url self._model_name = model_name self._model_version = model_version self._dataloader = dataloader self._verbose = verbose self._response_wait_t = self.DEFAULT_MAX_RESP_WAIT_S if resp_wait_s is None else resp_wait_s self._max_unresp_reqs = self.DEFAULT_MAX_UNRESP_REQS if max_unresponded_reqs is None else max_unresponded_reqs self._results = queue.Queue() self._processed_all = False self._errors = [] self._num_waiting_for = 0 self._sync = threading.Condition() self._req_thread = threading.Thread(target=self.req_loop, daemon=True) def __iter__(self): self._req_thread.start() timeout_s = 0.050 # check flags processed_all and error flags every 50ms while True: try: ids, x, y_pred, y_real = self._results.get(timeout=timeout_s) yield ids, x, y_pred, y_real except queue.Empty: shall_stop = self._processed_all or self._errors if shall_stop: break LOGGER.debug("Waiting for request thread to stop") self._req_thread.join() if self._errors: error_msg = "\n".join(map(str, self._errors)) raise RuntimeError(error_msg) def _on_result(self, ids, x, y_real, output_names, result, error): with self._sync: if error: 
self._errors.append(error) else: y_pred = {name: result.as_numpy(name) for name in output_names} self._results.put((ids, x, y_pred, y_real)) self._num_waiting_for -= 1 self._sync.notify_all() def req_loop(self): client = InferenceServerClient(self._server_url, verbose=self._verbose) self._errors = self._verify_triton_state(client) if self._errors: return LOGGER.debug( f"Triton server {self._server_url} and model {self._model_name}:{self._model_version} " f"are up and ready!" ) model_config = client.get_model_config(self._model_name, self._model_version) model_metadata = client.get_model_metadata(self._model_name, self._model_version) LOGGER.info(f"Model config {model_config}") LOGGER.info(f"Model metadata {model_metadata}") inputs = {tm.name: tm for tm in model_metadata.inputs} outputs = {tm.name: tm for tm in model_metadata.outputs} output_names = list(outputs) outputs_req = [InferRequestedOutput(name) for name in outputs] self._num_waiting_for = 0 for ids, x, y_real in self._dataloader: infer_inputs = [] for name in inputs: data = x[name] infer_input = InferInput(name, data.shape, inputs[name].datatype) target_np_dtype = client_utils.triton_to_np_dtype(inputs[name].datatype) data = data.astype(target_np_dtype) infer_input.set_data_from_numpy(data) infer_inputs.append(infer_input) with self._sync: def _check_can_send(): return self._num_waiting_for < self._max_unresp_reqs can_send = self._sync.wait_for(_check_can_send, timeout=self._response_wait_t) if not can_send: error_msg = f"Runner could not send new requests for {self._response_wait_t}s" self._errors.append(error_msg) break callback = functools.partial(AsyncGRPCTritonRunner._on_result, self, ids, x, y_real, output_names) client.async_infer( model_name=self._model_name, model_version=self._model_version, inputs=infer_inputs, outputs=outputs_req, callback=callback, ) self._num_waiting_for += 1 # wait till receive all requested data with self._sync: def _all_processed(): LOGGER.debug(f"wait for {self._num_waiting_for} unprocessed jobs") return self._num_waiting_for == 0 self._processed_all = self._sync.wait_for(_all_processed, self.DEFAULT_MAX_FINISH_WAIT_S) if not self._processed_all: error_msg = f"Runner {self._response_wait_t}s timeout received while waiting for results from server" self._errors.append(error_msg) LOGGER.debug("Finished request thread") def _verify_triton_state(self, triton_client): errors = [] if not triton_client.is_server_live(): errors.append(f"Triton server {self._server_url} is not live") elif not triton_client.is_server_ready(): errors.append(f"Triton server {self._server_url} is not ready") elif not triton_client.is_model_ready(self._model_name, self._model_version): errors.append(f"Model {self._model_name}:{self._model_version} is not ready") return errors def _parse_args(): parser = argparse.ArgumentParser(description="Infer model on Triton server", allow_abbrev=False) parser.add_argument( "--server-url", type=str, default="localhost:8001", help="Inference server URL (default localhost:8001)" ) parser.add_argument("--model-name", help="The name of the model used for inference.", required=True) parser.add_argument("--model-version", help="The version of the model used for inference.", required=True) parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True) parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False) parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", 
default=False) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument("--output-dir", required=True, help="Path to directory where outputs will be saved") parser.add_argument("--response-wait-time", required=False, help="Maximal time to wait for response", default=120) parser.add_argument( "--max-unresponded-requests", required=False, help="Maximal number of unresponded requests", default=128 ) args, *_ = parser.parse_known_args() get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) args = parser.parse_args() return args def main(): args = _parse_args() log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" log_level = logging.INFO if not args.verbose else logging.DEBUG logging.basicConfig(level=log_level, format=log_format) LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) runner = AsyncGRPCTritonRunner( args.server_url, args.model_name, args.model_version, dataloader=dataloader_fn(), verbose=False, resp_wait_s=args.response_wait_time, max_unresponded_reqs=args.max_unresponded_requests, ) with NpzWriter(output_dir=args.output_dir) as writer: start = time.time() for ids, x, y_pred, y_real in tqdm(runner, unit="batch", mininterval=10): data = _verify_and_format_dump(args, ids, x, y_pred, y_real) writer.write(**data) stop = time.time() LOGGER.info(f"\nThe inference took {stop - start:0.3f}s") def _verify_and_format_dump(args, ids, x, y_pred, y_real): data = {"outputs": y_pred, "ids": {"ids": ids}} if args.dump_inputs: data["inputs"] = x if args.dump_labels: if not y_real: raise ValueError( "Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument" ) data["labels"] = y_real return data if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/run_inference_on_triton.py
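AsyncGRPCTritonRunner above caps the number of unanswered requests with a threading.Condition and a counter. A stripped-down sketch of that back-pressure pattern without any Triton dependency; the "server" is simulated with timer threads, and all names here are illustrative only.

import queue
import threading

MAX_UNRESPONDED = 4
sync = threading.Condition()
num_waiting_for = 0
results = queue.Queue()

def _on_result(req_id):
    global num_waiting_for
    with sync:
        results.put(req_id)
        num_waiting_for -= 1
        sync.notify_all()

def fake_async_infer(req_id):
    # Simulates the server answering after a short delay on another thread.
    threading.Timer(0.01, _on_result, args=(req_id,)).start()

for req_id in range(16):
    with sync:
        # Block until fewer than MAX_UNRESPONDED requests are still in flight.
        sync.wait_for(lambda: num_waiting_for < MAX_UNRESPONDED, timeout=5)
        num_waiting_for += 1
    fake_async_infer(req_id)

with sync:
    sync.wait_for(lambda: num_waiting_for == 0, timeout=5)
print(results.qsize())  # 16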
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ], where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ] `--shape IMAGE:3,224,224`. """ import argparse import csv import os import sys from pathlib import Path from typing import Dict, List, Optional # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.report import save_results, show_results, sort_results from .deployment_toolkit.warmup import warmup def calculate_average_latency(r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields]) return avg_latency def update_performance_data(results: List, batch_size: int, performance_partial_file: str): row: Dict = {"batch_size": batch_size} with open(performance_partial_file, "r") as csvfile: reader = csv.DictReader(csvfile) for r in reader: avg_latency = calculate_average_latency(r) row = {**row, **r, "avg latency": avg_latency} results.append(row) def _parse_batch_sizes(batch_sizes: str): batches = batch_sizes.split(sep=",") return list(map(lambda x: int(x.strip()), batches)) def offline_performance( model_name: str, batch_sizes: List[int], result_path: str, input_shapes: Optional[List[str]] = None, profiling_data: str = "random", triton_instances: int = 1, server_url: str = "localhost", measurement_window: int = 10000, shared_memory: bool = False ): print("\n") print(f"==== Static batching analysis start ====") print("\n") input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else "" results: List[Dict] = list() for batch_size in batch_sizes: print(f"Running performance tests for batch size: {batch_size}") performance_partial_file = f"triton_performance_partial_{batch_size}.csv" exec_args = f"""-max-threads {triton_instances} \ -m {model_name} \ -x 1 \ -c {triton_instances} \ -t {triton_instances} \ -p {measurement_window} \ -v \ -i http \ -u {server_url}:8000 \ -b {batch_size} \ -f {performance_partial_file} \ --input-data {profiling_data} {input_shapes}""" if shared_memory: exec_args += " --shared-memory=cuda" result = os.system(f"perf_client {exec_args}") if result != 0: print(f"Failed running performance tests. 
Perf client failed with exit code {result}") sys.exit(1) update_performance_data(results, batch_size, performance_partial_file) os.remove(performance_partial_file) results = sort_results(results=results) save_results(filename=result_path, data=results) show_results(results=results) print("Performance results for static batching stored in: {0}".format(result_path)) print("\n") print(f"==== Analysis done ====") print("\n") def main(): parser = argparse.ArgumentParser() parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test") parser.add_argument( "--input-data", type=str, required=False, default="random", help="Input data to perform profiling." ) parser.add_argument( "--input-shape", action="append", required=False, help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.", ) parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.") parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.") parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances") parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server") parser.add_argument( "--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000 ) parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true", default=False) args = parser.parse_args() warmup( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, profiling_data=args.input_data, input_shapes=args.input_shape, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) offline_performance( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, profiling_data=args.input_data, input_shapes=args.input_shape, result_path=args.result_path, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/run_offline_performance_test_on_triton.py
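calculate_average_latency above sums the per-stage latencies that perf_client reports in each CSV row; a quick check with a hand-written row (values in microseconds, made up):

avg_sum_fields = [
    "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute",
    "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv",
]

def calculate_average_latency(r):
    return sum(int(r.get(f, 0)) for f in avg_sum_fields)

row = {"Client Send": "120", "Server Queue": "80", "Server Compute Infer": "3500", "Client Recv": "95"}
print(calculate_average_latency(row))  # 3795; missing stages count as 0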
from nnunet.nn_unet import NNUnet


def get_model(*, checkpoint_dir: str, precision: str, data_dir: str):
    model = NNUnet.load_from_checkpoint(checkpoint_dir, data_dir=data_dir, triton=True, strict=False)
    model = model.cuda()
    if "fp16" in precision:
        model = model.half()
    model.eval()
    tensor_names = {"inputs": ["INPUT__0"], "outputs": ["OUTPUT__0"]}
    return model, tensor_names
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/model.py
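A minimal usage sketch for get_model above. The import path, checkpoint path, and data directory are placeholders, and a CUDA device plus the nnU-Net code base on PYTHONPATH are assumed.

from triton.model import get_model  # assumes the nnU-Net repository root is on PYTHONPATH

# Placeholder paths; a trained checkpoint and preprocessed data are assumed to exist.
model, tensor_names = get_model(
    checkpoint_dir="/results/checkpoints/last.ckpt",
    precision="fp16",
    data_dir="/data/01_3d",
)
print(tensor_names)  # {'inputs': ['INPUT__0'], 'outputs': ['OUTPUT__0']}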
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To infer the model on framework runtime, you can use `run_inference_on_fw.py` script. It infers data obtained from pointed data loader locally and saves received data into npz files. Those files are stored in directory pointed by `--output-dir` argument. Example call: ```shell script python ./triton/run_inference_on_fw.py \ --input-path /models/exported/model.onnx \ --input-type onnx \ --dataloader triton/dataloader.py \ --data-dir /data/imagenet \ --batch-size 32 \ --output-dir /results/dump_local \ --dump-labels ``` """ import argparse import logging import os from pathlib import Path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "0" from tqdm import tqdm # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import DATALOADER_FN_NAME, BaseLoader, BaseRunner, Format, load_from_file from .deployment_toolkit.dump import NpzWriter from .deployment_toolkit.extensions import loaders, runners LOGGER = logging.getLogger("run_inference_on_fw") def _verify_and_format_dump(args, ids, x, y_pred, y_real): data = {"outputs": y_pred, "ids": {"ids": ids}} if args.dump_inputs: data["inputs"] = x if args.dump_labels: if not y_real: raise ValueError( "Found empty label values. 
Please provide labels in dataloader_fn or do not use --dump-labels argument" ) data["labels"] = y_real return data def _parse_and_validate_args(): supported_inputs = set(runners.supported_extensions) & set(loaders.supported_extensions) parser = argparse.ArgumentParser(description="Dump local inference output of given model", allow_abbrev=False) parser.add_argument("--input-path", help="Path to input model", required=True) parser.add_argument("--input-type", help="Input model type", choices=supported_inputs, required=True) parser.add_argument("--dataloader", help="Path to python file containing dataloader.", required=True) parser.add_argument("--output-dir", help="Path to dir where output files will be stored", required=True) parser.add_argument("--dump-labels", help="Dump labels to output dir", action="store_true", default=False) parser.add_argument("--dump-inputs", help="Dump inputs to output dir", action="store_true", default=False) parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) args, *_ = parser.parse_known_args() get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, module_path=args.input_path).update_argparser(parser) Runner: BaseRunner = runners.get(args.input_type) ArgParserGenerator(Runner).update_argparser(parser) args = parser.parse_args() types_requiring_io_params = [] if args.input_type in types_requiring_io_params and not all(p for p in [args.inputs, args.outputs]): parser.error(f"For {args.input_type} input provide --inputs and --outputs parameters") return args def main(): args = _parse_and_validate_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") Loader: BaseLoader = loaders.get(args.input_type) Runner: BaseRunner = runners.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) runner = ArgParserGenerator(Runner).from_args(args) LOGGER.info(f"Loading {args.input_path}") model = loader.load(args.input_path) with runner.init_inference(model=model) as runner_session, NpzWriter(args.output_dir) as writer: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) LOGGER.info(f"Data loader initialized; Running inference") for ids, x, y_real in tqdm(dataloader_fn(), unit="batch", mininterval=10): y_pred = runner_session(x) data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real) writer.write(**data) LOGGER.info(f"Inference finished") if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/run_inference_on_fw.py
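The dump layout written by NpzWriter in the script above is a nested dict keyed by "outputs", "ids", and optionally "inputs"/"labels". A small check of _verify_and_format_dump with dummy batches; argparse.Namespace stands in for the parsed CLI arguments.

import argparse

import numpy as np

def _verify_and_format_dump(args, ids, x, y_pred, y_real):
    data = {"outputs": y_pred, "ids": {"ids": ids}}
    if args.dump_inputs:
        data["inputs"] = x
    if args.dump_labels:
        if not y_real:
            raise ValueError(
                "Found empty label values. Please provide labels in dataloader_fn or do not use --dump-labels argument"
            )
        data["labels"] = y_real
    return data

args = argparse.Namespace(dump_inputs=False, dump_labels=True)
ids = ["0_0", "0_1"]
x = {"INPUT__0": np.zeros((2, 1, 8, 8, 8), dtype=np.float32)}
y_pred = {"OUTPUT__0": np.zeros((2, 8, 8, 8), dtype=np.int64)}
y_real = {"OUTPUT__0": np.zeros((2, 8, 8, 8), dtype=np.int64)}

data = _verify_and_format_dump(args, ids=ids, x=x, y_pred=y_pred, y_real=y_real)
print(sorted(data.keys()))  # ['ids', 'labels', 'outputs']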
import numpy as np from data_loading.dali_loader import fetch_dali_loader from sklearn.model_selection import KFold from utils.utils import get_split, load_data def get_dataloader_fn(*, data_dir: str, batch_size: int, precision: str): kwargs = { "dim": 3, "gpus": 1, "seed": 0, "num_workers": 8, "meta": None, "oversampling": 0, "benchmark": False, "patch_size": [128, 128, 128], } imgs, lbls = load_data(data_dir, "*_x.npy"), load_data(data_dir, "*_y.npy") kfold = KFold(n_splits=5, shuffle=True, random_state=12345) _, val_idx = list(kfold.split(imgs))[2] imgs, lbls = get_split(imgs, val_idx), get_split(lbls, val_idx) dataloader = fetch_dali_loader(imgs, lbls, batch_size, "bermuda", **kwargs) def _dataloader_fn(): for i, batch in enumerate(dataloader): fname = [f"{i}_{j}" for j in range(batch_size)] img = batch["image"].numpy() if "fp16" in precision: img = img.astype(np.half) img = {"INPUT__0": img} lbl = {"OUTPUT__0": batch["label"].squeeze(1).numpy().astype(int)} yield fname, img, lbl return _dataloader_fn
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/dataloader.py
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" To configure model on Triton, you can use `config_model_on_triton.py` script. This will prepare layout of Model Repository, including Model Configuration. ```shell script python ./triton/config_model_on_triton.py \ --model-repository /model_repository \ --model-path /models/exported/model.onnx \ --model-format onnx \ --model-name ResNet50 \ --model-version 1 \ --max-batch-size 32 \ --precision fp16 \ --backend-accelerator trt \ --load-model explicit \ --timeout 120 \ --verbose ``` If Triton server to which we prepare model repository is running with **explicit model control mode**, use `--load-model` argument to send request load_model request to Triton Inference Server. If server is listening on non-default address or port use `--server-url` argument to point server control endpoint. If it is required to use HTTP protocol to communicate with Triton server use `--http` argument. To improve inference throughput you can use [dynamic batching](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#dynamic-batcher) for your model by providing `--preferred-batch-sizes` and `--max-queue-delay-us` parameters. For models which doesn't support batching, set `--max-batch-sizes` to 0. By default Triton will [automatically obtain inputs and outputs definitions](https://github.com/triton-inference-server/server/blob/master/docs/model_configuration.md#auto-generated-model-configuration). but for TorchScript ang TF GraphDef models script uses file with I/O specs. This file is automatically generated when the model is converted to ScriptModule (either traced or scripted). If there is a need to pass different than default path to I/O spec file use `--io-spec` CLI argument. 
I/O spec file is yaml file with below structure: ```yaml - inputs: - name: input dtype: float32 # np.dtype name shape: [None, 224, 224, 3] - outputs: - name: probabilities dtype: float32 shape: [None, 1001] - name: classes dtype: int32 shape: [None, 1] ``` """ import argparse import logging import time from model_navigator import Accelerator, Format, Precision from model_navigator.args import str2bool from model_navigator.log import set_logger, log_dict from model_navigator.triton import ModelConfig, TritonClient, TritonModelStore LOGGER = logging.getLogger("config_model") def _available_enum_values(my_enum): return [item.value for item in my_enum] def main(): parser = argparse.ArgumentParser( description="Create Triton model repository and model configuration", allow_abbrev=False ) parser.add_argument("--model-repository", required=True, help="Path to Triton model repository.") parser.add_argument("--model-path", required=True, help="Path to model to configure") # TODO: automation parser.add_argument( "--model-format", required=True, choices=_available_enum_values(Format), help="Format of model to deploy", ) parser.add_argument("--model-name", required=True, help="Model name") parser.add_argument("--model-version", default="1", help="Version of model (default 1)") parser.add_argument( "--max-batch-size", type=int, default=32, help="Maximum batch size allowed for inference. " "A max_batch_size value of 0 indicates that batching is not allowed for the model", ) # TODO: automation parser.add_argument( "--precision", type=str, default=Precision.FP16.value, choices=_available_enum_values(Precision), help="Model precision (parameter used only by Tensorflow backend with TensorRT optimization)", ) # Triton Inference Server endpoint parser.add_argument( "--server-url", type=str, default="grpc://localhost:8001", help="Inference server URL in format protocol://host[:port] (default grpc://localhost:8001)", ) parser.add_argument( "--load-model", choices=["none", "poll", "explicit"], help="Loading model while Triton Server is in given model control mode", ) parser.add_argument( "--timeout", default=120, help="Timeout in seconds to wait till model load (default=120)", type=int ) # optimization related parser.add_argument( "--backend-accelerator", type=str, choices=_available_enum_values(Accelerator), default=Accelerator.TRT.value, help="Select Backend Accelerator used to serve model", ) parser.add_argument("--number-of-model-instances", type=int, default=1, help="Number of model instances per GPU") parser.add_argument( "--preferred-batch-sizes", type=int, nargs="*", help="Batch sizes that the dynamic batcher should attempt to create. 
" "In case --max-queue-delay-us is set and this parameter is not, default value will be --max-batch-size", ) parser.add_argument( "--max-queue-delay-us", type=int, default=0, help="Max delay time which dynamic batcher shall wait to form a batch (default 0)", ) parser.add_argument( "--capture-cuda-graph", type=int, default=0, help="Use cuda capture graph (used only by TensorRT platform)", ) parser.add_argument("-v", "--verbose", help="Provide verbose logs", type=str2bool, default=False) args = parser.parse_args() set_logger(verbose=args.verbose) log_dict("args", vars(args)) config = ModelConfig.create( model_path=args.model_path, # model definition model_name=args.model_name, model_version=args.model_version, model_format=args.model_format, precision=args.precision, max_batch_size=args.max_batch_size, # optimization accelerator=args.backend_accelerator, gpu_engine_count=args.number_of_model_instances, preferred_batch_sizes=args.preferred_batch_sizes or [], max_queue_delay_us=args.max_queue_delay_us, capture_cuda_graph=args.capture_cuda_graph, ) model_store = TritonModelStore(args.model_repository) model_store.deploy_model(model_config=config, model_path=args.model_path) if args.load_model != "none": client = TritonClient(server_url=args.server_url, verbose=args.verbose) client.wait_for_server_ready(timeout=args.timeout) if args.load_model == "explicit": client.load_model(model_name=args.model_name) if args.load_model == "poll": time.sleep(15) client.wait_for_model(model_name=args.model_name, model_version=args.model_version, timeout_s=args.timeout) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/config_model_on_triton.py
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" For models with variable-sized inputs you must provide the --input-shape argument so that perf_analyzer knows what shape tensors to use. For example, for a model that has an input called IMAGE that has shape [ 3, N, M ], where N and M are variable-size dimensions, to tell perf_analyzer to send batch-size 4 requests of shape [ 3, 224, 224 ] `--shape IMAGE:3,224,224`. """ import argparse import csv import os import sys from pathlib import Path from typing import List, Optional # method from PEP-366 to support relative import in executed modules if __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.report import save_results, show_results, sort_results from .deployment_toolkit.warmup import warmup def calculate_average_latency(r): avg_sum_fields = [ "Client Send", "Network+Server Send/Recv", "Server Queue", "Server Compute", "Server Compute Input", "Server Compute Infer", "Server Compute Output", "Client Recv", ] avg_latency = sum([int(r.get(f, 0)) for f in avg_sum_fields]) return avg_latency def update_performance_data(results: List, performance_file: str): with open(performance_file, "r") as csvfile: reader = csv.DictReader(csvfile) for row in reader: row["avg latency"] = calculate_average_latency(row) results.append(row) def _parse_batch_sizes(batch_sizes: str): batches = batch_sizes.split(sep=",") return list(map(lambda x: int(x.strip()), batches)) def online_performance( model_name: str, batch_sizes: List[int], result_path: str, input_shapes: Optional[List[str]] = None, profiling_data: str = "random", triton_instances: int = 1, triton_gpu_engine_count: int = 1, server_url: str = "localhost", measurement_window: int = 10000, shared_memory: bool = False ): print("\n") print(f"==== Dynamic batching analysis start ====") print("\n") input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else "" print(f"Running performance tests for dynamic batching") performance_file = f"triton_performance_dynamic_partial.csv" max_batch_size = max(batch_sizes) max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count max_concurrency = min(256, max_total_requests) batch_size = max(1, max_total_requests // 256) step = max(1, max_concurrency // 32) min_concurrency = step exec_args = f"""-m {model_name} \ -x 1 \ -p {measurement_window} \ -v \ -i http \ -u {server_url}:8000 \ -b {batch_size} \ -f {performance_file} \ --concurrency-range {min_concurrency}:{max_concurrency}:{step} \ --input-data {profiling_data} {input_shapes}""" if shared_memory: exec_args += " --shared-memory=cuda" result = os.system(f"perf_client {exec_args}") if result != 0: print(f"Failed running performance tests. 
Perf client failed with exit code {result}") sys.exit(1) results = list() update_performance_data(results=results, performance_file=performance_file) results = sort_results(results=results) save_results(filename=result_path, data=results) show_results(results=results) os.remove(performance_file) print("Performance results for dynamic batching stored in: {0}".format(result_path)) print("\n") print(f"==== Analysis done ====") print("\n") def main(): parser = argparse.ArgumentParser() parser.add_argument("--model-name", type=str, required=True, help="Name of the model to test") parser.add_argument( "--input-data", type=str, required=False, default="random", help="Input data to perform profiling." ) parser.add_argument( "--input-shape", action="append", required=False, help="Input data shape in form INPUT_NAME:<full_shape_without_batch_axis>.", ) parser.add_argument("--batch-sizes", type=str, required=True, help="List of batch sizes to tests. Comma separated.") parser.add_argument("--triton-instances", type=int, default=1, help="Number of Triton Server instances") parser.add_argument( "--number-of-model-instances", type=int, default=1, help="Number of models instances on Triton Server" ) parser.add_argument("--result-path", type=str, required=True, help="Path where result file is going to be stored.") parser.add_argument("--server-url", type=str, required=False, default="localhost", help="Url to Triton server") parser.add_argument( "--measurement-window", required=False, help="Time which perf_analyzer will wait for results", default=10000 ) parser.add_argument("--shared-memory", help="Use shared memory for communication with Triton", action="store_true", default=False) args = parser.parse_args() warmup( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, triton_gpu_engine_count=args.number_of_model_instances, profiling_data=args.input_data, input_shapes=args.input_shape, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) online_performance( server_url=args.server_url, model_name=args.model_name, batch_sizes=_parse_batch_sizes(args.batch_sizes), triton_instances=args.triton_instances, triton_gpu_engine_count=args.number_of_model_instances, profiling_data=args.input_data, input_shapes=args.input_shape, result_path=args.result_path, measurement_window=args.measurement_window, shared_memory=args.shared_memory ) if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/run_online_performance_test_on_triton.py
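online_performance above derives the perf_client concurrency sweep from the largest batch size and the number of Triton instances and model engines. The same arithmetic, evaluated for one hypothetical configuration:

batch_sizes = [1, 2, 4]
triton_instances = 1
triton_gpu_engine_count = 2

max_batch_size = max(batch_sizes)                                                      # 4
max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count  # 16
max_concurrency = min(256, max_total_requests)                                         # 16
batch_size = max(1, max_total_requests // 256)                                          # 1
step = max(1, max_concurrency // 32)                                                    # 1
min_concurrency = step                                                                  # 1

print(f"--concurrency-range {min_concurrency}:{max_concurrency}:{step} -b {batch_size}")
# --concurrency-range 1:16:1 -b 1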
#!/usr/bin/env python3 # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r""" `convert_model.py` script allows to convert between model formats with additional model optimizations for faster inference. It converts model from results of get_model function. Currently supported input and output formats are: - inputs - `tf-estimator` - `get_model` function returning Tensorflow Estimator - `tf-keras` - `get_model` function returning Tensorflow Keras Model - `tf-savedmodel` - Tensorflow SavedModel binary - `pyt` - `get_model` function returning PyTorch Module - output - `tf-savedmodel` - Tensorflow saved model - `tf-trt` - TF-TRT saved model - `ts-trace` - PyTorch traced ScriptModule - `ts-script` - PyTorch scripted ScriptModule - `onnx` - ONNX - `trt` - TensorRT plan file For tf-keras input you can use: - --large-model flag - helps loading model which exceeds maximum protobuf size of 2GB - --tf-allow-growth flag - control limiting GPU memory growth feature (https://www.tensorflow.org/guide/gpu#limiting_gpu_memory_growth). By default it is disabled. """ import argparse import logging import os from pathlib import Path os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" os.environ["TF_ENABLE_DEPRECATION_WARNINGS"] = "1" # method from PEP-366 to support relative import in executed modules if __name__ == "__main__" and __package__ is None: __package__ = Path(__file__).parent.name from .deployment_toolkit.args import ArgParserGenerator from .deployment_toolkit.core import ( DATALOADER_FN_NAME, BaseConverter, BaseLoader, BaseSaver, Format, Precision, load_from_file, ) from .deployment_toolkit.extensions import converters, loaders, savers LOGGER = logging.getLogger("convert_model") INPUT_MODEL_TYPES = [Format.TF_ESTIMATOR, Format.TF_KERAS, Format.TF_SAVEDMODEL, Format.PYT] OUTPUT_MODEL_TYPES = [Format.TF_SAVEDMODEL, Format.TF_TRT, Format.ONNX, Format.TRT, Format.TS_TRACE, Format.TS_SCRIPT] def _get_args(): parser = argparse.ArgumentParser(description="Script for conversion between model formats.", allow_abbrev=False) parser.add_argument("--input-path", help="Path to input model file (python module or binary file)", required=True) parser.add_argument( "--input-type", help="Input model type", choices=[f.value for f in INPUT_MODEL_TYPES], required=True ) parser.add_argument("--output-path", help="Path to output model file", required=True) parser.add_argument( "--output-type", help="Output model type", choices=[f.value for f in OUTPUT_MODEL_TYPES], required=True ) parser.add_argument("--dataloader", help="Path to python module containing data loader") parser.add_argument("-v", "--verbose", help="Verbose logs", action="store_true", default=False) parser.add_argument( "--ignore-unknown-parameters", help="Ignore unknown parameters (argument often used in CI where set of arguments is constant)", action="store_true", default=False, ) args, unparsed_args = parser.parse_known_args() Loader: BaseLoader = loaders.get(args.input_type) ArgParserGenerator(Loader, 
module_path=args.input_path).update_argparser(parser) converter_name = f"{args.input_type}--{args.output_type}" Converter: BaseConverter = converters.get(converter_name) if Converter is not None: ArgParserGenerator(Converter).update_argparser(parser) Saver: BaseSaver = savers.get(args.output_type) ArgParserGenerator(Saver).update_argparser(parser) if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) ArgParserGenerator(get_dataloader_fn).update_argparser(parser) if args.ignore_unknown_parameters: args, unknown_args = parser.parse_known_args() LOGGER.warning(f"Got additional args {unknown_args}") else: args = parser.parse_args() return args def main(): args = _get_args() log_level = logging.INFO if not args.verbose else logging.DEBUG log_format = "%(asctime)s %(levelname)s %(name)s %(message)s" logging.basicConfig(level=log_level, format=log_format) LOGGER.info(f"args:") for key, value in vars(args).items(): LOGGER.info(f" {key} = {value}") requested_model_precision = Precision(args.precision) dataloader_fn = None # if conversion is required, temporary change model load precision to that required by converter # it is for TensorRT converters which require fp32 models for all requested precisions converter_name = f"{args.input_type}--{args.output_type}" Converter: BaseConverter = converters.get(converter_name) if Converter: args.precision = Converter.required_source_model_precision(requested_model_precision).value Loader: BaseLoader = loaders.get(args.input_type) loader = ArgParserGenerator(Loader, module_path=args.input_path).from_args(args) model = loader.load(args.input_path) LOGGER.info("inputs: %s", model.inputs) LOGGER.info("outputs: %s", model.outputs) if Converter: # if conversion is needed # dataloader must much source model precision - so not recovering it yet if args.dataloader is not None: get_dataloader_fn = load_from_file(args.dataloader, label="dataloader", target=DATALOADER_FN_NAME) dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args) # recover precision to that requested by user args.precision = requested_model_precision.value if Converter: converter = ArgParserGenerator(Converter).from_args(args) model = converter.convert(model, dataloader_fn=dataloader_fn) Saver: BaseSaver = savers.get(args.output_type) saver = ArgParserGenerator(Saver).from_args(args) saver.save(model, args.output_path) return 0 if __name__ == "__main__": main()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/convert_model.py
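For the `pyt` input type listed above, convert_model.py expects --input-path to point at a Python module exposing a get_model function that returns the model together with its tensor signatures. The sketch below is a hypothetical, minimal entry point (toy network, made-up tensor names, and a made-up checkpoint argument), assuming the deployment_toolkit package is importable; it is only an illustration of the expected contract, not code from the repository.

# model.py -- hypothetical get_model entry point for the `pyt` input type
import torch

from deployment_toolkit.core import TensorSpec  # assuming the toolkit package is on the path


def get_model(*, checkpoint: str = ""):
    # toy stand-in for the real network; `checkpoint` becomes a --checkpoint CLI option
    model = torch.nn.Conv3d(4, 3, kernel_size=3, padding=1).eval()
    tensor_infos = {
        "inputs": {"INPUT__0": TensorSpec("INPUT__0", "float32", (-1, 4, 160, 192, 160))},
        "outputs": {"OUTPUT__0": TensorSpec("OUTPUT__0", "float32", (-1, 3, 160, 192, 160))},
    }
    return model, tensor_infos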
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import abc import importlib import logging import os from enum import Enum from pathlib import Path from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union import numpy as np LOGGER = logging.getLogger(__name__) DATALOADER_FN_NAME = "get_dataloader_fn" GET_MODEL_FN_NAME = "get_model" GET_SERVING_INPUT_RECEIVER_FN = "get_serving_input_receiver_fn" GET_ARGPARSER_FN_NAME = "update_argparser" class TensorSpec(NamedTuple): name: str dtype: str shape: Tuple class Parameter(Enum): def __lt__(self, other: "Parameter") -> bool: return self.value < other.value class Accelerator(Parameter): AMP = "amp" CUDA = "cuda" TRT = "trt" class Precision(Parameter): FP16 = "fp16" FP32 = "fp32" TF32 = "tf32" # Deprecated class Format(Parameter): TF_GRAPHDEF = "tf-graphdef" TF_SAVEDMODEL = "tf-savedmodel" TF_TRT = "tf-trt" TF_ESTIMATOR = "tf-estimator" TF_KERAS = "tf-keras" ONNX = "onnx" TRT = "trt" TS_SCRIPT = "ts-script" TS_TRACE = "ts-trace" PYT = "pyt" class Model(NamedTuple): handle: object precision: Optional[Precision] inputs: Dict[str, TensorSpec] outputs: Dict[str, TensorSpec] def load_from_file(file_path, label, target): spec = importlib.util.spec_from_file_location(name=label, location=file_path) my_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(my_module) # pytype: disable=attribute-error return getattr(my_module, target, None) class BaseLoader(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def load(self, model_path: Union[str, Path], **kwargs) -> Model: """ Loads and process model from file based on given set of args """ pass class BaseSaver(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def save(self, model: Model, model_path: Union[str, Path]) -> None: """ Save model to file """ pass class BaseRunner(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def init_inference(self, model: Model): raise NotImplementedError class BaseRunnerSession(abc.ABC): def __init__(self, model: Model): self._model = model @abc.abstractmethod def __enter__(self): raise NotImplementedError() @abc.abstractmethod def __exit__(self, exc_type, exc_value, traceback): raise NotImplementedError() @abc.abstractmethod def __call__(self, x: Dict[str, object]): raise NotImplementedError() def _set_env_variables(self) -> Dict[str, object]: """this method not remove values; fix it if needed""" to_set = {} old_values = {k: os.environ.pop(k, None) for k in to_set} os.environ.update(to_set) return old_values def _recover_env_variables(self, old_envs: Dict[str, object]): for name, value in old_envs.items(): if value is None: del os.environ[name] else: os.environ[name] = str(value) class BaseConverter(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def convert(self, model: Model, dataloader_fn) -> Model: raise NotImplementedError() 
@staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: return requested_model_precision class BaseMetricsCalculator(abc.ABC): required_fn_name_for_signature_parsing: Optional[str] = None @abc.abstractmethod def calc( self, *, ids: List[Any], y_pred: Dict[str, np.ndarray], x: Optional[Dict[str, np.ndarray]], y_real: Optional[Dict[str, np.ndarray]], ) -> Dict[str, float]: """ Calculates error/accuracy metrics Args: ids: List of ids identifying each sample in the batch y_pred: model output as dict where key is output name and value is output value x: model input as dict where key is input name and value is input value y_real: input ground truth as dict where key is output name and value is output value Returns: dictionary where key is metric name and value is its value """ pass class ShapeSpec(NamedTuple): min: Tuple opt: Tuple max: Tuple
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/core.py
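The abstract base classes and the Model tuple above are the extension surface of the toolkit. A minimal sketch of a custom saver registered under a made-up format key follows; the class name, the "pickle" key, and the import path are assumptions for illustration only.

from pathlib import Path

from deployment_toolkit.core import BaseSaver, Model        # assuming the toolkit package is on the path
from deployment_toolkit.extensions import savers


class PickleSaver(BaseSaver):
    """Illustrative saver that pickles the model handle to disk."""

    def save(self, model: Model, model_path) -> None:
        import pickle

        model_path = Path(model_path)
        model_path.parent.mkdir(parents=True, exist_ok=True)
        with model_path.open("wb") as fh:
            pickle.dump(model.handle, fh)


savers.register_extension("pickle", PickleSaver)  # "pickle" is a made-up format key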
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from pathlib import Path from typing import Dict, Iterable import numpy as np MB2B = 2 ** 20 B2MB = 1 / MB2B FLUSH_THRESHOLD_B = 256 * MB2B def pad_except_batch_axis(data: np.ndarray, target_shape_with_batch_axis: Iterable[int]): assert all( [current_size <= target_size for target_size, current_size in zip(target_shape_with_batch_axis, data.shape)] ), "target_shape should have equal or greater all dimensions comparing to data.shape" padding = [(0, 0)] + [ # (0, 0) - do not pad on batch_axis (with index 0) (0, target_size - current_size) for target_size, current_size in zip(target_shape_with_batch_axis[1:], data.shape[1:]) ] return np.pad(data, padding, "constant", constant_values=np.nan) class NpzWriter: """ Dumps dicts of numpy arrays into npz files It can/shall be used as context manager: ``` with OutputWriter('mydir') as writer: writer.write(outputs={'classes': np.zeros(8), 'probs': np.zeros((8, 4))}, labels={'classes': np.zeros(8)}, inputs={'input': np.zeros((8, 240, 240, 3)}) ``` ## Variable size data Only dynamic of last axis is handled. Data is padded with np.nan value. Also each generated file may have different size of dynamic axis. """ def __init__(self, output_dir, compress=False): self._output_dir = Path(output_dir) self._items_cache: Dict[str, Dict[str, np.ndarray]] = {} self._items_counters: Dict[str, int] = {} self._flush_threshold_b = FLUSH_THRESHOLD_B self._compress = compress @property def cache_size(self): return {name: sum([a.nbytes for a in data.values()]) for name, data in self._items_cache.items()} def _append_to_cache(self, prefix, data): if data is None: return if not isinstance(data, dict): raise ValueError(f"{prefix} data to store shall be dict") cached_data = self._items_cache.get(prefix, {}) for name, value in data.items(): assert isinstance( value, (list, np.ndarray) ), f"Values shall be lists or np.ndarrays; current type {type(value)}" if not isinstance(value, np.ndarray): value = np.array(value) assert value.dtype.kind in ["S", "U"] or not np.any( np.isnan(value) ), f"Values with np.nan is not supported; {name}={value}" cached_value = cached_data.get(name, None) if cached_value is not None: target_shape = np.max([cached_value.shape, value.shape], axis=0) cached_value = pad_except_batch_axis(cached_value, target_shape) value = pad_except_batch_axis(value, target_shape) value = np.concatenate((cached_value, value)) cached_data[name] = value self._items_cache[prefix] = cached_data def write(self, **kwargs): """ Writes named list of dictionaries of np.ndarrays. Finally keyword names will be later prefixes of npz files where those dictionaries will be stored. ex. 
writer.write(inputs={'input': np.zeros((2, 10))}, outputs={'classes': np.zeros((2,)), 'probabilities': np.zeros((2, 32))}, labels={'classes': np.zeros((2,))}) Args: **kwargs: named list of dictionaries of np.ndarrays to store """ for prefix, data in kwargs.items(): self._append_to_cache(prefix, data) biggest_item_size = max(self.cache_size.values()) if biggest_item_size > self._flush_threshold_b: self.flush() def flush(self): for prefix, data in self._items_cache.items(): self._dump(prefix, data) self._items_cache = {} def _dump(self, prefix, data): idx = self._items_counters.setdefault(prefix, 0) filename = f"{prefix}-{idx:012d}.npz" output_path = self._output_dir / filename if self._compress: np.savez_compressed(output_path, **data) else: np.savez(output_path, **data) nitems = len(list(data.values())[0]) msg_for_labels = ( "If these are correct shapes - consider moving loading of them into metrics.py." if prefix == "labels" else "" ) shapes = {name: value.shape if isinstance(value, np.ndarray) else (len(value),) for name, value in data.items()} assert all(len(v) == nitems for v in data.values()), ( f'All items in "{prefix}" shall have same size on 0 axis equal to batch size. {msg_for_labels}' f'{", ".join(f"{name}: {shape}" for name, shape in shapes.items())}' ) self._items_counters[prefix] += nitems def __enter__(self): if self._output_dir.exists() and len(list(self._output_dir.iterdir())): raise ValueError(f"{self._output_dir.as_posix()} is not empty") self._output_dir.mkdir(parents=True, exist_ok=True) return self def __exit__(self, exc_type, exc_val, exc_tb): self.flush()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/dump.py
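Note that the docstring example above refers to the class as OutputWriter, while the usable name is NpzWriter. A short usage sketch, assuming the module is importable as shown and using placeholder array shapes:

import numpy as np

from deployment_toolkit.dump import NpzWriter  # assuming the toolkit package is on the path

with NpzWriter("./dump_dir") as writer:  # target directory must be empty or missing
    for _ in range(4):
        writer.write(
            inputs={"image": np.zeros((8, 240, 240, 3), dtype=np.float32)},
            outputs={"classes": np.zeros((8,)), "probs": np.zeros((8, 4))},
            labels={"classes": np.zeros((8,))},
        )
# any remaining cache is flushed on exit into files such as outputs-000000000000.npz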
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import importlib import logging import os import re from pathlib import Path from typing import List LOGGER = logging.getLogger(__name__) class ExtensionManager: def __init__(self, name: str): self._name = name self._registry = {} def register_extension(self, extension: str, clazz): already_registered_class = self._registry.get(extension, None) if already_registered_class and already_registered_class.__module__ != clazz.__module__: raise RuntimeError( f"Conflicting extension {self._name}/{extension}; " f"{already_registered_class.__module__}.{already_registered_class.__name} " f"and " f"{clazz.__module__}.{clazz.__name__}" ) elif already_registered_class is None: clazz_full_name = f"{clazz.__module__}.{clazz.__name__}" if clazz is not None else "None" LOGGER.debug(f"Registering extension {self._name}/{extension}: {clazz_full_name}") self._registry[extension] = clazz def get(self, extension): if extension not in self._registry: raise RuntimeError(f"Missing extension {self._name}/{extension}") return self._registry[extension] @property def supported_extensions(self): return list(self._registry) @staticmethod def scan_for_extensions(extension_dirs: List[Path]): register_pattern = r".*\.register_extension\(.*" for extension_dir in extension_dirs: for python_path in extension_dir.rglob("*.py"): if not python_path.is_file(): continue payload = python_path.read_text() if re.findall(register_pattern, payload): import_path = python_path.relative_to(toolkit_root_dir.parent) package = import_path.parent.as_posix().replace(os.sep, ".") package_with_module = f"{package}.{import_path.stem}" spec = importlib.util.spec_from_file_location(name=package_with_module, location=python_path) my_module = importlib.util.module_from_spec(spec) my_module.__package__ = package try: spec.loader.exec_module(my_module) # pytype: disable=attribute-error except ModuleNotFoundError as e: LOGGER.error( f"Could not load extensions from {import_path} due to missing python packages; {e}" ) runners = ExtensionManager("runners") loaders = ExtensionManager("loaders") savers = ExtensionManager("savers") converters = ExtensionManager("converters") toolkit_root_dir = (Path(__file__).parent / "..").resolve() ExtensionManager.scan_for_extensions([toolkit_root_dir])
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/extensions.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import sys
from typing import List, Optional


def warmup(
    model_name: str,
    batch_sizes: List[int],
    triton_gpu_engine_count: int = 1,
    triton_instances: int = 1,
    profiling_data: str = "random",
    input_shapes: Optional[List[str]] = None,
    server_url: str = "localhost",
    measurement_window: int = 10000,
    shared_memory: bool = False
):
    print("\n")
    print(f"==== Warmup start ====")
    print("\n")

    input_shapes = " ".join(map(lambda shape: f" --shape {shape}", input_shapes)) if input_shapes else ""
    measurement_window = 6 * measurement_window

    max_batch_size = max(batch_sizes)
    max_total_requests = 2 * max_batch_size * triton_instances * triton_gpu_engine_count
    max_concurrency = min(256, max_total_requests)
    batch_size = max(1, max_total_requests // 256)

    step = max(1, max_concurrency // 2)
    min_concurrency = step

    exec_args = f"""-m {model_name} \
-x 1 \
-p {measurement_window} \
-v \
-i http \
-u {server_url}:8000 \
-b {batch_size} \
--concurrency-range {min_concurrency}:{max_concurrency}:{step} \
--input-data {profiling_data} {input_shapes}"""

    if shared_memory:
        exec_args += " --shared-memory=cuda"

    result = os.system(f"perf_client {exec_args}")
    if result != 0:
        print(f"Failed running performance tests. Perf client failed with exit code {result}")
        sys.exit(1)

    print("\n")
    print(f"==== Warmup done ====")
    print("\n")
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/warmup.py
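A hedged example of calling the warmup helper before a performance sweep; the model name, shape string, and server address are placeholders, perf_client must be on PATH, and the import path assumes the toolkit package is importable.

from deployment_toolkit.warmup import warmup  # assuming the toolkit package is on the path

warmup(
    model_name="nnunet",                      # placeholder Triton model name
    batch_sizes=[1, 2, 4],
    triton_gpu_engine_count=1,
    triton_instances=1,
    profiling_data="random",
    input_shapes=["INPUT__0:4,160,192,160"],  # forwarded to perf_client --shape
    server_url="localhost",
    measurement_window=10000,
)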
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import inspect import logging from typing import Any, Callable, Dict, Optional, Union from .core import GET_ARGPARSER_FN_NAME, load_from_file LOGGER = logging.getLogger(__name__) def str2bool(v): if isinstance(v, bool): return v if v.lower() in ("yes", "true", "t", "y", "1"): return True elif v.lower() in ("no", "false", "f", "n", "0"): return False else: raise argparse.ArgumentTypeError("Boolean value expected.") def filter_fn_args(args: Union[dict, argparse.Namespace], fn: Callable) -> dict: signature = inspect.signature(fn) parameters_names = list(signature.parameters) if isinstance(args, argparse.Namespace): args = vars(args) args = {k: v for k, v in args.items() if k in parameters_names} return args def add_args_for_fn_signature(parser, fn) -> argparse.ArgumentParser: parser.conflict_handler = "resolve" signature = inspect.signature(fn) for parameter in signature.parameters.values(): if parameter.name in ["self", "args", "kwargs"]: continue argument_kwargs = {} if parameter.annotation != inspect.Parameter.empty: if parameter.annotation == bool: argument_kwargs["type"] = str2bool argument_kwargs["choices"] = [0, 1] elif isinstance(parameter.annotation, type(Optional[Any])): types = [type_ for type_ in parameter.annotation.__args__ if not isinstance(None, type_)] if len(types) != 1: raise RuntimeError( f"Could not prepare argument parser for {parameter.name}: {parameter.annotation} in {fn}" ) argument_kwargs["type"] = types[0] else: argument_kwargs["type"] = parameter.annotation if parameter.default != inspect.Parameter.empty: if parameter.annotation == bool: argument_kwargs["default"] = str2bool(parameter.default) else: argument_kwargs["default"] = parameter.default else: argument_kwargs["required"] = True name = parameter.name.replace("_", "-") LOGGER.debug(f"Adding argument {name} with {argument_kwargs}") parser.add_argument(f"--{name}", **argument_kwargs) return parser class ArgParserGenerator: def __init__(self, cls_or_fn, module_path: Optional[str] = None): self._cls_or_fn = cls_or_fn self._handle = cls_or_fn if inspect.isfunction(cls_or_fn) else getattr(cls_or_fn, "__init__") input_is_python_file = module_path and module_path.endswith(".py") self._input_path = module_path if input_is_python_file else None self._required_fn_name_for_signature_parsing = getattr( cls_or_fn, "required_fn_name_for_signature_parsing", None ) def update_argparser(self, parser): name = self._handle.__name__ group_parser = parser.add_argument_group(name) add_args_for_fn_signature(group_parser, fn=self._handle) self._update_argparser(group_parser) def get_args(self, args: argparse.Namespace): filtered_args = filter_fn_args(args, fn=self._handle) tmp_parser = argparse.ArgumentParser(allow_abbrev=False) self._update_argparser(tmp_parser) custom_names = [ p.dest.replace("-", "_") for p in tmp_parser._actions if not isinstance(p, argparse._HelpAction) ] custom_params = {n: getattr(args, n) for n in 
custom_names} filtered_args = {**filtered_args, **custom_params} return filtered_args def from_args(self, args: Union[argparse.Namespace, Dict]): args = self.get_args(args) LOGGER.info(f"Initializing {self._cls_or_fn.__name__}({args})") return self._cls_or_fn(**args) def _update_argparser(self, parser): label = "argparser_update" if self._input_path: update_argparser_handle = load_from_file(self._input_path, label=label, target=GET_ARGPARSER_FN_NAME) if update_argparser_handle: update_argparser_handle(parser) elif self._required_fn_name_for_signature_parsing: fn_handle = load_from_file( self._input_path, label=label, target=self._required_fn_name_for_signature_parsing ) if fn_handle: add_args_for_fn_signature(parser, fn_handle)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/args.py
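ArgParserGenerator derives CLI options from a callable's annotated signature and later instantiates the callable from the parsed namespace. A small sketch with a hypothetical dataloader factory (the parameter names and values are illustrative):

import argparse

from deployment_toolkit.args import ArgParserGenerator  # assuming the toolkit package is on the path


def get_dataloader_fn(*, data_dir: str, batch_size: int = 1, precision: str = "fp32"):
    """Annotated keyword-only parameters become --data-dir, --batch-size and --precision."""
    def _dataloader():
        yield from ()  # placeholder; a real factory would yield (ids, x, y) batches
    return _dataloader


parser = argparse.ArgumentParser(allow_abbrev=False)
ArgParserGenerator(get_dataloader_fn).update_argparser(parser)
args = parser.parse_args(["--data-dir", "/data", "--batch-size", "2"])
dataloader_fn = ArgParserGenerator(get_dataloader_fn).from_args(args)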
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import csv
import re
from typing import Dict, List

from natsort import natsorted
from tabulate import tabulate


def sort_results(results: List):
    results = natsorted(results, key=lambda item: [item[key] for key in item.keys()])
    return results


def save_results(filename: str, data: List, formatted: bool = False):
    data = format_data(data=data) if formatted else data
    with open(filename, "a") as csvfile:
        fieldnames = data[0].keys()
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for row in data:
            writer.writerow(row)


def format_data(data: List[Dict]) -> List[Dict]:
    formatted_data = list()
    for item in data:
        formatted_item = format_keys(data=item)
        formatted_data.append(formatted_item)
    return formatted_data


def format_keys(data: Dict) -> Dict:
    keys = {format_key(key=key): value for key, value in data.items()}
    return keys


def format_key(key: str) -> str:
    key = " ".join([k.capitalize() for k in re.split("_| ", key)])
    return key


def show_results(results: List[Dict]):
    headers = list(results[0].keys())
    summary = map(lambda x: list(map(lambda item: item[1], x.items())), results)
    print(tabulate(summary, headers=headers))
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/report.py
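A short sketch of the reporting helpers with made-up metric rows; save_results appends to the CSV, so the target file is assumed not to exist yet, and the import path assumes the toolkit package is importable.

from deployment_toolkit.report import save_results, show_results, sort_results  # assuming importable

results = [
    {"batch_size": 2, "throughput_infer": 128.0, "latency_mean_ms": 15.6},
    {"batch_size": 1, "throughput_infer": 70.0, "latency_mean_ms": 14.2},
]
results = sort_results(results)                       # natural sort over the row values
show_results(results)                                 # tabulated summary on stdout
save_results("report.csv", results, formatted=True)   # keys become "Batch Size", "Throughput Infer", ...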
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from typing import Dict, Iterable, Optional # pytype: disable=import-error import onnx import tensorrt as trt from ..core import BaseConverter, Format, Model, Precision, ShapeSpec from ..extensions import converters from .utils import get_input_shapes # pytype: enable=import-error LOGGER = logging.getLogger(__name__) TRT_LOGGER = trt.Logger(trt.Logger.INFO) class Onnx2TRTConverter(BaseConverter): def __init__(self, *, max_batch_size: int, max_workspace_size: int, precision: str): self._max_batch_size = max_batch_size self._max_workspace_size = max_workspace_size self._precision = Precision(precision) def convert(self, model: Model, dataloader_fn) -> Model: input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size) cuda_engine = onnx2trt( model.handle, shapes=input_shapes, max_workspace_size=self._max_workspace_size, max_batch_size=self._max_batch_size, model_precision=self._precision.value, ) return model._replace(handle=cuda_engine) @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: # TensorRT requires source models to be in FP32 precision return Precision.FP32 def onnx2trt( onnx_model: onnx.ModelProto, *, shapes: Dict[str, ShapeSpec], max_workspace_size: int, max_batch_size: int, model_precision: str, ) -> "trt.ICudaEngine": """ Converts onnx model to TensorRT ICudaEngine Args: onnx_model: onnx.Model to convert shapes: dictionary containing min shape, max shape, opt shape for each input name max_workspace_size: The maximum GPU temporary memory which the CudaEngine can use at execution time. max_batch_size: The maximum batch size which can be used at execution time, and also the batch size for which the CudaEngine will be optimized. model_precision: precision of kernels (possible values: fp16, fp32) Returns: TensorRT ICudaEngine """ # Whether or not 16-bit kernels are permitted. # During :class:`ICudaEngine` build fp16 kernels will also be tried when this mode is enabled. fp16_mode = "16" in model_precision builder = trt.Builder(TRT_LOGGER) builder.fp16_mode = fp16_mode builder.max_batch_size = max_batch_size builder.max_workspace_size = max_workspace_size # In TensorRT 7.0, the ONNX parser only supports full-dimensions mode, # meaning that your network definition must be created with the explicitBatch flag set. 
# For more information, see # https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#work_dynamic_shapes flags = 1 << int(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH) network = builder.create_network(flags) with trt.OnnxParser(network, TRT_LOGGER) as parser: # onnx model parsing if not parser.parse(onnx_model.SerializeToString()): for i in range(parser.num_errors): LOGGER.error(f"OnnxParser error {i}/{parser.num_errors}: {parser.get_error(i)}") raise RuntimeError("Error during parsing ONNX model (see logs for details)") # optimization config = builder.create_builder_config() config.flags |= bool(fp16_mode) << int(trt.BuilderFlag.FP16) config.max_workspace_size = max_workspace_size profile = builder.create_optimization_profile() for name, spec in shapes.items(): profile.set_shape(name, **spec._asdict()) config.add_optimization_profile(profile) engine = builder.build_engine(network, config=config) return engine converters.register_extension(f"{Format.ONNX.value}--{Format.TRT.value}", Onnx2TRTConverter)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda/onnx2trt_conv.py
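A hedged sketch of driving onnx2trt directly. The input name and optimization profile below are placeholders, and the builder calls in the module above target older TensorRT releases (roughly the 7.x API), so this may not run against newer TensorRT versions.

import onnx

from deployment_toolkit.core import ShapeSpec                 # assuming the toolkit package is on the path
from deployment_toolkit.bermuda.onnx2trt_conv import onnx2trt

onnx_model = onnx.load("model.onnx")                          # placeholder path
shapes = {
    "INPUT__0": ShapeSpec(                                    # placeholder min/opt/max profile
        min=(1, 4, 128, 160, 128),
        opt=(2, 4, 160, 192, 160),
        max=(2, 4, 192, 224, 192),
    ),
}
engine = onnx2trt(
    onnx_model,
    shapes=shapes,
    max_workspace_size=4 * (2 ** 30),
    max_batch_size=2,
    model_precision="fp16",
)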
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from pathlib import Path from typing import Dict, Optional, Union import numpy as np # pytype: disable=import-error import onnx import onnx.optimizer import onnx.shape_inference import onnxruntime from google.protobuf import text_format from onnx.mapping import TENSOR_TYPE_TO_NP_TYPE # pytype: enable=import-error from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec from ..extensions import loaders, runners, savers from .utils import infer_precision LOGGER = logging.getLogger(__name__) def _value_info2tensor_spec(value_info: onnx.ValueInfoProto): onnx_data_type_map = {"float": "float32", "double": "float64"} elem_type_name = onnx.TensorProto.DataType.Name(value_info.type.tensor_type.elem_type).lower() dtype = onnx_data_type_map.get(elem_type_name, elem_type_name) def _get_dim(dim): which = dim.WhichOneof("value") if which is not None: # which is None when dim is None dim = getattr(dim, which) return None if isinstance(dim, (str, bytes)) else dim shape = value_info.type.tensor_type.shape shape = tuple([_get_dim(d) for d in shape.dim]) return TensorSpec(value_info.name, dtype=dtype, shape=shape) def _infer_graph_precision(onnx_graph: onnx.GraphProto) -> Optional[Precision]: import networkx as nx # build directed graph nx_graph = nx.DiGraph() def _get_dtype(vi): t = vi.type if hasattr(t, "tensor_type"): type_id = t.tensor_type.elem_type else: raise NotImplementedError("Not implemented yet") return TENSOR_TYPE_TO_NP_TYPE[type_id] node_output2type = {vi.name: _get_dtype(vi) for vi in onnx_graph.value_info} node_outputs2node = {output_name: node for node in onnx_graph.node for output_name in node.output} node_inputs2node = {input_name: node for node in onnx_graph.node for input_name in node.input} for node in onnx_graph.node: node_dtype = node_output2type.get("+".join(node.output), None) nx_graph.add_node( node.name, op=node.op_type, attr={a.name: a for a in node.attribute}, dtype=node_dtype, ) for input_name in node.input: prev_node = node_outputs2node.get(input_name, None) if prev_node: nx_graph.add_edge(prev_node.name, node.name) for input_node in onnx_graph.input: input_name = input_node.name nx_graph.add_node(input_name, op="input", dtype=_get_dtype(input_node)) next_node = node_inputs2node.get(input_name, None) if next_node: nx_graph.add_edge(input_name, next_node.name) for output in onnx_graph.output: output_name = output.name nx_graph.add_node(output_name, op="output", dtype=_get_dtype(output)) prev_node = node_outputs2node.get(output_name, None) if prev_node: nx_graph.add_edge(prev_node.name, output_name) else: LOGGER.warning(f"Could not find previous node for {output_name}") input_names = [n.name for n in onnx_graph.input] output_names = [n.name for n in onnx_graph.output] most_common_dtype = infer_precision(nx_graph, input_names, output_names, lambda node: node.get("dtype", None)) if most_common_dtype is not None: 
precision = {np.dtype("float32"): Precision.FP32, np.dtype("float16"): Precision.FP16}[most_common_dtype] else: precision = None return precision class OnnxLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() model = onnx.load(model_path) onnx.checker.check_model(model) onnx.helper.strip_doc_string(model) model = onnx.shape_inference.infer_shapes(model) # TODO: probably modification of onnx model ios causes error on optimize # from onnx.utils import polish_model # model = polish_model(model) # run checker, docs strip, optimizer and shape inference inputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.input} outputs = {vi.name: _value_info2tensor_spec(vi) for vi in model.graph.output} precision = _infer_graph_precision(model.graph) return Model(model, precision, inputs, outputs) class OnnxSaver(BaseSaver): def __init__(self, as_text: bool = False): self._as_text = as_text def save(self, model: Model, model_path: Union[str, Path]) -> None: model_path = Path(model_path) LOGGER.debug(f"Saving ONNX model to {model_path.as_posix()}") model_path.parent.mkdir(parents=True, exist_ok=True) onnx_model: onnx.ModelProto = model.handle if self._as_text: with model_path.open("w") as f: f.write(text_format.MessageToString(onnx_model)) else: with model_path.open("wb") as f: f.write(onnx_model.SerializeToString()) """ ExecutionProviders on onnxruntime 1.4.0 ['TensorrtExecutionProvider', 'CUDAExecutionProvider', 'MIGraphXExecutionProvider', 'NGRAPHExecutionProvider', 'OpenVINOExecutionProvider', 'DnnlExecutionProvider', 'NupharExecutionProvider', 'VitisAIExecutionProvider', 'ArmNNExecutionProvider', 'ACLExecutionProvider', 'CPUExecutionProvider'] """ def _check_providers(providers): providers = providers or [] if not isinstance(providers, (list, tuple)): providers = [providers] available_providers = onnxruntime.get_available_providers() unavailable = set(providers) - set(available_providers) if unavailable: raise RuntimeError(f"Unavailable providers {unavailable}") return providers class OnnxRunner(BaseRunner): def __init__(self, verbose_runtime_logs: bool = False): self._providers = None self._verbose_runtime_logs = verbose_runtime_logs def init_inference(self, model: Model): assert isinstance(model.handle, onnx.ModelProto) return OnnxRunnerSession( model=model, providers=self._providers, verbose_runtime_logs=self._verbose_runtime_logs ) class OnnxRunnerSession(BaseRunnerSession): def __init__(self, model: Model, providers, verbose_runtime_logs: bool = False): super().__init__(model) self._input_names = None self._output_names = None self._session = None self._providers = providers self._verbose_runtime_logs = verbose_runtime_logs self._old_env_values = {} def __enter__(self): self._old_env_values = self._set_env_variables() sess_options = onnxruntime.SessionOptions() # default session options if self._verbose_runtime_logs: sess_options.log_severity_level = 0 sess_options.log_verbosity_level = 1 LOGGER.info( f"Starting inference session for onnx model providers={self._providers} sess_options={sess_options}" ) self._input_names = list(self._model.inputs) self._output_names = list(self._model.outputs) model_payload = self._model.handle.SerializeToString() self._session = onnxruntime.InferenceSession( model_payload, providers=self._providers, sess_options=sess_options ) return self def __exit__(self, exc_type, exc_value, traceback): self._input_names = None self._output_names = None self._session = 
None self._recover_env_variables(self._old_env_values) def __call__(self, x: Dict[str, object]): feed_dict = {k: x[k] for k in self._input_names} y_pred = self._session.run(self._output_names, feed_dict) y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.ONNX.value, OnnxLoader) runners.register_extension(Format.ONNX.value, OnnxRunner) savers.register_extension(Format.ONNX.value, OnnxSaver)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda/onnx.py
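A hedged sketch of loading an ONNX model and running it through the onnxruntime-backed session above. Dynamic dimensions are filled with 1 here, which may not satisfy every model, and the onnx/onnxruntime versions are assumed compatible with the loader's calls.

import numpy as np

from deployment_toolkit.bermuda.onnx import OnnxLoader, OnnxRunner  # assuming importable

model = OnnxLoader().load("model.onnx")        # placeholder path
runner = OnnxRunner()
with runner.init_inference(model) as session:
    x = {
        name: np.zeros([dim if dim else 1 for dim in spec.shape], dtype=spec.dtype)
        for name, spec in model.inputs.items()
    }
    y_pred = session(x)                        # dict of output name -> np.ndarray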
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda/__init__.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from collections import Counter from typing import Callable, Dict, List import networkx as nx from ..core import ShapeSpec def infer_precision( nx_graph: nx.Graph, input_names: List[str], output_names: List[str], get_node_dtype_fn: Callable, ): node_dtypes = [nx_graph.nodes[node_name].get("dtype", None) for node_name in nx_graph.nodes] node_dtypes = [dt for dt in node_dtypes if dt is None or dt.kind not in ["i", "b"]] dtypes_counter = Counter(node_dtypes) return dtypes_counter.most_common()[0][0] def get_shapes_with_dynamic_axes(dataloader, batch_size_dim=0): def _set_dynamic_shapes(t, shapes): for k, v in t.items(): shape = list(v.shape) for dim, s in enumerate(shape): if shapes[k][dim] != -1 and shapes[k][dim] != s: shapes[k][dim] = -1 ## get all shapes from input and output tensors input_shapes = {} output_shapes = {} for batch in dataloader: _, x, y = batch for k, v in x.items(): input_shapes[k] = list(v.shape) for k, v in y.items(): output_shapes[k] = list(v.shape) break # based on max <max_num_iters> iterations, check which # dimensions differ to determine dynamic_axes max_num_iters = 100 for idx, batch in enumerate(dataloader): if idx >= max_num_iters: break _, x, y = batch _set_dynamic_shapes(x, input_shapes) _set_dynamic_shapes(y, output_shapes) return input_shapes, output_shapes def get_dynamic_axes(dataloader, batch_size_dim=0): input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader, batch_size_dim) all_shapes = {**input_shapes, **output_shapes} dynamic_axes = {} for k, shape in all_shapes.items(): for idx, s in enumerate(shape): if s == -1: dynamic_axes[k] = {idx: k + "_" + str(idx)} for k, v in all_shapes.items(): if k in dynamic_axes: dynamic_axes[k].update({batch_size_dim: "batch_size_" + str(batch_size_dim)}) else: dynamic_axes[k] = {batch_size_dim: "batch_size_" + str(batch_size_dim)} return dynamic_axes def get_input_shapes(dataloader, max_batch_size=1) -> Dict[str, ShapeSpec]: def init_counters_and_shapes(x, counters, min_shapes, max_shapes): for k, v in x.items(): counters[k] = Counter() min_shapes[k] = [float("inf")] * v.ndim max_shapes[k] = [float("-inf")] * v.ndim counters = {} min_shapes: Dict[str, tuple] = {} max_shapes: Dict[str, tuple] = {} for idx, batch in enumerate(dataloader): ids, x, y = batch if idx == 0: init_counters_and_shapes(x, counters, min_shapes, max_shapes) for k, v in x.items(): shape = v.shape counters[k][shape] += 1 min_shapes[k] = tuple([min(a, b) for a, b in zip(min_shapes[k], shape)]) max_shapes[k] = tuple([max(a, b) for a, b in zip(max_shapes[k], shape)]) opt_shapes: Dict[str, tuple] = {} for k, v in counters.items(): opt_shapes[k] = v.most_common(1)[0][0] shapes = {} for k in opt_shapes.keys(): # same keys in min_shapes and max_shapes shapes[k] = ShapeSpec( min=(1,) + min_shapes[k][1:], max=(max_batch_size,) + max_shapes[k][1:], opt=(max_batch_size,) + opt_shapes[k][1:], ) return shapes
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda/utils.py
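The shape helpers above expect batches shaped as (ids, x, y) with dicts of numpy arrays. A small sketch of such a dataloader and the resulting shape profile; tensor names and sizes are placeholders.

import numpy as np

from deployment_toolkit.bermuda.utils import get_dynamic_axes, get_input_shapes  # assuming importable


def dataloader_fn():
    for i in range(8):
        ids = [i]
        x = {"INPUT__0": np.zeros((1, 4, 160, 192, 160), dtype=np.float32)}
        y = {"OUTPUT__0": np.zeros((1, 3, 160, 192, 160), dtype=np.float32)}
        yield ids, x, y


shapes = get_input_shapes(dataloader_fn(), max_batch_size=2)
# shapes["INPUT__0"] == ShapeSpec(min=(1, 4, 160, 192, 160), opt=(2, ...), max=(2, ...))
dynamic_axes = get_dynamic_axes(dataloader_fn())
# with constant shapes only the batch axis is marked dynamic, e.g. {"INPUT__0": {0: "batch_size_0"}, ...}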
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import sys from pathlib import Path from typing import Dict, NamedTuple, Optional, Union import numpy as np # pytype: disable=import-error try: import pycuda.autoinit import pycuda.driver as cuda except (ImportError, Exception) as e: logging.getLogger(__name__).warning(f"Problems with importing pycuda package; {e}") # pytype: enable=import-error import tensorrt as trt # pytype: disable=import-error from ..core import BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec from ..extensions import loaders, runners, savers LOGGER = logging.getLogger(__name__) TRT_LOGGER = trt.Logger(trt.Logger.INFO) """ documentation: https://docs.nvidia.com/deeplearning/tensorrt/api/python_api/index.html https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#python_samples_section """ class TensorRTLoader(BaseLoader): def load(self, model_path: Union[str, Path], **_) -> Model: model_path = Path(model_path) LOGGER.debug(f"Loading TensorRT engine from {model_path}") with model_path.open("rb") as fh, trt.Runtime(TRT_LOGGER) as runtime: engine = runtime.deserialize_cuda_engine(fh.read()) if engine is None: raise RuntimeError(f"Could not load ICudaEngine from {model_path}") inputs = {} outputs = {} for binding_idx in range(engine.num_bindings): name = engine.get_binding_name(binding_idx) is_input = engine.binding_is_input(binding_idx) dtype = engine.get_binding_dtype(binding_idx) shape = engine.get_binding_shape(binding_idx) if is_input: inputs[name] = TensorSpec(name, dtype, shape) else: outputs[name] = TensorSpec(name, dtype, shape) return Model(engine, None, inputs, outputs) class TensorRTSaver(BaseSaver): def __init__(self): pass def save(self, model: Model, model_path: Union[str, Path]) -> None: model_path = Path(model_path) LOGGER.debug(f"Saving TensorRT engine to {model_path.as_posix()}") model_path.parent.mkdir(parents=True, exist_ok=True) engine: "trt.ICudaEngine" = model.handle with model_path.open("wb") as fh: fh.write(engine.serialize()) class TRTBuffers(NamedTuple): x_host: Optional[Dict[str, object]] x_dev: Dict[str, object] y_pred_host: Dict[str, object] y_pred_dev: Dict[str, object] class TensorRTRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): return TensorRTRunnerSession(model=model) class TensorRTRunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, trt.ICudaEngine) self._model = model self._has_dynamic_shapes = None self._context = None self._engine: trt.ICudaEngine = self._model.handle self._cuda_context = pycuda.autoinit.context self._input_names = None self._output_names = None self._buffers = None def __enter__(self): self._context = self._engine.create_execution_context() self._context.__enter__() self._input_names = [ self._engine[idx] for idx in range(self._engine.num_bindings) if self._engine.binding_is_input(idx) ] 
self._output_names = [ self._engine[idx] for idx in range(self._engine.num_bindings) if not self._engine.binding_is_input(idx) ] # all_binding_shapes_specified is True for models without dynamic shapes # so initially this variable is False for models with dynamic shapes self._has_dynamic_shapes = not self._context.all_binding_shapes_specified return self def __exit__(self, exc_type, exc_value, traceback): self._context.__exit__(exc_type, exc_value, traceback) self._input_names = None self._output_names = None # TODO: are cuda buffers dealloc automatically? self._buffers = None def __call__(self, x): buffers = self._prepare_buffers_if_needed(x) bindings = self._update_bindings(buffers) for name in self._input_names: cuda.memcpy_htod(buffers.x_dev[name], buffers.x_host[name]) self._cuda_context.push() self._context.execute_v2(bindings=bindings) self._cuda_context.pop() for name in self._output_names: cuda.memcpy_dtoh(buffers.y_pred_host[name], buffers.y_pred_dev[name]) return buffers.y_pred_host def _update_bindings(self, buffers: TRTBuffers): bindings = [None] * self._engine.num_bindings for name in buffers.y_pred_dev: binding_idx: int = self._engine[name] bindings[binding_idx] = buffers.y_pred_dev[name] for name in buffers.x_dev: binding_idx: int = self._engine[name] bindings[binding_idx] = buffers.x_dev[name] return bindings def _set_dynamic_input_shapes(self, x_host): def _is_shape_dynamic(input_shape): return any([dim is None or dim == -1 for dim in input_shape]) for name in self._input_names: bindings_idx = self._engine[name] data_shape = x_host[name].shape # pytype: disable=attribute-error if self._engine.is_shape_binding(bindings_idx): input_shape = self._context.get_shape(bindings_idx) if _is_shape_dynamic(input_shape): self._context.set_shape_input(bindings_idx, data_shape) else: input_shape = self._engine.get_binding_shape(bindings_idx) if _is_shape_dynamic(input_shape): self._context.set_binding_shape(bindings_idx, data_shape) assert self._context.all_binding_shapes_specified and self._context.all_shape_inputs_specified def _prepare_buffers_if_needed(self, x_host: Dict[str, object]): # pytype: disable=attribute-error new_batch_size = list(x_host.values())[0].shape[0] current_batch_size = list(self._buffers.y_pred_host.values())[0].shape[0] if self._buffers else 0 # pytype: enable=attribute-error if self._has_dynamic_shapes or new_batch_size != current_batch_size: # TODO: are CUDA buffers dealloc automatically? self._set_dynamic_input_shapes(x_host) y_pred_host = {} for name in self._output_names: shape = self._context.get_binding_shape(self._engine[name]) y_pred_host[name] = np.zeros(shape, dtype=trt.nptype(self._model.outputs[name].dtype)) y_pred_dev = {name: cuda.mem_alloc(data.nbytes) for name, data in y_pred_host.items()} x_dev = { name: cuda.mem_alloc(host_input.nbytes) for name, host_input in x_host.items() if name in self._input_names # pytype: disable=attribute-error } self._buffers = TRTBuffers(None, x_dev, y_pred_host, y_pred_dev) return self._buffers._replace(x_host=x_host) if "pycuda.driver" in sys.modules: loaders.register_extension(Format.TRT.value, TensorRTLoader) runners.register_extension(Format.TRT.value, TensorRTRunner) savers.register_extension(Format.TRT.value, TensorRTSaver) else: LOGGER.warning("Do not register TensorRT extension due problems with importing pycuda.driver package.")
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda/tensorrt.py
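A hedged sketch of inspecting a serialized engine through the loader above; it assumes tensorrt (and, for the runner, pycuda) is installed and that model.plan is an engine built with a matching TensorRT version.

from deployment_toolkit.bermuda.tensorrt import TensorRTLoader  # assuming importable

model = TensorRTLoader().load("model.plan")   # placeholder path
for name, spec in model.inputs.items():
    print("input :", name, spec.dtype, tuple(spec.shape))
for name, spec in model.outputs.items():
    print("output:", name, spec.dtype, tuple(spec.shape))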
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging import os from collections import Counter from pathlib import Path from typing import Dict, Iterable, NamedTuple, Optional, Union import torch # pytype: disable=import-error import yaml from ..core import ( GET_MODEL_FN_NAME, BaseConverter, BaseLoader, BaseRunner, BaseRunnerSession, BaseSaver, Format, Model, Precision, TensorSpec, load_from_file, ) from ..extensions import converters, loaders, runners, savers from .utils import get_dynamic_axes, get_input_shapes, get_shapes_with_dynamic_axes LOGGER = logging.getLogger(__name__) class InputOutputSpec(NamedTuple): inputs: Dict[str, TensorSpec] outputs: Dict[str, TensorSpec] def get_sample_input(dataloader, device): for batch in dataloader: _, x, _ = batch break if isinstance(x, dict): sample_input = list(x.values()) elif isinstance(x, list): sample_input = x else: raise TypeError("The first element (x) of batch returned by dataloader must be a list or a dict") for idx, s in enumerate(sample_input): sample_input[idx] = torch.from_numpy(s).to(device) return tuple(sample_input) def get_model_device(torch_model): if next(torch_model.parameters()).is_cuda: return "cuda" else: return "cpu" def infer_model_precision(model): counter = Counter() for param in model.parameters(): counter[param.dtype] += 1 if counter[torch.float16] > 0: return Precision.FP16 else: return Precision.FP32 def _get_tensor_dtypes(dataloader, precision): def _get_dtypes(t): dtypes = {} for k, v in t.items(): dtype = str(v.dtype) if dtype == "float64": dtype = "float32" if precision == Precision.FP16 and dtype == "float32": dtype = "float16" dtypes[k] = dtype return dtypes input_dtypes = {} output_dtypes = {} for batch in dataloader: _, x, y = batch input_dtypes = _get_dtypes(x) output_dtypes = _get_dtypes(y) break return input_dtypes, output_dtypes ### TODO assumption: floating point input ### type has same precision as the model def _get_io_spec(model, dataloader_fn): precision = model.precision dataloader = dataloader_fn() input_dtypes, output_dtypes = _get_tensor_dtypes(dataloader, precision) input_shapes, output_shapes = get_shapes_with_dynamic_axes(dataloader) inputs = { name: TensorSpec(name=name, dtype=input_dtypes[name], shape=tuple(input_shapes[name])) for name in model.inputs } outputs = { name: TensorSpec(name=name, dtype=output_dtypes[name], shape=tuple(output_shapes[name])) for name in model.outputs } return InputOutputSpec(inputs, outputs) class PyTorchModelLoader(BaseLoader): required_fn_name_for_signature_parsing: Optional[str] = GET_MODEL_FN_NAME def __init__(self, **kwargs): self._model_args = kwargs def load(self, model_path: Union[str, Path], **_) -> Model: if isinstance(model_path, Path): model_path = model_path.as_posix() get_model = load_from_file(model_path, "model", GET_MODEL_FN_NAME) model, tensor_infos = get_model(**self._model_args) io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"]) precision = 
infer_model_precision(model) return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class TorchScriptLoader(BaseLoader): def __init__(self, tensor_names_path: str = None, **kwargs): self._model_args = kwargs self._io_spec = None if tensor_names_path is not None: with Path(tensor_names_path).open("r") as fh: tensor_infos = yaml.load(fh, Loader=yaml.SafeLoader) self._io_spec = InputOutputSpec(tensor_infos["inputs"], tensor_infos["outputs"]) def load(self, model_path: Union[str, Path], **_) -> Model: if not isinstance(model_path, Path): model_path = Path(model_path) model = torch.jit.load(model_path.as_posix()) precision = infer_model_precision(model) io_spec = self._io_spec if not io_spec: yaml_path = model_path.parent / f"{model_path.stem}.yaml" if not yaml_path.is_file(): raise ValueError( f"If `--tensor-names-path is not provided, " f"TorchScript model loader expects file {yaml_path} with tensor information." ) with yaml_path.open("r") as fh: tensor_info = yaml.load(fh, Loader=yaml.SafeLoader) io_spec = InputOutputSpec(tensor_info["inputs"], tensor_info["outputs"]) return Model(handle=model, precision=precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class TorchScriptTraceConverter(BaseConverter): def __init__(self): pass def convert(self, model: Model, dataloader_fn) -> Model: device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) converted_model = torch.jit.trace_module(model.handle, {"forward": dummy_input}) io_spec = _get_io_spec(model, dataloader_fn) return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class TorchScriptScriptConverter(BaseConverter): def __init__(self): pass def convert(self, model: Model, dataloader_fn) -> Model: converted_model = torch.jit.script(model.handle) io_spec = _get_io_spec(model, dataloader_fn) return Model(converted_model, precision=model.precision, inputs=io_spec.inputs, outputs=io_spec.outputs) class PYT2ONNXConverter(BaseConverter): def __init__(self, onnx_opset: int = None): self._onnx_opset = onnx_opset def convert(self, model: Model, dataloader_fn) -> Model: import tempfile import onnx # pytype: disable=import-error assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Converter aborted." 
dynamic_axes = get_dynamic_axes(dataloader_fn()) device = get_model_device(model.handle) dummy_input = get_sample_input(dataloader_fn(), device) with tempfile.TemporaryDirectory() as tmpdirname: export_path = os.path.join(tmpdirname, "model.onnx") with torch.no_grad(): torch.onnx.export( model.handle, dummy_input, export_path, do_constant_folding=True, input_names=list(model.inputs), output_names=list(model.outputs), dynamic_axes=dynamic_axes, opset_version=self._onnx_opset, enable_onnx_checker=True, ) onnx_model = onnx.load(export_path) onnx.checker.check_model(onnx_model) onnx.helper.strip_doc_string(onnx_model) onnx_model = onnx.shape_inference.infer_shapes(onnx_model) return Model( handle=onnx_model, precision=model.precision, inputs=model.inputs, outputs=model.outputs, ) class PYT2TensorRTConverter(BaseConverter): def __init__(self, max_batch_size: int, max_workspace_size: int, onnx_opset: int, precision: str): self._max_batch_size = max_batch_size self._max_workspace_size = max_workspace_size self._onnx_opset = onnx_opset self._precision = Precision(precision) def convert(self, model: Model, dataloader_fn) -> Model: from .onnx import _infer_graph_precision from .onnx2trt_conv import onnx2trt pyt2onnx_converter = PYT2ONNXConverter(self._onnx_opset) onnx_model = pyt2onnx_converter.convert(model, dataloader_fn).handle precision = _infer_graph_precision(onnx_model.graph) input_shapes = get_input_shapes(dataloader_fn(), self._max_batch_size) cuda_engine = onnx2trt( onnx_model, shapes=input_shapes, max_workspace_size=self._max_workspace_size, max_batch_size=self._max_batch_size, model_precision=self._precision.value, ) return Model( handle=cuda_engine, precision=model.precision, inputs=model.inputs, outputs=model.outputs, ) @staticmethod def required_source_model_precision(requested_model_precision: Precision) -> Precision: # TensorRT requires source models to be in FP32 precision return Precision.FP32 class TorchScriptSaver(BaseSaver): def save(self, model: Model, model_path: Union[str, Path]) -> None: if not isinstance(model_path, Path): model_path = Path(model_path) if isinstance(model.handle, torch.jit.ScriptModule): torch.jit.save(model.handle, model_path.as_posix()) else: print("The model must be of type 'torch.jit.ScriptModule'. Saving aborted.") assert False # temporary error handling def _format_tensor_spec(tensor_spec): # wrapping shape with list and whole tensor_spec with dict() is required for correct yaml dump tensor_spec = tensor_spec._replace(shape=list(tensor_spec.shape)) tensor_spec = dict(tensor_spec._asdict()) return tensor_spec # store TensorSpecs from inputs and outputs in a yaml file tensor_specs = { "inputs": {k: _format_tensor_spec(v) for k, v in model.inputs.items()}, "outputs": {k: _format_tensor_spec(v) for k, v in model.outputs.items()}, } yaml_path = model_path.parent / f"{model_path.stem}.yaml" with Path(yaml_path).open("w") as fh: yaml.dump(tensor_specs, fh, indent=4) class PyTorchRunner(BaseRunner): def __init__(self): pass def init_inference(self, model: Model): return PyTorchRunnerSession(model=model) class PyTorchRunnerSession(BaseRunnerSession): def __init__(self, model: Model): super().__init__(model) assert isinstance(model.handle, torch.jit.ScriptModule) or isinstance( model.handle, torch.nn.Module ), "The model must be of type 'torch.jit.ScriptModule' or 'torch.nn.Module'. Runner aborted." 
self._model = model self._output_names = None def __enter__(self): self._output_names = list(self._model.outputs) return self def __exit__(self, exc_type, exc_value, traceback): self._output_names = None self._model = None def __call__(self, x: Dict[str, object]): with torch.no_grad(): feed_list = [torch.from_numpy(v).cuda() for k, v in x.items()] y_pred = self._model.handle(*feed_list) if isinstance(y_pred, torch.Tensor): y_pred = (y_pred,) y_pred = [t.cpu().numpy() for t in y_pred] y_pred = dict(zip(self._output_names, y_pred)) return y_pred loaders.register_extension(Format.PYT.value, PyTorchModelLoader) loaders.register_extension(Format.TS_TRACE.value, TorchScriptLoader) loaders.register_extension(Format.TS_SCRIPT.value, TorchScriptLoader) converters.register_extension(f"{Format.PYT.value}--{Format.TS_SCRIPT.value}", TorchScriptScriptConverter) converters.register_extension(f"{Format.PYT.value}--{Format.TS_TRACE.value}", TorchScriptTraceConverter) converters.register_extension(f"{Format.PYT.value}--{Format.ONNX.value}", PYT2ONNXConverter) converters.register_extension(f"{Format.PYT.value}--{Format.TRT.value}", PYT2TensorRTConverter) savers.register_extension(Format.TS_SCRIPT.value, TorchScriptSaver) savers.register_extension(Format.TS_TRACE.value, TorchScriptSaver) runners.register_extension(Format.PYT.value, PyTorchRunner) runners.register_extension(Format.TS_SCRIPT.value, PyTorchRunner) runners.register_extension(Format.TS_TRACE.value, PyTorchRunner)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/triton/deployment_toolkit/bermuda/pyt.py
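A minimal sketch of the PyTorch-to-ONNX export path that PYT2ONNXConverter above wraps, assuming only torch is installed; the tiny Linear model, tensor names, and opset value are illustrative and not taken from the toolkit.

import torch

# Illustrative model and input; the real converter pulls these from dataloader_fn().
model = torch.nn.Linear(16, 4).eval()
dummy_input = torch.randn(8, 16)

with torch.no_grad():
    torch.onnx.export(
        model,
        dummy_input,
        "model.onnx",
        do_constant_folding=True,
        input_names=["input__0"],
        output_names=["output__0"],
        # keep the batch dimension dynamic, mirroring get_dynamic_axes above
        dynamic_axes={"input__0": {0: "batch"}, "output__0": {0: "batch"}},
        opset_version=13,
    )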
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import time import dllogger as logger import numpy as np import torch from dllogger import JSONStreamBackend, StdOutBackend, Verbosity from pytorch_lightning import Callback from pytorch_lightning.utilities import rank_zero_only class DLLogger: def __init__(self, log_dir, filename, append=True): super().__init__() self._initialize_dllogger(log_dir, filename, append) @rank_zero_only def _initialize_dllogger(self, log_dir, filename, append): backends = [ JSONStreamBackend(Verbosity.VERBOSE, os.path.join(log_dir, filename), append=append), StdOutBackend(Verbosity.VERBOSE), ] logger.init(backends=backends) @rank_zero_only def log_metrics(self, metrics, step=None): if step is None: step = () logger.log(step=step, data=metrics) @rank_zero_only def log_metadata(self, metric, metadata): logger.metadata(metric, metadata) @rank_zero_only def flush(self): logger.flush() class LoggingCallback(Callback): def __init__(self, log_dir, filnename, global_batch_size, mode, warmup, dim): self.dllogger = DLLogger(log_dir, filnename) self.warmup_steps = warmup self.global_batch_size = global_batch_size self.step = 0 self.dim = dim self.mode = mode self.timestamps = [] self.dllogger.log_metadata("dice_score", {"unit": None}) self.dllogger.log_metadata(f"throughput_{self.mode}", {"unit": "images/s"}) self.dllogger.log_metadata(f"latency_{self.mode}_mean", {"unit": "ms"}) for level in [90, 95, 99]: self.dllogger.log_metadata(f"latency_{self.mode}_{level}", {"unit": "ms"}) def do_step(self): if self.step > self.warmup_steps: self.step += 1 return torch.cuda.synchronize() self.timestamps.append(time.perf_counter()) def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx): if trainer.current_epoch == 1: self.do_step() def on_test_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): if pl_module.start_benchmark == 1: self.do_step() def process_performance_stats(self): def _round3(val): return round(val, 3) elapsed_times = np.diff(self.timestamps) throughput_imgps = _round3(self.global_batch_size / np.mean(elapsed_times)) timestamps_ms = 1000 * elapsed_times stats = { f"throughput_{self.mode}": throughput_imgps, f"latency_{self.mode}_mean": _round3(np.mean(timestamps_ms)), } for level in [90, 95, 99]: stats.update({f"latency_{self.mode}_{level}": _round3(np.percentile(timestamps_ms, level))}) return stats @rank_zero_only def _log(self): stats = self.process_performance_stats() self.dllogger.log_metrics(metrics=stats) self.dllogger.flush() def on_train_end(self, trainer, pl_module): self._log() def on_test_end(self, trainer, pl_module): if pl_module.start_benchmark == 1: self._log()
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/utils/logger.py
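A standalone sketch of the throughput and latency arithmetic in process_performance_stats above, using only numpy; the timestamps and batch size are made-up values for illustration.

import numpy as np

timestamps = [0.00, 0.21, 0.40, 0.62, 0.81]   # illustrative per-step wall-clock times (s)
global_batch_size = 2

elapsed = np.diff(timestamps)                                  # seconds per step
throughput = round(global_batch_size / np.mean(elapsed), 3)    # about 9.88 images/s
latency_ms = 1000 * elapsed
stats = {"throughput": throughput, "latency_mean": round(np.mean(latency_ms).item(), 3)}
for level in (90, 95, 99):
    stats[f"latency_{level}"] = round(np.percentile(latency_ms, level).item(), 3)
print(stats)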
import importlib import torch from torch import Tensor from torch.nn.modules.batchnorm import _NormBase global instance_norm_nvfuser_cuda instance_norm_nvfuser_cuda = None class InstanceNormNVFuserFunction(torch.autograd.Function): @staticmethod def forward(ctx, input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps): global instance_norm_nvfuser_cuda if instance_norm_nvfuser_cuda is None: instance_norm_nvfuser_cuda = importlib.import_module("instance_norm_nvfuser_cuda") channels_last = input.is_contiguous(memory_format=torch.channels_last) or input.is_contiguous( memory_format=torch.channels_last_3d ) if channels_last: order = [0] + [i for i in range(2, len(input.shape))] + [1] _input = input.permute(order) else: _input = input assert _input.is_contiguous() result = instance_norm_nvfuser_cuda.forward( _input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, channels_last ) if len(result) == 3: out, mean, invstd = result else: running_mean, running_var, out, mean, invstd = result ctx.use_input_stats = use_input_stats ctx.eps = eps ctx.channels_last = channels_last # saving for backward in "explicit channels-last format" ctx.save_for_backward(_input, weight, running_mean, running_var, mean, invstd) if channels_last: order = [0, len(_input.shape) - 1] + [i for i in range(1, len(_input.shape) - 1)] out = out.permute(order) if len(out.shape) == 4: assert out.is_contiguous(memory_format=torch.channels_last) assert input.is_contiguous(memory_format=torch.channels_last) elif len(out.shape) == 5: assert out.is_contiguous(memory_format=torch.channels_last_3d) assert input.is_contiguous(memory_format=torch.channels_last_3d) else: assert False, "unhandled channels_last format variation in forward" return out @staticmethod def backward(ctx, grad_output): global instance_norm_nvfuser_cuda if instance_norm_nvfuser_cuda is None: instance_norm_nvfuser_cuda = importlib.import_module("instance_norm_nvfuser_cuda") if ctx.channels_last: order = [0] + [i for i in range(2, len(grad_output.shape))] + [1] grad_output = grad_output.permute(order) # input was saved in "explicit channels-last format" assert ctx.saved_tensors[0].is_contiguous() grad_output = grad_output.contiguous() saved = list(ctx.saved_tensors) saved.insert(1, grad_output) running_mean = saved[3] running_var = saved[4] mean = saved[-2] var = saved[-1] grad_input, grad_weight, grad_bias = instance_norm_nvfuser_cuda.backward( *saved, ctx.use_input_stats, ctx.eps, ctx.channels_last ) if ctx.channels_last: order = [0, len(grad_input.shape) - 1] + [i for i in range(1, len(grad_input.shape) - 1)] grad_input = grad_input.permute(order) if len(grad_input.shape) == 4: assert grad_input.is_contiguous(memory_format=torch.channels_last) elif len(grad_input.shape) == 5: assert grad_input.is_contiguous(memory_format=torch.channels_last_3d) else: assert False, "unhandled channels_last format variation in backward" return grad_input, grad_weight, grad_bias, None, None, None, None, None, None class _InstanceNormNVFuser(_NormBase): def __init__( self, num_features: int, eps: float = 1e-5, momentum: float = 0.1, affine: bool = False, track_running_stats: bool = False, device=None, dtype=None, ) -> None: factory_kwargs = {"device": device, "dtype": dtype} super(_InstanceNormNVFuser, self).__init__( num_features, eps, momentum, affine, track_running_stats, **factory_kwargs ) self.dummy = torch.empty([], device=device) def _check_input_dim(self, input): raise NotImplementedError def _load_from_state_dict( self, 
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ): version = local_metadata.get("version", None) # at version 1: removed running_mean and running_var when # track_running_stats=False (default) if version is None and not self.track_running_stats: running_stats_keys = [] for name in ("running_mean", "running_var"): key = prefix + name if key in state_dict: running_stats_keys.append(key) if len(running_stats_keys) > 0: error_msgs.append( "Unexpected running stats buffer(s) {names} for {klass} " "with track_running_stats=False. If state_dict is a " "checkpoint saved before 0.4.0, this may be expected " "because {klass} does not track running stats by default " "since 0.4.0. Please remove these keys from state_dict. If " "the running stats are actually needed, instead set " "track_running_stats=True in {klass} to enable them. See " "the documentation of {klass} for details.".format( names=" and ".join('"{}"'.format(k) for k in running_stats_keys), klass=self.__class__.__name__ ) ) for key in running_stats_keys: state_dict.pop(key) super(_InstanceNormNVFuser, self)._load_from_state_dict( state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs ) def forward(self, input: Tensor) -> Tensor: assert input.is_cuda, "NVFuser InstanceNorm is CUDA only" self._check_input_dim(input) if self.running_mean is not None: out = InstanceNormNVFuserFunction.apply( input, self.weight if self.weight is not None else self.dummy, self.bias if self.bias is not None else self.dummy, self.running_mean, self.running_var, self.training or not self.track_running_stats, self.momentum, self.eps, ) else: out = InstanceNormNVFuserFunction.apply( input, self.weight if self.weight is not None else self.dummy, self.bias if self.bias is not None else self.dummy, self.dummy, self.dummy, self.training or not self.track_running_stats, self.momentum, self.eps, ) return out class InstanceNorm3dNVFuser(_InstanceNormNVFuser): def _check_input_dim(self, input): if input.dim() != 5: raise ValueError("expected 5D input (got {}D input)".format(input.dim()))
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/utils/instance_norm.py
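A small sketch of the permutation-order trick used in InstanceNormNVFuserFunction above to move between NCDHW and an explicit channels-last layout; it needs only torch and does not require the instance_norm_nvfuser_cuda extension.

import torch

# NCDHW-shaped tensor stored in channels-last-3d memory
x = torch.randn(2, 3, 4, 5, 6).contiguous(memory_format=torch.channels_last_3d)

# forward direction: NCDHW -> NDHWC view, as in the autograd Function's forward pass
to_last = [0] + list(range(2, x.dim())) + [1]
x_last = x.permute(to_last)
assert x_last.is_contiguous()

# backward direction: NDHWC -> NCDHW
to_first = [0, x_last.dim() - 1] + list(range(1, x_last.dim() - 1))
assert x_last.permute(to_first).shape == x.shape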
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ctypes import os import pickle from subprocess import run import numpy as np import torch from pytorch_lightning.utilities import rank_zero_only @rank_zero_only def print0(text): print(text) def get_task_code(args): return f"{args.task}_{args.dim}d" def get_config_file(args): if args.data != "/data": path = os.path.join(args.data, "config.pkl") else: task_code = get_task_code(args) path = os.path.join(args.data, task_code, "config.pkl") return pickle.load(open(path, "rb")) def set_cuda_devices(args): assert args.gpus <= torch.cuda.device_count(), f"Requested {args.gpus} gpus, available {torch.cuda.device_count()}." device_list = ",".join([str(i) for i in range(args.gpus)]) os.environ["CUDA_VISIBLE_DEVICES"] = os.environ.get("CUDA_VISIBLE_DEVICES", device_list) def verify_ckpt_path(args): if args.resume_training: resume_path_ckpt = os.path.join( args.ckpt_path if args.ckpt_path is not None else "", "checkpoints", "last.ckpt" ) resume_path_results = os.path.join(args.results, "checkpoints", "last.ckpt") if os.path.exists(resume_path_ckpt): return resume_path_ckpt if os.path.exists(resume_path_results): return resume_path_results print("[Warning] Checkpoint not found. Starting training from scratch.") return None if args.ckpt_path is None or not os.path.isfile(args.ckpt_path): print(f"Provided checkpoint {args.ckpt_path} is not a file. Starting training from scratch.") return None return args.ckpt_path def make_empty_dir(path): run(["rm", "-rf", path]) os.makedirs(path) def get_stats(pred, targ, class_idx): tp = np.logical_and(pred == class_idx, targ == class_idx).sum() fn = np.logical_and(pred != class_idx, targ == class_idx).sum() fp = np.logical_and(pred == class_idx, targ != class_idx).sum() return tp, fn, fp def set_granularity(): _libcudart = ctypes.CDLL("libcudart.so") pValue = ctypes.cast((ctypes.c_int * 1)(), ctypes.POINTER(ctypes.c_int)) _libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128)) _libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05)) assert pValue.contents.value == 128
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/utils/utils.py
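A self-contained numpy sketch of the per-class TP/FN/FP counting performed by get_stats above; the toy prediction and target arrays are invented for illustration.

import numpy as np

pred = np.array([0, 1, 1, 2, 2, 2])
targ = np.array([0, 1, 2, 2, 2, 0])
class_idx = 2

tp = np.logical_and(pred == class_idx, targ == class_idx).sum()   # 2
fn = np.logical_and(pred != class_idx, targ == class_idx).sum()   # 1
fp = np.logical_and(pred == class_idx, targ != class_idx).sum()   # 1
print(tp, fn, fp)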
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import json from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, Namespace def positive_int(value): ivalue = int(value) assert ivalue > 0, f"Argparse error. Expected positive integer but got {value}" return ivalue def non_negative_int(value): ivalue = int(value) assert ivalue >= 0, f"Argparse error. Expected non-negative integer but got {value}" return ivalue def float_0_1(value): fvalue = float(value) assert 0 <= fvalue <= 1, f"Argparse error. Expected float value to be in range (0, 1), but got {value}" return fvalue def get_main_args(strings=None): parser = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter) arg = parser.add_argument arg( "--exec_mode", type=str, choices=["train", "evaluate", "predict"], default="train", help="Execution mode to run the model", ) arg("--data", type=str, default="/data", help="Path to data directory") arg("--results", type=str, default="/results", help="Path to results directory") arg("--config", type=str, default=None, help="Config file with arguments") arg("--logname", type=str, default="logs.json", help="Name of dlloger output") arg("--task", type=str, default="01", help="Task number. MSD uses numbers 01-10") arg("--gpus", type=non_negative_int, default=1, help="Number of gpus") arg("--nodes", type=non_negative_int, default=1, help="Number of nodes") arg("--learning_rate", type=float, default=0.0008, help="Learning rate") arg("--gradient_clip_val", type=float, default=0, help="Gradient clipping norm value") arg("--negative_slope", type=float, default=0.01, help="Negative slope for LeakyReLU") arg("--tta", action="store_true", help="Enable test time augmentation") arg("--brats", action="store_true", help="Enable BraTS specific training and inference") arg("--deep_supervision", action="store_true", help="Enable deep supervision") arg("--invert_resampled_y", action="store_true", help="Resize predictions to match label size before resampling") arg("--amp", action="store_true", help="Enable automatic mixed precision") arg("--benchmark", action="store_true", help="Run model benchmarking") arg("--focal", action="store_true", help="Use focal loss instead of cross entropy") arg("--save_ckpt", action="store_true", help="Enable saving checkpoint") arg("--nfolds", type=positive_int, default=5, help="Number of cross-validation folds") arg("--seed", type=non_negative_int, default=None, help="Random seed") arg("--skip_first_n_eval", type=non_negative_int, default=0, help="Skip the evaluation for the first n epochs.") arg("--ckpt_path", type=str, default=None, help="Path for loading checkpoint") arg("--ckpt_store_dir", type=str, default="/results", help="Path for saving checkpoint") arg("--fold", type=non_negative_int, default=0, help="Fold number") arg("--patience", type=positive_int, default=100, help="Early stopping patience") arg("--batch_size", type=positive_int, default=2, help="Batch size") arg("--val_batch_size", type=positive_int, 
default=4, help="Validation batch size") arg("--momentum", type=float, default=0.99, help="Momentum factor") arg("--weight_decay", type=float, default=0.0001, help="Weight decay (L2 penalty)") arg("--save_preds", action="store_true", help="Enable prediction saving") arg("--dim", type=int, choices=[2, 3], default=3, help="UNet dimension") arg("--resume_training", action="store_true", help="Resume training from the last checkpoint") arg("--num_workers", type=non_negative_int, default=8, help="Number of subprocesses to use for data loading") arg("--epochs", type=non_negative_int, default=1000, help="Number of training epochs.") arg("--warmup", type=non_negative_int, default=5, help="Warmup iterations before collecting statistics") arg("--nvol", type=positive_int, default=4, help="Number of volumes which come into single batch size for 2D model") arg("--depth", type=non_negative_int, default=5, help="The depth of the encoder") arg("--min_fmap", type=non_negative_int, default=4, help="Minimal dimension of feature map in the bottleneck") arg("--deep_supr_num", type=non_negative_int, default=2, help="Number of deep supervision heads") arg("--res_block", action="store_true", help="Enable residual blocks") arg("--filters", nargs="+", help="[Optional] Set U-Net filters", default=None, type=int) arg("--layout", type=str, default="NCDHW") arg("--brats22_model", action="store_true", help="Use BraTS22 model") arg( "--norm", type=str, choices=["instance", "instance_nvfuser", "batch", "group"], default="instance", help="Normalization layer", ) arg( "--data2d_dim", choices=[2, 3], type=int, default=3, help="Input data dimension for 2d model", ) arg( "--oversampling", type=float_0_1, default=0.4, help="Probability of crop to have some region with positive label", ) arg( "--overlap", type=float_0_1, default=0.25, help="Amount of overlap between scans during sliding window inference", ) arg( "--scheduler", action="store_true", help="Enable cosine rate scheduler with warmup", ) arg( "--optimizer", type=str, default="adam", choices=["sgd", "adam"], help="Optimizer", ) arg( "--blend", type=str, choices=["gaussian", "constant"], default="constant", help="How to blend output of overlapping windows", ) arg( "--train_batches", type=non_negative_int, default=0, help="Limit number of batches for training (used for benchmarking mode only)", ) arg( "--test_batches", type=non_negative_int, default=0, help="Limit number of batches for inference (used for benchmarking mode only)", ) if strings is not None: arg( "strings", metavar="STRING", nargs="*", help="String for searching", ) args = parser.parse_args(strings.split()) else: args = parser.parse_args() if args.config is not None: config = json.load(open(args.config, "r")) args = vars(args) args.update(config) args = Namespace(**args) with open(f"{args.results}/params.json", "w") as f: json.dump(vars(args), f) return args
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/utils/args.py
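A minimal sketch of how get_main_args above merges a JSON config file into the parsed arguments; the Namespace fields and override values here are illustrative only.

import json
from argparse import Namespace

args = Namespace(task="01", dim=3, learning_rate=0.0008)      # pretend parser output
config = json.loads('{"dim": 2, "learning_rate": 0.001}')     # pretend --config file contents

merged = vars(args)
merged.update(config)
args = Namespace(**merged)
print(args)   # Namespace(dim=2, learning_rate=0.001, task='01')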
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import subprocess from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path parser = ArgumentParser(ArgumentDefaultsHelpFormatter) parser.add_argument("--mode", type=str, required=True, choices=["train", "predict"], help="Benchmarking mode") parser.add_argument("--task", type=str, default="01", help="Task code") parser.add_argument("--gpus", type=int, default=1, help="Number of GPUs to use") parser.add_argument("--nodes", type=int, default=1, help="Number of nodes to use") parser.add_argument("--dim", type=int, required=True, help="Dimension of UNet") parser.add_argument("--batch_size", type=int, required=True, help="Batch size") parser.add_argument("--amp", action="store_true", help="Enable automatic mixed precision") parser.add_argument("--bind", action="store_true", help="Bind CPUs for each GPU. Improves throughput for multi-GPU.") parser.add_argument("--train_batches", type=int, default=200, help="Number of batches for training") parser.add_argument("--test_batches", type=int, default=200, help="Number of batches for inference") parser.add_argument("--warmup", type=int, default=100, help="Warmup iterations before collecting statistics") parser.add_argument("--results", type=str, default="/results", help="Path to results directory") parser.add_argument("--logname", type=str, default="perf.json", help="Name of dlloger output") if __name__ == "__main__": args = parser.parse_args() path_to_main = Path(__file__).resolve().parent.parent / "main.py" cmd = "" if args.bind: cmd += "bindpcie --cpu=exclusive,nosmt " cmd += f"python main.py --task {args.task} --benchmark --epochs 2 " cmd += f"--results {args.results} " cmd += f"--logname {args.logname} " cmd += f"--exec_mode {args.mode} " cmd += f"--dim {args.dim} " cmd += f"--gpus {args.gpus} " cmd += f"--nodes {args.nodes} " cmd += f"--train_batches {args.train_batches} " cmd += f"--test_batches {args.test_batches} " cmd += f"--warmup {args.warmup} " cmd += "--amp " if args.amp else "" if args.mode == "train": cmd += f"--batch_size {args.batch_size} " else: cmd += f"--val_batch_size {args.batch_size} " if args.amp and args.dim == 3: cmd += "--norm instance_nvfuser --layout NDHWC" subprocess.run(cmd, shell=True)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/scripts/benchmark.py
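A hypothetical way to drive the benchmark launcher above from Python; the flag names come from its argparse definition, while the specific values and the relative script path are assumptions.

import subprocess

cmd = [
    "python", "scripts/benchmark.py",
    "--mode", "train",
    "--dim", "3",
    "--batch_size", "2",
    "--gpus", "1",
    "--amp",
]
subprocess.run(cmd, check=True)   # equivalent to launching it from a shell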
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from pathlib import Path from subprocess import run parser = ArgumentParser(ArgumentDefaultsHelpFormatter) parser.add_argument("--task", type=str, default="01", help="Task code") parser.add_argument("--gpus", type=int, required=True, help="Number of GPUs") parser.add_argument("--fold", type=int, required=True, choices=[0, 1, 2, 3, 4], help="Fold number") parser.add_argument("--dim", type=int, required=True, choices=[2, 3], help="Dimension of UNet") parser.add_argument("--seed", type=int, default=1, help="Random seed") parser.add_argument("--amp", action="store_true", help="Enable automatic mixed precision") parser.add_argument("--tta", action="store_true", help="Enable test time augmentation") parser.add_argument("--bind", action="store_true", help="Bind CPUs for each GPU. Improves throughput for multi-GPU.") parser.add_argument("--resume_training", action="store_true", help="Resume training from checkpoint") parser.add_argument("--results", type=str, default="/results", help="Path to results directory") parser.add_argument("--logname", type=str, default="train_logs.json", help="Name of dllogger output") parser.add_argument("--learning_rate", type=float, default=8e-4, help="Learning rate") if __name__ == "__main__": args = parser.parse_args() skip = 100 if args.gpus == 1 else 150 path_to_main = Path(__file__).resolve().parent.parent / "main.py" cmd = "" if args.bind: cmd += "bindpcie --cpu=exclusive,nosmt " cmd += f"python {path_to_main} --exec_mode train --save_ckpt --deep_supervision --skip_first_n_eval {skip} " cmd += f"--task {args.task} " cmd += f"--results {args.results} " cmd += f"--logname {args.logname} " cmd += f"--dim {args.dim} " cmd += f"--batch_size {2 if args.dim == 3 else 64} " cmd += f"--val_batch_size {1 if args.dim == 3 else 64} " cmd += f"--norm {'instance_nvfuser' if args.dim == 3 else 'instance'} " cmd += f"--layout {'NDHWC' if args.dim == 3 else 'NCDHW'} " cmd += f"--fold {args.fold} " cmd += f"--gpus {args.gpus} " cmd += f"--epochs {300 if args.gpus == 1 else 600} " cmd += f"--learning_rate {args.learning_rate} " cmd += "--amp " if args.amp else "" cmd += "--tta " if args.tta else "" cmd += "--resume_training " if args.resume_training else "" cmd += f"--seed {args.seed} " run(cmd, shell=True)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/scripts/train.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser from os.path import dirname from subprocess import run parser = ArgumentParser(ArgumentDefaultsHelpFormatter) parser.add_argument("--data", type=str, required=True, help="Path to data") parser.add_argument("--task", type=str, default="01", help="Path to data") parser.add_argument("--fold", type=int, required=True, choices=[0, 1, 2, 3, 4], help="Fold number") parser.add_argument("--dim", type=int, required=True, help="Dimension of UNet") parser.add_argument("--ckpt_path", type=str, required=True, help="Path to checkpoint") parser.add_argument("--batch_size", type=int, default=4, help="Batch size") parser.add_argument("--amp", action="store_true", help="Enable automatic mixed precision") parser.add_argument("--tta", action="store_true", help="Enable test time augmentation") parser.add_argument("--save_preds", action="store_true", help="Save predicted masks") if __name__ == "__main__": args = parser.parse_args() path_to_main = os.path.join(dirname(dirname(os.path.realpath(__file__))), "main.py") cmd = f"python {path_to_main} --exec_mode predict --task {args.task} --gpus 1 " cmd += f"--data {args.data} " cmd += f"--dim {args.dim} " cmd += f"--fold {args.fold} " cmd += f"--ckpt_path {args.ckpt_path} " cmd += f"--val_batch_size {args.batch_size} " cmd += "--amp " if args.amp else "" cmd += "--tta " if args.tta else "" cmd += "--save_preds " if args.save_preds else "" run(cmd, shell=True)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/scripts/inference.py
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. task = { "01": "Task01_BrainTumour", "02": "Task02_Heart", "03": "Task03_Liver", "04": "Task04_Hippocampus", "05": "Task05_Prostate", "06": "Task06_Lung", "07": "Task07_Pancreas", "08": "Task08_HepaticVessel", "09": "Task09_Spleen", "10": "Task10_Colon", "11": "BraTS2021_train", "12": "BraTS2021_val", } patch_size = { "01_3d": [128, 128, 128], "02_3d": [80, 192, 160], "03_3d": [128, 128, 128], "04_3d": [40, 56, 40], "05_3d": [20, 320, 256], "06_3d": [80, 192, 160], "07_3d": [40, 224, 224], "08_3d": [64, 192, 192], "09_3d": [64, 192, 160], "10_3d": [56, 192, 160], "11_3d": [128, 128, 128], "12_3d": [128, 128, 128], "01_2d": [192, 160], "02_2d": [320, 256], "03_2d": [512, 512], "04_2d": [56, 40], "05_2d": [320, 320], "06_2d": [512, 512], "07_2d": [512, 512], "08_2d": [512, 512], "09_2d": [512, 512], "10_2d": [512, 512], } spacings = { "01_3d": [1.0, 1.0, 1.0], "02_3d": [1.37, 1.25, 1.25], "03_3d": [1, 0.7676, 0.7676], "04_3d": [1.0, 1.0, 1.0], "05_3d": [3.6, 0.62, 0.62], "06_3d": [1.24, 0.79, 0.79], "07_3d": [2.5, 0.8, 0.8], "08_3d": [1.5, 0.8, 0.8], "09_3d": [1.6, 0.79, 0.79], "10_3d": [3, 0.78, 0.78], "11_3d": [1.0, 1.0, 1.0], "12_3d": [1.0, 1.0, 1.0], "01_2d": [1.0, 1.0], "02_2d": [1.25, 1.25], "03_2d": [0.7676, 0.7676], "04_2d": [1.0, 1.0], "05_2d": [0.62, 0.62], "06_2d": [0.79, 0.79], "07_2d": [0.8, 0.8], "08_2d": [0.8, 0.8], "09_2d": [0.79, 0.79], "10_2d": [0.78, 0.78], } ct_min = { "03": -17, "06": -1024, "07": -96, "08": -3, "09": -41, "10": -30, } ct_max = { "03": 201, "06": 325, "07": 215, "08": 243, "09": 176, "10": 165.82, } ct_mean = {"03": 99.4, "06": -158.58, "07": 77.9, "08": 104.37, "09": 99.29, "10": 62.18} ct_std = {"03": 39.36, "06": 324.7, "07": 75.4, "08": 52.62, "09": 39.47, "10": 32.65}
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/data_preprocessing/configs.py
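A short sketch of how the lookup tables above are typically consumed: the task code is the MSD task number plus the model dimension, e.g. "01_3d". Only the dictionaries defined in configs.py are assumed, and the import assumes the repository root is on the Python path.

# assumes the dictionaries above are importable from data_preprocessing.configs
from data_preprocessing.configs import patch_size, spacings, task

task_id, dim = "01", 3
task_code = f"{task_id}_{dim}d"       # -> "01_3d"
print(task[task_id])                  # Task01_BrainTumour
print(patch_size[task_code])          # [128, 128, 128]
print(spacings[task_code])            # [1.0, 1.0, 1.0]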
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools import json import math import os import pickle import monai.transforms as transforms import nibabel import numpy as np from joblib import Parallel, delayed from skimage.transform import resize from utils.utils import get_task_code, make_empty_dir from data_preprocessing.configs import ct_max, ct_mean, ct_min, ct_std, patch_size, spacings, task class Preprocessor: def __init__(self, args): self.args = args self.target_spacing = None self.task = args.task self.task_code = get_task_code(args) self.verbose = args.verbose self.patch_size = patch_size[self.task_code] self.training = args.exec_mode == "training" self.data_path = os.path.join(args.data, task[args.task]) metadata_path = os.path.join(self.data_path, "dataset.json") self.metadata = json.load(open(metadata_path, "r")) self.modality = self.metadata["modality"]["0"] self.results = os.path.join(args.results, self.task_code) self.ct_min, self.ct_max, self.ct_mean, self.ct_std = (0,) * 4 if not self.training: self.results = os.path.join(self.results, self.args.exec_mode) self.crop_foreg = transforms.CropForegroundd(keys=["image", "label"], source_key="image") nonzero = True if self.modality != "CT" else False # normalize only non-zero region for MRI self.normalize_intensity = transforms.NormalizeIntensity(nonzero=nonzero, channel_wise=True) if self.args.exec_mode == "val": dataset_json = json.load(open(metadata_path, "r")) dataset_json["val"] = dataset_json["training"] with open(metadata_path, "w") as outfile: json.dump(dataset_json, outfile) def run(self): make_empty_dir(self.results) print(f"Preprocessing {self.data_path}") try: self.target_spacing = spacings[self.task_code] except: self.collect_spacings() if self.verbose: print(f"Target spacing {self.target_spacing}") if self.modality == "CT": try: self.ct_min = ct_min[self.task] self.ct_max = ct_max[self.task] self.ct_mean = ct_mean[self.task] self.ct_std = ct_std[self.task] except: self.collect_intensities() _mean = round(self.ct_mean, 2) _std = round(self.ct_std, 2) if self.verbose: print(f"[CT] min: {self.ct_min}, max: {self.ct_max}, mean: {_mean}, std: {_std}") self.run_parallel(self.preprocess_pair, self.args.exec_mode) pickle.dump( { "patch_size": self.patch_size, "spacings": self.target_spacing, "n_class": len(self.metadata["labels"]), "in_channels": len(self.metadata["modality"]) + int(self.args.ohe), }, open(os.path.join(self.results, "config.pkl"), "wb"), ) def preprocess_pair(self, pair): fname = os.path.basename(pair["image"] if isinstance(pair, dict) else pair) image, label, image_spacings = self.load_pair(pair) # Crop foreground and store original shapes. 
orig_shape = image.shape[1:] bbox = transforms.utils.generate_spatial_bounding_box(image) image = transforms.SpatialCrop(roi_start=bbox[0], roi_end=bbox[1])(image) image_metadata = np.vstack([bbox, orig_shape, image.shape[1:]]) if label is not None: label = transforms.SpatialCrop(roi_start=bbox[0], roi_end=bbox[1])(label) self.save_npy(label, fname, "_orig_lbl.npy") if self.args.dim == 3: image, label = self.resample(image, label, image_spacings) if self.modality == "CT": image = np.clip(image, self.ct_min, self.ct_max) image = self.normalize(image) if self.training: image, label = self.standardize(image, label) if self.args.ohe: mask = np.ones(image.shape[1:], dtype=np.float32) for i in range(image.shape[0]): zeros = np.where(image[i] <= 0) mask[zeros] *= 0.0 image = self.normalize_intensity(image).astype(np.float32) mask = np.expand_dims(mask, 0) image = np.concatenate([image, mask]) self.save(image, label, fname, image_metadata) def resample(self, image, label, image_spacings): if self.target_spacing != image_spacings: image, label = self.resample_pair(image, label, image_spacings) return image, label def standardize(self, image, label): pad_shape = self.calculate_pad_shape(image) image_shape = image.shape[1:] if pad_shape != image_shape: paddings = [(pad_sh - image_sh) / 2 for (pad_sh, image_sh) in zip(pad_shape, image_shape)] image = self.pad(image, paddings) label = self.pad(label, paddings) if self.args.dim == 2: # Center cropping 2D images. _, _, height, weight = image.shape start_h = (height - self.patch_size[0]) // 2 start_w = (weight - self.patch_size[1]) // 2 image = image[:, :, start_h : start_h + self.patch_size[0], start_w : start_w + self.patch_size[1]] label = label[:, :, start_h : start_h + self.patch_size[0], start_w : start_w + self.patch_size[1]] return image, label def normalize(self, image): if self.modality == "CT": return (image - self.ct_mean) / self.ct_std return self.normalize_intensity(image) def save(self, image, label, fname, image_metadata): mean, std = np.round(np.mean(image, (1, 2, 3)), 2), np.round(np.std(image, (1, 2, 3)), 2) if self.verbose: print(f"Saving {fname} shape {image.shape} mean {mean} std {std}") self.save_npy(image, fname, "_x.npy") if label is not None: self.save_npy(label, fname, "_y.npy") if image_metadata is not None: self.save_npy(image_metadata, fname, "_meta.npy") def load_pair(self, pair): image = self.load_nifty(pair["image"] if isinstance(pair, dict) else pair) image_spacing = self.load_spacing(image) image = image.get_fdata().astype(np.float32) image = self.standardize_layout(image) if self.training: label = self.load_nifty(pair["label"]).get_fdata().astype(np.uint8) label = self.standardize_layout(label) else: label = None return image, label, image_spacing def resample_pair(self, image, label, spacing): shape = self.calculate_new_shape(spacing, image.shape[1:]) if self.check_anisotrophy(spacing): image = self.resample_anisotrophic_image(image, shape) if label is not None: label = self.resample_anisotrophic_label(label, shape) else: image = self.resample_regular_image(image, shape) if label is not None: label = self.resample_regular_label(label, shape) image = image.astype(np.float32) if label is not None: label = label.astype(np.uint8) return image, label def calculate_pad_shape(self, image): min_shape = self.patch_size[:] image_shape = image.shape[1:] if len(min_shape) == 2: # In 2D case we don't want to pad depth axis. 
min_shape.insert(0, image_shape[0]) pad_shape = [max(mshape, ishape) for mshape, ishape in zip(min_shape, image_shape)] return pad_shape def get_intensities(self, pair): image = self.load_nifty(pair["image"]).get_fdata().astype(np.float32) label = self.load_nifty(pair["label"]).get_fdata().astype(np.uint8) foreground_idx = np.where(label > 0) intensities = image[foreground_idx].tolist() return intensities def collect_intensities(self): intensities = self.run_parallel(self.get_intensities, "training") intensities = list(itertools.chain(*intensities)) self.ct_min, self.ct_max = np.percentile(intensities, [0.5, 99.5]) self.ct_mean, self.ct_std = np.mean(intensities), np.std(intensities) def get_spacing(self, pair): image = nibabel.load(os.path.join(self.data_path, pair["image"])) spacing = self.load_spacing(image) return spacing def collect_spacings(self): spacing = self.run_parallel(self.get_spacing, "training") spacing = np.array(spacing) target_spacing = np.median(spacing, axis=0) if max(target_spacing) / min(target_spacing) >= 3: lowres_axis = np.argmin(target_spacing) target_spacing[lowres_axis] = np.percentile(spacing[:, lowres_axis], 10) self.target_spacing = list(target_spacing) def check_anisotrophy(self, spacing): def check(spacing): return np.max(spacing) / np.min(spacing) >= 3 return check(spacing) or check(self.target_spacing) def calculate_new_shape(self, spacing, shape): spacing_ratio = np.array(spacing) / np.array(self.target_spacing) new_shape = (spacing_ratio * np.array(shape)).astype(int).tolist() return new_shape def save_npy(self, image, fname, suffix): np.save(os.path.join(self.results, fname.replace(".nii.gz", suffix)), image, allow_pickle=False) def run_parallel(self, func, exec_mode): return Parallel(n_jobs=self.args.n_jobs)(delayed(func)(pair) for pair in self.metadata[exec_mode]) def load_nifty(self, fname): return nibabel.load(os.path.join(self.data_path, fname)) @staticmethod def load_spacing(image): return image.header["pixdim"][1:4].tolist()[::-1] @staticmethod def pad(image, padding): pad_d, pad_w, pad_h = padding return np.pad( image, ( (0, 0), (math.floor(pad_d), math.ceil(pad_d)), (math.floor(pad_w), math.ceil(pad_w)), (math.floor(pad_h), math.ceil(pad_h)), ), ) @staticmethod def standardize_layout(data): if len(data.shape) == 3: data = np.expand_dims(data, 3) return np.transpose(data, (3, 2, 1, 0)) @staticmethod def resize_fn(image, shape, order, mode): return resize(image, shape, order=order, mode=mode, cval=0, clip=True, anti_aliasing=False) def resample_anisotrophic_image(self, image, shape): resized_channels = [] for image_c in image: resized = [self.resize_fn(i, shape[1:], 3, "edge") for i in image_c] resized = np.stack(resized, axis=0) resized = self.resize_fn(resized, shape, 0, "constant") resized_channels.append(resized) resized = np.stack(resized_channels, axis=0) return resized def resample_regular_image(self, image, shape): resized_channels = [] for image_c in image: resized_channels.append(self.resize_fn(image_c, shape, 3, "edge")) resized = np.stack(resized_channels, axis=0) return resized def resample_anisotrophic_label(self, label, shape): depth = label.shape[1] reshaped = np.zeros(shape, dtype=np.uint8) shape_2d = shape[1:] reshaped_2d = np.zeros((depth, *shape_2d), dtype=np.uint8) n_class = np.max(label) for class_ in range(1, n_class + 1): for depth_ in range(depth): mask = label[0, depth_] == class_ resized_2d = self.resize_fn(mask.astype(float), shape_2d, 1, "edge") reshaped_2d[depth_][resized_2d >= 0.5] = class_ for class_ in range(1, 
n_class + 1): mask = reshaped_2d == class_ resized = self.resize_fn(mask.astype(float), shape, 0, "constant") reshaped[resized >= 0.5] = class_ reshaped = np.expand_dims(reshaped, 0) return reshaped def resample_regular_label(self, label, shape): reshaped = np.zeros(shape, dtype=np.uint8) n_class = np.max(label) for class_ in range(1, n_class + 1): mask = label[0] == class_ resized = self.resize_fn(mask.astype(float), shape, 1, "edge") reshaped[resized >= 0.5] = class_ reshaped = np.expand_dims(reshaped, 0) return reshaped
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/data_preprocessing/preprocessor.py
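A numpy-only sketch of the resampling arithmetic in calculate_new_shape above: the ratio of original to target voxel spacing scales the spatial shape. The spacing and shape values are illustrative (Task05-like numbers from configs.py).

import numpy as np

spacing = [3.6, 0.62, 0.62]            # original voxel spacing
target_spacing = [1.0, 1.0, 1.0]
shape = [20, 320, 256]                 # original spatial shape

spacing_ratio = np.array(spacing) / np.array(target_spacing)
new_shape = (spacing_ratio * np.array(shape)).astype(int).tolist()
print(new_shape)                       # [72, 198, 158]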
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torchmetrics import Metric class Dice(Metric): full_state_update = False def __init__(self, n_class, brats): super().__init__(dist_sync_on_step=False) self.n_class = n_class self.brats = brats self.add_state("steps", default=torch.zeros(1), dist_reduce_fx="sum") self.add_state("dice", default=torch.zeros((n_class,)), dist_reduce_fx="sum") self.add_state("loss", default=torch.zeros(1), dist_reduce_fx="sum") def update(self, p, y, l): self.steps += 1 self.dice += self.compute_stats_brats(p, y) if self.brats else self.compute_stats(p, y) self.loss += l def compute(self): return 100 * self.dice / self.steps, self.loss / self.steps def compute_stats_brats(self, p, y): scores = torch.zeros(self.n_class, device=p.device, dtype=torch.float32) p = (torch.sigmoid(p) > 0.5).int() y_wt, y_tc, y_et = y > 0, ((y == 1) + (y == 3)) > 0, y == 3 y = torch.stack([y_wt, y_tc, y_et], dim=1) for i in range(self.n_class): p_i, y_i = p[:, i], y[:, i] if (y_i != 1).all(): # no foreground class scores[i - 1] += 1 if (p_i != 1).all() else 0 continue tp, fn, fp = self.get_stats(p_i, y_i, 1) denom = (2 * tp + fp + fn).to(torch.float) score_cls = (2 * tp).to(torch.float) / denom if torch.is_nonzero(denom) else 0.0 scores[i - 1] += score_cls return scores def compute_stats(self, p, y): scores = torch.zeros(self.n_class, device=p.device, dtype=torch.float32) p = torch.argmax(p, dim=1) for i in range(1, self.n_class + 1): if (y != i).all(): # no foreground class scores[i - 1] += 1 if (p != i).all() else 0 continue tp, fn, fp = self.get_stats(p, y, i) denom = (2 * tp + fp + fn).to(torch.float) score_cls = (2 * tp).to(torch.float) / denom if torch.is_nonzero(denom) else 0.0 scores[i - 1] += score_cls return scores @staticmethod def get_stats(p, y, c): tp = torch.logical_and(p == c, y == c).sum() fn = torch.logical_and(p != c, y == c).sum() fp = torch.logical_and(p == c, y != c).sum() return tp, fn, fp
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/nnunet/metrics.py
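A torch-only sketch of the per-class Dice formula that the Dice metric above accumulates, 2*TP / (2*TP + FP + FN); the toy tensors are invented.

import torch

pred = torch.tensor([0, 1, 1, 2, 2, 2])
targ = torch.tensor([0, 1, 1, 2, 2, 0])

for c in (1, 2):
    tp = torch.logical_and(pred == c, targ == c).sum()
    fn = torch.logical_and(pred != c, targ == c).sum()
    fp = torch.logical_and(pred == c, targ != c).sum()
    dice = (2 * tp).float() / (2 * tp + fp + fn).float()
    print(f"class {c}: dice = {dice.item():.3f}")   # class 1: 1.000, class 2: 0.800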
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch.nn as nn from monai.losses import DiceCELoss, DiceFocalLoss, DiceLoss, FocalLoss class Loss(nn.Module): def __init__(self, focal): super(Loss, self).__init__() if focal: self.loss_fn = DiceFocalLoss( include_background=False, softmax=True, to_onehot_y=True, batch=True, gamma=2.0 ) else: self.loss_fn = DiceCELoss(include_background=False, softmax=True, to_onehot_y=True, batch=True) def forward(self, y_pred, y_true): return self.loss_fn(y_pred, y_true) class LossBraTS(nn.Module): def __init__(self, focal): super(LossBraTS, self).__init__() self.dice = DiceLoss(sigmoid=True, batch=True) self.ce = FocalLoss(gamma=2.0, to_onehot_y=False) if focal else nn.BCEWithLogitsLoss() def _loss(self, p, y): return self.dice(p, y) + self.ce(p, y.float()) def forward(self, p, y): y_wt, y_tc, y_et = y > 0, ((y == 1) + (y == 3)) > 0, y == 3 p_wt, p_tc, p_et = p[:, 0].unsqueeze(1), p[:, 1].unsqueeze(1), p[:, 2].unsqueeze(1) l_wt, l_tc, l_et = self._loss(p_wt, y_wt), self._loss(p_tc, y_tc), self._loss(p_et, y_et) return l_wt + l_tc + l_et
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/nnunet/loss.py
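A small torch sketch of the BraTS label-to-region mapping used in LossBraTS above (whole tumor, tumor core, enhancing tumor); the example label tensor is made up.

import torch

y = torch.tensor([[0, 1, 2, 3, 1, 0]])       # illustrative BraTS labels per voxel

y_wt = y > 0                                  # whole tumor: any tumor label
y_tc = ((y == 1) + (y == 3)) > 0              # tumor core: labels 1 and 3
y_et = y == 3                                 # enhancing tumor: label 3
print(y_wt.int(), y_tc.int(), y_et.int(), sep="\n")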
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os import numpy as np import pytorch_lightning as pl import torch import torch.nn as nn from apex.optimizers import FusedAdam, FusedSGD from data_loading.data_module import get_data_path, get_test_fnames from monai.inferers import sliding_window_inference from monai.networks.nets import DynUNet from nnunet.brats22_model import UNet3D from nnunet.loss import Loss, LossBraTS from nnunet.metrics import Dice from pytorch_lightning.utilities import rank_zero_only from scipy.special import expit, softmax from skimage.transform import resize from utils.logger import DLLogger from utils.utils import get_config_file, print0 class NNUnet(pl.LightningModule): def __init__(self, args, triton=False, data_dir=None): super(NNUnet, self).__init__() self.save_hyperparameters() self.args = args self.triton = triton if data_dir is not None: self.args.data = data_dir self.build_nnunet() self.best_mean, self.best_epoch, self.test_idx = (0,) * 3 self.start_benchmark = 0 self.train_loss = [] self.test_imgs = [] if not self.triton: self.learning_rate = args.learning_rate loss = LossBraTS if self.args.brats else Loss self.loss = loss(self.args.focal) if self.args.dim == 2: self.tta_flips = [[2], [3], [2, 3]] else: self.tta_flips = [[2], [3], [4], [2, 3], [2, 4], [3, 4], [2, 3, 4]] self.dice = Dice(self.n_class, self.args.brats) if self.args.exec_mode in ["train", "evaluate"] and not self.args.benchmark: self.dllogger = DLLogger(args.results, args.logname) def forward(self, img): return torch.argmax(self.model(img), 1) def _forward(self, img): if self.args.benchmark: if self.args.dim == 2 and self.args.data2d_dim == 3: img = layout_2d(img, None) return self.model(img) return self.tta_inference(img) if self.args.tta else self.do_inference(img) def compute_loss(self, preds, label): if self.args.brats22_model: loss = self.loss(preds[0], label) for i, pred in enumerate(preds[1:]): downsampled_label = nn.functional.interpolate(label, pred.shape[2:]) loss += 0.5 ** (i + 1) * self.loss(pred, downsampled_label) c_norm = 1 / (2 - 2 ** (-len(preds))) return c_norm * loss if self.args.deep_supervision: loss, weights = 0.0, 0.0 for i in range(preds.shape[1]): loss += self.loss(preds[:, i], label) * 0.5**i weights += 0.5**i return loss / weights return self.loss(preds, label) def training_step(self, batch, batch_idx): img, lbl = self.get_train_data(batch) img, lbl = self.convert_data(img, lbl) pred = self.model(img) loss = self.compute_loss(pred, lbl) self.train_loss.append(loss.item()) return loss def validation_step(self, batch, batch_idx): if self.current_epoch < self.args.skip_first_n_eval: return None img, lbl = batch["image"], batch["label"] img, lbl = self.convert_data(img, lbl) pred = self._forward(img) loss = self.loss(pred, lbl) if self.args.invert_resampled_y: meta, lbl = batch["meta"][0].cpu().detach().numpy(), batch["orig_lbl"] pred = nn.functional.interpolate(pred, size=tuple(meta[3]), mode="trilinear", 
align_corners=True) self.dice.update(pred, lbl[:, 0], loss) def test_step(self, batch, batch_idx): if self.args.exec_mode == "evaluate": return self.validation_step(batch, batch_idx) img = batch["image"] img = self.convert_ncdhw_to_ndhwc(img) if self.args.benchmark: pred = self._forward(img) return pred = self._forward(img).squeeze(0).cpu().detach().numpy() if self.args.save_preds: meta = batch["meta"][0].cpu().detach().numpy() min_d, max_d = meta[0, 0], meta[1, 0] min_h, max_h = meta[0, 1], meta[1, 1] min_w, max_w = meta[0, 2], meta[1, 2] n_class, original_shape, cropped_shape = pred.shape[0], meta[2], meta[3] if not all(cropped_shape == pred.shape[1:]): resized_pred = np.zeros((n_class, *cropped_shape)) for i in range(n_class): resized_pred[i] = resize( pred[i], cropped_shape, order=3, mode="edge", cval=0, clip=True, anti_aliasing=False ) pred = resized_pred final_pred = np.zeros((n_class, *original_shape)) final_pred[:, min_d:max_d, min_h:max_h, min_w:max_w] = pred if self.args.brats: final_pred = expit(final_pred) else: final_pred = softmax(final_pred, axis=0) self.save_mask(final_pred) def get_unet_params(self): config = get_config_file(self.args) patch_size, spacings = config["patch_size"], config["spacings"] strides, kernels, sizes = [], [], patch_size[:] while True: spacing_ratio = [spacing / min(spacings) for spacing in spacings] stride = [ 2 if ratio <= 2 and size >= 2 * self.args.min_fmap else 1 for (ratio, size) in zip(spacing_ratio, sizes) ] kernel = [3 if ratio <= 2 else 1 for ratio in spacing_ratio] if all(s == 1 for s in stride): break sizes = [i / j for i, j in zip(sizes, stride)] spacings = [i * j for i, j in zip(spacings, stride)] kernels.append(kernel) strides.append(stride) if len(strides) == self.args.depth: break strides.insert(0, len(spacings) * [1]) kernels.append(len(spacings) * [3]) return config["in_channels"], config["n_class"], kernels, strides, patch_size def convert_ncdhw_to_ndhwc(self, tensor): if self.args.layout == "NCDHW": return tensor strides = tensor.stride() shape = tensor.shape tensor = torch.as_strided( tensor, (shape[0], shape[-1], *shape[1:-1]), (strides[0], strides[-1], *strides[1:-1]) ) return tensor def convert_data(self, img, lbl): img, lbl = self.convert_ncdhw_to_ndhwc(img), self.convert_ncdhw_to_ndhwc(lbl) return img, lbl def build_nnunet(self): self.in_channels, out_channels, kernels, strides, self.patch_size = self.get_unet_params() self.n_class = out_channels - 1 if self.args.brats: out_channels = 3 if self.args.brats22_model: self.model = UNet3D(kernels, strides) else: self.model = DynUNet( self.args.dim, self.in_channels, out_channels, kernels, strides, strides[1:], filters=self.args.filters, norm_name=(self.args.norm.upper(), {"affine": True}), act_name=("leakyrelu", {"inplace": False, "negative_slope": 0.01}), deep_supervision=self.args.deep_supervision, deep_supr_num=self.args.deep_supr_num, res_block=self.args.res_block, trans_bias=True, ) if self.args.layout == "NDHWC" and self.args.dim == 3: self.model.to(memory_format=torch.channels_last_3d) print0(f"Filters: {self.model.filters},\nKernels: {kernels}\nStrides: {strides}") def do_inference(self, image): if self.args.dim == 3: return self.sliding_window_inference(image) if self.args.data2d_dim == 2: return self.model(image) if self.args.exec_mode == "predict": return self.inference2d_test(image) return self.inference2d(image) def tta_inference(self, img): pred = self.do_inference(img) for flip_idx in self.tta_flips: pred += flip(self.do_inference(flip(img, flip_idx)), flip_idx) 
pred /= len(self.tta_flips) + 1 return pred def inference2d(self, image): image = torch.transpose(image.squeeze(0), 0, 1) preds = self.model(image) preds = torch.transpose(preds, 0, 1).unsqueeze(0) return preds def inference2d_test(self, image): preds_shape = (image.shape[0], self.n_class + 1, *image.shape[2:]) preds = torch.zeros(preds_shape, dtype=image.dtype, device=image.device) for depth in range(image.shape[2]): preds[:, :, depth] = self.sliding_window_inference(image[:, :, depth]) return preds def sliding_window_inference(self, image): return sliding_window_inference( inputs=image, roi_size=self.patch_size, sw_batch_size=self.args.val_batch_size, predictor=self.model, overlap=self.args.overlap, mode=self.args.blend, ) def round(self, tensor): return round(torch.mean(tensor).item(), 2) def validation_epoch_end(self, outputs): if self.current_epoch < self.args.skip_first_n_eval: self.log("dice", 0.0, sync_dist=False) self.dice.reset() return None dice, loss = self.dice.compute() self.dice.reset() # Update metrics dice_mean = torch.mean(dice) if dice_mean >= self.best_mean: self.best_mean = dice_mean self.best_mean_dice = dice[:] self.best_epoch = self.current_epoch metrics = {} metrics["Dice"] = self.round(dice) metrics["Val Loss"] = self.round(loss) metrics["Max Dice"] = self.round(self.best_mean_dice) metrics["Best epoch"] = self.best_epoch metrics["Train Loss"] = ( 0 if len(self.train_loss) == 0 else round(sum(self.train_loss) / len(self.train_loss), 4) ) if self.n_class > 1: metrics.update({f"D{i+1}": self.round(m) for i, m in enumerate(dice)}) self.dllogger.log_metrics(step=self.current_epoch, metrics=metrics) self.dllogger.flush() self.log("dice", metrics["Dice"], sync_dist=False) def test_epoch_end(self, outputs): if self.args.exec_mode == "evaluate": self.eval_dice, _ = self.dice.compute() @rank_zero_only def on_fit_end(self): if not self.args.benchmark: metrics = {} metrics["dice_score"] = round(self.best_mean.item(), 2) metrics["train_loss"] = round(sum(self.train_loss) / len(self.train_loss), 4) metrics["val_loss"] = round(1 - self.best_mean.item() / 100, 4) metrics["Epoch"] = self.best_epoch self.dllogger.log_metrics(step=(), metrics=metrics) self.dllogger.flush() def configure_optimizers(self): optimizer = { "sgd": FusedSGD(self.parameters(), lr=self.learning_rate, momentum=self.args.momentum), "adam": FusedAdam(self.parameters(), lr=self.learning_rate, weight_decay=self.args.weight_decay), }[self.args.optimizer.lower()] if self.args.scheduler: scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, 4096, eta_min=8e-5) return {"optimizer": optimizer, "monitor": "val_loss", "lr_scheduler": scheduler} return {"optimizer": optimizer, "monitor": "val_loss"} def save_mask(self, pred): if self.test_idx == 0: data_path = get_data_path(self.args) self.test_imgs, _ = get_test_fnames(self.args, data_path) fname = os.path.basename(self.test_imgs[self.test_idx]).replace("_x", "") np.save(os.path.join(self.save_dir, fname), pred, allow_pickle=False) self.test_idx += 1 def get_train_data(self, batch): img, lbl = batch["image"], batch["label"] if self.args.dim == 2 and self.args.data2d_dim == 3: img, lbl = layout_2d(img, lbl) return img, lbl def layout_2d(img, lbl): batch_size, depth, channels, height, width = img.shape img = torch.reshape(img, (batch_size * depth, channels, height, width)) if lbl is not None: lbl = torch.reshape(lbl, (batch_size * depth, 1, height, width)) return img, lbl return img def flip(data, axis): return torch.flip(data, dims=axis)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/nnunet/nn_unet.py
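A pure-Python sketch of the deep-supervision weighting used in compute_loss above: each auxiliary head's loss is scaled by 0.5**i and the sum is normalized by the total weight; the per-head loss values are invented.

head_losses = [1.00, 0.80, 0.60]            # illustrative losses: main output + 2 aux heads

loss, weights = 0.0, 0.0
for i, l in enumerate(head_losses):
    loss += l * 0.5 ** i
    weights += 0.5 ** i
print(loss / weights)                        # (1.0 + 0.4 + 0.15) / 1.75 ≈ 0.886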
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as np import torch import torch.nn as nn normalizations = { "instancenorm3d": nn.InstanceNorm3d, "instancenorm2d": nn.InstanceNorm2d, "batchnorm3d": nn.BatchNorm3d, "batchnorm2d": nn.BatchNorm2d, } convolutions = { "Conv2d": nn.Conv2d, "Conv3d": nn.Conv3d, "ConvTranspose2d": nn.ConvTranspose2d, "ConvTranspose3d": nn.ConvTranspose3d, } def get_norm(name, out_channels, groups=32): if "groupnorm" in name: return nn.GroupNorm(groups, out_channels, affine=True) return normalizations[name](out_channels, affine=True) def get_conv(in_channels, out_channels, kernel_size, stride, dim=3, bias=False): conv = convolutions[f"Conv{dim}d"] padding = get_padding(kernel_size, stride) return conv(in_channels, out_channels, kernel_size, stride, padding, bias=bias) def get_transp_conv(in_channels, out_channels, kernel_size, stride, dim): conv = convolutions[f"ConvTranspose{dim}d"] padding = get_padding(kernel_size, stride) output_padding = get_output_padding(kernel_size, stride, padding) return conv(in_channels, out_channels, kernel_size, stride, padding, output_padding, bias=True) def get_padding(kernel_size, stride): kernel_size_np = np.atleast_1d(kernel_size) stride_np = np.atleast_1d(stride) padding_np = (kernel_size_np - stride_np + 1) / 2 padding = tuple(int(p) for p in padding_np) return padding if len(padding) > 1 else padding[0] def get_output_padding(kernel_size, stride, padding): kernel_size_np = np.atleast_1d(kernel_size) stride_np = np.atleast_1d(stride) padding_np = np.atleast_1d(padding) out_padding_np = 2 * padding_np + stride_np - kernel_size_np out_padding = tuple(int(p) for p in out_padding_np) return out_padding if len(out_padding) > 1 else out_padding[0] class InputBlock(nn.Module): def __init__(self, in_channels, out_channels, **kwargs): super(InputBlock, self).__init__() self.conv1 = get_conv(in_channels, out_channels, 3, 1) self.conv2 = get_conv(out_channels, out_channels, 3, 1) self.norm = get_norm(kwargs["norm"], out_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.conv1(x) x = self.norm(x) x = self.relu(x) x = self.conv2(x) x = self.relu(x) return x class ConvLayer(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs): super(ConvLayer, self).__init__() self.conv = get_conv(in_channels, out_channels, kernel_size, stride) self.norm = get_norm(kwargs["norm"], in_channels) self.relu = nn.ReLU(inplace=True) def forward(self, x): x = self.norm(x) x = self.conv(x) x = self.relu(x) return x class ConvBlock(nn.Module): def __init__(self, in_channels, out_channels, kernel_size, stride, **kwargs): super(ConvBlock, self).__init__() self.conv1 = ConvLayer(in_channels, out_channels, kernel_size, stride, **kwargs) self.conv2 = ConvLayer(out_channels, out_channels, kernel_size, 1, **kwargs) def forward(self, x): x = self.conv1(x) x = self.conv2(x) return x class UpsampleBlock(nn.Module): def __init__(self, in_channels, out_channels, 
kernel_size, stride, **kwargs): super(UpsampleBlock, self).__init__() self.conv_block = ConvBlock(out_channels + in_channels, out_channels, kernel_size, 1, **kwargs) def forward(self, x, x_skip): x = nn.functional.interpolate(x, scale_factor=2, mode="trilinear", align_corners=True) x = torch.cat((x, x_skip), dim=1) x = self.conv_block(x) return x class OutputBlock(nn.Module): def __init__(self, in_channels, out_channels, dim): super(OutputBlock, self).__init__() self.conv = get_conv(in_channels, out_channels, kernel_size=1, stride=1, dim=dim, bias=True) def forward(self, input_data): return self.conv(input_data) class UNet3D(nn.Module): def __init__( self, kernels, strides, ): super(UNet3D, self).__init__() self.dim = 3 self.n_class = 3 self.deep_supervision = True self.norm = "instancenorm3d" self.filters = [64, 128, 256, 512, 768, 1024, 2048][: len(strides)] down_block = ConvBlock self.input_block = InputBlock(5, self.filters[0], norm=self.norm) self.downsamples = self.get_module_list( conv_block=down_block, in_channels=self.filters[:-1], out_channels=self.filters[1:], kernels=kernels[1:-1], strides=strides[1:-1], ) self.bottleneck = self.get_conv_block( conv_block=down_block, in_channels=self.filters[-2], out_channels=self.filters[-1], kernel_size=kernels[-1], stride=strides[-1], ) self.upsamples = self.get_module_list( conv_block=UpsampleBlock, in_channels=self.filters[1:][::-1], out_channels=self.filters[:-1][::-1], kernels=kernels[1:][::-1], strides=strides[1:][::-1], ) self.output_block = self.get_output_block(decoder_level=0) self.deep_supervision_heads = self.get_deep_supervision_heads() self.apply(self.initialize_weights) def forward(self, input_data): out = self.input_block(input_data) encoder_outputs = [out] for downsample in self.downsamples: out = downsample(out) encoder_outputs.append(out) out = self.bottleneck(out) decoder_outputs = [] for upsample, skip in zip(self.upsamples, reversed(encoder_outputs)): out = upsample(out, skip) decoder_outputs.append(out) out = self.output_block(out) if self.training and self.deep_supervision: out = [out] for i, decoder_out in enumerate(decoder_outputs[-3:-1][::-1]): out.append(self.deep_supervision_heads[i](decoder_out)) return out def get_conv_block(self, conv_block, in_channels, out_channels, kernel_size, stride, drop_block=False): return conv_block( dim=self.dim, stride=stride, norm=self.norm, kernel_size=kernel_size, in_channels=in_channels, out_channels=out_channels, ) def get_output_block(self, decoder_level): return OutputBlock(in_channels=self.filters[decoder_level], out_channels=self.n_class, dim=self.dim) def get_deep_supervision_heads(self): return nn.ModuleList([self.get_output_block(1), self.get_output_block(2)]) def get_module_list(self, in_channels, out_channels, kernels, strides, conv_block): layers = [] for in_channel, out_channel, kernel, stride in zip(in_channels, out_channels, kernels, strides): conv_layer = self.get_conv_block(conv_block, in_channel, out_channel, kernel, stride) layers.append(conv_layer) return nn.ModuleList(layers) def initialize_weights(self, module): name = module.__class__.__name__.lower() if name in ["conv2d", "conv3d"]: nn.init.kaiming_normal_(module.weight) if hasattr(module, "bias") and module.bias is not None: nn.init.constant_(module.bias, 0)
DeepLearningExamples-master
PyTorch/Segmentation/nnUNet/nnunet/brats22_model.py
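A minimal usage sketch for the UNet3D defined in brats22_model.py above; the import path, kernel/stride lists, and the 64^3 patch size are illustrative assumptions, not values fixed by the file (only the 5 input channels and 3 output classes are hard-coded there).

import torch

from nnunet.brats22_model import UNet3D  # assumed import path for the file above

# One non-downsampling level followed by three 2x downsampling levels, so the
# decoder's fixed scale_factor=2 upsampling matches the skip-connection shapes.
kernels = [3, 3, 3, 3]
strides = [1, 2, 2, 2]

model = UNet3D(kernels, strides).eval()

x = torch.randn(1, 5, 64, 64, 64)  # InputBlock expects 5 input channels
with torch.no_grad():
    out = model(x)
print(out.shape)  # torch.Size([1, 3, 64, 64, 64]); training mode returns a list (deep supervision)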
#!/usr/bin/env python
# coding=utf-8

# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
#   list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
#   contributors may be used to endorse or promote products derived from
#   this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

import os
import sys
import zipfile

from io import open

if os.path.exists('train.txt'):
    print('Tokenized text8 already exists - skipping processing')
    sys.exit()

data = zipfile.ZipFile('text8.zip').extractall()
data = open('text8', 'r', encoding='utf-8').read()

print('Length of text8: {}'.format(len(data)))

num_test_chars = 5000000

train_data = data[: -2 * num_test_chars]
valid_data = data[-2 * num_test_chars: -num_test_chars]
test_data = data[-num_test_chars:]

for fn, part in [('train.txt', train_data), ('valid.txt', valid_data), ('test.txt', test_data)]:
    print('{} will have {} bytes'.format(fn, len(part)))
    print('- Tokenizing...')
    # Change space ' ' to underscore '_'
    part_str = ' '.join(['_' if c == ' ' else c for c in part.strip()])
    print('- Writing...')
    f = open(fn, 'w').write(part_str)
    f = open(fn + '.raw', 'w', encoding='utf-8').write(part)
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/prep_text8.py
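The only non-trivial step in prep_text8.py is its character-level tokenization; a small sketch of the same rule applied to a toy string:

# Every character becomes a whitespace-separated token, with ' ' rewritten as '_',
# exactly as in the loop above.
sample = "hello world"
tokens = ' '.join(['_' if c == ' ' else c for c in sample.strip()])
print(tokens)  # h e l l o _ w o r l d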
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # MIT License # # Copyright (c) 2019 cybertronai # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. """Lamb optimizer.""" import torch from torch.optim import Optimizer class Lamb(Optimizer): r"""Implements Lamb algorithm. It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) adam (bool, optional): always use trust ratio = 1, which turns this into Adam. Useful for comparison purposes. .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962 """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) self.adam = adam super(Lamb, self).__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Lamb does not support sparse gradients.') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient # m_t exp_avg.mul_(beta1).add_(1 - beta1, grad) # v_t exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) # Paper v3 does not use debiasing. # bias_correction1 = 1 - beta1 ** state['step'] # bias_correction2 = 1 - beta2 ** state['step'] # Apply bias to lr to avoid broadcast. step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1 weight_norm = p.data.norm(p=2).clamp_(0, 10) adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps']) if group['weight_decay'] != 0: adam_step.add_(group['weight_decay'], p.data) adam_norm = adam_step.norm(p=2) if weight_norm == 0.0 or adam_norm == 0.0: trust_ratio = 1 else: trust_ratio = weight_norm / (adam_norm + group['eps']) state['weight_norm'] = weight_norm state['adam_norm'] = adam_norm state['trust_ratio'] = trust_ratio if self.adam: trust_ratio = 1 p.data.add_(-step_size * trust_ratio, adam_step) return loss @torch.jit.script def lamb_kernel(param, grad, exp_avg, exp_avg_sq, beta1: float, beta2: float, step_size: float, eps: float, weight_decay: float): exp_avg = exp_avg * beta1 + (1 - beta1) * grad exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad) adam_step = exp_avg / (exp_avg_sq.sqrt() + eps) adam_step = adam_step + weight_decay * param weight_norm = param.norm(p=2).clamp(0, 10) adam_norm = adam_step.norm(p=2) trust_ratio = weight_norm / (adam_norm + eps) trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio trust_ratio = trust_ratio.float() param = param - step_size * trust_ratio * adam_step return param, exp_avg, exp_avg_sq class JITLamb(Optimizer): r"""Implements Lamb algorithm. It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) adam (bool, optional): always use trust ratio = 1, which turns this into Adam. Useful for comparison purposes. .. 
_Large Batch Optimization for Deep Learning: Training BERT in 76 minutes: https://arxiv.org/abs/1904.00962 """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6, weight_decay=0, adam=False): if not 0.0 <= lr: raise ValueError("Invalid learning rate: {}".format(lr)) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {}".format(eps)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1])) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay) self.adam = adam super().__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Lamb does not support sparse gradients.') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 step_size = group['lr'] param, exp_avg, exp_avg_sq = lamb_kernel(p.data, grad, exp_avg, exp_avg_sq, beta1, beta2, step_size, group['eps'], group['weight_decay'], ) state['exp_avg'] = exp_avg state['exp_avg_sq'] = exp_avg_sq p.data = param return loss
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/lamb.py
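A minimal sketch of driving the JITLamb optimizer from lamb.py above on a toy model; the linear model, loss, and hyper-parameter values are illustrative, only the standard torch.optim interface comes from the file.

import torch
import torch.nn as nn

from lamb import JITLamb  # lamb.py above

model = nn.Linear(16, 4)
optimizer = JITLamb(model.parameters(), lr=1e-3, weight_decay=0.01)

x = torch.randn(8, 16)
target = torch.randn(8, 4)

for _ in range(3):
    optimizer.zero_grad()
    loss = nn.functional.mse_loss(model(x), target)
    loss.backward()
    optimizer.step()  # LAMB update via the scripted lamb_kernel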
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F from utils.log_uniform_sampler import LogUniformSampler from utils.log_uniform_sampler import sample_logits from utils.proj_adaptive_softmax import ProjectedAdaptiveLogSoftmax @torch.jit.script def add_and_scale(tensor1, tensor2, alpha: float): return alpha * (tensor1 + tensor2) class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz=None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False): super(PositionwiseFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Linear(d_model, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = self.CoreNet(self.layer_norm(inp)) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = self.CoreNet(inp) # residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.q_net = nn.Linear(d_model, n_head * d_head, bias=False) self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm def forward(self, h, attn_mask=None, mems=None): # multihead attention # [hlen x bsz x n_head x d_head] if mems is not None: c = torch.cat([mems, h], 0) else: c = h if self.pre_lnorm: # layer normalization c = self.layer_norm(c) head_q = self.q_net(h) head_k, head_v = torch.chunk(self.kv_net(c), 2, -1) head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head) head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head) head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head) # [bsz x n_head x qlen x klen] attn_score = torch.einsum('ibnd,jbnd->bnij', head_q, head_k) attn_score.mul_(self.scale) if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], 
-float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x qlen x klen x n_head] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # [bsz x n_head x qlen x klen] * [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head] attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, head_v) attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = h + attn_out else: # residual connection + layer normalization output = self.layer_norm(h + attn_out) return output class RelMultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False): super(RelMultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm def _parallelogram_mask(self, h, w, left=False): mask = torch.ones((h, w)).byte() m = min(h, w) mask[:m, :m] = torch.triu(mask[:m, :m]) mask[-m:, -m:] = torch.tril(mask[-m:, -m:]) if left: return mask.bool() else: return mask.flip(0).bool() def _shift(self, x, qlen, klen, mask, left=False): if qlen > 1: zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)), device=x.device, dtype=x.dtype) else: zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype) if left: mask = mask.flip(1) x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1) else: x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1) x = x_padded.masked_select(mask[:, :, None, None]) \ .view(qlen, klen, x.size(2), x.size(3)) return x def _rel_shift(self, x, zero_triu=False): zero_pad = torch.zeros((x.size(0), x.size(1), x.size(2), 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=3) x_padded = x_padded.view(x.size(0), x.size(1), x.size(3) + 1, x.size(2)) x = x_padded.narrow(2, 1, x_padded.size(2) - 1).view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, w, r, attn_mask=None, mems=None): raise NotImplementedError class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs) self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False) def forward(self, w, r, r_w_bias, r_r_bias, attn_mask=None, mems=None): qlen, rlen, bsz = w.size(0), r.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head w_head_k = w_head_k.view(klen, bsz, self.n_head, 
self.d_head) # klen x bsz x n_head x d_head w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head # compute attention score rw_head_q = w_head_q + r_w_bias # qlen x bsz x n_head x d_head AC = torch.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k) # bsz x n_head x qlen x klen rr_head_q = w_head_q + r_r_bias BD = torch.einsum('ibnd,jnd->bnij', rr_head_q, r_head_k) # bsz x n_head x qlen x klen BD = self._rel_shift(BD) # [bsz x n_head x qlen x klen] attn_score = add_and_scale(AC, BD, self.scale) # compute attention probability if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x n_head x qlen x klen] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # compute attention vector attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = w + attn_out else: # residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class RelLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs) def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None): # r_emb: [klen, n_head, d_head], used for term B # r_w_bias: [n_head, d_head], used for term C # r_bias: [klen, n_head], used for term D qlen, bsz = w.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) if klen > r_emb.size(0): r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1) r_emb = torch.cat([r_emb_pad, r_emb], 0) r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1) r_bias = torch.cat([r_bias_pad, r_bias], 0) else: r_emb = r_emb[-klen:] r_bias = r_bias[-klen:] r_bias = r_bias.t() # compute attention score rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head AC = torch.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k) # bsz x n_head x qlen x klen B_ = torch.einsum('ibnd,jnd->bnij', w_head_q, r_emb) # bsz x n_head x qlen x klen D_ = r_bias[None, :, None, :] # 1 x n_head x 1 x klen BD = self._rel_shift(B_ + D_) # [bsz x qlen x klen x n_head] attn_score = add_and_scale(AC, BD, self.scale) # compute attention probability if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x n_head x qlen x klen] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # compute attention vector 
attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = w + attn_out else: # residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class DecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(DecoderLayer, self).__init__() self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelLearnableDecoderLayer, self).__init__() self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelPartialLearnableDecoderLayer, self).__init__() self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False): super(AdaptiveEmbedding, self).__init__() self.n_token = n_token self.d_embed = d_embed self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = nn.ParameterList() if div_val == 1: self.emb_layers.append( nn.Embedding(n_token, d_embed, sparse=(sample_softmax > 0)) ) if d_proj != d_embed: self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_embed).zero_())) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i)) self.emb_projs.append(nn.Parameter(torch.Tensor(d_proj, d_emb_i).zero_())) def forward(self, inp): if self.div_val == 1: embed = self.emb_layers[0](inp) if self.d_proj != self.d_embed: embed = F.linear(embed, self.emb_projs[0]) else: param = next(self.parameters()) inp_flat = inp.view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device) for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue inp_i = inp_flat.index_select(0, indices_i) - 
l_idx emb_i = self.emb_layers[i](inp_i) emb_i = F.linear(emb_i, self.emb_projs[i]).to(emb_flat.dtype) emb_flat.index_copy_(0, indices_i, emb_i) embed = emb_flat.view(*inp.size(), self.d_proj) embed.mul_(self.emb_scale) return embed class MemTransformerLM(nn.Module): def __init__(self, n_token, n_layer, n_head, d_model, d_head, d_inner, dropout, dropatt, dtype, tie_weight=True, d_embed=None, div_val=1, tie_projs=[False], pre_lnorm=False, tgt_len=None, ext_len=None, mem_len=None, cutoffs=[], adapt_inp=False, same_length=False, attn_type=0, clamp_len=-1, sample_softmax=-1): super(MemTransformerLM, self).__init__() self.n_token = n_token d_embed = d_model if d_embed is None else d_embed self.d_embed = d_embed self.d_model = d_model self.n_head = n_head self.d_head = d_head self.word_emb = AdaptiveEmbedding(n_token, d_embed, d_model, cutoffs, div_val=div_val) self.drop = nn.Dropout(dropout) self.tie_weight = tie_weight self.tie_projs = tie_projs self.div_val = div_val self.n_layer = n_layer self.tgt_len = tgt_len self.mem_len = mem_len self.ext_len = ext_len self.max_klen = tgt_len + ext_len + mem_len self.attn_type = attn_type self.layers = nn.ModuleList() # the default attention if attn_type == 0: for i in range(n_layer): self.layers.append( RelPartialLearnableDecoderLayer( n_head, d_model, d_head, d_inner, dropout, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, dropatt=dropatt, pre_lnorm=pre_lnorm) ) # learnable embeddings elif attn_type == 1: for i in range(n_layer): self.layers.append( RelLearnableDecoderLayer( n_head, d_model, d_head, d_inner, dropout, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, dropatt=dropatt, pre_lnorm=pre_lnorm) ) # absolute embeddings elif attn_type in [2, 3]: for i in range(n_layer): self.layers.append( DecoderLayer( n_head, d_model, d_head, d_inner, dropout, dropatt=dropatt, pre_lnorm=pre_lnorm) ) self.sample_softmax = sample_softmax # use sampled softmax if sample_softmax > 0: self.out_layer = nn.Linear(d_model, n_token) self.tie_weight = tie_weight self.sampler = LogUniformSampler(n_token, sample_softmax) # use adaptive softmax (including standard softmax) else: if tie_weight: emb_layers = [i.weight for i in self.word_emb.emb_layers] else: emb_layers = None emb_projs = self.word_emb.emb_projs self.crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_model, cutoffs, div_val=div_val, tie_projs=tie_projs, out_projs=emb_projs, out_layers_weights=emb_layers) self.same_length = same_length self.clamp_len = clamp_len self._create_params() def backward_compatible(self): self.sample_softmax = -1 def _create_params(self): # default attention if self.attn_type == 0: self.pos_emb = PositionalEmbedding(self.d_model) self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head).zero_()) self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head).zero_()) # learnable elif self.attn_type == 1: self.r_emb = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.n_head, self.d_head).zero_()) self.r_w_bias = nn.Parameter(torch.Tensor( self.n_layer, self.n_head, self.d_head).zero_()) self.r_bias = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.n_head).zero_()) # absolute standard elif self.attn_type == 2: self.pos_emb = PositionalEmbedding(self.d_model) # absolute deeper SA elif self.attn_type == 3: self.r_emb = nn.Parameter(torch.Tensor( self.n_layer, self.max_klen, self.d_model).zero_()) def reset_length(self, tgt_len, ext_len, mem_len): if tgt_len < 1: raise RuntimeError(f'tgt_len should be >= 1, but got {tgt_len}') if ext_len < 0: 
raise RuntimeError(f'ext_len should be >= 0, but got {ext_len}') if mem_len < 0: raise RuntimeError(f'mem_len should be >= 0, but got {mem_len}') self.tgt_len = tgt_len self.mem_len = mem_len self.ext_len = ext_len def init_mems(self): if self.mem_len > 0: param = next(self.parameters()) mems = torch.empty(self.n_layer, 0, dtype=param.dtype, device=param.device) return mems else: return None def _update_mems(self, hids, mems, qlen, mlen): # does not deal with None if mems is None: return None # mems is not None assert len(hids) == len(mems), 'len(hids) != len(mems)' # There are `mlen + qlen` steps that can be cached into mems # For the next step, the last `ext_len` of the `qlen` tokens # will be used as the extended context. Hence, we only cache # the tokens from `mlen + qlen - self.ext_len - self.mem_len` # to `mlen + qlen - self.ext_len`. with torch.no_grad(): stacked = torch.stack(hids) if ( self.mem_len == self.tgt_len and self.ext_len == 0 and stacked.size(1) == self.mem_len ): new_mems = stacked.detach() else: end_idx = mlen + max(0, qlen - self.ext_len) beg_idx = max(0, end_idx - self.mem_len) if mems.numel(): cat = torch.cat([mems, stacked], dim=1) else: cat = stacked new_mems = cat[:, beg_idx:end_idx].detach() return new_mems def _forward(self, dec_inp, mems=None): qlen, bsz = dec_inp.size() word_emb = self.word_emb(dec_inp) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen if self.same_length: all_ones = word_emb.new_ones(qlen, klen) mask_len = klen - self.mem_len - 1 if mask_len > 0: mask_shift_len = qlen - mask_len else: mask_shift_len = qlen dec_attn_mask = (torch.triu(all_ones, 1+mlen) + torch.tril(all_ones, -mask_shift_len)).bool() else: dec_attn_mask = torch.triu( word_emb.new_ones(qlen, klen), diagonal=1+mlen).bool() hids = [] # default if self.attn_type == 0: pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb) pos_emb = self.drop(pos_emb) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) mems_i = None if mems is None else mems[i] core_out = layer(core_out, pos_emb, self.r_w_bias, self.r_r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i) # learnable elif self.attn_type == 1: core_out = self.drop(word_emb) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) if self.clamp_len > 0: r_emb = self.r_emb[i][-self.clamp_len:] r_bias = self.r_bias[i][-self.clamp_len:] else: r_emb, r_bias = self.r_emb[i], self.r_bias[i] mems_i = None if mems is None else mems[i] core_out = layer(core_out, r_emb, self.r_w_bias[i], r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i) # absolute elif self.attn_type == 2: pos_seq = torch.arange(klen - 1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb + pos_emb[-qlen:]) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) mems_i = None if mems is None else mems[i] if mems_i is not None and len(mems_i) and i == 0: mems_i += pos_emb[:mlen] core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i) elif self.attn_type == 3: core_out = self.drop(word_emb) for i, layer in enumerate(self.layers): hids.append(core_out.detach()) mems_i = None if mems is None else mems[i] if mems_i is not None and len(mems_i) and mlen > 0: cur_emb = self.r_emb[i][:-qlen] cur_size = cur_emb.size(0) if cur_size < mlen: 
cur_emb_pad = cur_emb[0:1].expand(mlen-cur_size, -1, -1) cur_emb = torch.cat([cur_emb_pad, cur_emb], 0) else: cur_emb = cur_emb[-mlen:] mems_i += cur_emb.view(mlen, 1, -1) core_out += self.r_emb[i][-qlen:].view(qlen, 1, -1) core_out = layer(core_out, dec_attn_mask=dec_attn_mask, mems=mems_i) core_out = self.drop(core_out) new_mems = self._update_mems(hids, mems, qlen, mlen) return core_out, new_mems def forward(self, data, target, mems): # nn.DataParallel does not allow size(0) tensors to be broadcasted. # So, have to initialize size(0) mems inside the model forward. # Moreover, have to return new_mems to allow nn.DataParallel to piece # them together. if mems is None: mems = self.init_mems() tgt_len = target.size(0) hidden, new_mems = self._forward(data, mems=mems) pred_hid = hidden[-tgt_len:] if self.sample_softmax > 0 and self.training: assert self.tie_weight logit = sample_logits(self.word_emb, self.out_layer.bias, target, pred_hid, self.sampler) loss = -F.log_softmax(logit, -1)[:, :, 0] else: loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1)) loss = loss.view(tgt_len, -1) return (loss, new_mems) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='unit test') parser.add_argument('--n_layer', type=int, default=4, help='') parser.add_argument('--n_rel_layer', type=int, default=4, help='') parser.add_argument('--n_head', type=int, default=2, help='') parser.add_argument('--d_head', type=int, default=2, help='') parser.add_argument('--d_model', type=int, default=200, help='') parser.add_argument('--d_embed', type=int, default=200, help='') parser.add_argument('--d_inner', type=int, default=200, help='') parser.add_argument('--dropout', type=float, default=0.0, help='') parser.add_argument('--cuda', action='store_true', help='') parser.add_argument('--seed', type=int, default=1111, help='') parser.add_argument('--multi_gpu', action='store_true', help='') args = parser.parse_args() device = torch.device("cuda" if args.cuda else "cpu") B = 4 tgt_len, mem_len, ext_len = 36, 36, 0 data_len = tgt_len * 20 args.n_token = 10000 import data_utils data = torch.LongTensor(data_len*B).random_(0, args.n_token).to(device) diter = data_utils.LMOrderedIterator(data, B, tgt_len, device=device, ext_len=ext_len) cutoffs = [args.n_token // 2] tie_projs = [False] + [True] * len(cutoffs) for div_val in [1, 2]: for d_embed in [200, 100]: model = MemTransformerLM(args.n_token, args.n_layer, args.n_head, args.d_model, args.d_head, args.d_inner, args.dropout, dropatt=args.dropout, tie_weight=True, d_embed=d_embed, div_val=div_val, tie_projs=tie_projs, pre_lnorm=True, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, cutoffs=cutoffs, attn_type=0, dtype=None).to(device) print(sum(p.numel() for p in model.parameters())) mems = None for idx, (inp, tgt, seqlen, _) in enumerate(diter): print('batch {}'.format(idx)) _, mems = model(inp, tgt, mems)
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/mem_transformer.py
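A condensed version of the smoke test at the bottom of mem_transformer.py, showing the recurrence contract: forward returns (loss, new_mems) and the caller feeds new_mems back in for the next segment. Sizes are illustrative; the utils/ package from this directory must be importable, and handling of the initially empty mems relies on the same PyTorch behavior the file itself depends on.

import torch

from mem_transformer import MemTransformerLM

n_token = 1000
cutoffs = [n_token // 2]
model = MemTransformerLM(
    n_token, n_layer=2, n_head=2, d_model=64, d_head=32, d_inner=128,
    dropout=0.0, dropatt=0.0, dtype=None, tie_weight=True, d_embed=64,
    div_val=1, tie_projs=[False] + [True] * len(cutoffs), pre_lnorm=True,
    tgt_len=16, ext_len=0, mem_len=16, cutoffs=cutoffs, attn_type=0,
)

data = torch.randint(0, n_token, (16, 2))    # [tgt_len, batch]
target = torch.randint(0, n_token, (16, 2))

mems = None                                   # initialized inside forward()
for step in range(3):
    loss, mems = model(data, target, mems)    # new_mems carried to the next call
    print(step, loss.float().mean().item())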
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import glob import logging import os import re import numpy as np import sacremoses import torch import utils from utils.vocabulary import OpenAIVocab from utils.vocabulary import Vocab class LMOrderedIterator(object): def __init__(self, data, bsz, bptt, device='cpu', mem_len=None, ext_len=None, warmup=True): """ data -- LongTensor -- the LongTensor is strictly ordered """ self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.mem_len = mem_len self.warmup = warmup self.device = device # Work out how cleanly we can divide the dataset into bsz parts. n_step = data.size(0) // bsz # Trim off any extra elements that wouldn't cleanly fit (remainders). data = data[:n_step * bsz] # Evenly divide the data across the bsz batches. self.data = data.view(bsz, -1).t().contiguous().pin_memory() if mem_len and warmup: self.warmup_batches = (mem_len + bptt - 1) // bptt self.warmup_elems = self.warmup_batches * bptt warmup_data = self.data.roll((self.warmup_elems, 1), (0, 1))[:self.warmup_elems] self.data = torch.cat((warmup_data, self.data)) # Partition data for DistributedDataParallel world_size = utils.distributed.get_world_size() rank = utils.distributed.get_rank() self.data = self.data.chunk(world_size, dim=1)[rank] # Number of mini-batches self.n_batch = (self.data.size(0) + self.bptt - 1) // self.bptt self.last_iter = None def roll(self, seed): rng = torch.Generator() rng.manual_seed(seed) for i in range(self.data.size(1)): row = self.data[:, i] shift = torch.randint(0, self.data.size(0), (1,), generator=rng) row = torch.cat((row[shift:], row[:shift])) self.data[:, i] = row def get_batch(self, i, bptt=None): if bptt is None: bptt = self.bptt seq_len = min(bptt, self.data.size(0) - 1 - i) end_idx = i + seq_len beg_idx = max(0, i - self.ext_len) data = self.data[beg_idx:end_idx].to(self.device, non_blocking=True) target = self.data[i+1:i+1+seq_len].to(self.device, non_blocking=True) if self.mem_len and self.warmup: warm = i >= self.warmup_elems else: warm = True return data, target, seq_len, warm def get_fixlen_iter(self, start=0): if start != 0: start += self.bptt for i in range(start, self.data.size(0) - 1, self.bptt): self.last_iter = i yield self.get_batch(i) def get_varlen_iter(self, start=0, std=5, min_len=5, max_deviation=3): max_len = self.bptt + max_deviation * std i = start while True: bptt = self.bptt if np.random.random() < 0.95 else self.bptt / 2. 
bptt = min(max_len, max(min_len, int(np.random.normal(bptt, std)))) data, target, seq_len = self.get_batch(i, bptt) i += seq_len yield data, target, seq_len if i >= self.data.size(0) - 2: break def __iter__(self): return self.get_fixlen_iter() class LMShuffledIterator(object): def __init__(self, data, bsz, bptt, device='cpu', ext_len=None, shuffle=False): """ data -- list[LongTensor] -- there is no order among the LongTensors """ self.data = data self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self): # index iterator epoch_indices = np.random.permutation(len(self.data)) if self.shuffle \ else np.array(range(len(self.data))) # sentence iterator for idx in epoch_indices: yield self.data[idx] def stream_iterator(self, sent_stream): # streams for each data in the batch streams = [None] * self.bsz data = torch.LongTensor(self.bptt, self.bsz) target = torch.LongTensor(self.bptt, self.bsz) n_retain = 0 while True: # data : [n_retain+bptt x bsz] # target : [bptt x bsz] data[n_retain:].fill_(-1) target.fill_(-1) valid_batch = True for i in range(self.bsz): n_filled = 0 try: while n_filled < self.bptt: if streams[i] is None or len(streams[i]) <= 1: streams[i] = next(sent_stream) # number of new tokens to fill in n_new = min(len(streams[i]) - 1, self.bptt - n_filled) # first n_retain tokens are retained from last batch data[n_retain+n_filled:n_retain+n_filled+n_new, i] = \ streams[i][:n_new] target[n_filled:n_filled+n_new, i] = \ streams[i][1:n_new+1] streams[i] = streams[i][n_new:] n_filled += n_new except StopIteration: valid_batch = False break if not valid_batch: return data = data.to(self.device) target = target.to(self.device) yield data, target, self.bptt n_retain = min(data.size(0), self.ext_len) if n_retain > 0: data[:n_retain] = data[-n_retain:] data.resize_(n_retain + self.bptt, data.size(1)) def __iter__(self): # sent_stream is an iterator sent_stream = self.get_sent_stream() for batch in self.stream_iterator(sent_stream): yield batch class LMMultiFileIterator(LMShuffledIterator): def __init__(self, paths, vocab, bsz, bptt, device='cpu', ext_len=None, shuffle=False): self.paths = paths self.vocab = vocab self.bsz = bsz self.bptt = bptt self.ext_len = ext_len if ext_len is not None else 0 self.device = device self.shuffle = shuffle def get_sent_stream(self, path): sents = self.vocab.encode_file(path, add_double_eos=True) if self.shuffle: np.random.shuffle(sents) sent_stream = iter(sents) return sent_stream def __iter__(self): if self.shuffle: np.random.shuffle(self.paths) for path in self.paths: # sent_stream is an iterator sent_stream = self.get_sent_stream(path) for batch in self.stream_iterator(sent_stream): yield batch class Corpus(object): def __init__(self, path, dataset, vocab, *args, **kwargs): self.dataset = dataset if vocab == 'word': self.vocab = Vocab(*args, **kwargs) elif vocab == 'bpe': self.vocab = OpenAIVocab() else: raise RuntimeError('Unsupported vocab') if self.dataset in ['ptb', 'wt2', 'enwik8', 'text8']: self.vocab.count_file(os.path.join(path, 'train.txt')) self.vocab.count_file(os.path.join(path, 'valid.txt')) self.vocab.count_file(os.path.join(path, 'test.txt')) elif self.dataset == 'wt103': self.vocab.count_file(os.path.join(path, 'train.txt')) elif self.dataset == 'lm1b': train_path_pattern = os.path.join( path, '1-billion-word-language-modeling-benchmark-r13output', 'training-monolingual.tokenized.shuffled', 'news.en-*') train_paths = glob.glob(train_path_pattern) 
# the vocab will load from file when build_vocab() is called self.vocab.build_vocab() if self.dataset in ['ptb', 'wt2', 'wt103']: self.train = self.vocab.encode_file( os.path.join(path, 'train.txt'), ordered=True) self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=True) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=True) elif self.dataset in ['enwik8', 'text8']: self.train = self.vocab.encode_file( os.path.join(path, 'train.txt'), ordered=True, add_eos=False) self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=True, add_eos=False) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=True, add_eos=False) elif self.dataset == 'lm1b': self.train = train_paths self.valid = self.vocab.encode_file( os.path.join(path, 'valid.txt'), ordered=False, add_double_eos=True) self.test = self.vocab.encode_file( os.path.join(path, 'test.txt'), ordered=False, add_double_eos=True) def get_iterator(self, split, *args, **kwargs): if split == 'train': if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(self.train, *args, **kwargs) elif self.dataset == 'lm1b': kwargs['shuffle'] = True data_iter = LMMultiFileIterator(self.train, self.vocab, *args, **kwargs) elif split in ['valid', 'test']: data = self.valid if split == 'valid' else self.test if self.dataset in ['ptb', 'wt2', 'wt103', 'enwik8', 'text8']: data_iter = LMOrderedIterator(data, *args, **kwargs) elif self.dataset == 'lm1b': data_iter = LMShuffledIterator(data, *args, **kwargs) return data_iter def get_lm_corpus(datadir, dataset, vocab): if vocab == 'word': fn = os.path.join(datadir, 'cache.pt') elif vocab == 'bpe': fn = os.path.join(datadir, 'cache.pt.bpe') else: raise RuntimeError('Unsupported vocab') if os.path.exists(fn): logging.info('Loading cached dataset...') corpus = torch.load(fn) else: logging.info('Producing dataset {}...'.format(dataset)) kwargs = {} if dataset in ['wt103', 'wt2']: kwargs['special'] = ['<eos>'] kwargs['lower_case'] = False elif dataset == 'ptb': kwargs['special'] = ['<eos>'] kwargs['lower_case'] = True elif dataset == 'lm1b': kwargs['special'] = [] kwargs['lower_case'] = False kwargs['vocab_file'] = os.path.join(datadir, '1b_word_vocab.txt') elif dataset in ['enwik8', 'text8']: pass corpus = Corpus(datadir, dataset, vocab, **kwargs) with utils.distributed.sync_workers() as rank: if rank == 0: torch.save(corpus, fn) return corpus def tokenize_raw(text, lang='en'): mt = sacremoses.MosesTokenizer(lang) text = mt.tokenize(text, return_str=True) text = re.sub(r'&quot;', '"', text) text = re.sub(r'&apos;', "'", text) text = re.sub(r'(\d)\.(\d)', r'\1 @.@ \2', text) text = re.sub(r'(\d),(\d)', r'\1 @,@ \2', text) text = re.sub(r'(\w)-(\w)', r'\1 @-@ \2', text) return text if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='unit test') parser.add_argument('--datadir', type=str, default='../data/text8', help='location of the data corpus') parser.add_argument('--dataset', type=str, default='text8', choices=['ptb', 'wt2', 'wt103', 'lm1b', 'enwik8', 'text8'], help='dataset name') args = parser.parse_args() logging.basicConfig(level=logging.INFO) corpus = get_lm_corpus(args.datadir, args.dataset, vocab='word') logging.info('Vocab size : {}'.format(len(corpus.vocab.idx2sym)))
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/data_utils.py
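A minimal sketch of the ordered iterator in data_utils.py above: a flat token stream is reshaped into bsz columns and served as (data, target, seq_len, warm) tuples. It assumes a CUDA-capable environment (the iterator pins host memory unconditionally) and a single process, so that utils.distributed reports world size 1.

import torch

from data_utils import LMOrderedIterator

stream = torch.randint(0, 1000, (10000,))          # toy ordered corpus of token ids
it = LMOrderedIterator(stream, bsz=4, bptt=32, device='cpu', warmup=False)

for data, target, seq_len, warm in it:
    print(data.shape, target.shape, seq_len, warm)  # torch.Size([32, 4]) torch.Size([32, 4]) 32 True
    break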
# coding: utf-8 # Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import functools import itertools import logging import math import os import shutil import sys import time import warnings import dllogger import numpy as np import torch import torch.nn as nn import torch.optim as optim import yaml try: from apex import amp except ModuleNotFoundError: warnings.warn('APEX AMP is unavailable') from torch.nn.parallel import DistributedDataParallel import lamb import utils from data_utils import get_lm_corpus from mem_transformer import MemTransformerLM from utils.data_parallel import BalancedDataParallel from utils.exp_utils import AverageMeter from utils.exp_utils import TimeoutHandler from utils.exp_utils import benchmark from utils.exp_utils import create_exp_dir from utils.exp_utils import l2_promote from utils.exp_utils import log_env_info from utils.exp_utils import register_ignoring_timeout_handler def parse_args(): parent_parser = argparse.ArgumentParser( description='PyTorch Transformer-XL Language Model', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False, ) parser = argparse.ArgumentParser(parents=[parent_parser], add_help=True) cfg_parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False) cfg_parser.add_argument('--config', default='default') cfg_parser.add_argument('--config_file', default=None) config_args, _ = cfg_parser.parse_known_args() if config_args.config is not None and config_args.config_file is not None: with open(config_args.config_file) as f: config = yaml.load(f, Loader=yaml.FullLoader)[config_args.config]['train'] else: config = {} general = parser.add_argument_group('general setup') general.add_argument('--work_dir', default='LM-TFM', type=str, help='Directory for the results') general.add_argument('--append_dataset', action='store_true', help='Automatically append dataset name to work_dir') general.add_argument('--append_time', action='store_true', help='Automatically append current time to work_dir') general.add_argument('--cuda', action='store_true', help='Run training on a GPU using CUDA') general.add_argument('--fp16', action='store_true', help='Run training in fp16/mixed precision') general.add_argument('--restart', type=str, default='', help='Restart training from the saved checkpoint') general.add_argument('--debug', action='store_true', help='Run in debug mode (do not create exp dir)') general.add_argument('--log_all_ranks', action='store_true', help='Enable logging from all distributed ranks') general.add_argument('--dllog_file', type=str, default='train_log.json', help='Name of the DLLogger output file') general.add_argument('--txtlog_file', type=str, default='train_log.log', help='Name of the txt log file') general.add_argument('--save_all', action='store_true', help='Save all checkpoints') general.add_argument('--no_env', action='store_true', help='Do not print info on execution env') general.add_argument('--no_eval', action='store_true', 
help='Disable model evaluation') general.add_argument('--no_test', action='store_true', help='Disable model evaluation on test data') general.add_argument('--log_interval', type=int, default=10, help='Report interval') general.add_argument('--target_throughput', type=float, default=None, help='Target training throughput (for benchmarking)') general.add_argument('--target_perplexity', type=float, default=None, help='Target validation perplexity (for benchmarking)') general.add_argument('--apex_amp_opt_level', type=str, default='O2', choices=['O0', 'O1', 'O2', 'O3'], help='Optimization level for apex amp') general.add_argument('--amp', choices=['apex', 'pytorch'], default='apex', help='Implementation of automatic mixed precision') general.add_argument('--affinity', type=str, default='socket_unique_interleaved', choices=['socket', 'single', 'single_unique', 'socket_unique_interleaved', 'socket_unique_continuous', 'disabled'], help='type of CPU affinity') dataset = parser.add_argument_group('dataset setup') dataset.add_argument('--data', type=str, default='../data/wikitext-103', help='Location of the data corpus') dataset.add_argument('--dataset', type=str, default='wt103', choices=['wt103', 'lm1b', 'enwik8', 'text8'], help='Dataset name') dataset.add_argument('--vocab', type=str, default='word', choices=['word', 'bpe'], help='Type of vocabulary') model = parser.add_argument_group('model setup') model.add_argument('--n_layer', type=int, default=16, help='Number of total layers') model.add_argument('--n_head', type=int, default=8, help='Number of heads') model.add_argument('--d_head', type=int, default=64, help='Head dimension') model.add_argument('--d_embed', type=int, default=-1, help='Embedding dimension') model.add_argument('--d_model', type=int, default=512, help='Model dimension') model.add_argument('--d_inner', type=int, default=2048, help='Inner dimension in feedforward layer') model.add_argument('--dropout', type=float, default=0.1, help='Global dropout rate') model.add_argument('--dropatt', type=float, default=0.0, help='Attention probability dropout rate') model.add_argument('--pre_lnorm', action='store_true', help='Apply LayerNorm to the input instead of the output') model.add_argument('--attn_type', type=int, default=0, help='Attention type. 
0 for ours, 1 for Shaw et al,' '2 for Vaswani et al, 3 for Al Rfou et al.') model.add_argument('--not_tied', action='store_true', help='Do not tie the word embedding and softmax weights') model.add_argument('--clamp_len', type=int, default=-1, help='Use the same pos embeddings after clamp_len') model.add_argument('--adaptive', action='store_true', help='Use adaptive softmax') model.add_argument('--div_val', type=int, default=1, help='Dividend value for adaptive input and softmax') model.add_argument('--sample_softmax', type=int, default=-1, help='Number of samples in sampled softmax') model.add_argument('--init', default='normal', type=str, help='Parameter initializer to use') model.add_argument('--emb_init', default='normal', type=str, help='Parameter initializer to use') model.add_argument('--init_range', type=float, default=0.1, help='Parameters initialized by U(-init_range, init_range)') model.add_argument('--emb_init_range', type=float, default=0.01, help='Parameters initialized by U(-init_range, init_range)') model.add_argument('--init_std', type=float, default=0.02, help='Parameters initialized by N(0, init_std)') model.add_argument('--proj_init_std', type=float, default=0.01, help='Parameters initialized by N(0, init_std)') opt = parser.add_argument_group('optimizer setup') opt.add_argument('--optim', default='jitlamb', type=str, choices=['adam', 'sgd', 'adagrad', 'lamb', 'jitlamb'], help='Optimizer to use') opt.add_argument('--lr', type=float, default=0.01, help='Initial learning rate') opt.add_argument('--mom', type=float, default=0.0, help='Momentum for sgd') opt.add_argument('--scheduler', default='cosine', type=str, choices=['cosine', 'inv_sqrt', 'dev_perf', 'constant'], help='LR scheduler to use') opt.add_argument('--max_step_scheduler', type=int, default=None, help='Max number of training steps for LR scheduler') opt.add_argument('--warmup_step', type=int, default=1000, help='Number of iterations for LR warmup') opt.add_argument('--decay_rate', type=float, default=0.5, help='Decay factor when ReduceLROnPlateau is used') opt.add_argument('--lr_min', type=float, default=0.0, help='Minimum learning rate during annealing') opt.add_argument('--clip', type=float, default=0.25, help='Gradient clipping') opt.add_argument('--weight_decay', type=float, default=0.0, help='Weight decay for adam|lamb') opt.add_argument('--clip_nonemb', action='store_true', help='Only clip the gradient of non-embedding params') opt.add_argument('--patience', type=int, default=0, help='Patience') opt.add_argument('--eta_min', type=float, default=0.001, help='Min learning rate for cosine scheduler') training = parser.add_argument_group('training setup') training.add_argument('--max_step', type=int, default=40000, help='Max number of training steps') training.add_argument('--batch_size', type=int, default=256, help='Global batch size') training.add_argument('--local_batch_size', type=int, default=None, help='Local (per-device) batch size, this setting \ overrides global --batch_size and sets batch_size \ to local_batch_size * world_size') training.add_argument('--batch_chunk', type=int, default=1, help='Split batch into chunks and train with ' 'gradient accumulation') training.add_argument('--roll', action='store_true', help='Enable random shifts within each data stream') training.add_argument('--tgt_len', type=int, default=192, help='Number of tokens to predict') training.add_argument('--ext_len', type=int, default=0, help='Length of the extended context') training.add_argument('--mem_len', type=int, 
default=192, help='Length of the retained previous heads') training.add_argument('--seed', type=int, default=1111, help='Random seed') training.add_argument('--multi_gpu', default=None, type=str, choices=['ddp', 'dp'], help='Use multiple GPU') training.add_argument('--gpu0_bsz', type=int, default=-1, help='Batch size on gpu 0 (for "dp" backend)') training.add_argument('--same_length', action='store_true', help='Use the same attn length for all tokens') training.add_argument('--varlen', action='store_true', help='Use variable length') training.add_argument('--swap_mem', action='store_true', help='Swap memory tensors to cpu') val = parser.add_argument_group('validation setup') val.add_argument('--eval_tgt_len', type=int, default=192, help='Number of tokens to predict for evaluation') val.add_argument('--eval_batch_size', type=int, default=16, help='Eval batch size') val.add_argument('--eval_max_steps', type=int, default=-1, help='Max eval steps') val.add_argument('--eval_interval', type=int, default=5000, help='Evaluation interval') dist = parser.add_argument_group('distributed setup') dist.add_argument('--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0), help='Used for multi-process training.') parser.set_defaults(**config) args, _ = parser.parse_known_args() args.tied = not args.not_tied if args.d_embed < 0: args.d_embed = args.d_model if args.ext_len < 0: raise RuntimeError('Extended context length must be non-negative') if args.mem_len == 0: if args.eval_tgt_len > args.ext_len + args.tgt_len: raise RuntimeError('eval_tgt_len should be <= tgt_len + ext_len; ' f'eval_tgt_len: {args.eval_tgt_len}, ' f'tgt_len: {args.tgt_len}, ' f'ext_len: {args.ext_len}') else: if args.eval_tgt_len > args.mem_len + args.tgt_len: raise RuntimeError('eval_tgt_len should be <= tgt_len + mem_len; ' f'eval_tgt_len: {args.eval_tgt_len}, ' f'tgt_len: {args.tgt_len}, ' f'mem_len: {args.mem_len}') if args.batch_size % args.batch_chunk != 0: raise RuntimeError('Batch size needs to be divisible by batch chunk') if ( args.local_batch_size is not None and args.local_batch_size % args.batch_chunk != 0 ): raise RuntimeError('Local batch size needs to be divisible by ' 'batch chunk') if args.fp16 and args.amp == 'apex' and 'apex' not in sys.modules: raise RuntimeError( 'APEX AMP unavailable, install APEX or switch to pytorch AMP' ) return args def save_checkpoint(args, model, mems, model_config, optimizer, scheduler, scaler, vocab, epoch, batch, last_iter, train_step, best_val_loss, is_best, work_dir, device): if args.fp16: if args.amp == 'pytorch': amp_state = scaler.state_dict() elif args.amp == 'apex': amp_state = amp.state_dict() else: amp_state = None memory = [ utils.distributed.all_gather_tensors(mem, device) for mem in mems ] state = { 'args': args, 'model_config': model_config, 'model_state': model.state_dict(), 'optimizer_state': optimizer.state_dict(), 'scheduler_state': scheduler.state_dict(), 'rng_states': utils.exp_utils.get_default_rng_states(device), 'memory': memory, 'vocab': vocab, 'amp_state': amp_state, 'epoch': epoch, 'batch': batch, 'last_iter': last_iter, 'train_step': train_step, 'best_val_loss': best_val_loss, } last_chkpt_fname = 'checkpoint_last.pt' with utils.distributed.sync_workers() as rank: last_chkpt_path = os.path.join(work_dir, last_chkpt_fname) if rank == 0: # always save last checkpoint logging.info(f'Saving checkpoint to {last_chkpt_path}') torch.save(state, last_chkpt_path) # save best checkpoint if better than previous best if is_best: best_chkpt_fname = 'checkpoint_best.pt' 
best_chkpt_path = os.path.join(work_dir, best_chkpt_fname) logging.info(f'Saving checkpoint to {best_chkpt_path}') shutil.copy(last_chkpt_path, best_chkpt_path) # save every checkpoint if save_all is true if args.save_all: step_chkpt_fname = f'checkpoint_{train_step}.pt' step_chkpt_path = os.path.join(work_dir, step_chkpt_fname) logging.info(f'Saving checkpoint to {step_chkpt_path}') shutil.copy(last_chkpt_path, step_chkpt_path) def load_checkpoint(path): if os.path.isdir(path): path = os.path.join(path, 'checkpoint_last.pt') dst = f'cuda:{torch.cuda.current_device()}' logging.info(f'Loading checkpoint from {path}') checkpoint = torch.load(path, map_location=dst) return checkpoint def init_weight(weight, args): if args.init == 'uniform': nn.init.uniform_(weight, -args.init_range, args.init_range) elif args.init == 'normal': nn.init.normal_(weight, 0.0, args.init_std) def init_bias(bias): nn.init.constant_(bias, 0.0) def weights_init(m, args): classname = m.__class__.__name__ if classname.find('Linear') != -1: if hasattr(m, 'weight') and m.weight is not None: init_weight(m.weight, args) if hasattr(m, 'bias') and m.bias is not None: init_bias(m.bias) elif classname.find('AdaptiveEmbedding') != -1: if hasattr(m, 'emb_projs'): for i in range(len(m.emb_projs)): if m.emb_projs[i] is not None: nn.init.normal_(m.emb_projs[i], 0.0, args.proj_init_std) elif classname.find('Embedding') != -1: if hasattr(m, 'weight'): init_weight(m.weight, args) elif classname.find('ProjectedAdaptiveLogSoftmax') != -1: if hasattr(m, 'cluster_weight') and m.cluster_weight is not None: init_weight(m.cluster_weight, args) if hasattr(m, 'cluster_bias') and m.cluster_bias is not None: init_bias(m.cluster_bias) if hasattr(m, 'out_projs'): for i in range(len(m.out_projs)): if m.out_projs[i] is not None: nn.init.normal_(m.out_projs[i], 0.0, args.proj_init_std) if hasattr(m, 'out_layers_weights'): for i in range(len(m.out_layers_weights)): if m.out_layers_weights[i] is not None: init_weight(m.out_layers_weights[i], args) elif classname.find('LayerNorm') != -1: if hasattr(m, 'weight'): nn.init.normal_(m.weight, 1.0, args.init_std) if hasattr(m, 'bias') and m.bias is not None: init_bias(m.bias) elif classname.find('TransformerLM') != -1: if hasattr(m, 'r_emb'): init_weight(m.r_emb, args) if hasattr(m, 'r_w_bias'): init_weight(m.r_w_bias, args) if hasattr(m, 'r_r_bias'): init_weight(m.r_r_bias, args) if hasattr(m, 'r_bias'): init_bias(m.r_bias) def update_dropout(m, args): classname = m.__class__.__name__ if classname.find('Dropout') != -1: if hasattr(m, 'p'): m.p = args.dropout def update_dropatt(m, args): if hasattr(m, 'dropatt'): m.dropatt.p = args.dropatt def evaluate(eval_iter, model, args): # Turn on evaluation mode which disables dropout. model.eval() # If the model does not use memory at all, make the ext_len longer. # Otherwise, make the mem_len longer and keep the ext_len the same. if args.mem_len == 0: model.reset_length(tgt_len=args.eval_tgt_len, ext_len=args.ext_len + args.tgt_len - args.eval_tgt_len, mem_len=args.mem_len ) else: model.reset_length(tgt_len=args.eval_tgt_len, ext_len=args.ext_len, mem_len=args.mem_len + args.tgt_len - args.eval_tgt_len, ) # Evaluation total_len, total_loss = 0, 0. 
with torch.no_grad(): mems = None for i, (data, target, seq_len, warm) in enumerate(eval_iter): if args.eval_max_steps > 0 and i >= args.eval_max_steps: break enable_autocast = args.fp16 and args.amp == 'pytorch' with torch.cuda.amp.autocast(enable_autocast): loss, mems = model(data, target, mems) loss = loss.float().mean().type_as(loss) if warm: # assert (mems is None) or mems.size(1) == model.mem_len total_loss += seq_len * loss.item() total_len += seq_len # Switch back to the training mode model.reset_length(tgt_len=args.tgt_len, ext_len=args.ext_len, mem_len=args.mem_len ) model.train() return total_loss / total_len def train_iteration(model, i, mems, data_chunks, target_chunks, scaler, optimizer, device, delay_unscale, args): cpu = torch.device('cpu') data_i = data_chunks[i].contiguous() target_i = target_chunks[i].contiguous() if args.swap_mem and mems[i] is not None: mems[i] = mems[i].to(device, non_blocking=True) enable_autocast = args.fp16 and args.amp == 'pytorch' with torch.cuda.amp.autocast(enable_autocast): loss, mems[i] = model(data_i, target_i, mems[i]) loss = loss.float().mean().type_as(loss) / args.batch_chunk if args.swap_mem and mems[i] is not None: mems[i] = mems[i].to(cpu, non_blocking=True) if args.fp16: if args.amp == 'pytorch': scaler.scale(loss).backward() elif args.amp == 'apex': with amp.scale_loss(loss, optimizer, delay_unscale=delay_unscale) as scaled_loss: scaled_loss.backward() else: loss.backward() train_loss = loss.float().item() return train_loss def train(tr_iter, va_iter, model, para_model, mems, model_config, optimizer, optimizer_sparse, scheduler, scheduler_sparse, scaler, vocab, epoch, last_batch, last_iter, train_step, best_val_loss, meters, timeout_handler, device, args): # Turn on training mode which enables dropout. 
model.train() train_loss = 0 cur_loss = float('inf') target_tokens = 0 log_step = 0 utils.distributed.barrier() log_start_time = time.time() if args.varlen: train_iter = tr_iter.get_varlen_iter(start=last_iter) else: train_iter = tr_iter.get_fixlen_iter(start=last_iter) for batch, (data, target, seq_len, _) in enumerate(train_iter, start=last_batch+1): log_step += 1 target_tokens += target.numel() for param in model.parameters(): param.grad = None data_chunks = torch.chunk(data, args.batch_chunk, 1) target_chunks = torch.chunk(target, args.batch_chunk, 1) for i in range(args.batch_chunk): if i < args.batch_chunk - 1 and isinstance(para_model, DistributedDataParallel): with para_model.no_sync(): train_loss_chunk = train_iteration( para_model, i, mems, data_chunks, target_chunks, scaler, optimizer, device, True, args ) else: train_loss_chunk = train_iteration( para_model, i, mems, data_chunks, target_chunks, scaler, optimizer, device, False, args ) train_loss += train_loss_chunk if args.fp16: if args.amp == 'pytorch': scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) elif args.amp == 'apex': torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), args.clip) else: torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip) if args.fp16 and args.amp == 'pytorch': scaler.step(optimizer) scaler.update() else: optimizer.step() if optimizer_sparse: optimizer_sparse.step() # step-wise learning rate annealing train_step += 1 if args.scheduler in ['cosine', 'constant', 'dev_perf']: # linear warmup stage if train_step < args.warmup_step: curr_lr = args.lr * train_step / args.warmup_step optimizer.param_groups[0]['lr'] = curr_lr if optimizer_sparse: optimizer_sparse.param_groups[0]['lr'] = curr_lr * 2 else: if args.scheduler == 'cosine': scheduler.step(train_step - args.warmup_step) if scheduler_sparse: scheduler_sparse.step(train_step - args.warmup_step) elif args.scheduler == 'inv_sqrt': scheduler.step(train_step) if scheduler_sparse: scheduler_sparse.step(train_step) if train_step % args.log_interval == 0: cur_loss = train_loss / log_step cur_loss = utils.distributed.all_reduce_item(cur_loss, op='mean') train_loss = 0 utils.distributed.barrier() current_time = time.time() elapsed = current_time - log_start_time avg_elapsed = elapsed / log_step avg_elapsed = utils.distributed.all_reduce_item(avg_elapsed, op='max') log_start_time = current_time log_step = 0 lr = optimizer.param_groups[0]['lr'] throughput = target_tokens / elapsed throughput = utils.distributed.all_reduce_item(throughput, op='sum') meters['train_throughput'].update(throughput, elapsed) target_tokens = 0 log_str = '| epoch {:3d} step {:>8d} | batches {:>6d} / {:d} | lr {:.3e} ' \ '| ms/batch {:5.1f} | tok/s {:7.0f} | loss {:5.2f}'.format( epoch, train_step, batch, tr_iter.n_batch, lr, avg_elapsed * 1000, throughput, cur_loss, ) dllogger_data = { 'epoch': epoch, 'train_batch': batch+1, 'lr': lr, 'train_time/batch': avg_elapsed * 1000, 'train_throughput': throughput, 'train_loss': cur_loss, } if args.dataset in ['enwik8', 'text8']: log_str += ' | bpc {:9.5f}'.format(cur_loss / math.log(2)) dllogger_data['train_bits_per_character'] = cur_loss / math.log(2) else: log_str += ' | ppl {:9.2f}'.format(math.exp(cur_loss)) dllogger_data['train_perplexity'] = math.exp(cur_loss) logging.info(log_str) dllogger.log(step=tuple([train_step]), data=dllogger_data) do_periodic_eval = train_step % args.eval_interval == 0 is_final_step = train_step == args.max_step interrupted = timeout_handler.interrupted if 
(do_periodic_eval or is_final_step or interrupted) and not args.no_eval: utils.distributed.barrier() eval_start_time = time.time() val_loss = evaluate(va_iter, model, args) val_loss = utils.distributed.all_reduce_item(val_loss, op='mean') utils.distributed.barrier() eval_elapsed = time.time() - eval_start_time logging.info('-' * 100) log_str = '| Eval {:3d} at step {:>8d} | time: {:5.2f}s ' \ '| valid loss {:5.2f}'.format( train_step // args.eval_interval, train_step, eval_elapsed, val_loss, ) dllogger_data = { 'valid_elapsed': eval_elapsed, 'valid_loss': val_loss, } if args.dataset in ['enwik8', 'text8']: log_str += ' | bpc {:9.5f}'.format(val_loss / math.log(2)) dllogger_data['valid_bits_per_character'] = val_loss / math.log(2) else: log_str += ' | valid ppl {:9.3f}'.format(math.exp(val_loss)) dllogger_data['valid_perplexity'] = math.exp(val_loss) logging.info(log_str) logging.info('-' * 100) dllogger.log(step=tuple([train_step]), data=dllogger_data) last_iter = tr_iter.last_iter # Check if the validation loss is the best we've seen so far. is_best = False if not best_val_loss or val_loss < best_val_loss: best_val_loss = val_loss is_best = True if not args.debug: save_checkpoint(args, model, mems, model_config, optimizer, scheduler, scaler, vocab, epoch, batch, last_iter, train_step, best_val_loss, is_best, args.work_dir, device) # dev-performance based learning rate annealing if args.scheduler == 'dev_perf': scheduler.step(val_loss) if scheduler_sparse: scheduler_sparse.step(val_loss) # subtract eval time from timers for training utils.distributed.barrier() log_start_time += time.time() - eval_start_time if interrupted: logging.info(f'Received SIGTERM, exiting') sys.exit(0) if is_final_step: break return train_step, best_val_loss, cur_loss def main(): args = parse_args() if args.affinity != 'disabled': nproc_per_node = torch.cuda.device_count() affinity = utils.gpu_affinity.set_affinity( args.local_rank, nproc_per_node, args.affinity ) print(f'{args.local_rank}: thread affinity: {affinity}') # Initialize device and distributed backend torch.cuda.set_device(args.local_rank) l2_promote() device = torch.device('cuda' if args.cuda else 'cpu') utils.distributed.init_distributed(args.cuda) args.work_dir = utils.exp_utils.build_work_dir_name(args.work_dir, args.dataset, args.append_dataset, args.append_time, ) with utils.distributed.sync_workers() as rank: if rank == 0: create_exp_dir(args.work_dir, scripts_to_save=['train.py', 'mem_transformer.py'], debug=args.debug) # Setup logging if args.log_all_ranks: log_file = f'train_log_rank_{utils.distributed.get_rank()}.log' else: log_file = args.txtlog_file dllog_file = args.dllog_file log_file = os.path.join(args.work_dir, log_file) dllog_file = os.path.join(args.work_dir, dllog_file) if args.debug: log_file = os.devnull dllog_file = os.devnull utils.exp_utils.setup_logging(log_all_ranks=args.log_all_ranks, filename=log_file, ) utils.exp_utils.setup_dllogger(enabled=True, filename=dllog_file) if args.local_batch_size is not None: world_size = utils.distributed.get_world_size() args.batch_size = world_size * args.local_batch_size logging.info(f'--local_batch_size was set, adjusting global batch size' f' to {args.batch_size} (local_batch_size * world_size)') if args.batch_size % args.batch_chunk != 0: raise RuntimeError('Batch size needs to be divisible by ' 'batch chunk') logging.info(args) dllogger.log(step='PARAMETER', data=vars(args)) dllogger.metadata('train_throughput', {'unit': 'tokens/s'}) dllogger.metadata('train_elapsed', {'unit': 'min'}) 
dllogger.metadata('valid_elapsed', {'unit': 'min'}) dllogger.metadata('train_perplexity', {'unit': None}) dllogger.metadata('valid_perplexity', {'unit': None}) dllogger.metadata('train_loss', {'unit': None}) dllogger.metadata('valid_loss', {'unit': None}) logging.info(f'world size: {utils.distributed.get_world_size()}') if not args.no_env: log_env_info() register_ignoring_timeout_handler() # Set the random seed manually for reproducibility. np.random.seed(args.seed) torch.manual_seed(args.seed) ########################################################################### # Load data ########################################################################### corpus = get_lm_corpus(args.data, args.dataset, args.vocab) ntokens = len(corpus.vocab) vocab = corpus.vocab args.n_token = ntokens if args.mem_len == 0: eval_mem_len = 0 else: eval_mem_len = args.mem_len + args.tgt_len - args.eval_tgt_len tr_iter = corpus.get_iterator('train', args.batch_size, args.tgt_len, device=device, ext_len=args.ext_len) va_iter = corpus.get_iterator('valid', args.eval_batch_size, args.eval_tgt_len, device=device, mem_len=eval_mem_len, ext_len=args.ext_len) te_iter = corpus.get_iterator('test', args.eval_batch_size, args.eval_tgt_len, device=device, mem_len=eval_mem_len, ext_len=args.ext_len) # adaptive softmax / embedding cutoffs, tie_projs = [], [False] if args.adaptive: assert args.dataset in ['wt103', 'lm1b'] if args.dataset == 'wt103': cutoffs = [19997, 39997, 199997] tie_projs += [True] * len(cutoffs) elif args.dataset == 'lm1b': cutoffs = [59997, 99997, 639997] tie_projs += [False] * len(cutoffs) ########################################################################### # Build the model ########################################################################### model_config = { 'n_token': ntokens, 'n_layer': args.n_layer, 'n_head': args.n_head, 'd_model': args.d_model, 'd_head': args.d_head, 'd_inner': args.d_inner, 'dropout': args.dropout, 'dropatt': args.dropatt, 'dtype': None, 'tie_weight': args.tied, 'd_embed': args.d_embed, 'div_val': args.div_val, 'tie_projs': tie_projs, 'pre_lnorm': args.pre_lnorm, 'tgt_len': args.tgt_len, 'ext_len': args.ext_len, 'mem_len': args.mem_len, 'cutoffs': cutoffs, 'same_length': args.same_length, 'attn_type': args.attn_type, 'clamp_len': args.clamp_len, 'sample_softmax': args.sample_softmax, } model = MemTransformerLM(**model_config) model.apply(functools.partial(weights_init, args=args)) # ensure embedding init is not overridden by out_layer in case of weight sharing model.word_emb.apply(functools.partial(weights_init, args=args)) args.n_all_param = sum([p.nelement() for p in model.parameters()]) args.n_nonemb_param = sum([p.nelement() for p in model.layers.parameters()]) # optimizer if args.optim.lower() == 'sgd': if args.sample_softmax > 0: dense_params, sparse_params = [], [] for param in model.parameters(): if param.size() == model.word_emb.weight.size(): sparse_params.append(param) else: dense_params.append(param) optimizer_sparse = optim.SGD(sparse_params, lr=args.lr * 2) optimizer = optim.SGD(dense_params, lr=args.lr, momentum=args.mom) else: optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.mom) optimizer_sparse = None elif args.optim.lower() == 'adam': if args.sample_softmax > 0: dense_params, sparse_params = [], [] for param in model.parameters(): if param.size() == model.word_emb.weight.size(): sparse_params.append(param) else: dense_params.append(param) optimizer_sparse = optim.SparseAdam(sparse_params, lr=args.lr) optimizer = 
optim.Adam(dense_params, lr=args.lr, weight_decay=args.weight_decay) else: optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) optimizer_sparse = None elif args.optim.lower() == 'adagrad': optimizer = optim.Adagrad(model.parameters(), lr=args.lr) optimizer_sparse = None elif args.optim.lower() == 'lamb': optimizer = lamb.Lamb(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) optimizer_sparse = None elif args.optim.lower() == 'jitlamb': optimizer = lamb.JITLamb(model.parameters(), lr=args.lr, weight_decay=args.weight_decay) optimizer_sparse = None model = model.to(device) scaler = None if args.fp16: if args.amp == 'pytorch': scaler = torch.cuda.amp.GradScaler() elif args.amp == 'apex': model, optimizer = amp.initialize( model, optimizer, opt_level=args.apex_amp_opt_level, ) if args.multi_gpu == 'ddp' and torch.distributed.is_initialized(): para_model = DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank, broadcast_buffers=False, find_unused_parameters=True, ) elif args.multi_gpu == 'dp': if args.gpu0_bsz >= 0: para_model = BalancedDataParallel(args.gpu0_bsz // args.batch_chunk, model, dim=1).to(device) else: para_model = nn.DataParallel(model, dim=1).to(device) else: para_model = model # scheduler if args.scheduler == 'cosine': if args.max_step_scheduler: max_step = args.max_step_scheduler else: max_step = args.max_step scheduler = optim.lr_scheduler.CosineAnnealingLR( optimizer, max_step - args.warmup_step, eta_min=args.eta_min) if args.sample_softmax > 0 and optimizer_sparse is not None: scheduler_sparse = optim.lr_scheduler.CosineAnnealingLR( optimizer_sparse, max_step - args.warmup_step, eta_min=args.eta_min) else: scheduler_sparse = None elif args.scheduler == 'inv_sqrt': # originally used for Transformer (in Attention is all you need) def lr_lambda(step): # return a multiplier instead of a learning rate if step == 0 and args.warmup_step == 0: return 1. else: return 1. 
/ (step ** 0.5) if step > args.warmup_step \ else step / (args.warmup_step ** 1.5) scheduler = optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda) if args.sample_softmax > 0 and optimizer_sparse is not None: scheduler_sparse = optim.lr_scheduler.LambdaLR( optimizer_sparse, lr_lambda=lr_lambda ) else: scheduler_sparse = None elif args.scheduler == 'dev_perf': scheduler = optim.lr_scheduler.ReduceLROnPlateau( optimizer, factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min, ) if args.sample_softmax > 0 and optimizer_sparse is not None: scheduler_sparse = optim.lr_scheduler.ReduceLROnPlateau( optimizer_sparse, factor=args.decay_rate, patience=args.patience, min_lr=args.lr_min, ) else: scheduler_sparse = None elif args.scheduler == 'constant': pass logging.info('=' * 100) for k, v in args.__dict__.items(): logging.info(' - {} : {}'.format(k, v)) logging.info('=' * 100) logging.info('#params = {}'.format(args.n_all_param)) logging.info('#non emb params = {}'.format(args.n_nonemb_param)) train_step = 0 start_epoch = 1 last_batch = 0 last_iter = 0 best_val_loss = None cur_loss = float('inf') train_mems = [None for _ in range(args.batch_chunk)] if args.restart: try: checkpoint = load_checkpoint(args.restart) model.load_state_dict(checkpoint['model_state']) optimizer.load_state_dict(checkpoint['optimizer_state']) scheduler.load_state_dict(checkpoint['scheduler_state']) if args.fp16: if args.amp == 'pytorch': scaler.load_state_dict(checkpoint['amp_state']) elif args.amp == 'apex': amp.load_state_dict(checkpoint['amp_state']) utils.exp_utils.set_default_rng_states( checkpoint['rng_states'], device ) train_mems = [ checkpoint['memory'][i][utils.distributed.get_rank()] for i in range(args.batch_chunk) ] train_step = checkpoint['train_step'] start_epoch = checkpoint['epoch'] last_batch = checkpoint['batch'] last_iter = checkpoint['last_iter'] best_val_loss = checkpoint['best_val_loss'] if train_step >= args.max_step: logging.info(f'Loaded checkpoint after {train_step} steps, but ' f'this run was scheduled for a total of ' f'{args.max_step} steps, exiting') sys.exit(1) model.apply(functools.partial(update_dropout, args=args)) model.apply(functools.partial(update_dropatt, args=args)) except FileNotFoundError: logging.info(f'Could not load checkpoint from {args.restart}, ' f'starting training from random init') meters = {} warmup = args.mem_len // args.tgt_len + 2 meters['train_throughput'] = AverageMeter(warmup=warmup) ########################################################################### # Train ########################################################################### # Loop over epochs. # At any point you can hit Ctrl + C to break out of training early. 
utils.distributed.barrier() start_time = time.time() with TimeoutHandler() as timeout_handler: try: for epoch in itertools.count(start=start_epoch): if args.roll: tr_iter.roll(seed=args.seed + epoch) train_step, best_val_loss, cur_loss = train( tr_iter, va_iter, model, para_model, train_mems, model_config, optimizer, optimizer_sparse, scheduler, scheduler_sparse, scaler, vocab, epoch, last_batch, last_iter, train_step, best_val_loss, meters, timeout_handler, device, args ) last_batch = 0 last_iter = 0 if train_step == args.max_step: logging.info('-' * 100) logging.info('End of training') break except KeyboardInterrupt: logging.info('-' * 100) logging.info('Exiting from training early') utils.distributed.barrier() elapsed = time.time() - start_time ########################################################################### # Test ########################################################################### summary = {} test_path = os.path.join(args.work_dir, 'checkpoint_best.pt') if ( not args.debug and not args.no_test and not args.no_eval and os.path.exists(test_path) ): # Load the best saved model. checkpoint = load_checkpoint(test_path) model.load_state_dict(checkpoint['model_state']) # Run on test data. utils.distributed.barrier() test_start_time = time.time() test_loss = evaluate(te_iter, model, args) test_loss = utils.distributed.all_reduce_item(test_loss, 'mean') utils.distributed.barrier() test_elapsed = time.time() - test_start_time logging.info('=' * 100) if args.dataset in ['enwik8', 'text8']: logging.info('| End of training | test time: {:5.2f}s | test loss {:5.2f} | test bpc {:9.5f}'.format( test_elapsed, test_loss, test_loss / math.log(2))) else: logging.info('| End of training | test time: {:5.2f}s | test loss {:5.2f} | test ppl {:9.3f}'.format( test_elapsed, test_loss, math.exp(test_loss))) logging.info('=' * 100) summary.update({ 'test_elapsed': test_elapsed, 'test_loss': test_loss, }) if args.dataset in ['enwik8', 'text8']: summary['test_bits_per_character'] = test_loss / math.log(2) else: summary['test_perplexity'] = math.exp(test_loss) logging.info(f'Training time: {(elapsed / 60):.2f} minutes') logging.info(f'Training throughput: {meters["train_throughput"].avg:.2f} tok/s') if best_val_loss: best_val_perplexity = math.exp(best_val_loss) else: best_val_perplexity = None summary.update({ 'train_throughput': meters['train_throughput'].avg, 'train_elapsed': elapsed / 60, 'train_loss': cur_loss, 'valid_loss': best_val_loss, 'valid_perplexity': best_val_perplexity, }) dllogger.log(step=tuple(), data=summary) passed = benchmark( target_perplexity=args.target_perplexity, test_perplexity=best_val_perplexity, target_throughput=args.target_throughput, test_throughput=meters['train_throughput'].avg ) if not passed: sys.exit(1) if __name__ == "__main__": # Disable profiling executor try: torch._C._jit_set_profiling_executor(False) torch._C._jit_set_profiling_mode(False) except AttributeError: pass # Before we do anything with models, we want to ensure that we get fp16 # execution of torch.einsum in APEX AMP. # Otherwise it'll default to "promote" mode, and we'll get fp32 operations. # Note that running `--apex_amp_opt_level O2` will remove the need for this # code, but it is still valid. if 'apex' in sys.modules: amp.register_half_function(torch, 'einsum') main()
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/train.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import argparse import json import logging import math import os import pickle import sys import time import warnings import dllogger import numpy as np import torch import yaml import data_utils import utils from data_utils import get_lm_corpus from data_utils import tokenize_raw from utils.exp_utils import AverageMeter from utils.exp_utils import benchmark from utils.exp_utils import create_exp_dir from utils.exp_utils import l2_promote from utils.exp_utils import log_env_info def parse_args(): parent_parser = argparse.ArgumentParser( description='PyTorch Transformer-XL Language Model', formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False, ) parser = argparse.ArgumentParser(parents=[parent_parser], add_help=True) cfg_parser = argparse.ArgumentParser(parents=[parent_parser], add_help=False) cfg_parser.add_argument('--config', default='default') cfg_parser.add_argument('--config_file', default=None) config_args, _ = cfg_parser.parse_known_args() if config_args.config is not None and config_args.config_file is not None: with open(config_args.config_file) as f: config = yaml.load(f, Loader=yaml.FullLoader)[config_args.config]['eval'] else: config = {} parser.add_argument('--work_dir', default='LM-TFM', type=str, help='experiment directory') parser.add_argument('--debug', action='store_true', help='run in debug mode (do not create exp dir)') parser.add_argument('--data', type=str, default='../data/wikitext-103', help='location of the data corpus') parser.add_argument('--manual', type=str, default=None, nargs='+', help='run model on raw input data') parser.add_argument('--dataset', type=str, default='wt103', choices=['wt103', 'lm1b', 'enwik8', 'text8'], help='dataset name') parser.add_argument('--split', type=str, default='all', choices=['all', 'valid', 'test'], help='which split to evaluate') parser.add_argument('--affinity', type=str, default='single_unique', choices=['socket', 'single', 'single_unique', 'socket_unique_interleaved', 'socket_unique_continuous', 'disabled'], help='type of CPU affinity') parser.add_argument('--type', type=str, default='pytorch', choices=['pytorch', 'torchscript'], help='type of runtime to use') parser.add_argument('--batch_size', type=int, default=16, help='batch size') parser.add_argument('--tgt_len', type=int, default=64, help='number of tokens to predict') parser.add_argument('--ext_len', type=int, default=0, help='length of the extended context') parser.add_argument('--mem_len', type=int, default=640, help='length of the retained previous heads') parser.add_argument('--seed', type=int, default=1111, help='Random seed') parser.add_argument('--clamp_len', type=int, default=-1, help='max positional embedding index') parser.add_argument('--cuda', action='store_true', help='Run evaluation on a GPU using CUDA') parser.add_argument('--model', type=str, default='', help='path to the checkpoint') parser.add_argument('--manual_config', 
type=json.loads, default=None, help='Manually specify config for the model') parser.add_argument('--manual_vocab', type=str, default='word', choices=['word', 'bpe'], help='Manually specify type of vocabulary') parser.add_argument('--fp16', action='store_true', help='Run training in fp16/mixed precision') parser.add_argument('--log_all_ranks', action='store_true', help='Enable logging for all distributed ranks') parser.add_argument('--dllog_file', type=str, default='eval_log.json', help='Name of the DLLogger output file') parser.add_argument('--same_length', action='store_true', help='set same length attention with masking') parser.add_argument('--no_env', action='store_true', help='Do not print info on execution env') parser.add_argument('--log_interval', type=int, default=10, help='Report interval') parser.add_argument('--target_perplexity', type=float, default=None, help='target perplexity') parser.add_argument('--target_throughput', type=float, default=None, help='target throughput') parser.add_argument('--save_data', action='store_true', help='save latency and throughput data to a file') parser.add_argument('--repeat', type=int, default=1, help='loop over the dataset REPEAT times') parser.add_argument('--max_size', type=int, default=None, help='run inference on up to MAX_SIZE batches') parser.add_argument('--percentiles', nargs='+', default=[90, 95, 99], help='percentiles for latency confidence intervals') parser.add_argument('--save_torchscript', default=None, type=str, help='save torchscript model to a file') parser.add_argument('--load_torchscript', default=None, type=str, help='load torchscript model from a file') parser.add_argument('--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0), help='Used for multi-process training.') parser.set_defaults(**config) args, _ = parser.parse_known_args() if args.manual: args.batch_size = 1 if args.same_length and args.tgt_len > args.mem_len: warnings.warn('--same_length is intended to be used with large ' 'mem_len relative to tgt_len') if args.ext_len < 0: raise RuntimeError('Extended context length must be non-negative') return args def load_checkpoint(path): dst = f'cuda:{torch.cuda.current_device()}' logging.info(f'Loading checkpoint from {path}') checkpoint = torch.load(path, map_location=dst) return checkpoint def format_log(loss, split, args): if args.dataset in ['enwik8', 'text8']: log_str = '| {0} loss {1:5.2f} | {0} bpc {2:9.5f} '.format( split, loss, loss / math.log(2)) else: log_str = '| {0} loss {1:5.2f} | {0} ppl {2:9.3f} '.format( split, loss, math.exp(loss)) return log_str def evaluate( eval_iter, model, device, meters, log_interval, max_size=None, repeat=1 ): total_len, total_loss = 0, 0. 
eval_step = 0 log_throughput = 0 log_latency = 0 log_loss = 0 utils.distributed.barrier() start_time = time.time() with torch.no_grad(): mems = None for _ in range(repeat): for idx, (data, target, seq_len, warm) in enumerate(eval_iter): if max_size and idx >= max_size: break eval_step += 1 utils.distributed.barrier() start_iter = time.time() loss, mems = model(data, target, mems) utils.distributed.barrier() elapsed = time.time() - start_iter loss = loss.float().mean() log_loss += loss.item() if warm: total_loss += seq_len * loss.item() total_len += seq_len meters['eval_latency'].update(elapsed) log_latency += elapsed target_tokens = target.numel() throughput = target_tokens / elapsed throughput = utils.distributed.all_reduce_item(throughput, op='sum') meters['eval_throughput'].update(throughput, elapsed) log_throughput += throughput if eval_step % log_interval == 0: log_throughput /= log_interval log_latency /= log_interval log_loss /= log_interval log_ppl = math.exp(log_loss) log_str = '| step {:>8d} | batches {:>6d} / {:d} ' \ '| ms/batch {:5.2f} | tok/s {:7.0f} | loss {:5.2f} | ppl {:5.2f}'.format( eval_step, idx+1, eval_iter.n_batch, log_latency * 1000, log_throughput, log_loss, log_ppl, ) logging.info(log_str) dllogger_data = { 'eval_latency': log_latency * 1000, 'eval_throughput': log_throughput, 'eval_loss': log_loss, 'eval_perplexity': log_ppl, } dllogger.log(step=tuple([eval_step]), data=dllogger_data) log_throughput = 0 log_latency = 0 log_loss = 0 utils.distributed.barrier() total_time = time.time() - start_time logging.info('Time : {:.2f}s, {:.2f}ms/segment'.format( total_time, 1000 * total_time / (idx+1))) avg_loss = total_loss / total_len avg_loss = utils.distributed.all_reduce_item(avg_loss, op='mean') return avg_loss def compile_model(model, device, args): inp = torch.randint(0, 1000, (args.tgt_len, args.batch_size)).to(device) tgt = torch.randint(0, 1000, (args.tgt_len, args.batch_size)).to(device) utils.distributed.barrier() start = time.time() with torch.no_grad(): mems = None for _ in range(2): _, mems = model(inp, tgt, mems) utils.distributed.barrier() stop = time.time() logging.info(f'Building the model took {stop - start:.2f} seconds') def main(): args = parse_args() if args.affinity != 'disabled': nproc_per_node = torch.cuda.device_count() affinity = utils.gpu_affinity.set_affinity( args.local_rank, nproc_per_node, args.affinity ) print(f'{args.local_rank}: thread affinity: {affinity}') if args.type == 'pytorch': from mem_transformer import MemTransformerLM else: from inference.mem_transformer_jit import MemTransformerLM torch.cuda.set_device(args.local_rank) l2_promote() device = torch.device('cuda' if args.cuda else 'cpu') utils.distributed.init_distributed(args.cuda) with utils.distributed.sync_workers() as rank: if rank == 0: create_exp_dir(args.work_dir, debug=args.debug) # Setup logging if args.log_all_ranks: log_file = f'eval_log_rank_{utils.distributed.get_rank()}.log' else: log_file = f'eval_log.log' dllog_file = args.dllog_file log_file = os.path.join(args.work_dir, log_file) dllog_file = os.path.join(args.work_dir, dllog_file) if args.debug: log_file = os.devnull dllog_file = os.devnull utils.exp_utils.setup_logging(log_all_ranks=args.log_all_ranks, filename=log_file, filemode='a', ) utils.exp_utils.setup_dllogger(enabled=True, filename=dllog_file) logging.info(args) dllogger.log(step='PARAMETER', data=vars(args)) dllogger.metadata('eval_throughput', {'unit': 'tokens/s'}) dllogger.metadata('eval_loss', {'unit': None}) dllogger.metadata('eval_perplexity', 
{'unit': None}) dllogger.metadata('eval_latency', {'unit': 'ms'}) dllogger.metadata('eval_avg_latency', {'unit': 'ms'}) for p in args.percentiles: dllogger.metadata(f'eval_{p}%_latency', {'unit': 'ms'}) if not args.no_env: log_env_info() # Set the random seed manually for reproducibility. np.random.seed(args.seed) torch.manual_seed(args.seed) if args.model: model_path = args.model elif args.work_dir: model_path = os.path.join(args.work_dir, 'checkpoint_best.pt') else: raise RuntimeError('Specify path to checkpoint using --model or --work_dir') if not args.manual_config: checkpoint = load_checkpoint(model_path) vocab_type = checkpoint['args'].vocab else: checkpoint = None vocab_type = args.manual_vocab if args.manual: vocab = checkpoint['vocab'] if hasattr(vocab, 'sym2idx') and not hasattr(vocab, 'unk_idx'): vocab.unk_idx = vocab.sym2idx['<unk>'] text = " ".join(args.manual) tokenized = tokenize_raw(text) symbols = vocab.tokenize(tokenized, add_eos=True) tensor = vocab.convert_to_tensor(symbols) iter = data_utils.LMOrderedIterator(tensor, bsz=args.batch_size, bptt=args.tgt_len, device=device, ext_len=args.ext_len, warmup=False) else: # Load dataset corpus = get_lm_corpus(args.data, args.dataset, vocab_type) if args.split == 'valid' or args.split == 'test': iter = corpus.get_iterator(args.split, args.batch_size, args.tgt_len, device=device, mem_len=args.mem_len, ext_len=args.ext_len) else: raise RuntimeError('Unknown split') if args.fp16: dtype = torch.float16 math_str = 'fp16' else: dtype = torch.float32 math_str = 'fp32' if args.load_torchscript: model = torch.jit.load(args.load_torchscript) elif not args.manual_config: checkpoint['model_config']['tgt_len'] = args.tgt_len checkpoint['model_config']['ext_len'] = args.ext_len checkpoint['model_config']['mem_len'] = args.mem_len checkpoint['model_config']['clamp_len'] = args.clamp_len checkpoint['model_config']['same_length'] = args.same_length checkpoint['model_config']['dtype'] = dtype model = MemTransformerLM(**checkpoint['model_config']) if args.type == 'pytorch': model.load_state_dict(checkpoint['model_state']) elif args.type == 'torchscript': model.load_state_dict(checkpoint['model_state'], strict=False) elif args.manual_config: args.manual_config['tgt_len'] = args.tgt_len args.manual_config['ext_len'] = args.ext_len args.manual_config['mem_len'] = args.mem_len args.manual_config['clamp_len'] = args.clamp_len args.manual_config['same_length'] = args.same_length args.manual_config['dtype'] = dtype model = MemTransformerLM(**args.manual_config) model = model.eval() model = model.to(device) model = model.to(dtype) if args.type == 'torchscript' and not args.manual_config: state = checkpoint['model_state'] tie_projs = checkpoint['model_config']['tie_projs'] tie_weight = checkpoint['model_config']['tie_weight'] div_val = checkpoint['model_config']['div_val'] d_model = checkpoint['model_config']['d_model'] d_embed = checkpoint['model_config']['d_embed'] if div_val != 1 or d_model != d_embed: for i in range(len(model.word_emb.emb_projs)): model.word_emb.emb_projs[i] = state[f'word_emb.emb_projs.{i}'].to(dtype) for i in range(len(model.crit.out_projs)): if div_val == 1: src = 0 else: src = i if model.crit.out_projs[i] is not None: if tie_projs[i]: model.crit.out_projs[i] = state[f'word_emb.emb_projs.{src}'].to(dtype) else: model.crit.out_projs[i] = state[f'crit.out_projs.{i}'].to(dtype) for i in range(len(model.crit.out_layers_biases)): model.crit.out_layers_biases[i] = state[f'crit.out_layers_biases.{i}'].to(dtype) if tie_weight: for i in 
range(len(model.crit.out_layers_weights)): model.crit.out_layers_weights[i] = state[f'word_emb.emb_layers.{i}.weight'].to(dtype) else: for i in range(len(model.crit.out_layers_weights)): model.crit.out_layers_weights[i] = state[f'crit.out_layers_weights.{i}'].to(dtype) model = torch.jit.script(model) if args.type != 'pytorch': compile_model(model, device, args) if args.type == 'torchscript' and args.save_torchscript: torch.jit.save(model, args.save_torchscript) logging.info(f'Evaluating with: math {math_str} type {args.type} ' f'bsz {args.batch_size} tgt_len {args.tgt_len} ' f'ext_len {args.ext_len} mem_len {args.mem_len} ' f'clamp_len {args.clamp_len}') meters = {} warmup = args.mem_len // args.tgt_len + 2 meters['eval_throughput'] = AverageMeter(warmup=warmup, keep=args.save_data) meters['eval_latency'] = AverageMeter(warmup=warmup, keep=args.save_data) loss = evaluate(iter, model, device, meters, args.log_interval, args.max_size, args.repeat) perplexity = math.exp(loss) log_str = format_log(loss, args.split, args) summary = { 'eval_loss': loss, 'eval_ppl': perplexity, } logging.info('=' * 100) logging.info(log_str) logging.info('=' * 100) if args.save_data: latency_data = np.array(meters['eval_latency'].vals) throughput_data = np.array(meters['eval_throughput'].vals) precision = 'fp16' if args.fp16 else 'fp32' data_fname = f'eval_data_{args.batch_size}_{precision}_{args.type}' data_path = os.path.join(args.work_dir, data_fname) data = { 'args': args, 'throughput': throughput_data, 'latency': latency_data, } with open(data_path, 'wb') as f: pickle.dump(data, f) avg_throughput = meters['eval_throughput'].avg logging.info(f'Throughput Avg: {avg_throughput:.2f} tok/s') logging.info(f'Latency Avg: {1000.0 * latency_data.mean():.2f} ms') for p in args.percentiles: logging.info(f'Latency {p}%: {1000.0 * np.percentile(latency_data, p):.2f} ms') logging.info('=' * 100) summary.update({ 'eval_throughput': avg_throughput, 'eval_avg_latency': 1000 * latency_data.mean(), }) for p in args.percentiles: summary[f'eval_{p}%_latency'] = 1000 * np.percentile(latency_data, p) dllogger.log(step=tuple(), data=summary) passed = benchmark(target_perplexity=args.target_perplexity, test_perplexity=perplexity, target_throughput=args.target_throughput, test_throughput=meters['eval_throughput'].avg, ) if not passed: sys.exit(1) if __name__ == "__main__": # Disable profiling executor try: torch._C._jit_set_profiling_executor(False) torch._C._jit_set_profiling_mode(False) except AttributeError: pass main()
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/eval.py
import collections
import math
import os
import pathlib
import re

import pynvml

pynvml.nvmlInit()


def systemGetDriverVersion():
    return pynvml.nvmlSystemGetDriverVersion()


def deviceGetCount():
    return pynvml.nvmlDeviceGetCount()


class device:
    # assume nvml returns list of 64 bit ints
    _nvml_affinity_elements = math.ceil(os.cpu_count() / 64)

    def __init__(self, device_idx):
        super().__init__()
        self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)

    def getName(self):
        return pynvml.nvmlDeviceGetName(self.handle)

    def getCpuAffinity(self):
        affinity_string = ''
        for j in pynvml.nvmlDeviceGetCpuAffinity(
            self.handle, device._nvml_affinity_elements
        ):
            # assume nvml returns list of 64 bit ints
            affinity_string = '{:064b}'.format(j) + affinity_string
        affinity_list = [int(x) for x in affinity_string]
        affinity_list.reverse()  # so core 0 is in 0th element of list
        ret = [i for i, e in enumerate(affinity_list) if e != 0]
        return ret


def set_socket_affinity(gpu_id):
    dev = device(gpu_id)
    affinity = dev.getCpuAffinity()
    os.sched_setaffinity(0, affinity)


def set_single_affinity(gpu_id):
    dev = device(gpu_id)
    affinity = dev.getCpuAffinity()
    os.sched_setaffinity(0, affinity[:1])


def set_single_unique_affinity(gpu_id, nproc_per_node):
    devices = [device(i) for i in range(nproc_per_node)]
    socket_affinities = [dev.getCpuAffinity() for dev in devices]

    siblings_list = get_thread_siblings_list()
    siblings_dict = dict(siblings_list)

    # remove siblings
    for idx, socket_affinity in enumerate(socket_affinities):
        socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))

    affinities = []
    assigned = []

    for socket_affinity in socket_affinities:
        for core in socket_affinity:
            if core not in assigned:
                affinities.append([core])
                assigned.append(core)
                break
    os.sched_setaffinity(0, affinities[gpu_id])


def set_socket_unique_affinity(gpu_id, nproc_per_node, mode):
    device_ids = [device(i) for i in range(nproc_per_node)]
    socket_affinities = [dev.getCpuAffinity() for dev in device_ids]

    siblings_list = get_thread_siblings_list()
    siblings_dict = dict(siblings_list)

    # remove siblings
    for idx, socket_affinity in enumerate(socket_affinities):
        socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values()))

    socket_affinities_to_device_ids = collections.defaultdict(list)

    for idx, socket_affinity in enumerate(socket_affinities):
        socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx)

    for socket_affinity, device_ids in socket_affinities_to_device_ids.items():
        devices_per_group = len(device_ids)
        cores_per_device = len(socket_affinity) // devices_per_group
        for group_id, device_id in enumerate(device_ids):
            if device_id == gpu_id:
                if mode == 'interleaved':
                    affinity = list(socket_affinity[group_id::devices_per_group])
                elif mode == 'continuous':
                    affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device])
                else:
                    raise RuntimeError('Unknown set_socket_unique_affinity mode')

                # reintroduce siblings
                affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict]
                os.sched_setaffinity(0, affinity)


def get_thread_siblings_list():
    path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list'
    thread_siblings_list = []
    pattern = re.compile(r'(\d+)\D(\d+)')
    for fname in pathlib.Path(path[0]).glob(path[1:]):
        with open(fname) as f:
            content = f.read().strip()
            res = pattern.findall(content)
            if res:
                pair = tuple(map(int, res[0]))
                thread_siblings_list.append(pair)
    return thread_siblings_list


def set_affinity(gpu_id, nproc_per_node, mode='socket'):
    if mode == 'socket':
        set_socket_affinity(gpu_id)
    elif mode == 'single':
        set_single_affinity(gpu_id)
    elif mode == 'single_unique':
        set_single_unique_affinity(gpu_id, nproc_per_node)
    elif mode == 'socket_unique_interleaved':
        set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved')
    elif mode == 'socket_unique_continuous':
        set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous')
    else:
        raise RuntimeError('Unknown affinity mode')

    affinity = os.sched_getaffinity(0)
    return affinity
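
# Usage sketch (an addition, not part of the original NVIDIA file): how a
# per-process training script typically applies an affinity mode before heavy
# CPU work starts. It assumes one process per GPU (e.g. launched with torchrun)
# and that pynvml can see the devices; the variable names are illustrative.
if __name__ == '__main__':
    import torch

    local_rank = int(os.getenv('LOCAL_RANK', 0))        # rank of this process on the node
    nproc_per_node = torch.cuda.device_count()          # one process per visible GPU
    affinity = set_affinity(local_rank, nproc_per_node,
                            mode='socket_unique_interleaved')
    print(f'rank {local_rank}: CPU affinity: {sorted(affinity)}')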
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/gpu_affinity.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class AdaptiveLogSoftmax(nn.Module):
    def __init__(self, in_features, n_classes, cutoffs, keep_order=False):
        super(AdaptiveLogSoftmax, self).__init__()

        cutoffs = list(cutoffs)

        if (cutoffs != sorted(cutoffs)) \
                or (min(cutoffs) <= 0) \
                or (max(cutoffs) >= (n_classes - 1)) \
                or (len(set(cutoffs)) != len(cutoffs)) \
                or any([int(c) != c for c in cutoffs]):
            raise ValueError("cutoffs should be a sequence of unique, positive "
                             "integers sorted in an increasing order, where "
                             "each value is between 1 and n_classes-1")

        self.in_features = in_features
        self.n_classes = n_classes
        self.cutoffs = cutoffs + [n_classes]

        self.shortlist_size = self.cutoffs[0]
        self.n_clusters = len(self.cutoffs) - 1
        self.head_size = self.shortlist_size + self.n_clusters

        self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.in_features))
        self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))

        self.keep_order = keep_order

    def forward(self, hidden, target, weight, bias, keep_order=False):
        if hidden.size(0) != target.size(0):
            raise RuntimeError('Input and target should have the same size '
                               'in the batch dimension.')

        head_weight = torch.cat(
            [weight[:self.shortlist_size], self.cluster_weight], dim=0)
        head_bias = torch.cat(
            [bias[:self.shortlist_size], self.cluster_bias], dim=0)

        head_logit = F.linear(hidden, head_weight, bias=head_bias)
        head_logprob = F.log_softmax(head_logit, dim=1)

        nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)

        offset = 0
        cutoff_values = [0] + self.cutoffs
        for i in range(len(cutoff_values) - 1):
            l_idx, h_idx = cutoff_values[i], cutoff_values[i + 1]

            mask_i = (target >= l_idx) & (target < h_idx)
            indices_i = mask_i.nonzero(as_tuple=False).squeeze()

            if indices_i.numel() == 0:
                continue

            target_i = target.index_select(0, indices_i) - l_idx
            head_logprob_i = head_logprob.index_select(0, indices_i)

            if i == 0:
                logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
            else:
                weight_i = weight[l_idx:h_idx]
                bias_i = bias[l_idx:h_idx]

                hidden_i = hidden.index_select(0, indices_i)

                tail_logit_i = F.linear(hidden_i, weight_i, bias=bias_i)
                tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)

                logprob_i = head_logprob_i[:, -i] \
                    + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)

            if (hasattr(self, 'keep_order') and self.keep_order) or keep_order:
                nll.index_copy_(0, indices_i, -logprob_i)
            else:
                nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)

            offset += logprob_i.size(0)

        return nll
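
# Usage sketch (an addition, not part of the original file): a tiny, CPU-only
# example of the forward() contract. The shortlist/cluster sizes, the external
# `weight`/`bias` tensors and every shape below are illustrative assumptions.
if __name__ == '__main__':
    in_features, n_classes = 32, 100
    crit = AdaptiveLogSoftmax(in_features, n_classes, cutoffs=[20, 50])

    # the full output embedding matrix and bias are supplied at call time
    weight = torch.randn(n_classes, in_features)
    bias = torch.zeros(n_classes)

    hidden = torch.randn(8, in_features)                   # [batch, in_features]
    target = torch.tensor([1, 5, 25, 30, 60, 70, 2, 55])   # ids spread across clusters

    nll = crit(hidden, target, weight, bias)               # per-token negative log-likelihood
    print(nll.shape, nll.mean().item())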
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/adaptive_softmax.py
import torch from torch.nn.parallel import DataParallel from torch.nn.parallel._functions import Scatter from torch.nn.parallel.parallel_apply import parallel_apply def scatter(inputs, target_gpus, chunk_sizes, dim=0): r""" Slices tensors into approximately equal chunks and distributes them across given GPUs. Duplicates references to objects that are not tensors. """ def scatter_map(obj): if isinstance(obj, torch.Tensor): try: return Scatter.apply(target_gpus, chunk_sizes, dim, obj) except: print('obj', obj.size()) print('dim', dim) print('chunk_sizes', chunk_sizes) quit() if isinstance(obj, tuple) and len(obj) > 0: return list(zip(*map(scatter_map, obj))) if isinstance(obj, list) and len(obj) > 0: return list(map(list, zip(*map(scatter_map, obj)))) if isinstance(obj, dict) and len(obj) > 0: return list(map(type(obj), zip(*map(scatter_map, obj.items())))) return [obj for targets in target_gpus] # After scatter_map is called, a scatter_map cell will exist. This cell # has a reference to the actual function scatter_map, which has references # to a closure that has a reference to the scatter_map cell (because the # fn is recursive). To avoid this reference cycle, we set the function to # None, clearing the cell try: return scatter_map(inputs) finally: scatter_map = None def scatter_kwargs(inputs, kwargs, target_gpus, chunk_sizes, dim=0): r"""Scatter with support for kwargs dictionary""" inputs = scatter(inputs, target_gpus, chunk_sizes, dim) if inputs else [] kwargs = scatter(kwargs, target_gpus, chunk_sizes, dim) if kwargs else [] if len(inputs) < len(kwargs): inputs.extend([() for _ in range(len(kwargs) - len(inputs))]) elif len(kwargs) < len(inputs): kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))]) inputs = tuple(inputs) kwargs = tuple(kwargs) return inputs, kwargs class BalancedDataParallel(DataParallel): def __init__(self, gpu0_bsz, *args, **kwargs): self.gpu0_bsz = gpu0_bsz super().__init__(*args, **kwargs) def forward(self, *inputs, **kwargs): if not self.device_ids: return self.module(*inputs, **kwargs) if self.gpu0_bsz == 0: device_ids = self.device_ids[1:] else: device_ids = self.device_ids inputs, kwargs = self.scatter(inputs, kwargs, device_ids) if len(self.device_ids) == 1: return self.module(*inputs[0], **kwargs[0]) replicas = self.replicate(self.module, self.device_ids) if self.gpu0_bsz == 0: replicas = replicas[1:] outputs = self.parallel_apply(replicas, device_ids, inputs, kwargs) return self.gather(outputs, self.output_device) def parallel_apply(self, replicas, device_ids, inputs, kwargs): return parallel_apply(replicas, inputs, kwargs, device_ids) def scatter(self, inputs, kwargs, device_ids): bsz = inputs[0].size(self.dim) num_dev = len(self.device_ids) gpu0_bsz = self.gpu0_bsz bsz_unit = (bsz - gpu0_bsz) // (num_dev - 1) if gpu0_bsz < bsz_unit: chunk_sizes = [gpu0_bsz] + [bsz_unit] * (num_dev - 1) delta = bsz - sum(chunk_sizes) for i in range(delta): chunk_sizes[i + 1] += 1 if gpu0_bsz == 0: chunk_sizes = chunk_sizes[1:] else: return super().scatter(inputs, kwargs, device_ids) return scatter_kwargs(inputs, kwargs, device_ids, chunk_sizes, dim=self.dim)
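
# Usage sketch (an addition, not part of the original file): BalancedDataParallel
# behaves like nn.DataParallel, except that GPU 0 receives a smaller chunk of the
# batch (gpu0_bsz), which helps when GPU 0 also hosts extra state. The module,
# batch sizes and tensors below are illustrative assumptions.
if __name__ == '__main__':
    if torch.cuda.device_count() >= 2:
        import torch.nn as nn

        module = nn.Linear(16, 4).cuda()
        model = BalancedDataParallel(2, module, dim=0)   # 2 samples go to GPU 0
        out = model(torch.randn(10, 16).cuda())          # remaining 8 are split over the other GPUs
        print(out.shape)
    else:
        print('BalancedDataParallel sketch skipped: needs at least 2 GPUs')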
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/data_parallel.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch import torch.nn as nn import torch.nn.functional as F class OptionalParameterList(nn.ParameterList): def extra_repr(self): child_lines = [] for k, p in self._parameters.items(): if p is not None: size_str = 'x'.join(str(size) for size in p.size()) device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device()) parastr = 'Parameter containing: [{} of size {}{}]'.format( torch.typename(p), size_str, device_str) child_lines.append(' (' + str(k) + '): ' + parastr) tmpstr = '\n'.join(child_lines) return tmpstr class ProjectedAdaptiveLogSoftmax(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False): super().__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters self.tie_projs = tie_projs if self.n_clusters > 0: self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed)) self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters)) if not out_layers_weights: self.out_layers_weights = nn.ParameterList() else: self.out_layers_weights = out_layers_weights self.out_layers_biases = nn.ParameterList() self.shared_out_projs = out_projs self.out_projs = OptionalParameterList() if div_val == 1: if d_proj != d_embed: for i in range(len(self.cutoffs)): if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_embed)) ) else: # self.out_projs = [None] * len(self.cutoffs) self.out_projs.append(None) self.out_layers_biases.append( nn.Parameter(torch.zeros(n_token)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(n_token, d_embed)) ) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) if tie_projs[i]: self.out_projs.append(None) else: self.out_projs.append( nn.Parameter(torch.zeros(d_proj, d_emb_i)) ) self.out_layers_biases.append( nn.Parameter(torch.zeros(r_idx - l_idx)) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i)) ) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj): if proj is None: logit = F.linear(hidden, weight, bias=bias) else: logit = torch.einsum('bd,de,ev->bv', hidden, proj, weight.t()) if bias is not None: logit = logit + bias return logit def get_out_proj(self, i): if self.tie_projs[i]: if len(self.shared_out_projs) == 0: return None elif len(self.shared_out_projs) == 1: return self.shared_out_projs[0] else: return self.shared_out_projs[i] else: return self.out_projs[i] def forward(self, hidden, target, keep_order=False): ''' hidden :: [len*bsz x d_proj] 
target :: [len*bsz] ''' if hidden.size(0) != target.size(0): raise RuntimeError('Input and target should have the same size ' 'in the batch dimension.') if self.n_clusters == 0: logit = self._compute_logit(hidden, self.out_layers_weights[0], self.out_layers_biases[0], self.get_out_proj(0)) nll = -F.log_softmax(logit, dim=-1) \ .gather(1, target.unsqueeze(1)).squeeze(1) else: # construct weights and biases weights, biases = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0) head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device) offset = 0 cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] mask_i = (target >= l_idx) & (target < r_idx) indices_i = mask_i.nonzero(as_tuple=False).squeeze() if indices_i.numel() == 0: continue target_i = target.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) if i == 0: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i) hidden_i = hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:, -i] \ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order or keep_order: nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return nll
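
# Usage sketch (an addition, not part of the original file): a minimal, CPU-only
# example of the forward() contract when div_val == 1 and the projection
# dimension differs from the embedding dimension. Every size, cutoff and tensor
# below is an illustrative assumption, not a recommended configuration.
if __name__ == '__main__':
    n_token, d_embed, d_proj = 1000, 64, 32
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj,
                                       cutoffs=[100, 500], div_val=1,
                                       tie_projs=[False, False, False])

    hidden = torch.randn(8, d_proj)                              # [len*bsz, d_proj]
    target = torch.tensor([3, 50, 120, 200, 600, 999, 7, 450])   # ids spread across clusters
    nll = crit(hidden, target)                                   # per-token negative log-likelihood
    print(nll.shape, nll.mean().item())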
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/proj_adaptive_softmax.py
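A minimal usage sketch (not part of the repository file above): constructing ProjectedAdaptiveLogSoftmax for a hypothetical 10k-token vocabulary with one cutoff and div_val=2, then computing per-token NLL. All sizes are invented, the import path assumes the working directory is the pytorch/ folder that contains utils/, and the parameters are left at their zero initialization (the training scripts initialize them elsewhere).

import torch
from utils.proj_adaptive_softmax import ProjectedAdaptiveLogSoftmax

n_token, d_embed, d_proj = 10000, 256, 256
cutoffs = [5000]                               # single head/tail split (hypothetical)
tie_projs = [False] * (len(cutoffs) + 1)       # one entry per cluster, head included

crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj, cutoffs,
                                   div_val=2, tie_projs=tie_projs)

hidden = torch.randn(8, d_proj)                # [len*bsz x d_proj]
target = torch.randint(0, n_token, (8,))       # [len*bsz]
nll = crit(hidden, target)                     # per-token negative log-likelihood
print(nll.shape)                               # torch.Size([8])
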
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from . import distributed from . import exp_utils from . import gpu_affinity
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/__init__.py
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import os from contextlib import contextmanager import torch def init_distributed(cuda): """ Initializes distributed backend. :param cuda: (bool) if True initializes nccl backend, if False initializes gloo backend """ world_size = int(os.environ.get('WORLD_SIZE', 1)) distributed = (world_size > 1) if distributed: backend = 'nccl' if cuda else 'gloo' torch.distributed.init_process_group(backend=backend, init_method='env://') assert torch.distributed.is_initialized() return distributed def barrier(): """ Call torch.distributed.barrier() if distritubed is in use, else calls torch.cuda.synchronize() if CUDA is initialized. """ if torch.distributed.is_available() and torch.distributed.is_initialized(): torch.distributed.barrier() elif torch.cuda.is_available() and torch.cuda.is_initialized(): torch.cuda.synchronize() def get_rank(): """ Gets distributed rank or returns zero if distributed is not initialized. """ if torch.distributed.is_available() and torch.distributed.is_initialized(): rank = torch.distributed.get_rank() else: rank = 0 return rank def get_world_size(): """ Gets total number of distributed workers or returns one if distributed is not initialized. """ if torch.distributed.is_available() and torch.distributed.is_initialized(): world_size = torch.distributed.get_world_size() else: world_size = 1 return world_size def all_reduce_item(value, op='sum'): """ All-reduces single scalar value if distributed is in use """ if torch.distributed.is_available() and torch.distributed.is_initialized(): if op == 'sum' or op == 'mean': dop = torch.distributed.ReduceOp.SUM elif op == 'min': dop = torch.distributed.ReduceOp.MIN elif op == 'max': dop = torch.distributed.ReduceOp.MAX elif op == 'product': dop = torch.distributed.ReduceOp.PRODUCT else: raise RuntimeError('Unsupported reduce op') backend = torch.distributed.get_backend() if backend == torch.distributed.Backend.NCCL: device = torch.device('cuda') elif backend == torch.distributed.Backend.GLOO: device = torch.device('cpu') else: raise RuntimeError('Unsupported distributed backend') tensor = torch.tensor(value, device=device) torch.distributed.all_reduce(tensor, dop) if op == 'mean': tensor /= get_world_size() ret = tensor.item() else: ret = value return ret def all_gather_tensors(tensor, device): tensor = tensor.to(device) world_size = get_world_size() if world_size == 1: tensors = [tensor] else: tensors = [torch.empty_like(tensor) for _ in range(world_size)] torch.distributed.all_gather(tensors, tensor) return tensors @contextmanager def sync_workers(): """ Yields distributed rank and synchronizes all workers on exit. """ rank = get_rank() yield rank barrier()
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/distributed.py
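A minimal sketch (assumed usage, not repository code): the helpers above are written so the same code path works both under a torchrun-style launcher (WORLD_SIZE > 1 in the environment) and in a plain single-process run. The loss value is a placeholder and the import assumes the repository's pytorch/ directory is on the path.

import torch
from utils import distributed as dist_utils

use_cuda = torch.cuda.is_available()
dist_utils.init_distributed(use_cuda)            # only creates a process group if WORLD_SIZE > 1

local_loss = 0.123                               # hypothetical per-worker scalar
mean_loss = dist_utils.all_reduce_item(local_loss, op='mean')  # returns the local value when not distributed

with dist_utils.sync_workers() as rank:          # barrier on exit
    if rank == 0:
        print(f'workers: {dist_utils.get_world_size()}, mean loss: {mean_loss}')
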
import numpy as np import torch from torch import nn class LogUniformSampler(object): def __init__(self, range_max, n_sample): """ Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` expected count can be approximated by 1 - (1 - p)^n and we use a numerically stable version -expm1(num_tries * log1p(-p)) Our implementation fixes num_tries at 2 * n_sample, and the actual #samples will vary from run to run """ with torch.no_grad(): self.range_max = range_max log_indices = torch.arange(1., range_max+2., 1.).log_() self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1] # print('P', self.dist.numpy().tolist()[-30:]) self.log_q = (- (-self.dist.double().log1p_() * 2 * n_sample).expm1_()).log_().float() self.n_sample = n_sample def sample(self, labels): """ labels: [b1, b2] Return true_log_probs: [b1, b2] samp_log_probs: [n_sample] neg_samples: [n_sample] """ # neg_samples = torch.empty(0).long() n_sample = self.n_sample n_tries = 2 * n_sample with torch.no_grad(): neg_samples = torch.multinomial(self.dist, n_tries, replacement=True).unique() device = labels.device neg_samples = neg_samples.to(device) true_log_probs = self.log_q[labels].to(device) samp_log_probs = self.log_q[neg_samples].to(device) return true_log_probs, samp_log_probs, neg_samples def sample_logits(embedding, bias, labels, inputs, sampler): """ embedding: an nn.Embedding layer bias: [n_vocab] labels: [b1, b2] inputs: [b1, b2, n_emb] sampler: you may use a LogUniformSampler Return logits: [b1, b2, 1 + n_sample] """ true_log_probs, samp_log_probs, neg_samples = sampler.sample(labels) n_sample = neg_samples.size(0) b1, b2 = labels.size(0), labels.size(1) all_ids = torch.cat([labels.view(-1), neg_samples]) all_w = embedding(all_ids) true_w = all_w[: -n_sample].view(b1, b2, -1) sample_w = all_w[- n_sample:].view(n_sample, -1) all_b = bias[all_ids] true_b = all_b[: -n_sample].view(b1, b2) sample_b = all_b[- n_sample:] hit = (labels[:, :, None] == neg_samples).detach() true_logits = torch.einsum('ijk,ijk->ij', true_w, inputs) + true_b - true_log_probs sample_logits = torch.einsum('lk,ijk->ijl', sample_w, inputs) + sample_b - samp_log_probs sample_logits.masked_fill_(hit, -1e30) logits = torch.cat([true_logits[:, :, None], sample_logits], -1) return logits # class LogUniformSampler(object): # def __init__(self, range_max, unique=False): # """ # Reference : https://github.com/tensorflow/tensorflow/blob/r1.10/tensorflow/python/ops/candidate_sampling_ops.py # `P(class) = (log(class + 2) - log(class + 1)) / log(range_max + 1)` # """ # self.range_max = range_max # log_indices = torch.arange(1., range_max+2., 1.).log_() # self.dist = (log_indices[1:] - log_indices[:-1]) / log_indices[-1] # self.unique = unique # if self.unique: # self.exclude_mask = torch.ByteTensor(range_max).fill_(0) # def sample(self, n_sample, labels): # pos_sample, new_labels = labels.unique(return_inverse=True) # n_pos_sample = pos_sample.size(0) # n_neg_sample = n_sample - n_pos_sample # if self.unique: # self.exclude_mask.index_fill_(0, pos_sample, 1) # sample_dist = self.dist.clone().masked_fill_(self.exclude_mask, 0) # self.exclude_mask.index_fill_(0, pos_sample, 0) # else: # sample_dist = self.dist # neg_sample = torch.multinomial(sample_dist, n_neg_sample) # sample = torch.cat([pos_sample, neg_sample]) # sample_prob = self.dist[sample] # return new_labels, sample, sample_prob if __name__ == '__main__': S, B = 3, 4 n_vocab = 
10000 n_sample = 5 H = 32 labels = torch.LongTensor(S, B).random_(0, n_vocab) sampler = LogUniformSampler(n_vocab, n_sample) # print('sum', torch.sum(sampler.dist).item()) embedding = nn.Embedding(n_vocab, H) bias = torch.zeros(n_vocab) inputs = torch.Tensor(S, B, H).normal_() # sample_logits returns [S x B x (1 + #negatives)] with the true class at index 0 logits = sample_logits(embedding, bias, labels, inputs, sampler) print('logits', logits.detach().numpy().tolist()) print('logits shape', logits.size())
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/log_uniform_sampler.py
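A minimal sketch (illustration only, not repository code): sample_logits above returns logits with the true class stacked at index 0 of the last dimension, so a sampled-softmax training loss is just the negative log-softmax at that index. All sizes below are invented and the import assumes the repository's pytorch/ directory.

import torch
import torch.nn.functional as F
from torch import nn
from utils.log_uniform_sampler import LogUniformSampler, sample_logits

n_vocab, n_sample, d_emb = 10000, 64, 32
seq_len, bsz = 8, 4

embedding = nn.Embedding(n_vocab, d_emb)
bias = torch.zeros(n_vocab)
sampler = LogUniformSampler(n_vocab, n_sample)

labels = torch.randint(0, n_vocab, (seq_len, bsz))
hidden = torch.randn(seq_len, bsz, d_emb)

logits = sample_logits(embedding, bias, labels, hidden, sampler)   # [seq_len, bsz, 1 + #unique negatives]
loss = -F.log_softmax(logits, dim=-1)[..., 0].mean()               # true class sits at index 0
print(logits.shape, loss.item())
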
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import ctypes import datetime import logging import os import shutil import signal import sys import time import dllogger import torch.utils.collect_env import utils class AverageMeter: """ Computes and stores the average and current value """ def __init__(self, warmup=0, keep=False): self.reset() self.warmup = warmup self.keep = keep def reset(self): self.val = 0 self.avg = 0 self.sum = 0 self.count = 0 self.iters = 0 self.vals = [] def update(self, val, n=1): self.iters += 1 self.val = val if self.iters > self.warmup: self.sum += val * n self.count += n self.avg = self.sum / self.count if self.keep: self.vals.append(val) class TimeoutHandler: def __init__(self, sig=signal.SIGTERM): self.sig = sig def __enter__(self): self.interrupted = False self.released = False self.original_handler = signal.getsignal(self.sig) def handler(signum, frame): self.release() self.interrupted = True logging.info(f'Received SIGTERM') signal.signal(self.sig, handler) return self def __exit__(self, type, value, tb): self.release() def release(self): if self.released: return False signal.signal(self.sig, self.original_handler) self.released = True return True def register_ignoring_timeout_handler(sig=signal.SIGTERM): def handler(signum, frame): logging.info('Received SIGTERM, ignoring') signal.signal(sig, handler) def log_env_info(): """ Prints information about execution environment. """ logging.info('Collecting environment information...') env_info = torch.utils.collect_env.get_pretty_env_info() logging.info(f'{env_info}') def benchmark(test_perplexity=None, target_perplexity=None, test_throughput=None, target_throughput=None): def test(achieved, target, name, higher_better=True): passed = True if target is not None and achieved is not None: logging.info(f'{name} achieved: {achieved:.2f} ' f'target: {target:.2f}') if higher_better: result = (achieved >= target) else: result = (achieved <= target) if result: logging.info(f'{name} test passed') else: logging.info(f'{name} test failed') passed = False return passed passed = True passed &= test(test_perplexity, target_perplexity, 'Perplexity', False) passed &= test(test_throughput, target_throughput, 'Throughput') return passed def setup_logging(log_all_ranks=True, filename=os.devnull, filemode='w'): """ Configures logging. By default logs from all workers are printed to the console, entries are prefixed with "N: " where N is the rank of the worker. Logs printed to the console don't include timestaps. Full logs with timestamps are saved to the log_file file. 
""" class RankFilter(logging.Filter): def __init__(self, rank, log_all_ranks): self.rank = rank self.log_all_ranks = log_all_ranks def filter(self, record): record.rank = self.rank if self.log_all_ranks: return True else: return (self.rank == 0) rank = utils.distributed.get_rank() rank_filter = RankFilter(rank, log_all_ranks) if log_all_ranks: logging_format = "%(asctime)s - %(levelname)s - %(rank)s - %(message)s" else: logging_format = "%(asctime)s - %(levelname)s - %(message)s" if rank != 0: filename = os.devnull for handler in logging.root.handlers[:]: logging.root.removeHandler(handler) handler.close() logging.basicConfig(level=logging.DEBUG, format=logging_format, datefmt="%Y-%m-%d %H:%M:%S", filename=filename, filemode=filemode) console = logging.StreamHandler(sys.stdout) console.setLevel(logging.INFO) if log_all_ranks: formatter = logging.Formatter('%(rank)s: %(message)s') else: formatter = logging.Formatter('%(message)s') console.setFormatter(formatter) logging.getLogger('').addHandler(console) logging.getLogger('').addFilter(rank_filter) def setup_dllogger(enabled=True, filename=os.devnull): rank = utils.distributed.get_rank() if enabled and rank == 0: backends = [ dllogger.JSONStreamBackend( dllogger.Verbosity.VERBOSE, filename, ), ] dllogger.init(backends) else: dllogger.init([]) def create_exp_dir(dir_path, scripts_to_save=None, debug=False): if debug: return os.makedirs(dir_path, exist_ok=True) print('Experiment dir : {}'.format(dir_path)) if scripts_to_save is not None: script_path = os.path.join(dir_path, 'scripts') os.makedirs(script_path, exist_ok=True) for script in scripts_to_save: dst_file = os.path.join(dir_path, 'scripts', os.path.basename(script)) shutil.copyfile(script, dst_file) def build_work_dir_name(work_dir, dataset, append_dataset, append_time): if append_dataset: work_dir = '{}-{}'.format(work_dir, dataset) if append_time: now = int(time.time()) now_max = utils.distributed.all_reduce_item(now, op='max') now_str = datetime.datetime.fromtimestamp(now_max).strftime('%Y%m%d-%H%M%S') work_dir = os.path.join(work_dir, now_str) return work_dir def l2_promote(): _libcudart = ctypes.CDLL('libcudart.so') # Set device limit on the current device # cudaLimitMaxL2FetchGranularity = 0x05 pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int)) _libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128)) _libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05)) assert pValue.contents.value == 128 def get_default_rng_states(device): """ Get states of default random generators from all devices participating in a distributed training. If device == torch.device('cuda') it returns states of CUDA generators, if device == torch.device('cpu') it returns states of host generators. Returns a list of random states indexed with a distributed rank. All generator states are in host memory. """ if device == torch.device('cuda'): state = torch.cuda.get_rng_state() elif device == torch.device('cpu'): state = torch.random.get_rng_state() else: raise RuntimeError('Unknown device') states = utils.distributed.all_gather_tensors(state, device) states = [state.to(torch.device('cpu')) for state in states] return states def set_default_rng_states(rng_states, device): """ Sets states of default random generators for all devices participating in a distributed training. 
""" rank = utils.distributed.get_rank() rng_states = [s.to(torch.device('cpu')) for s in rng_states] if device == torch.device('cuda'): torch.cuda.set_rng_state(rng_states[rank]) elif device.type == 'cpu': torch.random.set_rng_state(rng_states[rank])
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/exp_utils.py
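A minimal sketch (assumed usage, not repository code): configuring logging and timing iterations with AverageMeter, whose warmup argument excludes the first few measurements from the running average. The log file name and the dummy loop are placeholders; the import assumes the repository's environment.

import time
from utils.exp_utils import AverageMeter, setup_logging

setup_logging(log_all_ranks=False, filename='train_log.log')   # console + file logging

step_time = AverageMeter(warmup=5)         # ignore the first 5 updates in the average
for step in range(20):
    start = time.time()
    time.sleep(0.01)                       # stand-in for one training step
    step_time.update(time.time() - start)

print(f'avg step time after warmup: {step_time.avg:.4f}s')
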
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import contextlib import os from collections import Counter from collections import OrderedDict import torch import utils class Vocab(object): def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True, delimiter=None, vocab_file=None): self.counter = Counter() self.special = special self.min_freq = min_freq self.max_size = max_size self.lower_case = lower_case self.delimiter = delimiter self.vocab_file = vocab_file def tokenize(self, line, add_eos=False, add_double_eos=False): line = line.strip() # convert to lower case if self.lower_case: line = line.lower() # empty delimiter '' will evaluate False if self.delimiter == '': symbols = line else: symbols = line.split(self.delimiter) if add_double_eos: # lm1b return ['<S>'] + symbols + ['<S>'] elif add_eos: return symbols + ['<eos>'] else: return symbols def count_file(self, path, verbose=False, add_eos=False): if verbose: print('counting file {} ...'.format(path)) assert os.path.exists(path) sents = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos) self.counter.update(symbols) sents.append(symbols) return sents def count_sents(self, sents, verbose=False): """ sents : a list of sentences, each a list of tokenized symbols """ if verbose: print('counting {} sents ...'.format(len(sents))) for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) self.counter.update(symbols) def _build_from_file(self, vocab_file): self.idx2sym = [] self.sym2idx = OrderedDict() with open(vocab_file, 'r', encoding='utf-8') as f: for line in f: symb = line.strip().split()[0] self.add_symbol(symb) self.unk_idx = self.sym2idx['<UNK>'] def build_vocab(self): if self.vocab_file: print('building vocab from {}'.format(self.vocab_file)) self._build_from_file(self.vocab_file) print('final vocab size {}'.format(len(self))) else: print('building vocab with min_freq={}, max_size={}'.format( self.min_freq, self.max_size)) self.idx2sym = [] self.sym2idx = OrderedDict() for sym in self.special: self.add_special(sym) for sym, cnt in self.counter.most_common(self.max_size): if cnt < self.min_freq: break self.add_symbol(sym) print('final vocab size {} from {} unique tokens'.format( len(self), len(self.counter))) def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False): if verbose: print('encoding file {} ...'.format(path)) assert os.path.exists(path) encoded = [] with open(path, 'r', encoding='utf-8') as f: for idx, line in enumerate(f): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) symbols = self.tokenize(line, add_eos=add_eos, add_double_eos=add_double_eos) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def encode_sents(self, sents, 
ordered=False, verbose=False): if verbose: print('encoding {} sents ...'.format(len(sents))) encoded = [] for idx, symbols in enumerate(sents): if verbose and idx > 0 and idx % 500000 == 0: print(' line {}'.format(idx)) encoded.append(self.convert_to_tensor(symbols)) if ordered: encoded = torch.cat(encoded) return encoded def add_special(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym]) def add_symbol(self, sym): if sym not in self.sym2idx: self.idx2sym.append(sym) self.sym2idx[sym] = len(self.idx2sym) - 1 def get_sym(self, idx): assert 0 <= idx < len(self), 'Index {} out of range'.format(idx) return self.idx2sym[idx] def get_idx(self, sym): if sym in self.sym2idx: return self.sym2idx[sym] else: # print('encounter unk {}'.format(sym)) assert '<eos>' not in sym assert hasattr(self, 'unk_idx') return self.sym2idx.get(sym, self.unk_idx) def get_symbols(self, indices): return [self.get_sym(idx) for idx in indices] def get_indices(self, symbols): return [self.get_idx(sym) for sym in symbols] def convert_to_tensor(self, symbols): return torch.LongTensor(self.get_indices(symbols)) def convert_to_sent(self, indices, exclude=None): if exclude is None: return ' '.join([self.get_sym(idx) for idx in indices]) else: return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude]) def __len__(self): return len(self.idx2sym) # Class OpenAIVocab has been adapted from # https://github.com/cybertronai/transformer-xl/blob/master/utils/vocabulary.py class OpenAIVocab(Vocab): def __init__(self, max_size=None, vocab_file=None): from pytorch_transformers import GPT2Tokenizer self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2') self.EOT = self.tokenizer.encoder['<|endoftext|>'] self.max_size = max_size self.vocab_file = vocab_file pad = 8 vocab_size = len(self.tokenizer) padded_vocab_size = (vocab_size + pad - 1) // pad * pad for i in range(0, padded_vocab_size - vocab_size): token = f'madeupword{i:09d}' self.tokenizer.add_tokens([token]) def __len__(self): return len(self.tokenizer) def count_file(self, path, verbose=False, add_eos=False): # TODO: train from scratch, respect self.max_size pass def build_vocab(self): pass def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False) -> torch.LongTensor: cached = path + '.bpe' if os.path.exists(cached): return torch.load(cached) print(f'encoding file {path} ...') assert os.path.exists(path), f"{path} doesn't exist" with open(path, encoding='utf-8') as f: # Suppress warnings about length. with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull): out = torch.LongTensor(self.tokenizer.encode(f.read()) + [self.EOT]) with utils.distributed.sync_workers() as rank: if rank == 0: torch.save(out, cached) return out def tokenize(self, line, add_eos=False, add_double_eos=False): return self.tokenizer.encode(line) def convert_to_tensor(self, symbols): return torch.LongTensor(symbols)
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/utils/vocabulary.py
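A minimal sketch (assumed usage, not repository code): building a word-level vocabulary with the Vocab class above and encoding a corpus into flat LongTensors. 'train.txt' and 'valid.txt' are placeholder paths, and the special-token list is an assumption; the repository configures these per dataset.

from utils.vocabulary import Vocab

vocab = Vocab(special=['<unk>', '<eos>'], lower_case=True)
vocab.count_file('train.txt', add_eos=True)                  # fill the frequency Counter
vocab.build_vocab()                                          # special symbols first, then by frequency
train_ids = vocab.encode_file('train.txt', ordered=True, add_eos=True)
valid_ids = vocab.encode_file('valid.txt', ordered=True, add_eos=True)   # unseen words map to <unk>
print(len(vocab), train_ids.size(), valid_ids.size())
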
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F class ProjectedAdaptiveLogSoftmax(nn.Module): out_projs: List[Optional[torch.Tensor]] def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, dtype=None, tie_projs=None, out_layers_weights=None, out_projs=None, keep_order=False): super().__init__() self.n_token = n_token self.d_embed = d_embed self.d_proj = d_proj self.cutoffs = cutoffs + [n_token] self.cutoff_ends = [0] + self.cutoffs self.div_val = div_val self.shortlist_size = self.cutoffs[0] self.n_clusters = len(self.cutoffs) - 1 self.head_size = self.shortlist_size + self.n_clusters self.tie_projs = tie_projs if self.n_clusters > 0: self.cluster_weight = nn.Parameter( torch.zeros( self.n_clusters, self.d_embed, dtype=dtype, device=torch.device('cuda'), ) ) self.cluster_bias = nn.Parameter( torch.zeros( self.n_clusters, dtype=dtype, device=torch.device('cuda'), ) ) if not out_layers_weights: self.out_layers_weights = [] else: self.out_layers_weights = out_layers_weights self.out_layers_biases = [] self.out_projs = [] if div_val == 1: if d_proj != d_embed: for i, tie_proj in enumerate(tie_projs): if tie_proj: self.out_projs.append(out_projs[0]) else: self.out_projs.append( torch.zeros( d_proj, d_embed, dtype=dtype, device=torch.device('cuda'), ) ) else: for i, tie_proj in enumerate(tie_projs): self.out_projs.append(None) else: for i, tie_proj in enumerate(tie_projs): d_emb_i = d_embed // (div_val ** i) if tie_proj: self.out_projs.append(out_projs[i]) else: self.out_projs.append( torch.zeros( d_proj, d_emb_i, dtype=dtype, device=torch.device('cuda'), ) ) if div_val == 1: self.out_layers_biases.append( torch.zeros( n_token, dtype=dtype, device=torch.device('cuda'), ) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter( torch.zeros( n_token, d_embed, dtype=dtype, device=torch.device('cuda'), ) ) ) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.out_layers_biases.append( nn.Parameter( torch.zeros( r_idx - l_idx, dtype=dtype, device=torch.device('cuda'), ) ) ) if not out_layers_weights: self.out_layers_weights.append( nn.Parameter( torch.zeros( r_idx - l_idx, d_emb_i, dtype=dtype, device=torch.device('cuda'), ) ) ) self.keep_order = keep_order def _compute_logit(self, hidden, weight, bias, proj: Optional[torch.Tensor]): if proj is None: logit = F.linear(hidden, weight, bias=bias) else: logit = torch.einsum('bd,de,ev->bv', hidden, proj, weight.t()) if bias is not None: logit = logit + bias return logit def forward(self, hidden, target, keep_order: bool = False): ''' hidden :: [len*bsz x d_proj] target :: [len*bsz] ''' if hidden.size(0) != target.size(0): raise RuntimeError('Input and target should have the same size ' 'in the batch dimension.') if self.n_clusters == 0: logit = self._compute_logit(hidden, 
self.out_layers_weights[0], self.out_layers_biases[0], self.out_projs[0]) nll = -F.log_softmax(logit, dim=-1) \ .gather(1, target.unsqueeze(1)).squeeze(1) else: # construct weights and biases weights, biases = [], [] for i in range(len(self.cutoffs)): if self.div_val == 1: l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] weight_i = self.out_layers_weights[0][l_idx:r_idx] bias_i = self.out_layers_biases[0][l_idx:r_idx] else: weight_i = self.out_layers_weights[i] bias_i = self.out_layers_biases[i] if i == 0: weight_i = torch.cat( [weight_i, self.cluster_weight], dim=0) bias_i = torch.cat( [bias_i, self.cluster_bias], dim=0) weights.append(weight_i) biases.append(bias_i) head_weight, head_bias, head_proj = weights[0], biases[0], self.out_projs[0] head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj) head_logprob = F.log_softmax(head_logit, dim=1) nll = torch.zeros_like(target, layout=torch.strided, dtype=hidden.dtype, device=hidden.device, ) offset = 0 cutoff_values = [0] + self.cutoffs for i in range(len(cutoff_values) - 1): l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1] mask_i = (target >= l_idx) & (target < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() == 0: continue target_i = target.index_select(0, indices_i) - l_idx head_logprob_i = head_logprob.index_select(0, indices_i) if i == 0: logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1) else: weight_i, bias_i, proj_i = weights[i], biases[i], self.out_projs[i] hidden_i = hidden.index_select(0, indices_i) tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i) tail_logprob_i = F.log_softmax(tail_logit_i, dim=1) logprob_i = head_logprob_i[:, -i] \ + tail_logprob_i.gather(1, target_i[:, None]).squeeze(1) if self.keep_order or keep_order: nll.index_copy_(0, indices_i, -logprob_i) else: nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i) offset += logprob_i.size(0) return nll
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/inference/proj_adaptive_softmax_jit.py
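A small sanity-check sketch (illustration only, not repository code): the projected logit computed by _compute_logit above, einsum('bd,de,ev->bv', h, proj, W.t()), is the same as projecting the hidden state down to the embedding width and then applying the output matrix, which is what lets the adaptive softmax share projections with the adaptive embedding. Shapes below are invented; double precision is used only to make the equality check tight.

import torch
import torch.nn.functional as F

bsz, d_proj, d_embed, vocab = 4, 16, 8, 32
h = torch.randn(bsz, d_proj, dtype=torch.float64)
proj = torch.randn(d_proj, d_embed, dtype=torch.float64)
weight = torch.randn(vocab, d_embed, dtype=torch.float64)

logit_einsum = torch.einsum('bd,de,ev->bv', h, proj, weight.t())
logit_two_step = F.linear(h @ proj, weight)        # project, then output layer
assert torch.allclose(logit_einsum, logit_two_step)
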
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from typing import List from typing import Optional import torch import torch.nn as nn import torch.nn.functional as F from inference.proj_adaptive_softmax_jit import ProjectedAdaptiveLogSoftmax from utils.log_uniform_sampler import LogUniformSampler class PositionalEmbedding(nn.Module): def __init__(self, demb): super(PositionalEmbedding, self).__init__() self.demb = demb inv_freq = 1 / (10000 ** (torch.arange(0.0, demb, 2.0) / demb)) self.register_buffer('inv_freq', inv_freq) def forward(self, pos_seq, bsz: Optional[int] = None): sinusoid_inp = torch.ger(pos_seq, self.inv_freq) pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=1) if bsz is not None: return pos_emb[:, None, :].expand(-1, bsz, -1) else: return pos_emb[:, None, :] class PositionwiseFF(nn.Module): def __init__(self, d_model, d_inner, dropout, pre_lnorm=False): super(PositionwiseFF, self).__init__() self.d_model = d_model self.d_inner = d_inner self.dropout = dropout self.CoreNet = nn.Sequential( nn.Linear(d_model, d_inner), nn.ReLU(inplace=True), nn.Dropout(dropout), nn.Linear(d_inner, d_model), nn.Dropout(dropout), ) self.layer_norm = nn.LayerNorm(d_model) self.pre_lnorm = pre_lnorm def forward(self, inp): if self.pre_lnorm: # layer normalization + positionwise feed-forward core_out = self.CoreNet(self.layer_norm(inp)) # residual connection output = core_out + inp else: # positionwise feed-forward core_out = self.CoreNet(inp) # residual connection + layer normalization output = self.layer_norm(inp + core_out) return output class MultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, pre_lnorm=False): super(MultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.q_net = nn.Linear(d_model, n_head * d_head, bias=False) self.kv_net = nn.Linear(d_model, 2 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm def forward(self, h, attn_mask=None, mems=None): # multihead attention # [hlen x bsz x n_head x d_head] if mems is not None: c = torch.cat([mems, h], 0) else: c = h if self.pre_lnorm: # layer normalization c = self.layer_norm(c) head_q = self.q_net(h) head_k, head_v = torch.chunk(self.kv_net(c), 2, -1) head_q = head_q.view(h.size(0), h.size(1), self.n_head, self.d_head) head_k = head_k.view(c.size(0), c.size(1), self.n_head, self.d_head) head_v = head_v.view(c.size(0), c.size(1), self.n_head, self.d_head) # [bsz x n_head x qlen x klen] attn_score = torch.einsum('ibnd,jbnd->bnij', head_q, head_k) attn_score.mul_(self.scale) if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, 
:, :], -float('inf')) # [bsz x qlen x klen x n_head] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # [bsz x n_head x qlen x klen] * [klen x bsz x n_head x d_head] -> [qlen x bsz x n_head x d_head] attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, head_v) attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = h + attn_out else: # residual connection + layer normalization output = self.layer_norm(h + attn_out) return output class RelMultiHeadAttn(nn.Module): def __init__(self, n_head, d_model, d_head, dropout, dropatt=0, tgt_len=None, ext_len=None, mem_len=None, pre_lnorm=False): super(RelMultiHeadAttn, self).__init__() self.n_head = n_head self.d_model = d_model self.d_head = d_head self.dropout = dropout self.qkv_net = nn.Linear(d_model, 3 * n_head * d_head, bias=False) self.drop = nn.Dropout(dropout) self.dropatt = nn.Dropout(dropatt) self.o_net = nn.Linear(n_head * d_head, d_model, bias=False) self.layer_norm = nn.LayerNorm(d_model) self.scale = 1 / (d_head ** 0.5) self.pre_lnorm = pre_lnorm def _parallelogram_mask(self, h, w, left=False): mask = torch.ones((h, w)).byte() m = min(h, w) mask[:m, :m] = torch.triu(mask[:m, :m]) mask[-m:, -m:] = torch.tril(mask[-m:, -m:]) if left: return mask.bool() else: return mask.flip(0).bool() def _shift(self, x, qlen, klen, mask, left=False): if qlen > 1: zero_pad = torch.zeros((x.size(0), qlen-1, x.size(2), x.size(3)), device=x.device, dtype=x.dtype) else: zero_pad = torch.zeros(0, device=x.device, dtype=x.dtype) if left: mask = mask.flip(1) x_padded = torch.cat([zero_pad, x], dim=1).expand(qlen, -1, -1, -1) else: x_padded = torch.cat([x, zero_pad], dim=1).expand(qlen, -1, -1, -1) x = x_padded.masked_select(mask[:, :, None, None]) \ .view(qlen, klen, x.size(2), x.size(3)) return x def _rel_shift(self, x, zero_triu: bool = False): zero_pad = torch.zeros((x.size(0), x.size(1), x.size(2), 1), device=x.device, dtype=x.dtype) x_padded = torch.cat([zero_pad, x], dim=3) x_padded = x_padded.view(x.size(0), x.size(1), x.size(3) + 1, x.size(2)) x = x_padded.narrow(2, 1, x_padded.size(2) - 1).view_as(x) if zero_triu: ones = torch.ones((x.size(2), x.size(3))) x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :] return x def forward(self, w, r, attn_mask=None, mems=None): raise NotImplementedError class RelPartialLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelPartialLearnableMultiHeadAttn, self).__init__(*args, **kwargs) self.r_net = nn.Linear(self.d_model, self.n_head * self.d_head, bias=False) def forward(self, w, r, r_w_bias, r_r_bias, attn_mask, mems: Optional[torch.Tensor] = None): qlen, rlen, bsz = w.size(0), r.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) r_head_k = self.r_net(r) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) # qlen x bsz x n_head x d_head w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head w_head_v = 
w_head_v.view(klen, bsz, self.n_head, self.d_head) # klen x bsz x n_head x d_head r_head_k = r_head_k.view(rlen, self.n_head, self.d_head) # qlen x n_head x d_head # compute attention score rw_head_q = w_head_q + r_w_bias # qlen x bsz x n_head x d_head # AC = torch.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k) # bsz x n_head x qlen x klen rw_head_q = rw_head_q.view(qlen, bsz * self.n_head, self.d_head).permute(1, 0, 2) w_head_k = w_head_k.reshape(klen, bsz * self.n_head, self.d_head).permute(1, 2, 0) AC = torch.bmm(rw_head_q, w_head_k).view(bsz, self.n_head, qlen, klen) rr_head_q = w_head_q + r_r_bias # BD = torch.einsum('ibnd,jnd->bnij', rr_head_q, r_head_k) # bsz x n_head x qlen x klen rr_head_q = rr_head_q.permute(2, 1, 0, 3).reshape(self.n_head, bsz * qlen, self.d_head) r_head_k = r_head_k.permute(1, 2, 0).view(self.n_head, self.d_head, klen) BD = torch.bmm(rr_head_q, r_head_k).view(self.n_head, bsz, qlen, klen).permute(1, 0, 2, 3) BD = self._rel_shift(BD) # [bsz x n_head x qlen x klen] attn_score = AC + BD attn_score.mul_(self.scale) # compute attention probability if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x n_head x qlen x klen] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # compute attention vector # attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v) attn_prob = attn_prob.view(bsz * self.n_head, qlen, klen) w_head_v = w_head_v.permute(1, 2, 0, 3).reshape(bsz * self.n_head, klen, self.d_head) attn_vec = torch.bmm(attn_prob, w_head_v).permute(1, 0, 2).view(qlen, bsz, self.n_head, self.d_head) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = w + attn_out else: # residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class RelLearnableMultiHeadAttn(RelMultiHeadAttn): def __init__(self, *args, **kwargs): super(RelLearnableMultiHeadAttn, self).__init__(*args, **kwargs) def forward(self, w, r_emb, r_w_bias, r_bias, attn_mask=None, mems=None): # r_emb: [klen, n_head, d_head], used for term B # r_w_bias: [n_head, d_head], used for term C # r_bias: [klen, n_head], used for term D qlen, bsz = w.size(0), w.size(1) if mems is not None: cat = torch.cat([mems, w], 0) if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(cat)) else: w_heads = self.qkv_net(cat) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) w_head_q = w_head_q[-qlen:] else: if self.pre_lnorm: w_heads = self.qkv_net(self.layer_norm(w)) else: w_heads = self.qkv_net(w) w_head_q, w_head_k, w_head_v = torch.chunk(w_heads, 3, dim=-1) klen = w_head_k.size(0) w_head_q = w_head_q.view(qlen, bsz, self.n_head, self.d_head) w_head_k = w_head_k.view(klen, bsz, self.n_head, self.d_head) w_head_v = w_head_v.view(klen, bsz, self.n_head, self.d_head) if klen > r_emb.size(0): r_emb_pad = r_emb[0:1].expand(klen-r_emb.size(0), -1, -1) r_emb = torch.cat([r_emb_pad, r_emb], 0) r_bias_pad = r_bias[0:1].expand(klen-r_bias.size(0), -1) r_bias = torch.cat([r_bias_pad, r_bias], 0) else: r_emb = r_emb[-klen:] r_bias = r_bias[-klen:] r_bias = r_bias.t() # compute attention score rw_head_q = w_head_q + r_w_bias[None] # qlen x bsz x n_head x d_head AC = 
torch.einsum('ibnd,jbnd->bnij', rw_head_q, w_head_k) # bsz x n_head x qlen x klen B_ = torch.einsum('ibnd,jnd->bnij', w_head_q, r_emb) # bsz x n_head x qlen x klen D_ = r_bias[None, :, None, :] # 1 x n_head x 1 x klen BD = self._rel_shift(B_ + D_) # [bsz x qlen x klen x n_head] attn_score = AC + BD attn_score.mul_(self.scale) # compute attention probability if attn_mask is not None: if attn_mask.dim() == 2: attn_score.masked_fill_(attn_mask[None, None, :, :], -float('inf')) elif attn_mask.dim() == 3: attn_score.masked_fill_(attn_mask[:, None, :, :], -float('inf')) # [bsz x n_head x qlen x klen] attn_prob = F.softmax(attn_score, dim=3) attn_prob = self.dropatt(attn_prob) # compute attention vector attn_vec = torch.einsum('bnij,jbnd->ibnd', attn_prob, w_head_v) # [qlen x bsz x n_head x d_head] attn_vec = attn_vec.contiguous().view( attn_vec.size(0), attn_vec.size(1), self.n_head * self.d_head) # linear projection attn_out = self.o_net(attn_vec) attn_out = self.drop(attn_out) if self.pre_lnorm: # residual connection output = w + attn_out else: # residual connection + layer normalization output = self.layer_norm(w + attn_out) return output class DecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(DecoderLayer, self).__init__() self.dec_attn = MultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelLearnableDecoderLayer, self).__init__() self.dec_attn = RelLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r_emb, r_w_bias, r_bias, dec_attn_mask=None, mems=None): output = self.dec_attn(dec_inp, r_emb, r_w_bias, r_bias, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class RelPartialLearnableDecoderLayer(nn.Module): def __init__(self, n_head, d_model, d_head, d_inner, dropout, **kwargs): super(RelPartialLearnableDecoderLayer, self).__init__() self.dec_attn = RelPartialLearnableMultiHeadAttn(n_head, d_model, d_head, dropout, **kwargs) self.pos_ff = PositionwiseFF(d_model, d_inner, dropout, pre_lnorm=kwargs.get('pre_lnorm')) def forward(self, dec_inp, r, r_w_bias, r_r_bias, dec_attn_mask, mems: Optional[torch.Tensor] = None ): output = self.dec_attn(dec_inp, r, r_w_bias, r_r_bias, attn_mask=dec_attn_mask, mems=mems) output = self.pos_ff(output) return output class AdaptiveEmbedding(nn.Module): def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1, sample_softmax=False, dtype=torch.float32): super(AdaptiveEmbedding, self).__init__() self.n_token = n_token self.d_embed = d_embed self.dtype = dtype self.cutoffs = cutoffs + [n_token] self.div_val = div_val self.d_proj = d_proj self.emb_scale = d_proj ** 0.5 self.cutoff_ends = [0] + self.cutoffs self.emb_layers = nn.ModuleList() self.emb_projs = [] if div_val == 1: self.emb_layers.append( nn.Embedding(n_token, d_embed, sparse=(sample_softmax > 0)) ) self.emb_projs.append( nn.Parameter( torch.zeros( (d_proj, d_embed), dtype=dtype, device=torch.device('cuda'), ) ) ) else: for i in range(len(self.cutoffs)): l_idx, r_idx = self.cutoff_ends[i], 
self.cutoff_ends[i+1] d_emb_i = d_embed // (div_val ** i) self.emb_layers.append(nn.Embedding(r_idx-l_idx, d_emb_i)) self.emb_projs.append( nn.Parameter( torch.zeros( (d_proj, d_emb_i), dtype=dtype, device=torch.device('cuda'), ) ) ) def forward(self, inp): if self.div_val == 1: for emb_layer in self.emb_layers: inp = emb_layer(inp) if self.d_proj != self.d_embed: embed = F.linear(inp, self.emb_projs[0]) else: embed = inp else: inp_flat = inp.view(-1) emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=self.dtype, device=torch.device('cuda')) for i, emb_layer in enumerate(self.emb_layers): l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1] mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx) indices_i = mask_i.nonzero().squeeze() if indices_i.numel() != 0: inp_i = inp_flat.index_select(0, indices_i) - l_idx emb_i = emb_layer(inp_i) emb_i = F.linear(emb_i, self.emb_projs[i]) emb_flat.index_copy_(0, indices_i, emb_i) embed = emb_flat.view(inp.size(0), inp.size(1), self.d_proj) embed.mul_(self.emb_scale) return embed class MemTransformerLM(nn.Module): def __init__(self, n_token, n_layer, n_head, d_model, d_head, d_inner, dropout, dropatt, dtype, tie_weight=True, d_embed=None, div_val=1, tie_projs=[False], pre_lnorm=False, tgt_len=None, ext_len=None, mem_len=None, cutoffs=[], adapt_inp=False, same_length=False, attn_type=0, clamp_len=-1, sample_softmax=-1): super(MemTransformerLM, self).__init__() self.n_token = n_token d_embed = d_model if d_embed is None else d_embed self.d_embed = d_embed self.d_model = d_model self.n_head = n_head self.d_head = d_head self.dtype = dtype self.word_emb = AdaptiveEmbedding(n_token, d_embed, d_model, cutoffs, div_val=div_val, dtype=dtype) self.drop = nn.Dropout(dropout) self.tie_weight = tie_weight self.tie_projs = tie_projs self.div_val = div_val self.n_layer = n_layer self.tgt_len = tgt_len self.mem_len = mem_len self.ext_len = ext_len self.max_klen = tgt_len + ext_len + mem_len self.attn_type = attn_type if attn_type != 0: raise RuntimeError('TorchScripted model supports only attn_type == 0') self.layers = nn.ModuleList() # the default attention if attn_type == 0: for i in range(n_layer): self.layers.append( RelPartialLearnableDecoderLayer( n_head, d_model, d_head, d_inner, dropout, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, dropatt=dropatt, pre_lnorm=pre_lnorm) ) self.sample_softmax = sample_softmax # use sampled softmax if sample_softmax > 0: self.out_layer = nn.Linear(d_model, n_token) self.tie_weight = tie_weight self.sampler = LogUniformSampler(n_token, sample_softmax) # use adaptive softmax (including standard softmax) else: if tie_weight: emb_layers = [i.weight for i in self.word_emb.emb_layers] else: emb_layers = None emb_projs = self.word_emb.emb_projs self.crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_model, cutoffs, div_val=div_val, dtype=dtype, tie_projs=tie_projs, out_projs=emb_projs, out_layers_weights=emb_layers) self.same_length = same_length self.clamp_len = clamp_len self._create_params() def backward_compatible(self): self.sample_softmax = -1 def _create_params(self): # default attention if self.attn_type == 0: self.pos_emb = PositionalEmbedding(self.d_model) self.r_w_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head).zero_()) self.r_r_bias = nn.Parameter(torch.Tensor(self.n_head, self.d_head).zero_()) def init_mems(self): mems = torch.empty(self.n_layer, 0, dtype=self.dtype, device=torch.device('cuda')) return mems def _update_mems(self, hids: List[torch.Tensor], mems: torch.Tensor, qlen: int, 
mlen: int): assert len(hids) == len(mems), 'len(hids) != len(mems)' # There are `mlen + qlen` steps that can be cached into mems # For the next step, the last `ext_len` of the `qlen` tokens # will be used as the extended context. Hence, we only cache # the tokens from `mlen + qlen - self.ext_len - self.mem_len` # to `mlen + qlen - self.ext_len`. stacked = torch.stack(hids) end_idx = mlen + max(0, qlen - self.ext_len) beg_idx = max(0, end_idx - self.mem_len) if mems.numel(): cat = torch.cat([mems, stacked], dim=1) else: cat = stacked new_mems = cat[:, beg_idx:end_idx].detach() return new_mems def _forward(self, dec_inp, mems: torch.Tensor): qlen, bsz = dec_inp.size() word_emb = self.word_emb(dec_inp) mlen = mems[0].size(0) if mems is not None else 0 klen = mlen + qlen all_ones = torch.ones((qlen, klen), device=torch.device('cuda'), dtype=self.dtype) if self.same_length: mask_len = klen - self.mem_len - 1 if mask_len > 0: mask_shift_len = qlen - mask_len else: mask_shift_len = qlen dec_attn_mask = (torch.triu(all_ones, 1+mlen) + torch.tril(all_ones, -mask_shift_len)).to(torch.bool) else: dec_attn_mask = torch.triu(all_ones, diagonal=1+mlen).to(torch.bool) pos_seq = torch.arange(klen-1, -1, -1.0, device=word_emb.device, dtype=word_emb.dtype) if self.clamp_len > 0: pos_seq.clamp_(max=self.clamp_len) pos_emb = self.pos_emb(pos_seq) core_out = self.drop(word_emb) pos_emb = self.drop(pos_emb) hids = [] for i, layer in enumerate(self.layers): hids.append(core_out) mems_i = None if mems is None else mems[i] core_out = layer(core_out, pos_emb, self.r_w_bias, self.r_r_bias, dec_attn_mask=dec_attn_mask, mems=mems_i) core_out = self.drop(core_out) new_mems = self._update_mems(hids, mems, qlen, mlen) return core_out, new_mems def forward(self, data, target, mems: Optional[torch.Tensor]): # nn.DataParallel does not allow size(0) tensors to be broadcasted. # So, have to initialize size(0) mems inside the model forward. # Moreover, have to return new_mems to allow nn.DataParallel to piece # them together. 
if mems is None: mems = self.init_mems() tgt_len = target.size(0) hidden, new_mems = self._forward(data, mems=mems) pred_hid = hidden[-tgt_len:] loss = self.crit(pred_hid.view(-1, pred_hid.size(-1)), target.view(-1)) loss = loss.view(tgt_len, -1) return (loss, new_mems) if __name__ == '__main__': import argparse parser = argparse.ArgumentParser(description='unit test') parser.add_argument('--n_layer', type=int, default=4, help='') parser.add_argument('--n_rel_layer', type=int, default=4, help='') parser.add_argument('--n_head', type=int, default=2, help='') parser.add_argument('--d_head', type=int, default=2, help='') parser.add_argument('--d_model', type=int, default=200, help='') parser.add_argument('--d_embed', type=int, default=200, help='') parser.add_argument('--d_inner', type=int, default=200, help='') parser.add_argument('--dropout', type=float, default=0.0, help='') parser.add_argument('--cuda', action='store_true', help='') parser.add_argument('--seed', type=int, default=1111, help='') parser.add_argument('--multi_gpu', action='store_true', help='') args = parser.parse_args() device = torch.device("cuda" if args.cuda else "cpu") B = 4 tgt_len, mem_len, ext_len = 36, 36, 0 data_len = tgt_len * 20 args.n_token = 10000 import data_utils data = torch.LongTensor(data_len*B).random_(0, args.n_token).to(device) diter = data_utils.LMOrderedIterator(data, B, tgt_len, device=device, ext_len=ext_len) cutoffs = [args.n_token // 2] tie_projs = [False] + [True] * len(cutoffs) for div_val in [1, 2]: for d_embed in [200, 100]: model = MemTransformerLM(args.n_token, args.n_layer, args.n_head, args.d_model, args.d_head, args.d_inner, args.dropout, dropatt=args.dropout, tie_weight=True, d_embed=d_embed, div_val=div_val, tie_projs=tie_projs, pre_lnorm=True, tgt_len=tgt_len, ext_len=ext_len, mem_len=mem_len, cutoffs=cutoffs, attn_type=0, dtype=torch.float32).to(device) print(sum(p.numel() for p in model.parameters())) mems = None for idx, (inp, tgt, seqlen, _) in enumerate(diter): print('batch {}'.format(idx)) _, mems = model(inp, tgt, mems)
DeepLearningExamples-master
PyTorch/LanguageModeling/Transformer-XL/pytorch/inference/mem_transformer_jit.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import argparse import shutil import datetime import json import time import warnings from logging import getLogger from pathlib import Path from typing import Dict, List from json import JSONDecodeError import torch from torch import nn from tqdm import tqdm from torch.utils.data import DataLoader import numpy as np import os import glob import dllogger from bart.configuration.configuration_bart import BartConfig from bart.tokenization.tokenization_bart import BartTokenizer from bart.modeling.modeling_bart import BartForConditionalGeneration, shift_tokens_right from utils.utils import ( calculate_bleu, calculate_rouge, Seq2SeqDataset, parse_numeric_n_bool_cl_kwargs, use_task_specific_params, encode_line, load_json, lmap, chunks, write_txt_file, save_json, format_step) import utils.distributed_utils logger = getLogger(__name__) DEFAULT_DEVICE = "cuda" if torch.cuda.is_available() else "cpu" def distill(layers, num_layers): sft_layers = nn.ModuleList() for i in range(num_layers): sft_layers.append(layers[i]) # delete unnecessary layers delete_layers = [i for i in range(num_layers, len(layers))] for i in range(len(delete_layers)): del layers[delete_layers[i] - i] return sft_layers def distill_sft(model, num_layers, do_encoder=False, do_decoder=False): if do_encoder: layers = model.model.encoder.layers sft_layers = distill(layers, num_layers) model.model.encoder.layers = sft_layers if do_decoder: layers = model.model.decoder.layers sft_layers = distill(layers, num_layers) model.model.decoder.layers = sft_layers return model def generate_summaries_or_translations( data_dir: str, out_dir: str, model_path: str, config_path: str, batch_size: int = 8, device: str = DEFAULT_DEVICE, fp16=False, bf16=False, pre_ln=False, task="summarization", prefix=None, max_source_length=1024, max_target_length=142, eval_beams=5, eval_max_gen_length=142, n_obs=-1, type_path="test", num_return_sequences=1, distill=None, num_layers=None, do_encoder=False, do_decoder=False, **generate_kwargs, ) -> Dict: out_dir = Path(out_dir) save_path = out_dir.joinpath(f"rank_{utils.distributed_utils.get_rank()}_output.json") if num_return_sequences > eval_beams: eval_beams = num_return_sequences ### Define BART model # Config from "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json # Vocab modified to 50265 to be consistent with facebook/bart-large default config = BartConfig(**json.load(open(config_path, "r"))) if fp16: config.dtype = torch.float16 elif bf16: config.dtype = torch.bfloat16 else: config.dtype = None config.pre_ln = pre_ln model = BartForConditionalGeneration.from_pretrained(model_path, config=config).to(device) # if distilling, change model if distill == "sft": model = distill_sft(model, num_layers, do_encoder, do_decoder) if fp16: model = model.half() elif bf16: model = model.bfloat16() model.eval() 
tokenizer = BartTokenizer.from_pretrained('facebook/bart-large-cnn') logger.info(f"Inferred tokenizer type: {tokenizer.__class__}") # if this is wrong, check config.model_type. start_time = time.time() # update config with task specific params use_task_specific_params(model, task) if prefix is None: prefix = prefix or getattr(model.config, "prefix", "") or "" ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path=type_path, n_obs=n_obs, prefix=prefix) # I set shuffle=True for a more accurate progress bar. # If all the longest samples are first, the prog bar estimate is too high at the beginning. is_distributed = True if utils.distributed_utils.get_world_size() > 1 else False sampler = ds.make_sortish_sampler(batch_size, distributed=is_distributed, add_extra_examples=False, shuffle=True) data_loader = DataLoader(ds, sampler=sampler, batch_size=batch_size, collate_fn=ds.collate_fn) results = [] with torch.no_grad(): for batch in tqdm(data_loader): torch.cuda.synchronize() t0 = time.time() summaries = model.generate( input_ids=batch["input_ids"].to(device), attention_mask=batch["attention_mask"].to(device), use_cache=True, num_return_sequences=num_return_sequences, num_beams=eval_beams, max_length=eval_max_gen_length, num_beam_groups=1, output_scores=False, return_dict_in_generate=False, encoder_no_repeat_ngram_size=0, diversity_penalty=0.0, **generate_kwargs, ) preds = tokenizer.batch_decode(summaries, skip_special_tokens=True, clean_up_tokenization_spaces=False) ids = batch["ids"] if num_return_sequences > 1: preds = chunks(preds, num_return_sequences) # batch size chunks, each of size num_return_seq torch.cuda.synchronize() eval_time = time.time() - t0 for i, pred in enumerate(preds): store_time = eval_time if i == 0 else None #only store latency for element 0 of every batch results.append(dict(pred=pred, id=ids[i].item(), eval_time=store_time)) save_json(results, save_path) runtime = int(time.time() - start_time) # seconds num_replicas = sampler.num_replicas if is_distributed else 1 n_obs = len(results) return results, num_replicas, dict(n_obs=n_obs, eval_only_runtime=runtime, seconds_per_sample=round(runtime / n_obs, 4)) def datetime_now(): return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") def run_generate(verbose=True): """ Takes input text, generates output, and then using reference calculates the BLEU scores. The results are saved to a file and returned to the caller, and printed out unless ``verbose=False`` is passed. Args: verbose (:obj:`bool`, `optional`, defaults to :obj:`True`): print results to stdout Returns: a tuple: ``(scores, params}`` - ``scores``: a dict of scores data ``{'bleu': 39.6501, 'n_obs': 2000, 'runtime': 186, 'seconds_per_sample': 0.093}`` - ``params``: a dict of custom params, e.g. 
``{'num_beams': 5, 'length_penalty': 0.8}`` """ parser = argparse.ArgumentParser() parser.add_argument("model_path", type=str, help="like facebook/bart-large-cnn or path to ckpt") parser.add_argument("config_path", type=str, help="path to config") parser.add_argument("data_dir", type=str, help="like cnn_dm/test.source") parser.add_argument("save_path", type=str, help="where to save summaries") parser.add_argument("--type_path", type=str, required=False, default="test", help="like cnn_dm/test.target") parser.add_argument("--device", type=str, required=False, default=DEFAULT_DEVICE, help="cuda, cuda:1, cpu etc.") parser.add_argument( "--prefix", type=str, required=False, default=None, help="will be added to the begininng of src examples" ) parser.add_argument("--task", type=str, default="summarization", help="used for task_specific_params + metrics") parser.add_argument("--bs", type=int, default=8, required=False, help="batch size") parser.add_argument( "--n_obs", type=int, default=None, required=False, help="How many observations. Defaults to all." ) parser.add_argument( "--num_return_sequences", type=int, default=1, required=False, help="How many sequences to return" ) parser.add_argument("--fp16", action="store_true") parser.add_argument("--bf16", action="store_true") parser.add_argument("--dump-args", action="store_true", help="print the custom hparams with the results") parser.add_argument( "--info", nargs="?", type=str, const=datetime_now(), help="use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g. lang=en-ru. If no value is passed, the current datetime string will be used.", ) parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens") parser.add_argument("--eval_beams", type=int, default=None, required=False, help="# beams to use. 0 corresponds to not using beam search.") parser.add_argument( "--max_source_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--max_target_length", default=142, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--sync_timeout", type=int, default=600, required=False, help="How long should master process wait for other processes to finish.", ) parser.add_argument("--debug", action="store_true") parser.add_argument('--json-summary', type=str, default="results/dllogger.json", help='If provided, the json summary will be written to' 'the specified file.') parser.add_argument('--distill', type=str, default=None, help="string indicating how model is distilled, only sft supported", choices=["sft",None]) parser.add_argument('--layers', type=str, default=None, help="string indicating which teacher layers remain, split by '-' (ex. 0-6-11)") parser.add_argument('--do_encoder', action="store_true", default=False, help="if true encoder distilled") parser.add_argument('--do_decoder', action="store_true", default=False, help="if true decoder distilled") parser.add_argument("--pre_ln", default=False, action='store_true', help="Whether to use Pre-LN architecture." 
) dist = parser.add_argument_group('distributed setup') dist.add_argument('--local_rank', type=int, default=os.getenv('LOCAL_RANK', 0), help='Used for multi-process training.') start_time = time.time() # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate args, rest = parser.parse_known_args() parsed_args = parse_numeric_n_bool_cl_kwargs(rest) if args.local_rank <= 0: print(args) print(rest) # Initialize device and distributed backend utils.distributed_utils.init_distributed(args.device == "cuda") if utils.distributed_utils.get_world_size() > 1: utils.distributed_utils.set_affinity(args.local_rank) torch.cuda.set_device(args.local_rank) if Path(args.json_summary).exists(): warnings.warn(f"json_summary {args.json_summary} will be overwritten unless you type ctrl-c.") if utils.distributed_utils.get_rank() == 0: dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.json_summary), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) else: dllogger.init(backends=[]) if parsed_args and verbose: print(f"parsed the following generate kwargs: {parsed_args}") Path(args.save_path).parent.mkdir(exist_ok=True) json_save_path = Path(args.save_path + "/tmp") Path(json_save_path).mkdir(exist_ok=True) # this handles locking. if args.layers: num_layers = len(args.layers.split('-')) else: num_layers = None results, num_replicas, runtime_metrics = generate_summaries_or_translations( args.data_dir, json_save_path, args.model_path, args.config_path, batch_size=args.bs, device=args.device, fp16=args.fp16, bf16=args.bf16, pre_ln=args.pre_ln, task=args.task, prefix=args.prefix, eval_beams=args.eval_beams, max_source_length=args.max_source_length, max_target_length=args.max_target_length, eval_max_gen_length=args.eval_max_gen_length, n_obs=args.n_obs, type_path=args.type_path, num_return_sequences=args.num_return_sequences, distill=args.distill, num_layers=num_layers, do_encoder=args.do_encoder, do_decoder=args.do_decoder, **parsed_args, ) if args.local_rank <= 0: save_path = Path(args.save_path) save_path.mkdir(exist_ok=True) partial_results = gather_results_from_each_node(num_replicas, json_save_path, args.sync_timeout) preds, time_list = combine_partial_results(partial_results) if args.num_return_sequences > 1: save_path = save_path.joinpath("pseudolabel_results.json") print(f"Saving aggregated results at {save_path}, intermediate in {json_save_path}/") save_json(preds, save_path) return tgt_file = Path(args.data_dir).joinpath(args.type_path + ".target") labels = [x.rstrip() for x in open(tgt_file).readlines()][: len(preds)] # Calculate metrics, save metrics, and save _generations.txt calc_bleu = "translation" in args.task score_fn = calculate_bleu if calc_bleu else calculate_rouge metric_name = "bleu" if calc_bleu else "rouge" metrics: Dict = score_fn(preds, labels) metrics["n_obs"] = len(preds) runtime = time.time() - start_time metrics["seconds_per_sample"] = round(runtime / metrics["n_obs"], 4) metrics["n_gpus"] = num_replicas metrics.update(runtime_metrics) time_list.sort() metrics["inference_latency_mean"] = np.mean(time_list) metrics["inference_latency_conf_50"] = max(time_list[:int(len(time_list) * 0.50)]) metrics["inference_latency_conf_90"] = max(time_list[:int(len(time_list) * 0.90)]) metrics["inference_latency_conf_95"] = max(time_list[:int(len(time_list) * 0.95)]) metrics["inference_latency_conf_99"] = max(time_list[:int(len(time_list) * 0.99)]) 
metrics["inference_latency_conf_100"] = max(time_list[:int(len(time_list) * 1)]) metrics["inference_throughput_mean"] = len(preds) * 1.0 / sum(time_list) metrics_save_path = save_path.joinpath(f"{args.type_path}_{metric_name}.json") save_json(metrics, metrics_save_path, indent=None) dllogger.log(step=tuple(), data=metrics) print(metrics) write_txt_file(preds, save_path.joinpath(f"{args.type_path}_generations.txt")) if args.debug: write_txt_file(labels, save_path.joinpath(f"{args.type_path}.target")) else: shutil.rmtree(json_save_path) dllogger.flush() def combine_partial_results(partial_results) -> List: """Concatenate partial results into one file, then sort it by id.""" records = [] for partial_result in partial_results: records.extend(partial_result) records = list(sorted(records, key=lambda x: x["id"])) preds = [x["pred"] for x in records] eval_time = [x["eval_time"] for x in records if x["eval_time"] is not None] return preds, eval_time def gather_results_from_each_node(num_replicas, save_path, timeout) -> List[Dict[str, List]]: # WAIT FOR lots of .json files start_wait = time.time() logger.info("waiting for all nodes to finish") json_data = None while (time.time() - start_wait) < timeout: json_files = list(save_path.glob("rank_*.json")) if len(json_files) < num_replicas: continue try: # make sure all json files are fully saved json_data = lmap(load_json, json_files) return json_data except JSONDecodeError: continue else: raise TimeoutError("Rank 0 gave up on waiting for other processes") # Unreachable if __name__ == "__main__": # Usage for MT: # python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_path/test_translations.txt --reference_path $DATA_DIR/test.target --task translation $@ run_generate(verbose=True)
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/run_eval.py
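The evaluation script above stores one latency sample per batch in per-rank rank_*.json files and later derives the inference_latency_conf_* metrics by sorting those samples and taking the maximum of a prefix of the sorted list. The standalone sketch below (not part of the repository; the file layout and the small-list guard are illustrative) reproduces that merge-and-percentile logic so it can be sanity-checked in isolation.

# Standalone sketch: mirrors how run_eval.py merges the per-rank "rank_*.json" result
# files and derives its inference_latency_conf_* metrics from the sorted per-batch times.
import json
from pathlib import Path


def merge_rank_results(tmp_dir):
    """Concatenate the records from every rank_*.json file and sort them by example id."""
    records = []
    for f in sorted(Path(tmp_dir).glob("rank_*.json")):
        records.extend(json.loads(f.read_text()))
    records.sort(key=lambda r: r["id"])
    preds = [r["pred"] for r in records]
    times = [r["eval_time"] for r in records if r["eval_time"] is not None]
    return preds, times


def latency_confidence(times):
    """Max over the first pct% of the sorted latency list, as in run_eval.py
    (with an extra guard so a tiny demo list never produces an empty slice)."""
    times = sorted(times)
    return {
        f"inference_latency_conf_{pct}": max(times[: int(len(times) * pct / 100)] or times[:1])
        for pct in (50, 90, 95, 99, 100)
    }


if __name__ == "__main__":
    demo_times = [0.21, 0.18, 0.35, 0.22, 0.40, 0.19]
    print(latency_confidence(demo_times))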
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import argparse import glob import logging import os from tabnanny import check import time import datetime import random from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import json import numpy as np import torch from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from training_base import BaseTransformer, add_generic_args, generic_train from bart.tokenization.tokenization_mbart import MBartTokenizer from bart.configuration.configuration_bart import BartConfig from bart.tokenization.tokenization_bart import BartTokenizer from bart.modeling.modeling_bart import BartForConditionalGeneration from utils.utils import ( PretrainingSeq2SeqDataset, Seq2SeqDataset, assert_all_frozen, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, format_step ) from utils.data_collator import DataCollatorForBART from utils.gpu_affinity import set_affinity from utils.distributed_utils import get_rank, get_device_count, get_world_size import dllogger import lddl.torch from lddl.utils import get_all_parquets_under logger = logging.getLogger(__name__) class BartForConditionalGenerationWrapper(torch.nn.Module): def __init__(self, model, args): super(BartForConditionalGenerationWrapper, self).__init__() if args.fp16: model.half() elif args.bf16: model.bfloat16() model.train() self.module = model def forward(self, input_ids, attention_mask, decoder_input_ids): outputs = self.module.forward(input_ids, attention_mask=attention_mask, decoder_input_ids=decoder_input_ids, use_cache=False) return outputs class PretrainingModule(BaseTransformer): mode = "pretraining" loss_names = ["loss"] def __init__(self, hparams, **kwargs): super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs) use_task_specific_params(self.model, "pretraining") save_git_info(self.hparams.output_dir) self.metrics_save_path = Path(self.output_dir) / "metrics.json" self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" pickle_save(self.hparams, self.hparams_save_path) self.step_count = 0 self.metrics = defaultdict(list) self.dataset_kwargs: dict = dict( max_source_length=self.hparams.max_source_length, prefix=self.model.config.prefix or "", ) self.n_obs = { "train": self.hparams.n_train if self.hparams.n_train >= 0 else None } #@todo should you freeze? 
        if self.hparams.freeze_embeds:
            self.freeze_embeds()
        if self.hparams.freeze_encoder:
            freeze_params(self.model.get_encoder())
            assert_all_frozen(self.model.get_encoder())

        self.hparams.git_sha = get_git_info()["repo_sha"]
        self.num_workers = hparams.num_workers
        self.decoder_start_token_id = None  # default to config
        if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer):
            self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
            self.model.config.decoder_start_token_id = self.decoder_start_token_id

        self.collate_fn = DataCollatorForBART(
            tokenizer=self.tokenizer,
            mlm_probability=self.hparams.mlm_probability,
            permute_sentence_ratio=self.hparams.permute_sentence_ratio,
            decoder_start_token_id=self.model.config.decoder_start_token_id
        )

        self.dataset_class = (
            PretrainingSeq2SeqDataset
        )
        self.config = self.model.config

    def freeze_embeds(self):
        """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5."""
        try:
            freeze_params(self.model.model.shared)
            for d in [self.model.model.encoder, self.model.model.decoder]:
                freeze_params(d.embed_positions)
                freeze_params(d.embed_tokens)
        except AttributeError:
            freeze_params(self.model.shared)
            for d in [self.model.encoder, self.model.decoder]:
                freeze_params(d.embed_tokens)

    def forward(self, input_ids, **kwargs):
        return self.model(input_ids, **kwargs)

    def ids_to_clean_text(self, generated_ids: List[int]):
        gen_text = self.tokenizer.batch_decode(
            generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True
        )
        return lmap(str.strip, gen_text)

    def _step(self, batch: dict) -> Tuple:
        pad_token_id = self.tokenizer.pad_token_id
        src_ids, src_mask, decoder_input_ids = batch["input_ids"], batch["attention_mask"], batch["decoder_input_ids"]
        tgt_ids = batch["labels"]

        outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False)
        lm_logits = outputs[0]
        if self.hparams.label_smoothing == 0:
            # Same behavior as modeling_bart.py, besides ignoring pad_token_id
            ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id)  #@should you ignore unmasked tokens? Check!
assert lm_logits.shape[-1] == self.config.vocab_size loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1)) else: lprobs = torch.nn.functional.log_softmax(lm_logits, dim=-1) loss, nll_loss = label_smoothed_nll_loss( lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id ) return (loss,), lm_logits @property def pad(self) -> int: return self.tokenizer.pad_token_id def training_step(self, batch) -> Dict: loss_tensors, logits = self._step(batch) logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)} # tokens per batch logs["ip_tpb"] = batch["input_ids"].numel() logs["op_tpb"] = batch["labels"].numel() logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum() logs["bs"] = batch["input_ids"].shape[0] logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum() logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean() # TODO(SS): make a wandb summary metric for this # self.log("train_loss_ddp_avg", loss_tensors[0], on_step=True, prog_bar=True, logger=True, sync_dist=self.sync_dist) return {"loss": loss_tensors[0], "log": logs} # Can remove after pytorch lightning fix def training_epoch_end(self, outputs) -> None: return def save_metrics(self, latest_metrics, type_path) -> None: self.metrics[type_path].append(latest_metrics) save_json(self.metrics, self.metrics_save_path) def get_dataset(self, type_path, src_file, shuffle_buffer_size=1000, shuffle_buffer_warmup_factor=16, max_shards_per_node=1048576) -> Seq2SeqDataset: lddl_dataset_kwargs = { 'transform':lambda x:x, 'local_rank': get_rank(), 'shuffle_buffer_size': shuffle_buffer_size, 'shuffle_buffer_warmup_factor': shuffle_buffer_warmup_factor, 'base_seed': self.hparams.seed, 'max_shards_per_node': max_shards_per_node } n_obs = self.n_obs[type_path] dataset = self.dataset_class( get_all_parquets_under(src_file), self.tokenizer, n_obs=n_obs, type_path=type_path, **self.dataset_kwargs, **lddl_dataset_kwargs, ) return dataset def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: dataset = self.get_dataset(type_path, self.hparams.data_dir) dataloader_args = {"collate_fn":self.collate_fn} return DataLoader( dataset, batch_size=batch_size, collate_fn=self.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=None, pin_memory=True ) def train_dataloader(self) -> DataLoader: dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) return dataloader @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) add_generic_args(parser, root_dir) parser.add_argument( "--max_source_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--load_model_weights_only", action="store_true", help="Only load model weights, ignoring other ckpt states. useful at the start of phase2 training") parser.add_argument("--freeze_encoder", action="store_true") parser.add_argument("--freeze_embeds", action="store_true") parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. 
-1 means use all.") parser.add_argument("--buffer_size", type=int, default=128, required=False, help="Buffer size for shuffling dataset") parser.add_argument( "--task", type=str, default="pretraining", required=False, help="# examples. -1 means use all." ) parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) parser.add_argument("--mlm_probability", type=float, default=0.3, required=False) parser.add_argument("--permute_sentence_ratio", type=float, default=1.0, required=False) parser.add_argument("--src_lang", type=str, default="", required=False) parser.add_argument("--tgt_lang", type=str, default="", required=False) parser.add_argument( "--early_stopping_patience", type=int, default=-1, required=False, help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.", ) parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', 0), help="local_rank for distributed training on gpus") parser.add_argument('--json-summary', type=str, default="results/dllogger.json", help='If provided, the json summary will be written to' 'the specified file.') return parser def set_seed(args): random.seed(args.seed + get_rank()) np.random.seed(args.seed + get_rank()) torch.manual_seed(args.seed + get_rank()) def load_checkpoint(args, path, model, optimizer, scaler): checkpoint = torch.load(path, map_location=args.device) model.load_state_dict(checkpoint["model"]) if not args.load_model_weights_only: if 'optimizer' in checkpoint: optimizer.load_state_dict(checkpoint["optimizer"]) if 'scaler' in checkpoint: scaler.load_state_dict(checkpoint["scaler"]) def main(args, model=None) -> PretrainingModule: print(args) Path(args.output_dir).mkdir(parents=True, exist_ok=True) # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.device = device # Set GPU affinity if args.affinity != 'disabled': affinity = set_affinity( get_rank(), get_device_count(), args.affinity ) logger.warning(f'{get_rank()}: thread affinity: {affinity}') # Set seed set_seed(args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if get_rank() in [-1, 0] else logging.WARN, ) logger.warning( "Process global rank: %s, device: %s, distributed training: %s, 16-bits training: %s", get_rank(), device, bool(get_rank() != -1), (args.fp16 or args.bf16), ) if model is None: if "pretraining" in args.task: ### Define BART model # Config from "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json # Vocab modified to 50265 to be consistent with facebook/bart-large default config = BartConfig(**json.load(open(args.config_path, "r"))) if args.fp16: config.dtype = torch.float16 elif args.bf16: config.dtype = torch.bfloat16 else: config.dtype = None config.pre_ln = args.pre_ln model = BartForConditionalGeneration(config=config) tokenizer = BartTokenizer.from_pretrained( 'facebook/bart-large') # Downloads vocab and merges file automatically trainer: PretrainingModule = PretrainingModule(args, model=model, config=config, tokenizer=tokenizer) else: raise ValueError("Only pretraining supported!") dataset = Path(args.data_dir).name trainer.model.to(device) # Set up optimizer and scheduler optimizer, scheduler = trainer.configure_optimizers() 
optimizer = optimizer[0] scheduler = scheduler[0]['scheduler'] scaler = torch.cuda.amp.GradScaler(enabled=args.fp16) checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "_step*.ckpt"), recursive=True), key=lambda x:int(x.split("step")[1].split(".")[0]))) step = 0 if args.resume_from_checkpoint: if ".ckpt" in args.resume_from_checkpoint: checkpoint = args.resume_from_checkpoint else: if len(checkpoints) > 0: #No checkpoints available checkpoint = checkpoints[-1] args.resume_from_checkpoint = checkpoint else: args.resume_from_checkpoint = None checkpoint = None if checkpoint is None: logger.info("Pretraining from scratch") else: logger.info("Loading BART model checkpoint using %s", checkpoint) checkpoint_suffix = checkpoint.split("step")[-1].split(".")[0] step = int(checkpoint_suffix) + 1 load_checkpoint(args, checkpoint, trainer.model, optimizer, scaler) if args.load_model_weights_only: args.resume_from_checkpoint = None step = 0 if args.fp16 and args.allreduce_post_accumulation_half_precision: trainer.model.half() if args.bf16 and args.allreduce_post_accumulation_half_precision: trainer.model.bfloat16() # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: trainer.model = torch.nn.parallel.DistributedDataParallel( trainer.model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) generic_train(args, trainer, optimizer, scheduler, scaler, checkpoints, step) pickle_save(trainer.hparams, trainer.output_dir / "hparams.pkl") return trainer if __name__ == "__main__": parser = argparse.ArgumentParser() parser = PretrainingModule.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() if get_rank() == 0: dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.json_summary)]) main(args) dllogger.flush()
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/pretrain.py
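When --label_smoothing is non-zero, the pretraining _step above defers to a label_smoothed_nll_loss helper imported from utils.utils. The repository's implementation is not shown here; the sketch below is a commonly used formulation with the same call signature (lprobs, target, epsilon, ignore_index) and should be read only as an assumption about what that helper computes.

# Sketch (assumption, not the repository's utils.utils code): a common implementation of
# label-smoothed NLL loss with the signature used by _step above.
import torch


def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100):
    """Spread `epsilon` of the probability mass uniformly over the vocabulary and
    zero out padding positions before reducing."""
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    nll_loss = -lprobs.gather(dim=-1, index=target)       # true-class term
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True)       # uniform-smoothing term
    pad_mask = target.eq(ignore_index)
    nll_loss.masked_fill_(pad_mask, 0.0)
    smooth_loss.masked_fill_(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss


if __name__ == "__main__":
    logits = torch.randn(2, 5, 11)                         # (batch, time, vocab)
    lprobs = torch.log_softmax(logits, dim=-1)
    target = torch.randint(0, 11, (2, 5))
    print(label_smoothed_nll_loss(lprobs, target, epsilon=0.1, ignore_index=1))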
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import argparse import logging import os from pathlib import Path from typing import Any, Dict import time from bart.configuration.configuration_bart import BartConfig from bart.tokenization.tokenization_bart import BartTokenizer from bart.modeling.modeling_bart import * from utils.optimization import ( AdamW, Adafactor, get_cosine_schedule_with_warmup, get_cosine_with_hard_restarts_schedule_with_warmup, get_linear_schedule_with_warmup, get_polynomial_decay_schedule_with_warmup, ) from utils.gpu_affinity import set_affinity from utils.distributed_utils import get_rank, get_device_count, get_world_size from utils.utils import get_readable_time, Mean from apex.optimizers import FusedAdam, FusedMixedPrecisionLamb import dllogger logger = logging.getLogger(__name__) MODEL_MODES = { "question-answering": BartForQuestionAnswering, "pretraining": PretrainedBartModel, "token-classification": BartForSequenceClassification, "language-modeling": BartModel, "summarization": BartForConditionalGeneration, "translation": BartForConditionalGeneration, } # update this and the import above to support new schedulers from transformers.optimization arg_to_scheduler = { "linear": get_linear_schedule_with_warmup, "cosine": get_cosine_schedule_with_warmup, "cosine_w_restarts": get_cosine_with_hard_restarts_schedule_with_warmup, "polynomial": get_polynomial_decay_schedule_with_warmup, # '': get_constant_schedule, # not supported for now # '': get_constant_schedule_with_warmup, # not supported for now } arg_to_scheduler_choices = sorted(arg_to_scheduler.keys()) arg_to_scheduler_metavar = "{" + ", ".join(arg_to_scheduler_choices) + "}" class BaseTransformer(): def __init__( self, hparams: argparse.Namespace, num_labels=None, mode="base", config=None, tokenizer=None, model=None, **config_kwargs ): """Initialize a model, tokenizer and config.""" super().__init__() self.step_count = 0 self.hparams = hparams self.output_dir = Path(self.hparams.output_dir) cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None if config is None: self.config = AutoConfig.from_pretrained( self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path, **({"num_labels": num_labels} if num_labels is not None else {}), cache_dir=cache_dir, **config_kwargs, ) else: self.config: BartConfig = config extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout") for p in extra_model_params: if getattr(self.hparams, p, None): assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute" setattr(self.config, p, getattr(self.hparams, p)) if tokenizer is None: self.tokenizer = AutoTokenizer.from_pretrained( self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path, cache_dir=cache_dir, ) else: self.tokenizer: BartTokenizer = tokenizer # 
self.model_type = MODEL_MODES[mode] if model is None: self.model = self.model_type.from_pretrained( self.hparams.model_name_or_path, from_tf=bool(".ckpt" in self.hparams.model_name_or_path), config=self.config, cache_dir=cache_dir, ) else: self.model = model def __call__(self, input_ids, **kwargs): return self.forward(input_ids, **kwargs) def load_hf_checkpoint(self, *args, **kwargs): self.model = self.model_type.from_pretrained(*args, **kwargs) def get_lr_scheduler(self): get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler] scheduler = get_schedule_func( self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps ) scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1} return scheduler def configure_optimizers(self): """Prepare optimizer and schedule (linear warmup and decay)""" model = self.model no_decay = ["bias", "LayerNorm.weight"] optimizer_grouped_parameters = [ { "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.hparams.weight_decay, }, { "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] if self.hparams.lamb: optimizer_reduced_precision_type = self.config.dtype if self.hparams.allreduce_post_accumulation_half_precision else None optimizer = FusedMixedPrecisionLamb( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon, max_grad_norm=self.hparams.gradient_clip_val, reduced_precision_dtype=optimizer_reduced_precision_type) elif self.hparams.allreduce_post_accumulation_half_precision: raise ValueError("--allreduce_post_accumulation_half_precision is only supported on LAMB optimizer") elif self.hparams.adafactor: optimizer = Adafactor( optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False ) else: optimizer = FusedAdam( optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon) self.opt = optimizer scheduler = self.get_lr_scheduler() return [optimizer], [scheduler] def test_step(self, batch, batch_nb): return self.validation_step(batch, batch_nb) def test_epoch_end(self, outputs): return self.validation_end(outputs) @property def total_steps(self) -> int: """The number of total training steps that will be run. 
Used for lr scheduler purposes.""" if self.hparams.max_steps: return self.hparams.max_steps else: assert self.hparams.max_epochs is not None num_devices = max(1, self.hparams.gpus * self.hparams.num_nodes) # TODO: consider num_tpu_cores effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices dataset_size = len(self.train_loader.dataset) return (dataset_size / effective_batch_size) * self.hparams.max_epochs def get_dataloader(self, type_path, batch_size, shuffle=False): raise NotImplementedError("You must implement this for your task") def train_dataloader(self): return self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True) def val_dataloader(self): return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False) def test_dataloader(self): return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False) def _feature_file(self, mode): return os.path.join( self.hparams.data_dir, "cached_{}_{}_{}".format( mode, list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(), str(self.hparams.max_seq_length), ), ) def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None: save_path = self.output_dir.joinpath("best_tfmr") self.model.config.save_step = self.step_count self.model.save_pretrained(save_path) self.tokenizer.save_pretrained(save_path) @staticmethod def add_model_specific_args(parser, root_dir): parser.add_argument("--config_path", default="config.json", type=str, help="Config File for Bart model") parser.add_argument( "--cache_dir", default="", type=str, help="Where do you want to store the pre-trained models downloaded from s3", ) parser.add_argument( "--resume_from_checkpoint", type=str, help="""Path/URL of the checkpoint from which training is resumed. If there is no checkpoint file at the path, start from scratch. If resuming from mid-epoch checkpoint, training will start from the beginning of the next epoch.""", ) parser.add_argument( "--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--dropout", type=float, help="Dropout probability (Optional). Goes into model.config", ) parser.add_argument( "--attention_dropout", type=float, help="Attention dropout probability (Optional). 
Goes into model.config", ) parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.") parser.add_argument( "--lr_scheduler", default="linear", choices=arg_to_scheduler_choices, metavar=arg_to_scheduler_metavar, type=str, help="Learning rate scheduler", ) parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.") parser.add_argument("--gradient_clip_val", default=0.5, type=float, help="The value at which to clip gradients.") parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.") parser.add_argument("--max_steps", default=10, type=int, help="Stop training after this number of steps.") parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.") parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader") parser.add_argument("--min_num_train_epochs", dest="min_epochs", default=0, type=int) parser.add_argument("--train_batch_size", default=32, type=int) parser.add_argument("--eval_batch_size", default=32, type=int) parser.add_argument("--adafactor", action="store_true") parser.add_argument("--lamb", action="store_true") parser.add_argument('--affinity', type=str, default='socket_unique_interleaved', choices=['socket', 'single', 'single_unique', 'socket_unique_interleaved', 'socket_unique_continuous', 'disabled'], help='type of CPU affinity') parser.add_argument('--allreduce_post_accumulation_half_precision', default=False, action='store_true', help="Whether to do fp16/bf16 allreduce post accumulation.") def add_generic_args(parser, root_dir) -> None: parser.add_argument( "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written.", ) parser.add_argument( "--fp16", action="store_true", help="Whether to use 16-bit (mixed) precision instead of 32-bit", ) parser.add_argument( "--bf16", action="store_true", help="Whether to use BFloat 16 mixed precision instead of 32-bit", ) parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int) parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm") parser.add_argument("--do_train", action="store_true", help="Whether to run training.") parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.") parser.add_argument( "--gradient_accumulation_steps", dest="accumulate_grad_batches", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument("--seed", type=int, default=42, help="random seed for initialization") parser.add_argument( "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.", ) parser.add_argument("--log_freq", type=int, default=100, help="Log every X updates steps.") parser.add_argument("--save_checkpoint_steps", type=int, default=100, required=False, help="How many checkpoints to save") parser.add_argument( "--profile", action="store_true", ) parser.add_argument("--pre_ln", default=True, action='store_true', help="Whether to use Pre-LN architecture." 
) def save_checkpoint(args, checkpoints, model, optimizer, scaler, step): output_filename = os.path.join(args.output_dir, "_step{}.ckpt".format(step)) if get_rank() == 0: model_to_save = model while(hasattr(model_to_save, "module")): model_to_save = model_to_save.module torch.save({"model": model_to_save.state_dict(), "optimizer": optimizer.state_dict(), "scaler": scaler.state_dict()}, output_filename) def train_one_step(args, trainer, optimizer, scheduler, features, local_step, scaler): if args.fp16: cast_dtype = torch.float16 elif args.bf16: cast_dtype = torch.bfloat16 else: cast_dtype = None with torch.cuda.amp.autocast(dtype=cast_dtype, enabled=(args.fp16 or args.bf16) and not args.allreduce_post_accumulation_half_precision): result = trainer.training_step(features) total_loss = result["loss"] loss = total_loss if args.accumulate_grad_batches > 1: total_loss = total_loss / args.accumulate_grad_batches if local_step % args.accumulate_grad_batches == 0: scaler.scale(total_loss).backward() if not args.lamb: scaler.unscale_(optimizer) torch.nn.utils.clip_grad_norm_(trainer.model.parameters(), args.gradient_clip_val) scheduler.step() # Update learning rate schedule scaler.step(optimizer) optimizer.zero_grad() skip_optimizer_step = scaler._found_inf_per_device(optimizer)[args.device] if scaler.is_enabled() else 0 result["log"]["skip_optimizer_step"] = int(skip_optimizer_step) scaler.update() else: with trainer.model.no_sync(): scaler.scale(total_loss).backward() return loss, result["log"] def generic_train( args, trainer, optimizer, scheduler, scaler, checkpoints, step, **extra_train_kwargs ): device = args.device # Set up dataset dataloader = trainer.train_dataloader() # Set up metrics metrics = {} metrics["avg_train_throughput"] = Mean(name="train_perf") metrics["total_loss"] = Mean(name="total_loss") trainer.model.train() local_step = 0 train_start, start_step = time.time(), step - 1 resume_step = step skipped_optimizer_steps = 0 if get_rank() == 0: dllogger.metadata("avg_train_time", {"unit": "s"}) dllogger.metadata("avg_train_throughput", {"unit": "seq/s"}) while step <= args.max_steps: for batch in dataloader: batch = {k: v.to(device) for k, v in batch.items()} local_step += 1 torch.cuda.synchronize() iter_start = time.time() total_loss, logs = train_one_step(args, trainer, optimizer, scheduler, batch, local_step, scaler) torch.cuda.synchronize() train_perf = logs["bs"] * get_world_size() / (time.time() - iter_start) metrics["total_loss"].update(total_loss) metrics["avg_train_throughput"].update(train_perf) if local_step % args.accumulate_grad_batches == 0: static_optimizer_step = local_step // args.accumulate_grad_batches skipped_optimizer_steps += logs["skip_optimizer_step"] opt_step = static_optimizer_step - skipped_optimizer_steps + resume_step if args.log_freq > 0 and step != opt_step and ( step % args.log_freq == 0 or step == args.max_steps): log_info_dict = {k:v.result() for k, v in metrics.items()} if get_rank() == 0: dllogger.log(step=(step,), data=log_info_dict, verbosity=0) print( 'Step:{step:6d}, Loss:{total_loss:10.6f}, Perf:{train_perf:4.2f}, Loss Scaler: {loss_scale}, ' 'Elapsed: {elapsed}, ETA: {eta}'.format( step=step, total_loss=total_loss, train_perf=train_perf, loss_scale=scaler.get_scale(), elapsed=get_readable_time(time.time() - train_start), eta=get_readable_time( (time.time() - train_start) / (step - start_step) * (args.max_steps - step))), flush=True ) if step == args.max_steps: final_metrics = {} log_info_dict['avg_train_time'] = time.time() - train_start for 
key, v in log_info_dict.items(): val = torch.tensor(v, device=device) torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM) val /= get_world_size() final_metrics[key] = val.item() if get_rank() == 0: dllogger.log(step=(), data=log_info_dict, verbosity=0) logger.info('<FINAL STEP METRICS> Step:{step:6d}, Loss:{total_loss:10.6f}, Perf:{avg_train_throughput:4.2f}, Train time:{avg_train_time}s'.format( step=step, **final_metrics)) for key, m in metrics.items(): if key != 'avg_train_throughput': m.reset() if get_rank() == 0: dllogger.flush() if args.save_checkpoint_steps > 0 and step != opt_step and \ ((step % args.save_checkpoint_steps == 0 and step > 0) or step == args.max_steps): save_checkpoint(args, checkpoints, trainer.model, optimizer, scaler, step) logger.info(f" ** Saved model checkpoint for step {step}") step = opt_step if step > args.max_steps: break def generic_test( args, trainer ): device = args.device # Set up dataset dataloader = trainer.test_dataloader() metrics = {k: Mean(name=k) for k in trainer.loss_names + trainer.metric_names} for batch in dataloader: batch = {k: v.to(device) for k, v in batch.items()} result_metric = trainer.test_step(batch) for k, v in result_metric: metrics[k].update(v) log_info_dict = {k:v.result() for k, v in metrics.items()} final_metrics = {} for key, v in log_info_dict.items(): val = torch.tensor(v, device=device) torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM) val /= get_world_size() final_metrics[key] = val.item() if get_rank() == 0: dllogger.log(step=(), data=log_info_dict, verbosity=0) print(final_metrics)
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/training_base.py
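train_one_step and generic_train above combine torch.cuda.amp autocast, GradScaler, gradient accumulation, and gradient clipping. The minimal sketch below strips that pattern down to plain PyTorch with a toy model so the ordering of scale/backward, unscale_, clip_grad_norm_, step, and update is easy to follow; the DDP no_sync() handling and the LAMB branch are deliberately omitted, the clip value is a stand-in, and a CUDA device is assumed.

# Minimal sketch of the AMP + gradient-accumulation loop used by train_one_step/generic_train,
# reduced to standalone PyTorch (toy model and data, no DDP, no dllogger).
import torch

device = "cuda"  # a CUDA device is assumed, as in the training scripts
model = torch.nn.Linear(16, 4).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scaler = torch.cuda.amp.GradScaler(enabled=True)
accumulate_grad_batches = 4
clip_val = 0.5  # gradient clipping threshold (stand-in value)

for local_step in range(1, 17):
    x = torch.randn(8, 16, device=device)
    y = torch.randn(8, 4, device=device)
    with torch.cuda.amp.autocast(dtype=torch.float16):
        # divide so the summed gradient over the accumulation window matches one large batch
        loss = torch.nn.functional.mse_loss(model(x), y) / accumulate_grad_batches
    scaler.scale(loss).backward()
    if local_step % accumulate_grad_batches == 0:
        scaler.unscale_(optimizer)                                    # so clipping sees real gradients
        torch.nn.utils.clip_grad_norm_(model.parameters(), clip_val)
        scaler.step(optimizer)                                        # skipped internally on inf/nan
        scaler.update()
        optimizer.zero_grad()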
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import argparse import glob import logging import os import time from collections import defaultdict from pathlib import Path from typing import Dict, List, Tuple import json import random import numpy as np import torch from torch import nn from torch.utils.data import DataLoader from training_base import BaseTransformer, add_generic_args, generic_test, generic_train from bart.tokenization.tokenization_mbart import MBartTokenizer from bart.modeling.modeling_t5 import T5ForConditionalGeneration from bart.configuration.configuration_bart import BartConfig from bart.tokenization.tokenization_bart import BartTokenizer from bart.modeling.modeling_bart import BartForConditionalGeneration, shift_tokens_right from utils.utils import ( ROUGE_KEYS, LegacySeq2SeqDataset, Seq2SeqDataset, assert_all_frozen, calculate_bleu, calculate_rouge, flatten_list, freeze_params, get_git_info, label_smoothed_nll_loss, lmap, pickle_save, save_git_info, save_json, use_task_specific_params, format_step ) from utils.gpu_affinity import set_affinity from utils.distributed_utils import get_rank, get_device_count, get_world_size import dllogger import time logger = logging.getLogger(__name__) class SummarizationModule(BaseTransformer): mode = "summarization" loss_names = ["loss"] metric_names = ROUGE_KEYS default_val_metric = "rouge2" def __init__(self, hparams, **kwargs): if hparams.sortish_sampler and hparams.gpus > 1: hparams.replace_sampler_ddp = False elif hparams.max_tokens_per_batch is not None: if hparams.gpus > 1: raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training") if hparams.sortish_sampler: raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously") super().__init__(hparams, num_labels=None, mode=self.mode, **kwargs) use_task_specific_params(self.model, "summarization") save_git_info(self.hparams.output_dir) self.metrics_save_path = Path(self.output_dir) / "metrics.json" self.hparams_save_path = Path(self.output_dir) / "hparams.pkl" pickle_save(self.hparams, self.hparams_save_path) self.step_count = 0 self.metrics = defaultdict(list) self.dataset_kwargs: dict = dict( data_dir=self.hparams.data_dir, max_source_length=self.hparams.max_source_length, prefix=self.model.config.prefix or "", ) n_observations_per_split = { "train": self.hparams.n_train, "val": self.hparams.n_val, "test": self.hparams.n_test, } self.n_obs = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()} self.target_lens = { "train": self.hparams.max_target_length, "val": self.hparams.val_max_target_length, "test": self.hparams.test_max_target_length, } assert self.target_lens["train"] <= self.target_lens["val"], f"target_lens: {self.target_lens}" assert self.target_lens["train"] <= self.target_lens["test"], f"target_lens: {self.target_lens}" if self.hparams.freeze_embeds: 
self.freeze_embeds() if self.hparams.freeze_encoder: freeze_params(self.model.get_encoder()) assert_all_frozen(self.model.get_encoder()) self.hparams.git_sha = get_git_info()["repo_sha"] self.num_workers = hparams.num_workers self.sync_dist = True if hparams.gpus > 1 else False self.decoder_start_token_id = None # default to config if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer, MBartTokenizer): self.decoder_start_token_id = self.tokenizer.lang_code_to_id[hparams.tgt_lang] self.model.config.decoder_start_token_id = self.decoder_start_token_id self.dataset_class = ( LegacySeq2SeqDataset ) self.eval_beams = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams assert self.eval_beams >= 0, f"got self.eval_beams={self.eval_beams}. Need an integer >= 0" if self.hparams.eval_max_gen_length is not None: self.eval_max_length = self.hparams.eval_max_gen_length else: self.eval_max_length = self.model.config.max_length self.val_metric = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric self.config = self.model.config def freeze_embeds(self): """Freeze token embeddings and positional embeddings for bart, just token embeddings for t5.""" try: freeze_params(self.model.model.shared) for d in [self.model.model.encoder, self.model.model.decoder]: freeze_params(d.embed_positions) freeze_params(d.embed_tokens) except AttributeError: freeze_params(self.model.shared) for d in [self.model.encoder, self.model.decoder]: freeze_params(d.embed_tokens) def forward(self, input_ids, **kwargs): return self.model(input_ids, **kwargs) def ids_to_clean_text(self, generated_ids: List[int]): gen_text = self.tokenizer.batch_decode( generated_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True ) return lmap(str.strip, gen_text) def _step(self, batch: dict) -> Tuple: pad_token_id = self.tokenizer.pad_token_id src_ids, src_mask = batch["input_ids"], batch["attention_mask"] tgt_ids = batch["labels"] if isinstance(self.model, T5ForConditionalGeneration): decoder_input_ids = self.model._shift_right(tgt_ids) else: decoder_input_ids = shift_tokens_right(tgt_ids, pad_token_id, self.config.decoder_start_token_id) outputs = self(src_ids, attention_mask=src_mask, decoder_input_ids=decoder_input_ids, use_cache=False) lm_logits = outputs[0] if self.hparams.label_smoothing == 0: # Same behavior as modeling_bart.py, besides ignoring pad_token_id ce_loss_fct = torch.nn.CrossEntropyLoss(ignore_index=pad_token_id) assert lm_logits.shape[-1] == self.config.vocab_size loss = ce_loss_fct(lm_logits.view(-1, lm_logits.shape[-1]), tgt_ids.view(-1)) else: lprobs = torch.nn.functional.log_softmax(lm_logits, dim=-1) loss, nll_loss = label_smoothed_nll_loss( lprobs, tgt_ids, self.hparams.label_smoothing, ignore_index=pad_token_id ) return (loss,), lm_logits @property def pad(self) -> int: return self.tokenizer.pad_token_id def training_step(self, batch) -> Dict: loss_tensors, logits = self._step(batch) logs = {name: loss for name, loss in zip(self.loss_names, loss_tensors)} # tokens per batch logs["ip_tpb"] = batch["input_ids"].numel() logs["op_tpb"] = batch["labels"].numel() logs["tpb"] = batch["input_ids"].ne(self.pad).sum() + batch["labels"].ne(self.pad).sum() logs["bs"] = batch["input_ids"].shape[0] logs["src_pad_tok"] = batch["input_ids"].eq(self.pad).sum() logs["src_pad_frac"] = batch["input_ids"].eq(self.pad).float().mean() # TODO(SS): make a wandb summary metric for this # self.log("train_loss_ddp_avg", loss_tensors[0], 
on_step=True, prog_bar=True, logger=True, sync_dist=self.sync_dist) return {"loss": loss_tensors[0], "log": logs} # Can remove after pytorch lightning fix def training_epoch_end(self, outputs) -> None: return def validation_step(self, batch) -> Dict: return self._generative_step(batch) def validation_epoch_end(self, outputs, prefix="val") -> Dict: self.step_count += 1 losses = {k: torch.stack([x[k] for x in outputs]).mean() for k in self.loss_names} loss = losses["loss"] generative_metrics = { k: np.array([x[k] for x in outputs]).mean() for k in self.metric_names + ["gen_time", "gen_len"] } metric_val = ( generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric] ) metric_tensor: torch.FloatTensor = torch.tensor(metric_val).type_as(loss) generative_metrics.update({k: v.item() for k, v in losses.items()}) losses.update(generative_metrics) all_metrics = {f"{prefix}_avg_{k}": x for k, x in losses.items()} all_metrics["step_count"] = self.step_count self.save_metrics(all_metrics, prefix) # writes to self.metrics_save_path preds = flatten_list([x["preds"] for x in outputs]) self.log(f"{prefix}_{self.val_metric}", metric_tensor, prog_bar=True, logger=True, sync_dist=self.sync_dist) return { "log": all_metrics, "preds": preds, f"{prefix}_loss": loss, f"{prefix}_{self.val_metric}": metric_tensor, } def save_metrics(self, latest_metrics, type_path) -> None: self.metrics[type_path].append(latest_metrics) save_json(self.metrics, self.metrics_save_path) def calc_generative_metrics(self, preds, target) -> Dict: return calculate_rouge(preds, target) def _generative_step(self, batch: dict) -> dict: t0 = time.time() loss_tensors, logits = self._step(batch) if self.eval_beams == 0: generated_ids = torch.argmax(logits.detach(), axis=-1) else: generated_ids = self.model.generate( batch["input_ids"], attention_mask=batch["attention_mask"], use_cache=True, decoder_start_token_id=self.decoder_start_token_id, num_beams=self.eval_beams, max_length=self.eval_max_length, num_beam_groups=1, output_scores=False, return_dict_in_generate=False, encoder_no_repeat_ngram_size=0, diversity_penalty=0.0 ) gen_time = (time.time() - t0) / batch["input_ids"].shape[0] preds: List[str] = self.ids_to_clean_text(generated_ids) target: List[str] = self.ids_to_clean_text(batch["labels"]) base_metrics = {name: loss.detach() for name, loss in zip(self.loss_names, loss_tensors)} rouge: Dict = self.calc_generative_metrics(preds, target) summ_len = np.mean(lmap(len, generated_ids)) base_metrics.update(gen_time=gen_time, gen_len=summ_len, preds=preds, target=target, **rouge) return base_metrics def test_step(self, batch): return self._generative_step(batch) def test_epoch_end(self, outputs): return self.validation_epoch_end(outputs, prefix="test") def get_dataset(self, type_path) -> Seq2SeqDataset: n_obs = self.n_obs[type_path] max_target_length = self.target_lens[type_path] dataset = self.dataset_class( self.tokenizer, type_path=type_path, n_obs=n_obs, max_target_length=max_target_length, **self.dataset_kwargs, ) return dataset def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False) -> DataLoader: dataset = self.get_dataset(type_path) if self.hparams.sortish_sampler and type_path != "test": sampler = dataset.make_sortish_sampler(batch_size, distributed=self.hparams.gpus > 1) return DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=False, num_workers=self.num_workers, sampler=sampler, pin_memory=True, ) elif 
self.hparams.max_tokens_per_batch is not None and type_path != "test": batch_sampler = dataset.make_dynamic_sampler( self.hparams.max_tokens_per_batch, distributed=self.hparams.gpus > 1 ) return DataLoader( dataset, batch_sampler=batch_sampler, collate_fn=dataset.collate_fn, num_workers=self.num_workers, pin_memory=True, ) else: return DataLoader( dataset, batch_size=batch_size, collate_fn=dataset.collate_fn, shuffle=shuffle, num_workers=self.num_workers, sampler=None, pin_memory=True, ) def train_dataloader(self) -> DataLoader: dataloader = self.get_dataloader("train", batch_size=self.hparams.train_batch_size, shuffle=True) return dataloader def val_dataloader(self) -> DataLoader: return self.get_dataloader("val", batch_size=self.hparams.eval_batch_size) def test_dataloader(self) -> DataLoader: return self.get_dataloader("test", batch_size=self.hparams.eval_batch_size) @staticmethod def add_model_specific_args(parser, root_dir): BaseTransformer.add_model_specific_args(parser, root_dir) add_generic_args(parser, root_dir) parser.add_argument( "--max_source_length", default=1024, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--max_target_length", default=56, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--val_max_target_length", default=142, # these defaults are optimized for CNNDM. For xsum, see README.md. type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument( "--test_max_target_length", default=142, type=int, help="The maximum total input sequence length after tokenization. Sequences longer " "than this will be truncated, sequences shorter will be padded.", ) parser.add_argument("--freeze_encoder", action="store_true") parser.add_argument("--freeze_embeds", action="store_true") parser.add_argument("--sortish_sampler", action="store_true", default=False) parser.add_argument("--max_tokens_per_batch", type=int, default=None) parser.add_argument("--logger_name", type=str, choices=["default", "wandb", "wandb_shared"], default="default") parser.add_argument("--n_train", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_val", type=int, default=500, required=False, help="# examples. -1 means use all.") parser.add_argument("--n_test", type=int, default=-1, required=False, help="# examples. -1 means use all.") parser.add_argument( "--task", type=str, default="summarization", required=False, help="# examples. -1 means use all." ) parser.add_argument("--label_smoothing", type=float, default=0.0, required=False) parser.add_argument("--src_lang", type=str, default="", required=False) parser.add_argument("--tgt_lang", type=str, default="", required=False) parser.add_argument("--eval_beams", type=int, default=None, required=False, help="# beams to use. 
0 corresponds to not using beam search.") parser.add_argument( "--val_metric", type=str, default=None, required=False, choices=["bleu", "rouge2", "loss", None] ) parser.add_argument("--eval_max_gen_length", type=int, default=None, help="never generate more than n tokens") parser.add_argument("--save_top_k", type=int, default=1, required=False, help="How many checkpoints to save") parser.add_argument( "--early_stopping_patience", type=int, default=-1, required=False, help="-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So val_check_interval will effect it.", ) parser.add_argument('--json-summary', type=str, default="results/dllogger.json", help='If provided, the json summary will be written to' 'the specified file.') parser.add_argument('--distill', type=str, default=None, help="string indicating distillation to perform, only sft supported", choices=["sft", None]) parser.add_argument('--layers', type=str, default=None, help="string indicating which layers to distill for SFT, split by '-' (ex. 0-6-11)") parser.add_argument('--do_encoder', action="store_true", default=False, help="if true distills the encoder") parser.add_argument('--do_decoder', action="store_true", default=False, help="if true distills the decoder") parser.add_argument("--local_rank", type=int, default=os.getenv('LOCAL_RANK', 0), help="local_rank for distributed training on gpus") parser.add_argument("--gpus", type=int, default=1, help="number of gpus to train on applied per node") parser.add_argument("--load_model_weights_only", action="store_true", help="Only load model weights, ignoring other ckpt states. useful at the start of phase2 training") return parser class TranslationModule(SummarizationModule): mode = "translation" loss_names = ["loss"] metric_names = ["bleu"] default_val_metric = "bleu" def __init__(self, hparams, **kwargs): super().__init__(hparams, **kwargs) self.dataset_kwargs["src_lang"] = hparams.src_lang self.dataset_kwargs["tgt_lang"] = hparams.tgt_lang def calc_generative_metrics(self, preds, target) -> dict: return calculate_bleu(preds, target) def set_seed(args): random.seed(args.seed + get_rank()) np.random.seed(args.seed + get_rank()) torch.manual_seed(args.seed + get_rank()) def save_final_checkpoint(args, model): output_filename = os.path.join(args.output_dir, "final_step.ckpt") if get_rank() == 0: model_to_save = model.module if hasattr(model, "module") else model torch.save(model_to_save.state_dict(), output_filename) def load_checkpoint(args, path, model, optimizer, scaler): checkpoint = torch.load(path, map_location=args.device) model.load_state_dict(checkpoint["model"]) if not args.load_model_weights_only: if 'optimizer' in checkpoint: optimizer.load_state_dict(checkpoint["optimizer"]) if 'scaler' in checkpoint: scaler.load_state_dict(checkpoint["scaler"]) def distill(layers, pick_layers): sft_layers = nn.ModuleList() delete_layers = [] for i in range(len(layers)): if i in pick_layers: sft_layers.append(layers[i]) else: delete_layers.append(i) # delete unnecessary layers for i in range(len(delete_layers)): del layers[delete_layers[i] - i] return sft_layers def distill_sft(model): pick_layers = [int(s) for s in args.layers.split('-')] # if distilling encoder if args.do_encoder: layers = model.model.encoder.layers sft_layers = distill(layers, pick_layers) model.model.encoder.layers = sft_layers # if distilling decoder if args.do_decoder: layers = model.model.decoder.layers sft_layers = distill(layers, pick_layers) model.model.decoder.layers = 
sft_layers return model def main(args, model=None) -> SummarizationModule: print(args) Path(args.output_dir).mkdir(exist_ok=True) # Initializes the distributed backend which will take care of sychronizing nodes/GPUs torch.cuda.set_device(args.local_rank) device = torch.device("cuda", args.local_rank) torch.distributed.init_process_group(backend="nccl") args.device = device # Set GPU affinity if args.affinity != 'disabled': affinity = set_affinity( get_rank(), get_device_count(), args.affinity ) logger.warning(f'{get_rank()}: thread affinity: {affinity}') # Set seed set_seed(args) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if get_rank() in [-1, 0] else logging.WARN, ) logger.warning( "Process global rank: %s, device: %s, distributed training: %s, 16-bits training: %s", get_rank(), device, bool(get_rank() != -1), (args.fp16 or args.bf16), ) checkpoints = list(sorted(glob.glob(os.path.join(args.output_dir, "_step*.ckpt"), recursive=True), key=lambda x:int(x.split("step")[1].split(".")[0]))) if model is None: if "summarization" in args.task: ### Define BART model # Config from "https://s3.amazonaws.com/models.huggingface.co/bert/facebook/bart-large-cnn/config.json # Vocab modified to 50265 to be consistent with facebook/bart-large default config = BartConfig(**json.load(open(args.config_path, "r"))) if args.fp16: config.dtype = torch.float16 elif args.bf16: config.dtype = torch.bfloat16 else: config.dtype = None config.pre_ln = args.pre_ln if args.distill: # if distilling, start from finetuned checkpoint if Path(args.data_dir).name == "cnn_dm": checkpoint = 'facebook/bart-large-cnn' else: checkpoint = 'facebook/bart-large-xsum' else: checkpoint = 'facebook/bart-large' #Start from pretrained checkpoint otherwise if args.resume_from_checkpoint: print("Resuming from checkpoint, make sure checkpoint is finetuned for best results") if ".ckpt" in args.resume_from_checkpoint: checkpoint = args.resume_from_checkpoint if args.distill: # set resume from checkpoint to None (state dict is different) args.resume_from_checkpoint = None model = BartForConditionalGeneration(config=config) else: if len(checkpoints) > 0: #No checkpoints available checkpoint = checkpoints[-1] args.resume_from_checkpoint = checkpoint model = BartForConditionalGeneration(config=config) else: args.resume_from_checkpoint = None print("No valid checkpoint to resume from. 
Using ", checkpoint) model = BartForConditionalGeneration.from_pretrained(checkpoint, config=config) else: model = BartForConditionalGeneration.from_pretrained(checkpoint, config=config) print("Loading BART model checkpoint using ", checkpoint) if args.distill == "sft": model = distill_sft(model) tokenizer = BartTokenizer.from_pretrained( 'facebook/bart-large') # Downloads vocab and merges file automatically trainer: SummarizationModule = SummarizationModule(args, model=model, config=config, tokenizer=tokenizer) else: raise ValueError("Translation not supported at this time") model: SummarizationModule = TranslationModule(args) dataset = Path(args.data_dir).name trainer.model.to(device) # Set up optimizer and scheduler optimizer, scheduler = trainer.configure_optimizers() optimizer = optimizer[0] scheduler = scheduler[0]['scheduler'] scaler = torch.cuda.amp.GradScaler(enabled=args.fp16) step = 0 if args.resume_from_checkpoint: logger.info("Loading BART model checkpoint using %s", checkpoint) checkpoint_suffix = checkpoint.split("step")[-1].split(".")[0] step = int(checkpoint_suffix) + 1 load_checkpoint(args, checkpoint, trainer.model, optimizer, scaler) if args.distill or args.load_model_weights_only: args.resume_from_checkpoint = None step = 0 # Distributed training (should be after apex fp16 initialization) if args.local_rank != -1: trainer.model = torch.nn.parallel.DistributedDataParallel( trainer.model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True ) generic_train(args, trainer, optimizer, scheduler, scaler, checkpoints, step) pickle_save(trainer.hparams, trainer.output_dir / "hparams.pkl") save_final_checkpoint(args, trainer.model) if args.do_predict: # Testing from a checkpoint generic_test(args, trainer) return trainer if __name__ == "__main__": parser = argparse.ArgumentParser() parser = SummarizationModule.add_model_specific_args(parser, os.getcwd()) args = parser.parse_args() if get_rank() == 0: dllogger.init(backends=[dllogger.JSONStreamBackend(verbosity=dllogger.Verbosity.VERBOSE, filename=args.json_summary), dllogger.StdOutBackend(verbosity=dllogger.Verbosity.VERBOSE, step_format=format_step)]) main(args) dllogger.flush() torch.distributed.barrier()
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/finetune.py
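A minimal, standalone sketch of the checkpoint-resolution logic used in main() above: files named "_step<N>.ckpt" are sorted numerically by <N> and the newest one is resumed from. The output directory here is an illustrative placeholder.

import glob
import os


def latest_checkpoint(output_dir):
    """Return the highest-step '_step<N>.ckpt' file in output_dir, or None if none exist."""
    ckpts = sorted(
        glob.glob(os.path.join(output_dir, "_step*.ckpt"), recursive=True),
        key=lambda x: int(x.split("step")[1].split(".")[0]),
    )
    return ckpts[-1] if ckpts else None


if __name__ == "__main__":
    ckpt = latest_checkpoint("./results")  # hypothetical output directory
    print("Resuming from:", ckpt if ckpt else "no checkpoint found, starting fresh")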
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 Optuna, Hugging Face # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Logging utilities. """ import logging import os import sys import threading from logging import CRITICAL # NOQA from logging import DEBUG # NOQA from logging import ERROR # NOQA from logging import FATAL # NOQA from logging import INFO # NOQA from logging import NOTSET # NOQA from logging import WARN # NOQA from logging import WARNING # NOQA from typing import Optional _lock = threading.Lock() _default_handler: Optional[logging.Handler] = None log_levels = { "debug": logging.DEBUG, "info": logging.INFO, "warning": logging.WARNING, "error": logging.ERROR, "critical": logging.CRITICAL, } _default_log_level = logging.WARNING def _get_default_logging_level(): """ If TRANSFORMERS_VERBOSITY env var is set to one of the valid choices return that as the new default level. If it is not - fall back to ``_default_log_level`` """ env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None) if env_level_str: if env_level_str in log_levels: return log_levels[env_level_str] else: logging.getLogger().warning( f"Unknown option TRANSFORMERS_VERBOSITY={env_level_str}, " f"has to be one of: { ', '.join(log_levels.keys()) }" ) return _default_log_level def _get_library_name() -> str: return __name__.split(".")[0] def _get_library_root_logger() -> logging.Logger: return logging.getLogger(_get_library_name()) def _configure_library_root_logger() -> None: global _default_handler with _lock: if _default_handler: # This library has already configured the library root logger. return _default_handler = logging.StreamHandler() # Set sys.stderr as stream. _default_handler.flush = sys.stderr.flush # Apply our default configuration to the library root logger. library_root_logger = _get_library_root_logger() library_root_logger.addHandler(_default_handler) library_root_logger.setLevel(_get_default_logging_level()) library_root_logger.propagate = False def _reset_library_root_logger() -> None: global _default_handler with _lock: if not _default_handler: return library_root_logger = _get_library_root_logger() library_root_logger.removeHandler(_default_handler) library_root_logger.setLevel(logging.NOTSET) _default_handler = None def get_logger(name: Optional[str] = None) -> logging.Logger: """ Return a logger with the specified name. This function is not supposed to be directly accessed unless you are writing a custom transformers module. """ if name is None: name = _get_library_name() _configure_library_root_logger() return logging.getLogger(name) def get_verbosity() -> int: """ Return the current level for the 🤗 Transformers's root logger as an int. Returns: :obj:`int`: The logging level. .. 
note:: 🤗 Transformers has following logging levels: - 50: ``transformers.logging.CRITICAL`` or ``transformers.logging.FATAL`` - 40: ``transformers.logging.ERROR`` - 30: ``transformers.logging.WARNING`` or ``transformers.logging.WARN`` - 20: ``transformers.logging.INFO`` - 10: ``transformers.logging.DEBUG`` """ _configure_library_root_logger() return _get_library_root_logger().getEffectiveLevel() def set_verbosity(verbosity: int) -> None: """ Set the vebosity level for the 🤗 Transformers's root logger. Args: verbosity (:obj:`int`): Logging level, e.g., one of: - ``transformers.logging.CRITICAL`` or ``transformers.logging.FATAL`` - ``transformers.logging.ERROR`` - ``transformers.logging.WARNING`` or ``transformers.logging.WARN`` - ``transformers.logging.INFO`` - ``transformers.logging.DEBUG`` """ _configure_library_root_logger() _get_library_root_logger().setLevel(verbosity) def set_verbosity_info(): """Set the verbosity to the :obj:`INFO` level.""" return set_verbosity(INFO) def set_verbosity_warning(): """Set the verbosity to the :obj:`WARNING` level.""" return set_verbosity(WARNING) def set_verbosity_debug(): """Set the verbosity to the :obj:`DEBUG` level.""" return set_verbosity(DEBUG) def set_verbosity_error(): """Set the verbosity to the :obj:`ERROR` level.""" return set_verbosity(ERROR) def disable_default_handler() -> None: """Disable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().removeHandler(_default_handler) def enable_default_handler() -> None: """Enable the default handler of the HuggingFace Transformers's root logger.""" _configure_library_root_logger() assert _default_handler is not None _get_library_root_logger().addHandler(_default_handler) def disable_propagation() -> None: """ Disable propagation of the library log outputs. Note that log propagation is disabled by default. """ _configure_library_root_logger() _get_library_root_logger().propagate = False def enable_propagation() -> None: """ Enable propagation of the library log outputs. Please disable the HuggingFace Transformers's default handler to prevent double logging if the root logger has been configured. """ _configure_library_root_logger() _get_library_root_logger().propagate = True def enable_explicit_format() -> None: """ Enable explicit formatting for every HuggingFace Transformers's logger. The explicit formatter is as follows: :: [LEVELNAME|FILENAME|LINE NUMBER] TIME >> MESSAGE All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: formatter = logging.Formatter("[%(levelname)s|%(filename)s:%(lineno)s] %(asctime)s >> %(message)s") handler.setFormatter(formatter) def reset_format() -> None: """ Resets the formatting for HuggingFace Transformers's loggers. All handlers currently bound to the root logger are affected by this method. """ handlers = _get_library_root_logger().handlers for handler in handlers: handler.setFormatter(None)
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/logging.py
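A short usage sketch of the logging utilities above, assuming the module is importable as utils.logging (the import path used by optimization.py below): obtain the library logger, raise verbosity, and switch every handler on the library root logger to the explicit "[LEVEL|file:line] time >> message" format.

from utils import logging

logger = logging.get_logger(__name__)

logging.set_verbosity_info()        # equivalent to set_verbosity(logging.INFO)
logging.enable_explicit_format()    # re-formats all handlers bound to the library root logger

logger.info("current verbosity: %d", logging.get_verbosity())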
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import collections import math import os import pathlib import re import pynvml pynvml.nvmlInit() def systemGetDriverVersion(): return pynvml.nvmlSystemGetDriverVersion() def deviceGetCount(): return pynvml.nvmlDeviceGetCount() class device: # assume nvml returns list of 64 bit ints _nvml_affinity_elements = math.ceil(os.cpu_count() / 64) def __init__(self, device_idx): super().__init__() self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx) def getName(self): return pynvml.nvmlDeviceGetName(self.handle) def getCpuAffinity(self): affinity_string = '' for j in pynvml.nvmlDeviceGetCpuAffinity( self.handle, device._nvml_affinity_elements ): # assume nvml returns list of 64 bit ints affinity_string = '{:064b}'.format(j) + affinity_string affinity_list = [int(x) for x in affinity_string] affinity_list.reverse() # so core 0 is in 0th element of list ret = [i for i, e in enumerate(affinity_list) if e != 0] return ret def set_socket_affinity(gpu_id): dev = device(gpu_id) affinity = dev.getCpuAffinity() os.sched_setaffinity(0, affinity) def set_single_affinity(gpu_id): dev = device(gpu_id) affinity = dev.getCpuAffinity() os.sched_setaffinity(0, affinity[:1]) def set_single_unique_affinity(gpu_id, nproc_per_node): devices = [device(i) for i in range(nproc_per_node)] socket_affinities = [dev.getCpuAffinity() for dev in devices] siblings_list = get_thread_siblings_list() siblings_dict = dict(siblings_list) # remove siblings for idx, socket_affinity in enumerate(socket_affinities): socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values())) affinities = [] assigned = [] for socket_affinity in socket_affinities: for core in socket_affinity: if core not in assigned: affinities.append([core]) assigned.append(core) break os.sched_setaffinity(0, affinities[gpu_id]) def set_socket_unique_affinity(gpu_id, nproc_per_node, mode): device_ids = [device(i) for i in range(nproc_per_node)] socket_affinities = [dev.getCpuAffinity() for dev in device_ids] siblings_list = get_thread_siblings_list() siblings_dict = dict(siblings_list) # remove siblings for idx, socket_affinity in enumerate(socket_affinities): socket_affinities[idx] = list(set(socket_affinity) - set(siblings_dict.values())) socket_affinities_to_device_ids = collections.defaultdict(list) for idx, socket_affinity in enumerate(socket_affinities): socket_affinities_to_device_ids[tuple(socket_affinity)].append(idx) for socket_affinity, device_ids in socket_affinities_to_device_ids.items(): devices_per_group = len(device_ids) cores_per_device = len(socket_affinity) // devices_per_group for group_id, device_id in enumerate(device_ids): if device_id == gpu_id: if mode == 'interleaved': affinity = list(socket_affinity[group_id::devices_per_group]) elif mode == 'continuous': affinity = list(socket_affinity[group_id*cores_per_device:(group_id+1)*cores_per_device]) 
else: raise RuntimeError('Unknown set_socket_unique_affinity mode') # reintroduce siblings affinity += [siblings_dict[aff] for aff in affinity if aff in siblings_dict] os.sched_setaffinity(0, affinity) def get_thread_siblings_list(): path = '/sys/devices/system/cpu/cpu*/topology/thread_siblings_list' thread_siblings_list = [] pattern = re.compile(r'(\d+)\D(\d+)') for fname in pathlib.Path(path[0]).glob(path[1:]): with open(fname) as f: content = f.read().strip() res = pattern.findall(content) if res: pair = tuple(map(int, res[0])) thread_siblings_list.append(pair) return thread_siblings_list def set_affinity(gpu_id, nproc_per_node, mode='socket'): if mode == 'socket': set_socket_affinity(gpu_id) elif mode == 'single': set_single_affinity(gpu_id) elif mode == 'single_unique': set_single_unique_affinity(gpu_id, nproc_per_node) elif mode == 'socket_unique_interleaved': set_socket_unique_affinity(gpu_id, nproc_per_node, 'interleaved') elif mode == 'socket_unique_continuous': set_socket_unique_affinity(gpu_id, nproc_per_node, 'continuous') else: raise RuntimeError('Unknown affinity mode') affinity = os.sched_getaffinity(0) return affinity
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/gpu_affinity.py
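A minimal sketch of how set_affinity is typically called once per process (finetune.py's main() above does the equivalent with get_rank() and get_device_count()): each rank pins itself to the CPU cores NVML reports as local to its GPU. The torchrun-style environment variables and the import path are assumptions for illustration.

import os

from utils.gpu_affinity import set_affinity  # assumed import path, mirroring the layout above

local_rank = int(os.environ.get("LOCAL_RANK", 0))
nproc_per_node = int(os.environ.get("LOCAL_WORLD_SIZE", 1))

# 'socket' is the default mode; the unique modes give each rank a disjoint core set
affinity = set_affinity(local_rank, nproc_per_node, mode="socket_unique_continuous")
print(f"rank {local_rank}: pinned to cores {sorted(affinity)}")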
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import logging
import math

import torch
import torch.nn.functional as F

logger = logging.getLogger(__name__)


def swish(x):
    return x * torch.sigmoid(x)


def _gelu_python(x):
    """ Original Implementation of the gelu activation function in Google Bert repo when initially created.
        For information: OpenAI GPT's gelu is slightly different (and gives slightly different results):
        0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
        This is now written in C in torch.nn.functional
        Also see https://arxiv.org/abs/1606.08415
    """
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))


def gelu_new(x):
    """ Implementation of the gelu activation function currently in Google Bert repo (identical to OpenAI GPT).
        Also see https://arxiv.org/abs/1606.08415
    """
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))


if torch.__version__ < "1.4.0":
    gelu = _gelu_python
else:
    gelu = F.gelu


def gelu_fast(x):
    return 0.5 * x * (1.0 + torch.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))


ACT2FN = {
    "relu": F.relu,
    "swish": swish,
    "gelu": gelu,
    "tanh": torch.tanh,
    "gelu_new": gelu_new,
    "gelu_fast": gelu_fast,
}


def get_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError("function {} not found in ACT2FN mapping {}".format(activation_string, list(ACT2FN.keys())))
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/activations.py
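A quick usage sketch for the activation registry above: look up an activation by name via get_activation and apply it to a tensor. The import path is an assumption mirroring the repository layout.

import torch

from utils.activations import get_activation  # assumed import path

act = get_activation("gelu_new")
x = torch.randn(2, 4)
print(act(x))

# Unknown names raise a KeyError listing the supported keys:
try:
    get_activation("mish")
except KeyError as err:
    print(err)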
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import sys import os import hashlib dm_single_close_quote = u'\u2019' # unicode dm_double_close_quote = u'\u201d' END_TOKENS = ['.', '!', '?', '...', "'", "`", '"', dm_single_close_quote, dm_double_close_quote, ")"] # acceptable ways to end a sentence all_train_urls = "url_lists/all_train.txt" all_val_urls = "url_lists/all_val.txt" all_test_urls = "url_lists/all_test.txt" finished_files_dir = "cnn_dm" # These are the number of .story files we expect there to be in cnn_stories_dir and dm_stories_dir num_expected_cnn_stories = 92579 num_expected_dm_stories = 219506 def read_text_file(text_file): lines = [] with open(text_file, "r") as f: for line in f: lines.append(line.strip()) return lines def hashhex(s): """Returns a heximal formated SHA1 hash of the input string.""" h = hashlib.sha1() h.update(s.encode()) return h.hexdigest() def get_url_hashes(url_list): return [hashhex(url) for url in url_list] def fix_missing_period(line): """Adds a period to a line that is missing a period""" if "@highlight" in line: return line if line=="": return line if line[-1] in END_TOKENS: return line # print line[-1] return line + " ." def get_art_abs(story_file): lines = read_text_file(story_file) # Put periods on the ends of lines that are missing them (this is a problem in the dataset because many image captions don't end in periods; consequently they end up in the body of the article as run-on sentences) lines = [fix_missing_period(line) for line in lines] # Separate out article and abstract sentences article_lines = [] highlights = [] next_is_highlight = False for idx,line in enumerate(lines): if line == "": continue # empty line elif line.startswith("@highlight"): next_is_highlight = True elif next_is_highlight: highlights.append(line) else: article_lines.append(line) # Make article into a single string article = ' '.join(article_lines) # Make abstract into a signle string abstract = ' '.join(highlights) return article, abstract def write_to_bin(url_file, out_name): """Reads the tokenized .story files corresponding to the urls listed in the url_file and writes them to a out_file.""" print("Making bin file for URLs listed in %s..." 
% url_file) url_list = read_text_file(url_file) url_hashes = get_url_hashes(url_list) story_fnames = [s+".story" for s in url_hashes] num_stories = len(story_fnames) article_out = out_name + '.source' abstract_out = out_name + '.target' with open(article_out, 'w') as article_writer, open(abstract_out, 'w') as abstract_writer: for idx,s in enumerate(story_fnames): if idx % 1000 == 0: print("Writing story %i of %i; %.2f percent done" % (idx, num_stories, float(idx)*100.0/float(num_stories))) # Look in the tokenized story dirs to find the .story file corresponding to this url if os.path.isfile(os.path.join(cnn_stories_dir, s)): story_file = os.path.join(cnn_stories_dir, s) elif os.path.isfile(os.path.join(dm_stories_dir, s)): story_file = os.path.join(dm_stories_dir, s) else: print("Error: Couldn't find story file %s in story directories %s and %s." % (s, cnn_stories_dir, dm_stories_dir)) # Check again if stories directories contain correct number of files print("Checking that the stories directories %s and %s contain correct number of files..." % (cnn_stories_dir, dm_stories_dir)) check_num_stories(cnn_stories_dir, num_expected_cnn_stories) check_num_stories(dm_stories_dir, num_expected_dm_stories) raise Exception("Stories directories %s and %s contain correct number of files but story file %s found in neither." % (cnn_stories_dir, dm_stories_dir, s)) # Get the strings to write to .bin file article, abstract = get_art_abs(story_file) article_writer.write(article + '\n') abstract_writer.write(abstract + '\n') print("Finished writing file %s and %s\n" % (article_out, abstract_out)) def check_num_stories(stories_dir, num_expected): num_stories = len(os.listdir(stories_dir)) if num_stories != num_expected: raise Exception("stories directory %s contains %i files but should contain %i" % (stories_dir, num_stories, num_expected)) if __name__ == '__main__': if len(sys.argv) != 3: print("USAGE: python make_datafiles.py <cnn_stories_dir> <dailymail_stories_dir>") sys.exit() cnn_stories_dir = sys.argv[1] dm_stories_dir = sys.argv[2] # Check the stories directories contain the correct number of .story files check_num_stories(cnn_stories_dir, num_expected_cnn_stories) check_num_stories(dm_stories_dir, num_expected_dm_stories) # Create some new directories if not os.path.exists(finished_files_dir): os.makedirs(finished_files_dir) # Read the tokenized stories, do a little postprocessing then write to bin files write_to_bin(all_test_urls, os.path.join(finished_files_dir, "test")) write_to_bin(all_val_urls, os.path.join(finished_files_dir, "val")) write_to_bin(all_train_urls, os.path.join(finished_files_dir, "train"))
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/make_datafiles.py
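The script above is invoked as "python make_datafiles.py <cnn_stories_dir> <dailymail_stories_dir>" and writes train/val/test .source and .target files under cnn_dm/. A small sketch of the URL-to-filename mapping it relies on (SHA-1 hex digest of the raw URL string, matching hashhex()/get_url_hashes()); the URL below is illustrative only.

import hashlib


def url_to_story_name(url):
    # same scheme as hashhex() above: SHA-1 hex digest of the URL, plus the .story suffix
    return hashlib.sha1(url.encode()).hexdigest() + ".story"


print(url_to_story_name("http://example.com/some-article.html"))  # hypothetical URL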
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """PyTorch optimization for BERT model.""" import math from typing import Callable, Iterable, Tuple import torch from torch.optim import Optimizer from torch.optim.lr_scheduler import LambdaLR from utils import logging logger = logging.get_logger(__name__) def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1): """ Create a schedule with a constant learning rate, using the learning rate set in optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch) def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1): """ Create a schedule with a constant learning rate preceded by a warmup period during which the learning rate increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1.0, num_warmup_steps)) return 1.0 return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch) def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): """ Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
""" def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) return max( 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) ) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`float`, `optional`, defaults to 0.5): The number of waves in the cosine schedule (the defaults is to just decrease from the max value to 0 following a half-cosine). last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_cosine_with_hard_restarts_schedule_with_warmup( optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1 ): """ Create a schedule with a learning rate that decreases following the values of the cosine function between the initial lr set in the optimizer to 0, with several hard restarts, after a warmup period during which it increases linearly between 0 and the initial lr set in the optimizer. Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. num_cycles (:obj:`int`, `optional`, defaults to 1): The number of hard restarts to use. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ def lr_lambda(current_step): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) if progress >= 1.0: return 0.0 return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0)))) return LambdaLR(optimizer, lr_lambda, last_epoch) def get_polynomial_decay_schedule_with_warmup( optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1 ): """ Create a schedule with a learning rate that decreases as a polynomial decay from the initial lr set in the optimizer to end lr defined by `lr_end`, after a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. 
Args: optimizer (:class:`~torch.optim.Optimizer`): The optimizer for which to schedule the learning rate. num_warmup_steps (:obj:`int`): The number of steps for the warmup phase. num_training_steps (:obj:`int`): The total number of training steps. lr_end (:obj:`float`, `optional`, defaults to 1e-7): The end LR. power (:obj:`float`, `optional`, defaults to 1.0): Power factor. last_epoch (:obj:`int`, `optional`, defaults to -1): The index of the last epoch when resuming training. Note: `power` defaults to 1.0 as in the fairseq implementation, which in turn is based on the original BERT implementation at https://github.com/google-research/bert/blob/f39e881b169b9d53bea03d2d341b31707a6c052b/optimization.py#L37 Return: :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. """ lr_init = optimizer.defaults["lr"] assert lr_init > lr_end, f"lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})" def lr_lambda(current_step: int): if current_step < num_warmup_steps: return float(current_step) / float(max(1, num_warmup_steps)) elif current_step > num_training_steps: return lr_end / lr_init # as LambdaLR multiplies by lr_init else: lr_range = lr_init - lr_end decay_steps = num_training_steps - num_warmup_steps pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps decay = lr_range * pct_remaining ** power + lr_end return decay / lr_init # as LambdaLR multiplies by lr_init return LambdaLR(optimizer, lr_lambda, last_epoch) class AdamW(Optimizer): """ Implements Adam algorithm with weight decay fix as introduced in `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__. Parameters: params (:obj:`Iterable[torch.nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (:obj:`float`, `optional`, defaults to 1e-3): The learning rate to use. betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)): Adam's betas parameters (b1, b2). eps (:obj:`float`, `optional`, defaults to 1e-6): Adam's epsilon for numerical stability. weight_decay (:obj:`float`, `optional`, defaults to 0): Decoupled weight decay to apply. correct_bias (:obj:`bool`, `optional`, defaults to `True`): Whether ot not to correct bias in Adam (for instance, in Bert TF repository they use :obj:`False`). """ def __init__( self, params: Iterable[torch.nn.parameter.Parameter], lr: float = 1e-3, betas: Tuple[float, float] = (0.9, 0.999), eps: float = 1e-6, weight_decay: float = 0.0, correct_bias: bool = True, ): if lr < 0.0: raise ValueError("Invalid learning rate: {} - should be >= 0.0".format(lr)) if not 0.0 <= betas[0] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[0])) if not 0.0 <= betas[1] < 1.0: raise ValueError("Invalid beta parameter: {} - should be in [0.0, 1.0[".format(betas[1])) if not 0.0 <= eps: raise ValueError("Invalid epsilon value: {} - should be >= 0.0".format(eps)) defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias) super().__init__(params, defaults) def step(self, closure: Callable = None): """ Performs a single optimization step. Arguments: closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError("Adam does not support sparse gradients, please consider SparseAdam instead") state = self.state[p] # State initialization if len(state) == 0: state["step"] = 0 # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state["exp_avg_sq"] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"] beta1, beta2 = group["betas"] state["step"] += 1 # Decay the first and second moment running average coefficient # In-place operations to update the averages at the same time exp_avg.mul_(beta1).add_(grad, alpha=1.0 - beta1) exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1.0 - beta2) denom = exp_avg_sq.sqrt().add_(group["eps"]) step_size = group["lr"] if group["correct_bias"]: # No bias correction for Bert bias_correction1 = 1.0 - beta1 ** state["step"] bias_correction2 = 1.0 - beta2 ** state["step"] step_size = step_size * math.sqrt(bias_correction2) / bias_correction1 p.data.addcdiv_(exp_avg, denom, value=-step_size) # Just adding the square of the weights to the loss function is *not* # the correct way of using L2 regularization/weight decay with Adam, # since that will interact with the m and v parameters in strange ways. # # Instead we want to decay the weights in a manner that doesn't interact # with the m/v parameters. This is equivalent to adding the square # of the weights to the loss with plain (non-momentum) SGD. # Add weight decay at the end (fixed version) if group["weight_decay"] > 0.0: p.data.add_(p.data, alpha=-group["lr"] * group["weight_decay"]) return loss class Adafactor(Optimizer): """ AdaFactor pytorch implementation can be used as a drop in replacement for Adam original fairseq code: https://github.com/pytorch/fairseq/blob/master/fairseq/optim/adafactor.py Paper: `Adafactor: Adaptive Learning Rates with Sublinear Memory Cost` https://arxiv.org/abs/1804.04235 Note that this optimizer internally adjusts the learning rate depending on the *scale_parameter*, *relative_step* and *warmup_init* options. To use a manual (external) learning rate schedule you should set `scale_parameter=False` and `relative_step=False`. Arguments: params (:obj:`Iterable[torch.nn.parameter.Parameter]`): Iterable of parameters to optimize or dictionaries defining parameter groups. lr (:obj:`float`, `optional`): The external learning rate. 
eps (:obj:`Tuple[float, float]`, `optional`, defaults to (1e-30, 1e-3)): Regularization constants for square gradient and parameter scale respectively clip_threshold (:obj:`float`, `optional`, defaults 1.0): Threshold of root mean square of final gradient update decay_rate (:obj:`float`, `optional`, defaults to -0.8): Coefficient used to compute running averages of square beta1 (:obj:`float`, `optional`): Coefficient used for computing running averages of gradient weight_decay (:obj:`float`, `optional`, defaults to 0): Weight decay (L2 penalty) scale_parameter (:obj:`bool`, `optional`, defaults to :obj:`True`): If True, learning rate is scaled by root mean square relative_step (:obj:`bool`, `optional`, defaults to :obj:`True`): If True, time-dependent learning rate is computed instead of external learning rate warmup_init (:obj:`bool`, `optional`, defaults to :obj:`False`): Time-dependent learning rate computation depends on whether warm-up initialization is being used This implementation handles low-precision (FP16, bfloat) values, but we have not thoroughly tested. Recommended T5 finetuning settings: - Scheduled LR warm-up to fixed LR - disable relative updates - use clip threshold: https://arxiv.org/abs/2004.14546 Example:: Adafactor(model.parameters(), lr=1e-3, relative_step=False, warmup_init=True) - Alternatively, relative_step with warmup_init can be used. - Training without LR warmup or clip threshold is not recommended. Additional optimizer operations like gradient clipping should not be used alongside Adafactor. Usage:: # replace AdamW with Adafactor optimizer = Adafactor( model.parameters(), lr=1e-3, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, relative_step=False, scale_parameter=False, warmup_init=False ) """ def __init__( self, params, lr=None, eps=(1e-30, 1e-3), clip_threshold=1.0, decay_rate=-0.8, beta1=None, weight_decay=0.0, scale_parameter=True, relative_step=True, warmup_init=False, ): if lr is not None and relative_step: raise ValueError("Cannot combine manual lr and relative_step options") if warmup_init and not relative_step: raise ValueError("warmup_init requires relative_step=True") defaults = dict( lr=lr, eps=eps, clip_threshold=clip_threshold, decay_rate=decay_rate, beta1=beta1, weight_decay=weight_decay, scale_parameter=scale_parameter, relative_step=relative_step, warmup_init=warmup_init, ) super().__init__(params, defaults) @staticmethod def _get_lr(param_group, param_state): rel_step_sz = param_group["lr"] if param_group["relative_step"]: min_step = 1e-6 * param_state["step"] if param_group["warmup_init"] else 1e-2 rel_step_sz = min(min_step, 1.0 / math.sqrt(param_state["step"])) param_scale = 1.0 if param_group["scale_parameter"]: param_scale = max(param_group["eps"][1], param_state["RMS"]) return param_scale * rel_step_sz @staticmethod def _get_options(param_group, param_shape): factored = len(param_shape) >= 2 use_first_moment = param_group["beta1"] is not None return factored, use_first_moment @staticmethod def _rms(tensor): return tensor.norm(2) / (tensor.numel() ** 0.5) @staticmethod def _approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col): r_factor = (exp_avg_sq_row / exp_avg_sq_row.mean(dim=-1, keepdim=True)).rsqrt_() c_factor = exp_avg_sq_col.rsqrt() return torch.mm(r_factor.unsqueeze(-1), c_factor.unsqueeze(0)) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. 
""" loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group["params"]: if p.grad is None: continue grad = p.grad.data if grad.dtype in {torch.float16, torch.bfloat16}: grad = grad.float() if grad.is_sparse: raise RuntimeError("Adafactor does not support sparse gradients.") state = self.state[p] grad_shape = grad.shape factored, use_first_moment = self._get_options(group, grad_shape) # State Initialization if len(state) == 0: state["step"] = 0 if use_first_moment: # Exponential moving average of gradient values state["exp_avg"] = torch.zeros_like(grad) if factored: state["exp_avg_sq_row"] = torch.zeros(grad_shape[:-1]).to(grad) state["exp_avg_sq_col"] = torch.zeros(grad_shape[:-2] + grad_shape[-1:]).to(grad) else: state["exp_avg_sq"] = torch.zeros_like(grad) state["RMS"] = 0 else: if use_first_moment: state["exp_avg"] = state["exp_avg"].to(grad) if factored: state["exp_avg_sq_row"] = state["exp_avg_sq_row"].to(grad) state["exp_avg_sq_col"] = state["exp_avg_sq_col"].to(grad) else: state["exp_avg_sq"] = state["exp_avg_sq"].to(grad) p_data_fp32 = p.data if p.data.dtype in {torch.float16, torch.bfloat16}: p_data_fp32 = p_data_fp32.float() state["step"] += 1 state["RMS"] = self._rms(p_data_fp32) group["lr"] = self._get_lr(group, state) beta2t = 1.0 - math.pow(state["step"], group["decay_rate"]) update = (grad ** 2) + group["eps"][0] if factored: exp_avg_sq_row = state["exp_avg_sq_row"] exp_avg_sq_col = state["exp_avg_sq_col"] exp_avg_sq_row.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-1)) exp_avg_sq_col.mul_(beta2t).add_(1.0 - beta2t, update.mean(dim=-2)) # Approximation of exponential moving average of square of gradient update = self._approx_sq_grad(exp_avg_sq_row, exp_avg_sq_col) update.mul_(grad) else: exp_avg_sq = state["exp_avg_sq"] exp_avg_sq.mul_(beta2t).add_(1.0 - beta2t, update) update = exp_avg_sq.rsqrt().mul_(grad) update.div_((self._rms(update) / group["clip_threshold"]).clamp_(min=1.0)) update.mul_(group["lr"]) if use_first_moment: exp_avg = state["exp_avg"] exp_avg.mul_(group["beta1"]).add_(1 - group["beta1"], update) update = exp_avg if group["weight_decay"] != 0: p_data_fp32.add_(-group["weight_decay"] * group["lr"], p_data_fp32) p_data_fp32.add_(-update) if p.data.dtype in {torch.float16, torch.bfloat16}: p.data.copy_(p_data_fp32) return loss
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/optimization.py
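A minimal training-loop sketch wiring together the AdamW optimizer and the linear warmup/decay schedule defined above. The tiny model, dummy loss, and step counts are placeholders; the import path is assumed from the repository layout.

import torch

from utils.optimization import AdamW, get_linear_schedule_with_warmup  # assumed import path

model = torch.nn.Linear(16, 2)
optimizer = AdamW(model.parameters(), lr=3e-5, weight_decay=0.01)
scheduler = get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=10, num_training_steps=100
)

for step in range(100):
    loss = model(torch.randn(8, 16)).pow(2).mean()  # dummy loss for illustration
    loss.backward()
    optimizer.step()
    scheduler.step()          # advance the LR schedule once per optimizer step
    optimizer.zero_grad()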
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/__init__.py
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import inspect import math from abc import ABC from typing import Callable, Iterable, List import numpy as np import torch from .file_utils import add_start_docstrings LOGITS_PROCESSOR_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`~transformers.BertTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`): Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax or scores for each vocabulary token after SoftMax. kwargs: Additional logits processor specific kwargs. Return: :obj:`torch.FloatTensor` of shape :obj:`(batch_size, config.vocab_size)`: The processed prediction scores. """ class LogitsProcessor(ABC): """Abstract base class for all logit processors that can be applied during generation.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: """Torch method for processing logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class LogitsWarper(ABC): """Abstract base class for all logit warpers that can be applied during generation with multinomial sampling.""" @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: """Torch method for warping logits.""" raise NotImplementedError( f"{self.__class__} is an abstract class. Only classes inheriting this class can be called." ) class LogitsProcessorList(list): """ This class can be used to create a list of :class:`~transformers.LogitsProcessor` or :class:`~transformers.LogitsWarper` to subsequently process a :obj:`scores` input tensor. This class inherits from list and adds a specific `__call__` method to apply each :class:`~transformers.LogitsProcessor` or :class:`~transformers.LogitsProcessor` to the inputs. """ @add_start_docstrings(LOGITS_PROCESSOR_INPUTS_DOCSTRING) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> torch.FloatTensor: for processor in self: function_args = inspect.signature(processor.__call__).parameters if len(function_args) > 2: assert all( arg in kwargs for arg in list(function_args.keys())[2:] ), f"Make sure that all the required parameters: {list(function_args.keys())} for {processor.__class__} are passed to the logits processor." 
scores = processor(input_ids, scores, **kwargs) else: scores = processor(input_ids, scores) return scores class MinLengthLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` enforcing a min-length by setting EOS probability to 0. Args: min_length (:obj:`int`): The minimum length below which the score of :obj:`eos_token_id` is set to :obj:`-float("Inf")`. eos_token_id (:obj:`int`): The id of the `end-of-sequence` token. """ def __init__(self, min_length: int, eos_token_id: int): if not isinstance(min_length, int) or min_length < 0: raise ValueError(f"`min_length` has to be a positive integer, but is {min_length}") if not isinstance(eos_token_id, int) or eos_token_id < 0: raise ValueError(f"`eos_token_id` has to be a positive integer, but is {eos_token_id}") self.min_length = min_length self.eos_token_id = eos_token_id def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: cur_len = input_ids.shape[-1] if cur_len < self.min_length: scores[:, self.eos_token_id] = -float("inf") return scores class TemperatureLogitsWarper(LogitsWarper): r""" :class:`transformers.LogitsWarper` for temperature (exponential scaling output probability distribution). Args: temperature (:obj:`float`): The value used to module the logits distribution. """ def __init__(self, temperature: float): if not isinstance(temperature, float) or not (temperature > 0): raise ValueError(f"`temperature` has to be a strictly positive float, but is {temperature}") self.temperature = temperature def __call__(self, input_ids: torch.Tensor, scores: torch.Tensor) -> torch.Tensor: scores = scores / self.temperature return scores class RepetitionPenaltyLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` enforcing an exponential penalty on repeated sequences. Args: repetition_penalty (:obj:`float`): The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. """ def __init__(self, penalty: float): if not isinstance(penalty, float) or not (penalty > 0): raise ValueError(f"`penalty` has to be a strictly positive float, but is {penalty}") self.penalty = penalty def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: score = torch.gather(scores, 1, input_ids) # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability score = torch.where(score < 0, score * self.penalty, score / self.penalty) scores.scatter_(1, input_ids, score) return scores class TopPLogitsWarper(LogitsWarper): """ :class:`transformers.LogitsWarper` that performs top-p, i.e. restricting to top tokens summing to prob_cut_off <= prob_cut_off. Args: top_p (:obj:`float`): If set to < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or higher are kept for generation. filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__(self, top_p: float, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_p, float) or (top_p < 0 or top_p > 1.0): raise ValueError(f"`top_p` has to be a float > 0 and < 1, but is {top_p}") self.top_p = top_p self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: sorted_logits, sorted_indices = torch.sort(scores, descending=True) cumulative_probs = sorted_logits.softmax(dim=-1).cumsum(dim=-1) # Remove tokens with cumulative top_p above the threshold (token with 0 are kept) sorted_indices_to_remove = cumulative_probs > self.top_p if self.min_tokens_to_keep > 1: # Keep at least min_tokens_to_keep (set to min_tokens_to_keep-1 because we add the first one below) sorted_indices_to_remove[..., : self.min_tokens_to_keep - 1] = 0 # Shift the indices to the right to keep also the first token above the threshold sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone() sorted_indices_to_remove[..., 0] = 0 # scatter sorted tensors to original indexing indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove) scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores class TopKLogitsWarper(LogitsWarper): r""" :class:`transformers.LogitsWarper` that performs top-k, i.e. restricting to the k highest probability elements. Args: top_k (:obj:`int`): The number of highest probability vocabulary tokens to keep for top-k-filtering. filter_value (:obj:`float`, `optional`, defaults to :obj:`-float("Inf")`): All filtered values will be set to this float value. min_tokens_to_keep (:obj:`int`, `optional`, defaults to 1): Minimum number of tokens that cannot be filtered. 
""" def __init__(self, top_k: int, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1): if not isinstance(top_k, int) or top_k <= 0: raise ValueError(f"`top_k` has to be a strictly positive integer, but is {top_k}") self.top_k = top_k self.filter_value = filter_value self.min_tokens_to_keep = min_tokens_to_keep def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: top_k = min(max(self.top_k, self.min_tokens_to_keep), scores.size(-1)) # Safety check # Remove all tokens with a probability less than the last token of the top-k indices_to_remove = scores < torch.topk(scores, top_k)[0][..., -1, None] scores = scores.masked_fill(indices_to_remove, self.filter_value) return scores def _get_ngrams(ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int): generated_ngrams = [{} for _ in range(num_hypos)] for idx in range(num_hypos): gen_tokens = prev_input_ids[idx].tolist() generated_ngram = generated_ngrams[idx] for ngram in zip(*[gen_tokens[i:] for i in range(ngram_size)]): prev_ngram_tuple = tuple(ngram[:-1]) generated_ngram[prev_ngram_tuple] = generated_ngram.get(prev_ngram_tuple, []) + [ngram[-1]] return generated_ngrams def _get_generated_ngrams(banned_ngrams, prev_input_ids, ngram_size, cur_len): # Before decoding the next token, prevent decoding of ngrams that have already appeared start_idx = cur_len + 1 - ngram_size ngram_idx = tuple(prev_input_ids[start_idx:cur_len].tolist()) return banned_ngrams.get(ngram_idx, []) def _calc_banned_ngram_tokens( ngram_size: int, prev_input_ids: torch.Tensor, num_hypos: int, cur_len: int ) -> List[Iterable[int]]: """Copied from fairseq for no_repeat_ngram in beam_search""" if cur_len + 1 < ngram_size: # return no banned tokens if we haven't generated no_repeat_ngram_size tokens yet return [[] for _ in range(num_hypos)] generated_ngrams = _get_ngrams(ngram_size, prev_input_ids, num_hypos) banned_tokens = [ _get_generated_ngrams(generated_ngrams[hypo_idx], prev_input_ids[hypo_idx], ngram_size, cur_len) for hypo_idx in range(num_hypos) ] return banned_tokens class NoRepeatNGramLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces no repetition of n-grams. See `Fairseq <https://github.com/pytorch/fairseq/blob/a07cb6f40480928c9e0548b737aadd36ee66ac76/fairseq/sequence_generator.py#L345>`__. Args: ngram_size (:obj:`int`): All ngrams of size :obj:`ngram_size` can only occur once. """ def __init__(self, ngram_size: int): if not isinstance(ngram_size, int) or ngram_size <= 0: raise ValueError(f"`ngram_size` has to be a strictly positive integer, but is {ngram_size}") self.ngram_size = ngram_size def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: num_batch_hypotheses = scores.shape[0] cur_len = input_ids.shape[-1] banned_batch_tokens = _calc_banned_ngram_tokens(self.ngram_size, input_ids, num_batch_hypotheses, cur_len) for i, banned_tokens in enumerate(banned_batch_tokens): scores[i, banned_tokens] = -float("inf") return scores class EncoderNoRepeatNGramLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces no repetition of encoder input ids n-grams for the decoder ids. See `ParlAI <https://github.com/facebookresearch/ParlAI/blob/master/parlai/core/torch_generator_agent.py#L1350>`__. Args: encoder_ngram_size (:obj:`int`): All ngrams of size :obj:`ngram_size` can only occur within the encoder input ids. 
encoder_input_ids (:obj:`int`): The encoder_input_ids that should not be repeated within the decoder ids. """ def __init__(self, encoder_ngram_size: int, encoder_input_ids: torch.LongTensor): if not isinstance(encoder_ngram_size, int) or encoder_ngram_size <= 0: raise ValueError( f"`encoder_ngram_size` has to be a strictly positive integer, but is {encoder_ngram_size}" ) self.ngram_size = encoder_ngram_size if len(encoder_input_ids.shape) == 1: encoder_input_ids = encoder_input_ids.unsqueeze(0) self.batch_size = encoder_input_ids.shape[0] self.generated_ngrams = _get_ngrams(encoder_ngram_size, encoder_input_ids, self.batch_size) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: # B x num_beams num_hypos = scores.shape[0] num_beams = num_hypos // self.batch_size cur_len = input_ids.shape[-1] banned_batch_tokens = [ _get_generated_ngrams( self.generated_ngrams[hypo_idx // num_beams], input_ids[hypo_idx], self.ngram_size, cur_len ) for hypo_idx in range(num_hypos) ] for i, banned_tokens in enumerate(banned_batch_tokens): scores[i, banned_tokens] = -float("inf") return scores class NoBadWordsLogitsProcessor(LogitsProcessor): """ :class:`transformers.LogitsProcessor` that enforces that specified sequences will never be sampled. Args: bad_words_ids (:obj:`List[List[int]]`): List of list of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer(bad_word, add_prefix_space=True).input_ids`. eos_token_id (:obj:`int`): The id of the `end-of-sequence` token. """ def __init__(self, bad_words_ids: Iterable[Iterable[int]], eos_token_id: int): if not isinstance(bad_words_ids, List) or len(bad_words_ids) == 0: raise ValueError(f"`bad_words_ids` has to be a non-emtpy list, but is {bad_words_ids}.") if any(not isinstance(bad_word_ids, list) for bad_word_ids in bad_words_ids): raise ValueError(f"`bad_words_ids` has to be a list of lists, but is {bad_words_ids}.") if any( any((not isinstance(token_id, (int, np.integer)) or token_id < 0) for token_id in bad_word_ids) for bad_word_ids in bad_words_ids ): raise ValueError( f"Each list in `bad_words_ids` has to be a list of positive integers, but is {bad_words_ids}." 
) self.bad_words_ids = list(filter(lambda bad_token_seq: bad_token_seq != [eos_token_id], bad_words_ids)) for banned_token_seq in self.bad_words_ids: assert len(banned_token_seq) > 0, "Banned words token sequences {} cannot have an empty list".format( bad_words_ids ) def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: banned_tokens = self._calc_banned_bad_words_ids(input_ids) scores = self._set_scores_to_inf_for_banned_tokens(scores, banned_tokens) return scores def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool: if len(tokens) == 0: # if bad word tokens is just one token always ban it return True elif len(tokens) > len(prev_tokens): # if bad word tokens are longer then prev input_ids they can't be equal return False elif prev_tokens[-len(tokens) :].tolist() == tokens: # if tokens match return True else: return False def _calc_banned_bad_words_ids(self, prev_input_ids: Iterable[int]) -> Iterable[int]: banned_tokens = [] for prev_input_ids_slice in prev_input_ids: banned_tokens_slice = [] for banned_token_seq in self.bad_words_ids: if self._tokens_match(prev_input_ids_slice, banned_token_seq[:-1]) is False: # if tokens do not match continue continue banned_tokens_slice.append(banned_token_seq[-1]) banned_tokens.append(banned_tokens_slice) return banned_tokens def _set_scores_to_inf_for_banned_tokens(self, scores: torch.Tensor, banned_tokens: List[List[int]]) -> None: """ Modifies the scores in place by setting the banned token positions to `-inf`. Banned token is expected to be a list of list of banned tokens to ban in the format [[batch index, vocabulary position],... Args: scores: logits distribution of shape (batch size, vocabulary size) banned_tokens: list of list of tokens to ban of length (batch_size) """ banned_mask_list = [] for idx, batch_banned_tokens in enumerate(banned_tokens): for token in batch_banned_tokens: banned_mask_list.append([idx, token]) if not banned_mask_list: return scores banned_mask = torch.LongTensor(banned_mask_list) indices = torch.ones(len(banned_mask)) # A sparse tensor is generated from a list of coordinates: [[0, 1], [0, 2], [2, 0]]. A conversion to dense tensor generates: # [ 0 1 1 ] # [ 0 0 0 ] # [ 1 0 0 ] banned_mask = ( torch.sparse.LongTensor(banned_mask.t(), indices, scores.size()).to(scores.device).to_dense().bool() ) scores = scores.masked_fill(banned_mask, -float("inf")) return scores class PrefixConstrainedLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces contrained generation and is useful for prefix-conditioned constrained generation. See `Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__ for more information. Args: prefix_allowed_tokens_fn: (:obj:`Callable[[int, torch.Tensor], List[int]]`): This function constraints the beam search to allowed tokens only at each step. This function takes 2 arguments :obj:`inputs_ids` and the batch ID :obj:`batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens :obj:`inputs_ids` and the batch ID :obj:`batch_id`. 
""" def __init__(self, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int): self._prefix_allowed_tokens_fn = prefix_allowed_tokens_fn self._num_beams = num_beams def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor) -> torch.FloatTensor: mask = torch.full_like(scores, -math.inf) for batch_id, beam_sent in enumerate(input_ids.view(-1, self._num_beams, input_ids.shape[-1])): for beam_id, sent in enumerate(beam_sent): mask[batch_id * self._num_beams + beam_id, self._prefix_allowed_tokens_fn(batch_id, sent)] = 0 return scores + mask class HammingDiversityLogitsProcessor(LogitsProcessor): r""" :class:`transformers.LogitsProcessor` that enforces diverse beam search. Note that this logits processor is only effective for :meth:`transformers.PretrainedModel.group_beam_search`. See `Diverse Beam Search: Decoding Diverse Solutions from Neural Sequence Models <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. Args: diversity_penalty (:obj:`float`): This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is enabled. num_beams (:obj:`int`): Number of beams used for group beam search. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. num_beam_groups (:obj:`int`): Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. """ def __init__(self, diversity_penalty: float, num_beams: int, num_beam_groups: int): if not isinstance(diversity_penalty, float) or (not diversity_penalty > 0.0): raise ValueError("`diversity_penalty` should be a float strictly larger than 0.") self._diversity_penalty = diversity_penalty if not isinstance(num_beams, int) or num_beams < 2: raise ValueError("`num_beams` should be an integer strictly larger than 1.") self._num_beams = num_beams if not isinstance(num_beam_groups, int) or num_beam_groups < 2: raise ValueError("`num_beam_groups` should be an integer strictly larger than 1.") if num_beam_groups > num_beams: raise ValueError("`beam_groups` has to be smaller or equal to `num_beams`.") self._num_sub_beams = num_beams // num_beam_groups def __call__( self, input_ids: torch.LongTensor, scores: torch.FloatTensor, current_tokens: torch.LongTensor, beam_group_idx: int, ) -> torch.FloatTensor: # hamming diversity: penalise using same token in current group which was used in previous groups at # the same time step batch_size = current_tokens.shape[0] // self._num_beams group_start_idx = beam_group_idx * self._num_sub_beams group_end_idx = min(group_start_idx + self._num_sub_beams, self._num_beams) group_size = group_end_idx - group_start_idx vocab_size = scores.shape[-1] if group_start_idx == 0: return scores for batch_idx in range(batch_size): # predicted tokens of last time step of previous groups previous_group_tokens = current_tokens[ batch_idx * self._num_beams : batch_idx * self._num_beams + group_start_idx ] token_frequency = torch.bincount(previous_group_tokens, minlength=vocab_size).to(scores.device) scores[batch_idx * group_size : (batch_idx + 1) * group_size] -= self._diversity_penalty * token_frequency return scores
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/generation_logits_process.py
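The coordinate trick used by `_set_scores_to_inf_for_banned_tokens` above can also be reproduced standalone. The sketch below is an editor-added illustration, not part of the repository: it uses `torch.sparse_coo_tensor`, the current spelling of the legacy `torch.sparse.LongTensor` constructor that the file uses, and a fabricated 3x3 score matrix.

# Editor-added sketch: build a dense boolean mask from (batch index, token id) coordinates,
# then set the banned positions of a dummy score matrix to -inf.
import torch

banned_coordinates = torch.tensor([[0, 1], [0, 2], [2, 0]])  # (batch index, vocab position) pairs
values = torch.ones(len(banned_coordinates))
scores = torch.zeros(3, 3)                                   # dummy (batch size, vocab size) logits

banned_mask = (
    torch.sparse_coo_tensor(banned_coordinates.t(), values, scores.size()).to_dense().bool()
)
# banned_mask:
# [[False,  True,  True],
#  [False, False, False],
#  [ True, False, False]]

masked_scores = scores.masked_fill(banned_mask, -float("inf"))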
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2018 The Google AI Language Team Authors, Facebook AI Research authors and The HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from dataclasses import dataclass from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union import torch from torch import Tensor from torch.nn import functional as F from utils.file_utils import ModelOutput from utils.generation_beam_search import BeamScorer, BeamSearchScorer from utils.generation_logits_process import ( EncoderNoRepeatNGramLogitsProcessor, HammingDiversityLogitsProcessor, LogitsProcessorList, MinLengthLogitsProcessor, NoBadWordsLogitsProcessor, NoRepeatNGramLogitsProcessor, PrefixConstrainedLogitsProcessor, RepetitionPenaltyLogitsProcessor, TemperatureLogitsWarper, TopKLogitsWarper, TopPLogitsWarper, ) logger = logging.getLogger(__name__) @dataclass class GreedySearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using greedy search. Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size, config.vocab_size)`). attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class GreedySearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using greedy search. 
Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size, config.vocab_size)`). encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size, sequence_length, hidden_size)`. decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class SampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using sampling. Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`). 
attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(num_return_sequences*batch_size, num_heads, generated_length, sequence_length)`. hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(num_return_sequences*batch_size, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class SampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed prediction scores of the language modeling head (scores for each vocabulary token before SoftMax) at each generation step. :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size*num_return_sequences, config.vocab_size)`). encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape :obj:`(batch_size*num_return_sequences, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences, generated_length, hidden_size)`. 
""" sequences: torch.LongTensor = None scores: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSearchDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam search. Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam . :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`). attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSearchEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam search. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_return_sequences)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. 
scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam . :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size*num_beams, config.vocab_size)`). attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size*num_beams*num_return_sequences, sequence_length, hidden_size)`. decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams*num_return_sequences, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSampleDecoderOnlyOutput(ModelOutput): """ Base class for outputs of decoder-only generation models using beam sample. Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_return_sequences, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_return_sequence)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam . 
:obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size*num_beams*num_return_sequences, config.vocab_size)`). attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None @dataclass class BeamSampleEncoderDecoderOutput(ModelOutput): """ Base class for outputs of encoder-decoder generation models using beam sampling. Hidden states and attention weights of the decoder (respectively the encoder) can be accessed via the encoder_attentions and the encoder_hidden_states attributes (respectively the decoder_attentions and the decoder_hidden_states attributes) Args: sequences (:obj:`torch.LongTensor` of shape :obj:`(batch_size*num_beams, sequence_length)`): The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. sequences_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_return_sequence)`, `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Final beam scores of the generated ``sequences``. scores (:obj:`tuple(torch.FloatTensor)` `optional`, returned when ``output_scores=True`` is passed or when ``config.output_scores=True``): Processed beam scores for each vocabulary token at each generation step. Beam scores consisting of log softmax scores for each vocabulary token and sum of log softmax of previously generated tokens in this beam . :obj:`(max_length,)`-shaped tuple of :obj:`torch.FloatTensor` with each tensor of shape :obj:`(batch_size*num_beams, config.vocab_size)`). encoder_attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple of :obj:`torch.FloatTensor` (one for each layer of the decoder) of shape :obj:`(batch_size, num_heads, sequence_length, sequence_length)`. encoder_hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of shape :obj:`(batch_size*num_beams, sequence_length, hidden_size)`. 
decoder_attentions (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_attentions=True`` is passed or ``config.output_attentions=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, num_heads, generated_length, sequence_length)`. decoder_hidden_states (:obj:`tuple(tuple(torch.FloatTensor))`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``): Tuple (one element for each generated token) of tuples (one element for each layer of the decoder) of :obj:`torch.FloatTensor` of shape :obj:`(batch_size*num_beams, generated_length, hidden_size)`. """ sequences: torch.LongTensor = None sequences_scores: Optional[torch.FloatTensor] = None scores: Optional[Tuple[torch.FloatTensor]] = None encoder_attentions: Optional[Tuple[torch.FloatTensor]] = None encoder_hidden_states: Optional[Tuple[torch.FloatTensor]] = None decoder_attentions: Optional[Tuple[Tuple[torch.FloatTensor]]] = None decoder_hidden_states: Optional[Tuple[Tuple[torch.FloatTensor]]] = None GreedySearchOutput = Union[GreedySearchEncoderDecoderOutput, GreedySearchDecoderOnlyOutput] SampleOutput = Union[SampleEncoderDecoderOutput, SampleDecoderOnlyOutput] BeamSearchOutput = Union[BeamSearchEncoderDecoderOutput, BeamSearchDecoderOnlyOutput] BeamSampleOutput = Union[BeamSampleEncoderDecoderOutput, BeamSampleDecoderOnlyOutput] class GenerationMixin: """ A class containing all of the functions supporting generation, to be used as a mixin in :class:`~transformers.PreTrainedModel`. """ def prepare_inputs_for_generation(self, input_ids: torch.LongTensor, **kwargs) -> Dict[str, Any]: """ Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to prepare inputs in the generate method. """ return {"input_ids": input_ids} def adjust_logits_during_generation(self, logits: torch.FloatTensor, **kwargs) -> torch.FloatTensor: """ Implement in subclasses of :class:`~transformers.PreTrainedModel` for custom behavior to adjust the logits in the generate method. 
""" return logits def _prepare_input_ids_for_generation(self, bos_token_id: int) -> torch.LongTensor: if bos_token_id is None: raise ValueError("`bos_token_id` has to be defined when no `input_ids` are provided.") return torch.ones((1, 1), dtype=torch.long, device=self.device) * bos_token_id def _prepare_attention_mask_for_generation( self, input_ids: torch.Tensor, pad_token_id: int, eos_token_id: int ) -> torch.LongTensor: is_pad_token_in_inputs_ids = (pad_token_id is not None) and (pad_token_id in input_ids) is_pad_token_not_equal_to_eos_token_id = (eos_token_id is None) or ( (eos_token_id is not None) and (pad_token_id != eos_token_id) ) if is_pad_token_in_inputs_ids and is_pad_token_not_equal_to_eos_token_id: return input_ids.ne(pad_token_id).long() return input_ids.new_ones(input_ids.shape) def _prepare_encoder_decoder_kwargs_for_generation( self, input_ids: torch.LongTensor, model_kwargs ) -> Dict[str, Any]: # retrieve encoder hidden states encoder = self.get_encoder() encoder_kwargs = { argument: value for argument, value in model_kwargs.items() if not argument.startswith("decoder_") } model_kwargs["encoder_outputs"]: ModelOutput = encoder(input_ids, return_dict=True, **encoder_kwargs) return model_kwargs def _prepare_decoder_input_ids_for_generation( self, input_ids: torch.LongTensor, decoder_start_token_id: int = None, bos_token_id: int = None ) -> torch.LongTensor: decoder_start_token_id = self._get_decoder_start_token_id(decoder_start_token_id, bos_token_id) decoder_input_ids = ( torch.ones((input_ids.shape[0], 1), dtype=input_ids.dtype, device=input_ids.device) * decoder_start_token_id ) return decoder_input_ids def _get_pad_token_id(self, pad_token_id: int = None, eos_token_id: int = None) -> int: if pad_token_id is None and eos_token_id is not None: logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id return pad_token_id def _get_decoder_start_token_id(self, decoder_start_token_id: int = None, bos_token_id: int = None) -> int: decoder_start_token_id = ( decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id ) bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id if decoder_start_token_id is not None: return decoder_start_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "decoder_start_token_id") and self.config.decoder.decoder_start_token_id is not None ): return self.config.decoder.decoder_start_token_id elif bos_token_id is not None: return bos_token_id elif ( hasattr(self.config, "decoder") and hasattr(self.config.decoder, "bos_token_id") and self.config.decoder.bos_token_id is not None ): return self.config.decoder.bos_token_id raise ValueError( "`decoder_start_token_id` or `bos_token_id` has to be defined for encoder-decoder generation." 
) @staticmethod def _expand_inputs_for_generation( input_ids: torch.LongTensor, expand_size: int = 1, is_encoder_decoder: bool = False, attention_mask: torch.LongTensor = None, encoder_outputs: ModelOutput = None, **model_kwargs, ) -> Tuple[torch.LongTensor, Dict[str, Any]]: expanded_return_idx = ( torch.arange(input_ids.shape[0]).view(-1, 1).repeat(1, expand_size).view(-1).to(input_ids.device) ) input_ids = input_ids.index_select(0, expanded_return_idx) if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = token_type_ids.index_select(0, expanded_return_idx) if attention_mask is not None: model_kwargs["attention_mask"] = attention_mask.index_select(0, expanded_return_idx) if is_encoder_decoder: assert encoder_outputs is not None encoder_outputs["last_hidden_state"] = encoder_outputs.last_hidden_state.index_select( 0, expanded_return_idx.to(encoder_outputs.last_hidden_state.device) ) model_kwargs["encoder_outputs"] = encoder_outputs return input_ids, model_kwargs @staticmethod def _init_sequence_length_for_generation( input_ids: torch.LongTensor, max_length: int ) -> Tuple[torch.Tensor, torch.Tensor, int]: unfinished_sequences = input_ids.new(input_ids.shape[0]).fill_(1) sequence_lengths = input_ids.new(input_ids.shape[0]).fill_(max_length) cur_len = input_ids.shape[-1] return sequence_lengths, unfinished_sequences, cur_len @staticmethod def _update_seq_length_for_generation( sequence_lengths: torch.LongTensor, unfinished_sequences: torch.LongTensor, cur_len: int, is_eos_in_next_token: torch.BoolTensor, ) -> Tuple[torch.LongTensor, torch.LongTensor]: # check if sentence is not finished yet is_sent_unfinished = unfinished_sequences.mul(is_eos_in_next_token.long()).bool() # update sentence length sequence_lengths = sequence_lengths.masked_fill(is_sent_unfinished, cur_len) unfinished_sequences = unfinished_sequences.mul((~is_eos_in_next_token).long()) return sequence_lengths, unfinished_sequences @staticmethod def _update_model_kwargs_for_generation( outputs: ModelOutput, model_kwargs: Dict[str, Any], is_encoder_decoder: bool = False ) -> Dict[str, Any]: # update past if "past_key_values" in outputs: model_kwargs["past"] = outputs.past_key_values elif "mems" in outputs: model_kwargs["past"] = outputs.mems elif "past_buckets_states" in outputs: model_kwargs["past"] = outputs.past_buckets_states else: model_kwargs["past"] = None # update token_type_ids with last value if "token_type_ids" in model_kwargs: token_type_ids = model_kwargs["token_type_ids"] model_kwargs["token_type_ids"] = torch.cat([token_type_ids, token_type_ids[:, -1].unsqueeze(-1)], dim=-1) # update attention mask if not is_encoder_decoder: if "attention_mask" in model_kwargs: attention_mask = model_kwargs["attention_mask"] model_kwargs["attention_mask"] = torch.cat( [attention_mask, attention_mask.new_ones((attention_mask.shape[0], 1))], dim=-1 ) return model_kwargs def _reorder_cache(self, past, beam_idx): raise NotImplementedError( f"Make sure that a `_reorder_cache` function is correctly implemented in {self.__class__.__module__} to enable beam search for {self.__class__}" ) def _get_logits_warper( self, top_k: int = None, top_p: float = None, temperature: float = None, num_beams: int = None ) -> LogitsProcessorList: """ This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant :obj:`~transformers.LogitsWarper` instances used for multinomial sampling. 
""" # init warp parameters top_k = top_k if top_k is not None else self.config.top_k top_p = top_p if top_p is not None else self.config.top_p temperature = temperature if temperature is not None else self.config.temperature # instantiate warpers list warpers = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if temperature is not None and temperature != 1.0: warpers.append(TemperatureLogitsWarper(temperature)) if top_k is not None and top_k != 0: warpers.append(TopKLogitsWarper(top_k=top_k, min_tokens_to_keep=(2 if num_beams > 1 else 1))) if top_p is not None and top_p < 1.0: warpers.append(TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=(2 if num_beams > 1 else 1))) return warpers def _get_logits_processor( self, repetition_penalty: float, no_repeat_ngram_size: int, encoder_no_repeat_ngram_size: int, encoder_input_ids: torch.LongTensor, bad_words_ids: List[List[int]], min_length: int, eos_token_id: int, prefix_allowed_tokens_fn: Callable[[int, torch.Tensor], List[int]], num_beams: int, num_beam_groups: int, diversity_penalty: float, ) -> LogitsProcessorList: """ This class returns a :obj:`~transformers.LogitsProcessorList` list object that contains all relevant :obj:`~transformers.LogitsProcessor` instances used to modify the scores of the language model head. """ # init warp parameters repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty no_repeat_ngram_size = ( no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size ) encoder_no_repeat_ngram_size = ( encoder_no_repeat_ngram_size if encoder_no_repeat_ngram_size is not None else self.config.encoder_no_repeat_ngram_size ) bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids min_length = min_length if min_length is not None else self.config.min_length eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id diversity_penalty = diversity_penalty if diversity_penalty is not None else self.config.diversity_penalty # instantiate processors list processors = LogitsProcessorList() # the following idea is largely copied from this PR: https://github.com/huggingface/transformers/pull/5420/files # all samplers can be found in `generation_utils_samplers.py` if diversity_penalty is not None and diversity_penalty > 0.0: processors.append( HammingDiversityLogitsProcessor( diversity_penalty=diversity_penalty, num_beams=num_beams, num_beam_groups=num_beam_groups ) ) if repetition_penalty is not None and repetition_penalty != 1.0: processors.append(RepetitionPenaltyLogitsProcessor(penalty=repetition_penalty)) if no_repeat_ngram_size is not None and no_repeat_ngram_size > 0: processors.append(NoRepeatNGramLogitsProcessor(no_repeat_ngram_size)) if encoder_no_repeat_ngram_size is not None and encoder_no_repeat_ngram_size > 0: if self.config.is_encoder_decoder: processors.append(EncoderNoRepeatNGramLogitsProcessor(encoder_no_repeat_ngram_size, encoder_input_ids)) else: raise ValueError( "It's impossible to use `encoder_no_repeat_ngram_size` with decoder-only architecture" ) if bad_words_ids is not None: processors.append(NoBadWordsLogitsProcessor(bad_words_ids, eos_token_id)) if min_length is not None and eos_token_id is not None and min_length > -1: processors.append(MinLengthLogitsProcessor(min_length, eos_token_id)) if prefix_allowed_tokens_fn is not 
None: processors.append(PrefixConstrainedLogitsProcessor(prefix_allowed_tokens_fn, num_beams)) return processors @torch.no_grad() def generate( self, input_ids: Optional[torch.LongTensor] = None, max_length: Optional[int] = None, min_length: Optional[int] = None, do_sample: Optional[bool] = None, early_stopping: Optional[bool] = None, num_beams: Optional[int] = None, temperature: Optional[float] = None, top_k: Optional[int] = None, top_p: Optional[float] = None, repetition_penalty: Optional[float] = None, bad_words_ids: Optional[Iterable[int]] = None, bos_token_id: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, length_penalty: Optional[float] = None, no_repeat_ngram_size: Optional[int] = None, encoder_no_repeat_ngram_size: Optional[int] = None, num_return_sequences: Optional[int] = None, decoder_start_token_id: Optional[int] = None, use_cache: Optional[bool] = None, num_beam_groups: Optional[int] = None, diversity_penalty: Optional[float] = None, prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[GreedySearchOutput, SampleOutput, BeamSearchOutput, BeamSampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head. The method currently supports greedy decoding, multinomial sampling, beam-search decoding, and beam-search multinomial sampling. Apart from :obj:`input_ids` and :obj:`attention_mask`, all the arguments below will default to the value of the attribute of the same name inside the :class:`~transformers.PretrainedConfig` of the model. The default values indicated are the default values of those config. Most of these parameters are explained in more detail in `this blog post <https://huggingface.co/blog/how-to-generate>`__. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. min_length (:obj:`int`, `optional`, defaults to 10): The minimum length of the sequence to be generated. do_sample (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether or not to use sampling ; use greedy decoding otherwise. early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. num_beams (:obj:`int`, `optional`, defaults to 1): Number of beams for beam search. 1 means no beam search. temperature (:obj:`float`, `optional`, defaults tp 1.0): The value used to module the next token probabilities. top_k (:obj:`int`, `optional`, defaults to 50): The number of highest probability vocabulary tokens to keep for top-k-filtering. top_p (:obj:`float`, `optional`, defaults to 1.0): If set to float < 1, only the most probable tokens with probabilities that add up to :obj:`top_p` or higher are kept for generation. repetition_penalty (:obj:`float`, `optional`, defaults to 1.0): The parameter for repetition penalty. 1.0 means no penalty. See `this paper <https://arxiv.org/pdf/1909.05858.pdf>`__ for more details. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. 
bos_token_id (:obj:`int`, `optional`): The id of the `beginning-of-sequence` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. length_penalty (:obj:`float`, `optional`, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): If set to int > 0, all ngrams of that size can only occur once. encoder_no_repeat_ngram_size (:obj:`int`, `optional`, defaults to 0): If set to int > 0, all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the ``decoder_input_ids``. bad_words_ids(:obj:`List[List[int]]`, `optional`): List of token ids that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use :obj:`tokenizer(bad_word, add_prefix_space=True).input_ids`. num_return_sequences(:obj:`int`, `optional`, defaults to 1): The number of independently computed returned sequences for each element in the batch. attention_mask (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): Mask to avoid performing attention on padding token indices. Mask values are in ``[0, 1]``, 1 for tokens that are not masked, and 0 for masked tokens. If not provided, will default to a tensor the same shape as :obj:`input_ids` that masks the pad token. `What are attention masks? <../glossary.html#attention-mask>`__ decoder_start_token_id (:obj:`int`, `optional`): If an encoder-decoder model starts decoding with a different token than `bos`, the id of that token. use_cache: (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not the model should use the past last key/values attentions (if applicable to the model) to speed up decoding. num_beam_groups (:obj:`int`, `optional`, defaults to 1): Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of beams. `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. diversity_penalty (:obj:`float`, `optional`, defaults to 0.0): This value is subtracted from a beam's score if it generates a token same as any beam from other group at a particular time. Note that :obj:`diversity_penalty` is only effective if ``group beam search`` is enabled. prefix_allowed_tokens_fn: (:obj:`Callable[[int, torch.Tensor], List[int]]`, `optional`): If provided, this function constraints the beam search to allowed tokens only at each step. If not provided no constraint is applied. This function takes 2 arguments :obj:`inputs_ids` and the batch ID :obj:`batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on the previously generated tokens :obj:`inputs_ids` and the batch ID :obj:`batch_id`. This argument is useful for constrained generation conditioned on the prefix, as described in `Autoregressive Entity Retrieval <https://arxiv.org/abs/2010.00904>`__. output_attentions (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more details. output_hidden_states (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return trhe hidden states of all layers. See ``hidden_states`` under returned tensors for more details. 
output_scores (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. model_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If the model is an encoder-decoder model, encoder specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with `decoder_`. Return: :class:`~transformers.file_utils.ModelOutput` or :obj:`torch.LongTensor`: A :class:`~transformers.file_utils.ModelOutput` (if ``return_dict_in_generate=True`` or when ``config.return_dict_in_generate=True``) or a :obj:`torch.FloatTensor`. If the model is `not` an encoder-decoder model (``model.config.is_encoder_decoder=False``), the possible :class:`~transformers.file_utils.ModelOutput` types are: - :class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput`, - :class:`~transformers.generation_utils.SampleDecoderOnlyOutput`, - :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`, - :class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput` If the model is an encoder-decoder model (``model.config.is_encoder_decoder=True``), the possible :class:`~transformers.file_utils.ModelOutput` types are: - :class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput`, - :class:`~transformers.generation_utils.SampleEncoderDecoderOutput`, - :class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput`, - :class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput` Examples:: >>> from transformers import AutoTokenizer, AutoModelForCausalLM, AutoModelForSeq2SeqLM >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2") >>> # do greedy decoding without providing a prompt >>> outputs = model.generate(max_length=40) >>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> document = ( ... "at least two people were killed in a suspected bomb attack on a passenger bus " ... "in the strife-torn southern philippines on monday , the military said." ... ) >>> # encode input contex >>> input_ids = tokenizer(document, return_tensors="pt").input_ids >>> # generate 3 independent sequences using beam search decoding (5 beams) >>> # with T5 encoder-decoder model conditioned on short news article. 
>>> outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3) >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True)) >>> tokenizer = AutoTokenizer.from_pretrained("distilgpt2") >>> model = AutoModelForCausalLM.from_pretrained("distilgpt2") >>> input_context = "The dog" >>> # encode input context >>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids >>> # generate 3 candidates using sampling >>> outputs = model.generate(input_ids=input_ids, max_length=20, num_return_sequences=3, do_sample=True) >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True)) >>> tokenizer = AutoTokenizer.from_pretrained("ctrl") >>> model = AutoModelForCausalLM.from_pretrained("ctrl") >>> # "Legal" is one of the control codes for ctrl >>> input_context = "Legal My neighbor is" >>> # encode input context >>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids >>> outputs = model.generate(input_ids=input_ids, max_length=20, repetition_penalty=1.2) >>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True)) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> input_context = "My cute dog" >>> # get tokens of words that should not be generated >>> bad_words_ids = [tokenizer(bad_word, add_prefix_space=True).input_ids for bad_word in ["idiot", "stupid", "shut up"]] >>> # encode input context >>> input_ids = tokenizer(input_context, return_tensors="pt").input_ids >>> # generate sequences without allowing bad_words to be generated >>> outputs = model.generate(input_ids=input_ids, max_length=20, do_sample=True, bad_words_ids=bad_words_ids) >>> print("Generated:", tokenizer.decode(outputs[0], skip_special_tokens=True)) """ # set init values num_beams = num_beams if num_beams is not None else self.config.num_beams num_beam_groups = num_beam_groups if num_beam_groups is not None else self.config.num_beam_groups max_length = max_length if max_length is not None else self.config.max_length do_sample = do_sample if do_sample is not None else self.config.do_sample num_return_sequences = ( num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences ) pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) model_kwargs["output_attentions"] = output_attentions model_kwargs["output_hidden_states"] = output_hidden_states if input_ids is None: # init `input_ids` with bos_token_id input_ids = self._prepare_input_ids_for_generation(bos_token_id) if model_kwargs.get("attention_mask", None) is None: # init `attention_mask` depending on `pad_token_id` model_kwargs["attention_mask"] = self._prepare_attention_mask_for_generation( input_ids, pad_token_id, eos_token_id ) # special case if pad_token_id is not defined if pad_token_id is None and eos_token_id is not None: 
logger.warning(f"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.") pad_token_id = eos_token_id # Storing encoder_input_ids for logits_processor that could use them encoder_input_ids = input_ids if self.config.is_encoder_decoder else None if self.config.is_encoder_decoder: # add encoder_outputs to model_kwargs model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(input_ids, model_kwargs) # set input_ids as decoder_input_ids if "decoder_input_ids" in model_kwargs: input_ids = model_kwargs.pop("decoder_input_ids") else: input_ids = self._prepare_decoder_input_ids_for_generation( input_ids, decoder_start_token_id=decoder_start_token_id, bos_token_id=bos_token_id ) if "encoder_outputs" not in model_kwargs or not isinstance(model_kwargs["encoder_outputs"], ModelOutput): raise ValueError("Make sure that `model_kwargs` include `encoder_outputs` of type `ModelOutput`.") if input_ids.shape[-1] >= max_length: input_ids_string = "decoder_input_ids" if self.config.is_encoder_decoder else "input_ids" logger.warning( f"Input length of {input_ids_string} is {input_ids.shape[-1]}, but ``max_length`` is set to {max_length}." "This can lead to unexpected behavior. You should consider increasing ``config.max_length`` or ``max_length``." ) # determine generation mode is_greedy_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is False is_sample_gen_mode = (num_beams == 1) and (num_beam_groups == 1) and do_sample is True is_beam_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is False is_beam_sample_gen_mode = (num_beams > 1) and (num_beam_groups == 1) and do_sample is True is_group_beam_gen_mode = (num_beams > 1) and (num_beam_groups > 1) if num_beam_groups > num_beams: raise ValueError("`num_beam_groups` has to be smaller or equal to `num_beams`") if is_group_beam_gen_mode and do_sample is True: raise ValueError( "Diverse beam search cannot be used in sampling mode. Make sure that `do_sample` is set to `False`." ) # set model_kwargs model_kwargs["use_cache"] = use_cache # get distribution pre_processing samplers logits_processor = self._get_logits_processor( repetition_penalty=repetition_penalty, no_repeat_ngram_size=no_repeat_ngram_size, encoder_no_repeat_ngram_size=encoder_no_repeat_ngram_size, encoder_input_ids=encoder_input_ids, bad_words_ids=bad_words_ids, min_length=min_length, eos_token_id=eos_token_id, prefix_allowed_tokens_fn=prefix_allowed_tokens_fn, num_beams=num_beams, num_beam_groups=num_beam_groups, diversity_penalty=diversity_penalty, ) if is_greedy_gen_mode: if num_return_sequences > 1: raise ValueError( f"num_return_sequences has to be 1, but is {num_return_sequences} when doing greedy search." 
) # greedy search return self.greedy_search( input_ids, logits_processor=logits_processor, max_length=max_length, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **model_kwargs, ) elif is_sample_gen_mode: # get probability distribution warper logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams ) # expand input_ids with `num_return_sequences` additional sequences per batch input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) # sample return self.sample( input_ids, logits_processor=logits_processor, logits_warper=logits_warper, max_length=max_length, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **model_kwargs, ) elif is_beam_gen_mode: batch_size = input_ids.shape[0] length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") beam_scorer = BeamSearchScorer( batch_size=batch_size, max_length=max_length, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, ) # interleave with `num_beams` input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) return self.beam_search( input_ids, beam_scorer, logits_processor=logits_processor, max_length=max_length, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **model_kwargs, ) elif is_beam_sample_gen_mode: logits_warper = self._get_logits_warper( top_k=top_k, top_p=top_p, temperature=temperature, num_beams=num_beams ) batch_size = input_ids.shape[0] * num_return_sequences length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty beam_scorer = BeamSearchScorer( batch_size=batch_size, max_length=max_length, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, ) # interleave with `num_beams * num_return_sequences` input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams * num_return_sequences, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs, ) return self.beam_sample( input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, max_length=max_length, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **model_kwargs, ) elif is_group_beam_gen_mode: batch_size = input_ids.shape[0] length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping if num_return_sequences > num_beams: raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.") if num_beams % num_beam_groups != 0: raise ValueError("`num_beams` should be divisible by `num_beam_groups` for group beam search.") diverse_beam_scorer = 
BeamSearchScorer( batch_size=batch_size, max_length=max_length, num_beams=num_beams, device=self.device, length_penalty=length_penalty, do_early_stopping=early_stopping, num_beam_hyps_to_keep=num_return_sequences, num_beam_groups=num_beam_groups, ) # interleave with `num_beams` input_ids, model_kwargs = self._expand_inputs_for_generation( input_ids, expand_size=num_beams, is_encoder_decoder=self.config.is_encoder_decoder, **model_kwargs ) return self.group_beam_search( input_ids, diverse_beam_scorer, logits_processor=logits_processor, max_length=max_length, pad_token_id=pad_token_id, eos_token_id=eos_token_id, output_scores=output_scores, return_dict_in_generate=return_dict_in_generate, **model_kwargs, ) def greedy_search( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[GreedySearchOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using greedy decoding. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. logits_processor (:obj:`LogitsProcessorList`, `optional`): An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from :class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling head applied at each generation step. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. output_attentions (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more details. output_hidden_states (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return trhe hidden states of all layers. See ``hidden_states`` under returned tensors for more details. output_scores (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. model_kwargs: Additional model specific keyword arguments will be forwarded to the :obj:`forward` function of the model. If model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`. Return: :class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput`, :class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput` or obj:`torch.LongTensor`: A :obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a :class:`~transformers.generation_utils.GreedySearchDecoderOnlyOutput` if ``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a :class:`~transformers.generation_utils.GreedySearchEncoderDecoderOutput` if ``model.config.is_encoder_decoder=True``. 
Examples:: >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList([ ... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id), ... ]) >>> outputs = model.greedy_search(input_ids, logits_processor=logits_processor) >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True)) """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() max_length = max_length if max_length is not None else self.config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # init sequence length tensors sequence_lengths, unfinished_sequences, cur_len = self._init_sequence_length_for_generation( input_ids, max_length ) while cur_len < max_length: # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = outputs.logits[:, -1, :] # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_logits,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # pre-process distribution next_tokens_scores = logits_processor(input_ids, next_token_logits) # argmax next_tokens = torch.argmax(next_tokens_scores, dim=-1) # add code that transfomers next_tokens to tokens_to_add if eos_token_id is not None: assert pad_token_id is not None, "If eos_token_id is defined, make sure that 
pad_token_id is defined." next_tokens = next_tokens * unfinished_sequences + (pad_token_id) * (1 - unfinished_sequences) # add token and increase length by one input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) # update sequence length if eos_token_id is not None: sequence_lengths, unfinished_sequences = self._update_seq_length_for_generation( sequence_lengths, unfinished_sequences, cur_len, next_tokens == eos_token_id ) # update model kwargs model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) # stop when there is a </s> in each sentence, or if we exceed the maximul length if unfinished_sequences.max() == 0: break # increase cur_len cur_len = cur_len + 1 if return_dict_in_generate: if self.config.is_encoder_decoder: return GreedySearchEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return GreedySearchDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def sample( self, input_ids: torch.LongTensor, logits_processor: Optional[LogitsProcessorList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[SampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using multinomial sampling. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. logits_processor (:obj:`LogitsProcessorList`, `optional`): An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from :class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (:obj:`LogitsProcessorList`, `optional`): An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from :class:`~transformers.LogitsWarper` used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. output_attentions (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more details. output_hidden_states (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return trhe hidden states of all layers. See ``hidden_states`` under returned tensors for more details. output_scores (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. 
return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. model_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`. Return: :class:`~transformers.generation_utils.SampleDecoderOnlyOutput`, :class:`~transformers.generation_utils.SampleEncoderDecoderOutput` or obj:`torch.LongTensor`: A :obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a :class:`~transformers.generation_utils.SampleDecoderOnlyOutput` if ``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a :class:`~transformers.generation_utils.SampleEncoderDecoderOutput` if ``model.config.is_encoder_decoder=True``. Examples:: >>> from transformers import ( ... AutoTokenizer, ... AutoModelForCausalLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... ) >>> tokenizer = AutoTokenizer.from_pretrained("gpt2") >>> model = AutoModelForCausalLM.from_pretrained("gpt2") >>> # set pad_token_id to eos_token_id because GPT2 does not have a EOS token >>> model.config.pad_token_id = model.config.eos_token_id >>> input_prompt = "Today is a beautiful day, and" >>> input_ids = tokenizer(input_prompt, return_tensors="pt").input_ids >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList([ ... MinLengthLogitsProcessor(15, eos_token_id=model.config.eos_token_id), ... ]) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList([ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ]) >>> outputs = model.sample(input_ids, logits_processor=logits_processor, logits_warper=logits_warper) >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True)) """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() logits_warper = logits_warper if logits_warper is not None else LogitsProcessorList() max_length = max_length if max_length is not None else self.config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) # init sequence length tensors sequence_lengths, 
unfinished_sequences, cur_len = self._init_sequence_length_for_generation( input_ids, max_length ) # auto-regressive generation while cur_len < max_length: # prepare model inputs model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) # forward pass to get next token outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = outputs.logits[:, -1, :] # pre-process distribution next_token_scores = logits_processor(input_ids, next_token_logits) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # sample probs = F.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=1).squeeze(1) # add code that transfomers next_tokens to tokens_to_add if eos_token_id is not None: assert pad_token_id is not None, "If eos_token_id is defined, make sure that pad_token_id is defined." next_tokens = next_tokens * unfinished_sequences + (pad_token_id) * (1 - unfinished_sequences) # add token and increase length by one input_ids = torch.cat([input_ids, next_tokens[:, None]], dim=-1) cur_len = cur_len + 1 # update sequence length if eos_token_id is not None: sequence_lengths, unfinished_sequences = self._update_seq_length_for_generation( sequence_lengths, unfinished_sequences, cur_len, next_tokens == eos_token_id ) # stop when there is a </s> in each sentence, or if we exceed the maximul length if unfinished_sequences.max() == 0: break # update model kwargs model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if return_dict_in_generate: if self.config.is_encoder_decoder: return SampleEncoderDecoderOutput( sequences=input_ids, scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return SampleDecoderOnlyOutput( sequences=input_ids, scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return input_ids def beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSearchOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using beam search decoding. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. beam_scorer (:obj:`BeamScorer`): An derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are constructed, stored and sorted during generation. 
For more information, the documentation of :class:`~transformers.BeamScorer` should be read. logits_processor (:obj:`LogitsProcessorList`, `optional`): An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from :class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling head applied at each generation step. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. output_attentions (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more details. output_hidden_states (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return trhe hidden states of all layers. See ``hidden_states`` under returned tensors for more details. output_scores (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. model_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`. Return: :class:`~transformers.generation_utilsBeamSearchDecoderOnlyOutput`, :class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` or obj:`torch.LongTensor`: A :obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput` if ``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a :class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` if ``model.config.is_encoder_decoder=True``. Examples:: >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList([ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... 
]) >>> outputs = model.beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True)) """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() max_length = max_length if max_length is not None else self.config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape assert ( num_beams * batch_size == batch_beam_size ), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
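# NOTE (added, illustrative comment -- not part of the original file): `input_ids` is expected to be
# laid out beam-major per batch element, i.e. with batch_size=2 and num_beams=3 the rows are
# [b0_beam0, b0_beam1, b0_beam2, b1_beam0, b1_beam1, b1_beam2], so
# batch_beam_size == batch_size * num_beams; the assert above guards exactly this layout.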
beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores[:, 1:] = -1e9 beam_scores = beam_scores.view((batch_size * num_beams,)) while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = outputs.logits[:, -1, :] # adjust tokens for Bart, *e.g.* next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length ) next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size) next_token_scores = logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * num_beams, dim=1, largest=True, sorted=True ) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) cur_len = cur_len + 1 model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if beam_scorer.is_done: break sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def beam_sample( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, logits_warper: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] 
= None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ) -> Union[BeamSampleOutput, torch.LongTensor]: r""" Generates sequences for models with a language modeling head using beam search with multinomial sampling. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. beam_scorer (:obj:`BeamScorer`): A derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of :class:`~transformers.BeamScorer` should be read. logits_processor (:obj:`LogitsProcessorList`, `optional`): An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from :class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling head applied at each generation step. logits_warper (:obj:`LogitsProcessorList`, `optional`): An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from :class:`~transformers.LogitsWarper` used to warp the prediction score distribution of the language modeling head applied before multinomial sampling at each generation step. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. output_attentions (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more details. output_hidden_states (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return trhe hidden states of all layers. See ``hidden_states`` under returned tensors for more details. output_scores (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. model_kwargs: Additional model specific kwargs will be forwarded to the :obj:`forward` function of the model. If model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`. Return: :class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput`, :class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput` or obj:`torch.LongTensor`: A :obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a :class:`~transformers.generation_utils.BeamSampleDecoderOnlyOutput` if ``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a :class:`~transformers.generation_utils.BeamSampleEncoderDecoderOutput` if ``model.config.is_encoder_decoder=True``. Examples:: >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... TopKLogitsWarper, ... TemperatureLogitsWarper, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" 
>>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run beam search using 3 beams >>> num_beams = 3 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... ) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList([ ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id) ... ]) >>> # instantiate logits processors >>> logits_warper = LogitsProcessorList([ ... TopKLogitsWarper(50), ... TemperatureLogitsWarper(0.7), ... ]) >>> outputs = model.beam_sample( ... input_ids, beam_scorer, logits_processor=logits_processor, logits_warper=logits_warper, **model_kwargs ... ) >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True)) """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() max_length = max_length if max_length is not None else self.config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams batch_beam_size, cur_len = input_ids.shape beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device) beam_scores = beam_scores.view((batch_size * num_beams,)) while cur_len < max_length: model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) next_token_logits = outputs.logits[:, -1, :] # adjust token scores (a no-op by default) next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length ) next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * num_beams, vocab_size) next_token_scores = 
logits_processor(input_ids, next_token_scores) next_token_scores = next_token_scores + beam_scores[:, None].expand_as(next_token_scores) next_token_scores = logits_warper(input_ids, next_token_scores) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (next_token_scores,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) # reshape for beam search vocab_size = next_token_scores.shape[-1] next_token_scores = next_token_scores.view(batch_size, num_beams * vocab_size) probs = F.softmax(next_token_scores, dim=-1) next_tokens = torch.multinomial(probs, num_samples=2 * num_beams) next_token_scores = torch.gather(next_token_scores, -1, next_tokens) next_token_scores, _indices = torch.sort(next_token_scores, descending=True, dim=1) next_tokens = torch.gather(next_tokens, -1, _indices) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids = torch.cat([input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) cur_len = cur_len + 1 model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], beam_idx) if beam_scorer.is_done: break sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def group_beam_search( self, input_ids: torch.LongTensor, beam_scorer: BeamScorer, logits_processor: Optional[LogitsProcessorList] = None, max_length: Optional[int] = None, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, output_scores: Optional[bool] = None, return_dict_in_generate: Optional[bool] = None, **model_kwargs, ): r""" Generates sequences for models with a language modeling head using beam search decoding. Parameters: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`): The sequence used as a prompt for the generation. If :obj:`None` the method initializes it as an empty :obj:`torch.LongTensor` of shape :obj:`(1,)`. 
beam_scorer (:obj:`BeamScorer`): An derived instance of :class:`~transformers.BeamScorer` that defines how beam hypotheses are constructed, stored and sorted during generation. For more information, the documentation of :class:`~transformers.BeamScorer` should be read. logits_processor (:obj:`LogitsProcessorList`, `optional`): An instance of :class:`~transformers.LogitsProcessorList`. List of instances of class derived from :class:`~transformers.LogitsProcessor` used to modify the prediction scores of the language modeling head applied at each generation step. max_length (:obj:`int`, `optional`, defaults to 20): The maximum length of the sequence to be generated. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. output_attentions (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned tensors for more details. output_hidden_states (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return trhe hidden states of all layers. See ``hidden_states`` under returned tensors for more details. output_scores (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return the prediction scores. See ``scores`` under returned tensors for more details. return_dict_in_generate (:obj:`bool`, `optional`, defaults to `False`): Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. model_kwargs: Additional model specific kwargs that will be forwarded to the :obj:`forward` function of the model. If model is an encoder-decoder model the kwargs should include :obj:`encoder_outputs`. Return: :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput`, :class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` or obj:`torch.LongTensor`: A :obj:`torch.LongTensor` containing the generated tokens (default behaviour) or a :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput` if :class:`~transformers.generation_utils.BeamSearchDecoderOnlyOutput` if ``model.config.is_encoder_decoder=False`` and ``return_dict_in_generate=True`` or a :class:`~transformers.generation_utils.BeamSearchEncoderDecoderOutput` if ``model.config.is_encoder_decoder=True``. Examples:: >>> from transformers import ( ... AutoTokenizer, ... AutoModelForSeq2SeqLM, ... LogitsProcessorList, ... MinLengthLogitsProcessor, ... HammingDiversityLogitsProcessor, ... BeamSearchScorer, ... ) >>> import torch >>> tokenizer = AutoTokenizer.from_pretrained("t5-base") >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> encoder_input_str = "translate English to German: How old are you?" >>> encoder_input_ids = tokenizer(encoder_input_str, return_tensors="pt").input_ids >>> # lets run diverse beam search using 6 beams >>> num_beams = 6 >>> # define decoder start token ids >>> input_ids = torch.ones((num_beams, 1), device=model.device, dtype=torch.long) >>> input_ids = input_ids * model.config.decoder_start_token_id >>> # add encoder_outputs to model keyword arguments >>> model_kwargs = { ... "encoder_outputs": model.get_encoder()(encoder_input_ids.repeat_interleave(num_beams, dim=0), return_dict=True) ... } >>> # instantiate beam scorer >>> beam_scorer = BeamSearchScorer( ... batch_size=1, ... max_length=model.config.max_length, ... num_beams=num_beams, ... device=model.device, ... num_beam_groups=3 ... 
) >>> # instantiate logits processors >>> logits_processor = LogitsProcessorList([ ... HammingDiversityLogitsProcessor(5.5, num_beams=6, num_beam_groups=3), ... MinLengthLogitsProcessor(5, eos_token_id=model.config.eos_token_id), ... ]) >>> outputs = model.group_beam_search(input_ids, beam_scorer, logits_processor=logits_processor, **model_kwargs) >>> print("Generated:", tokenizer.batch_decode(outputs, skip_special_tokens=True)) """ # init values logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList() max_length = max_length if max_length is not None else self.config.max_length pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id output_scores = output_scores if output_scores is not None else self.config.output_scores output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict_in_generate = ( return_dict_in_generate if return_dict_in_generate is not None else self.config.return_dict_in_generate ) # init attention / hidden states / scores tuples scores = () if (return_dict_in_generate and output_scores) else None decoder_attentions = () if (return_dict_in_generate and output_attentions) else None decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None # if model is an encoder-decoder, retrieve encoder attention weights and hidden states if return_dict_in_generate and self.config.is_encoder_decoder: encoder_attentions = model_kwargs["encoder_outputs"].get("attentions") if output_attentions else None encoder_hidden_states = ( model_kwargs["encoder_outputs"].get("hidden_states") if output_hidden_states else None ) batch_size = len(beam_scorer._beam_hyps) num_beams = beam_scorer.num_beams num_beam_groups = beam_scorer.num_beam_groups num_sub_beams = num_beams // num_beam_groups device = input_ids.device batch_beam_size, cur_len = input_ids.shape assert ( num_beams * batch_size == batch_beam_size ), f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}." beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device) # initialise score of first beam of each group with 0 and the rest with 1e-9. This ensures that the beams in # the same group don't produce same tokens everytime. 
beam_scores[:, ::num_sub_beams] = 0 beam_scores = beam_scores.view((batch_size * num_beams,)) while cur_len < max_length: # predicted tokens in cur_len step current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device) # indices which will form the beams in the next time step reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device) # do one decoder step on all beams of all sentences in batch model_inputs = self.prepare_inputs_for_generation(input_ids, **model_kwargs) outputs = self( **model_inputs, return_dict=True, output_attentions=output_attentions, output_hidden_states=output_hidden_states, ) for beam_group_idx in range(num_beam_groups): group_start_idx = beam_group_idx * num_sub_beams group_end_idx = min(group_start_idx + num_sub_beams, num_beams) group_size = group_end_idx - group_start_idx # indices of beams of current group among all sentences in batch batch_group_indices = [] if output_scores: processed_score = torch.zeros_like(outputs.logits[:, -1, :]) for batch_idx in range(batch_size): batch_group_indices.extend( [batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)] ) group_input_ids = input_ids[batch_group_indices] # select outputs of beams of current group only next_token_logits = outputs.logits[batch_group_indices, -1, :] # adjust tokens for Bart, *e.g.* next_token_logits = self.adjust_logits_during_generation( next_token_logits, cur_len=cur_len, max_length=max_length ) next_token_scores = F.log_softmax(next_token_logits, dim=-1) # (batch_size * group_size, vocab_size) vocab_size = next_token_scores.shape[-1] next_token_scores = logits_processor( group_input_ids, next_token_scores, current_tokens=current_tokens, beam_group_idx=beam_group_idx ) next_token_scores = next_token_scores + beam_scores[batch_group_indices].unsqueeze(-1).expand_as( next_token_scores ) if output_scores: processed_score[batch_group_indices] = next_token_scores # reshape for beam search next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size) next_token_scores, next_tokens = torch.topk( next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True ) next_indices = next_tokens // vocab_size next_tokens = next_tokens % vocab_size # stateless beam_outputs = beam_scorer.process( group_input_ids, next_token_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id, ) beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"] beam_next_tokens = beam_outputs["next_beam_tokens"] beam_idx = beam_outputs["next_beam_indices"] input_ids[batch_group_indices] = group_input_ids[beam_idx] group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1) current_tokens[batch_group_indices] = group_input_ids[:, -1] # (beam_idx // group_size) -> batch_idx # (beam_idx % group_size) -> offset of idx inside the group reordering_indices[batch_group_indices] = ( num_beams * (beam_idx // group_size) + group_start_idx + (beam_idx % group_size) ) # Store scores, attentions and hidden_states when required if return_dict_in_generate: if output_scores: scores += (processed_score,) if output_attentions: decoder_attentions += ( (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,) ) if output_hidden_states: decoder_hidden_states += ( (outputs.decoder_hidden_states,) if self.config.is_encoder_decoder else (outputs.hidden_states,) ) model_kwargs = self._update_model_kwargs_for_generation( outputs, model_kwargs, 
is_encoder_decoder=self.config.is_encoder_decoder ) if model_kwargs["past"] is not None: model_kwargs["past"] = self._reorder_cache(model_kwargs["past"], reordering_indices) input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1) cur_len = cur_len + 1 if beam_scorer.is_done: break sequence_outputs = beam_scorer.finalize( input_ids, beam_scores, next_tokens, next_indices, pad_token_id=pad_token_id, eos_token_id=eos_token_id ) if return_dict_in_generate: if not output_scores: sequence_outputs["sequence_scores"] = None if self.config.is_encoder_decoder: return BeamSearchEncoderDecoderOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, encoder_attentions=encoder_attentions, encoder_hidden_states=encoder_hidden_states, decoder_attentions=decoder_attentions, decoder_hidden_states=decoder_hidden_states, ) else: return BeamSearchDecoderOnlyOutput( sequences=sequence_outputs["sequences"], sequences_scores=sequence_outputs["sequence_scores"], scores=scores, attentions=decoder_attentions, hidden_states=decoder_hidden_states, ) else: return sequence_outputs["sequences"] def top_k_top_p_filtering( logits: torch.FloatTensor, top_k: int = 0, top_p: float = 1.0, filter_value: float = -float("Inf"), min_tokens_to_keep: int = 1, ) -> torch.FloatTensor: """ Filter a distribution of logits using top-k and/or nucleus (top-p) filtering Args: logits: logits distribution shape (batch size, vocabulary size) if top_k > 0: keep only top k tokens with highest probability (top-k filtering). if top_p < 1.0: keep the top tokens with cumulative probability >= top_p (nucleus filtering). Nucleus filtering is described in Holtzman et al. (http://arxiv.org/abs/1904.09751) Make sure we keep at least min_tokens_to_keep per batch example in the output From: https://gist.github.com/thomwolf/1a5a29f6962089e871b94cbd09daf317 """ if top_k > 0: logits = TopKLogitsWarper(top_k=top_k, filter_value=filter_value, min_tokens_to_keep=min_tokens_to_keep)( None, logits ) if 0 <= top_p <= 1.0: logits = TopPLogitsWarper(top_p=top_p, min_tokens_to_keep=min_tokens_to_keep)(None, logits) return logits
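# --- Added illustrative sketch (not part of the original file) ---
# Minimal, hedged example of how the module-level `top_k_top_p_filtering` helper above can be
# used to filter logits before multinomial sampling; the tensor sizes and the top_k/top_p values
# below are arbitrary choices for illustration, not values used by this repository.
if __name__ == "__main__":
    import torch
    import torch.nn.functional as F

    dummy_logits = torch.randn(2, 50265)                      # (batch_size, vocab_size), made-up shapes
    filtered = top_k_top_p_filtering(dummy_logits, top_k=50, top_p=0.95)
    probs = F.softmax(filtered, dim=-1)                       # masked positions become probability 0
    next_tokens = torch.multinomial(probs, num_samples=1)     # (batch_size, 1) sampled token ids
    print(next_tokens.shape)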
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/generation_utils.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import numpy as np import torch try: from .utils import LegacySeq2SeqDataset except ImportError: from utils.utils import LegacySeq2SeqDataset from torch.utils.data import DataLoader import distributed_utils class Seq2SeqDataLoader(DataLoader): def __init__(self, type_path, data_dir, tokenizer, batch_size, device='cpu', max_source_length=1024, max_target_length=1024, n_obs=None, shuffle=False, sortish_sampler=False, num_workers=4): """ data -- list[LongTensor] -- there is no order among the LongTensors """ self.data_dir = data_dir self.tokenizer = tokenizer self.n_obs = n_obs self.sortish_sampler = sortish_sampler self.device = device self.max_source_length = max_source_length self.max_target_length = max_target_length self.dataset = self.get_dataset(type_path) # Partition data for DistributedDataParallel world_size = distributed_utils.get_world_size() rank = distributed_utils.get_rank() sampler = None if world_size > 1 and type_path == "train": sampler =self.dataset.make_sortish_sampler(batch_size, distributed=True, rank=rank, num_replicas=world_size) shuffle = False super().__init__( self.dataset, batch_size=batch_size, collate_fn=self.dataset.collate_fn, shuffle=shuffle, num_workers=num_workers, sampler=sampler, ) def get_dataset(self, type_path): dataset = LegacySeq2SeqDataset( data_dir=self.data_dir, tokenizer=self.tokenizer, type_path=type_path, n_obs=self.n_obs, max_source_length=self.max_source_length, max_target_length=self.max_target_length, src_lang="", tgt_lang="" ) return dataset
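# --- Added illustrative sketch (not part of the original file) ---
# Hedged example of constructing a validation loader with the `Seq2SeqDataLoader` defined above.
# The tokenizer checkpoint name and the data directory (assumed to contain val.source / val.target
# line-aligned files) are assumptions made purely for illustration.
if __name__ == "__main__":
    from bart.tokenization.tokenization_bart import BartTokenizer

    tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")  # assumed checkpoint
    val_loader = Seq2SeqDataLoader(
        type_path="val",
        data_dir="data/cnn_dm",            # assumed layout: val.source / val.target
        tokenizer=tokenizer,
        batch_size=8,
        max_source_length=1024,
        max_target_length=142,
    )
    batch = next(iter(val_loader))
    # the underlying LegacySeq2SeqDataset.collate_fn returns input_ids / attention_mask / labels
    print(batch["input_ids"].shape, batch["labels"].shape)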
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/data_utils.py
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import itertools import json import linecache import math import os import pickle import socket import time import logging from logging import getLogger from pathlib import Path from typing import Callable, Dict, Iterable, List, Union import git import random import numpy as np import torch import torch.distributed as dist from rouge_score import rouge_scorer, scoring from torch import nn from torch.utils.data import Dataset, Sampler, IterableDataset from bart.tokenization.tokenization_bart import BartTokenizer from utils.file_utils import cached_property from lddl.torch.datasets import ParquetDataset from lddl.torch.log import DatasetLogger from lddl.torch.utils import get_node_rank, get_nproc_per_node try: from fairseq.data.data_utils import batch_by_size FAIRSEQ_AVAILABLE = True except (ImportError, ModuleNotFoundError): FAIRSEQ_AVAILABLE = False def label_smoothed_nll_loss(lprobs, target, epsilon, ignore_index=-100): """From fairseq""" if target.dim() == lprobs.dim() - 1: target = target.unsqueeze(-1) nll_loss = -lprobs.gather(dim=-1, index=target) smooth_loss = -lprobs.sum(dim=-1, keepdim=True) if ignore_index is not None: pad_mask = target.eq(ignore_index) nll_loss.masked_fill_(pad_mask, 0.0) smooth_loss.masked_fill_(pad_mask, 0.0) else: nll_loss = nll_loss.squeeze(-1) smooth_loss = smooth_loss.squeeze(-1) nll_loss = nll_loss.sum() # mean()? Scared to break other math. 
smooth_loss = smooth_loss.sum() eps_i = epsilon / lprobs.size(-1) loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss return loss, nll_loss def encode_line(tokenizer, line, max_length, pad_to_max_length=True, return_tensors="pt"): """Only used by LegacyDataset""" extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) else {} return tokenizer( [line] if isinstance(line, str) else line, max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, **extra_kw, ) def lmap(f: Callable, x: Iterable) -> List: """list(map(f, x))""" return list(map(f, x)) def calculate_bleu(output_lns, refs_lns, **kwargs) -> dict: """Uses sacrebleu's corpus_bleu implementation.""" return {"bleu": round(corpus_bleu(output_lns, [refs_lns], **kwargs).score, 4)} def trim_batch( input_ids, pad_token_id, attention_mask=None, ): """Remove columns that are populated exclusively by pad_token_id""" keep_column_mask = input_ids.ne(pad_token_id).any(dim=0) num_keeps = torch.count_nonzero(keep_column_mask) #Pad to multiples of 8 pad_num_keeps = num_keeps if num_keeps % 8 == 0 else (torch.div(num_keeps, 8, rounding_mode='floor') + 1) * 8 keep_column_mask[num_keeps:pad_num_keeps] = True if attention_mask is None: return input_ids[:, keep_column_mask] else: return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask]) class AbstractSeq2SeqDataset(Dataset): def __init__( self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, prefix="", **dataset_kwargs ): super().__init__() if dataset_kwargs.get("src_file", None) is not None: src_file_path = dataset_kwargs["src_file"] self.src_file = Path(src_file_path) self.tgt_file = Path(dataset_kwargs.get("tgt_file", src_file_path.split(".")[0] + ".target")) self.len_file = Path(dataset_kwargs.get("len_file", src_file_path.split(".")[0] + ".len")) elif type_path is not None: self.src_file = Path(data_dir).joinpath(type_path + ".source") self.tgt_file = Path(data_dir).joinpath(type_path + ".target") self.len_file = Path(data_dir).joinpath(type_path + ".len") else: raise ValueError("Unable to locate dataset files without type_path and src_file defined") if os.path.exists(self.len_file): self.src_lens = pickle_load(self.len_file) self.used_char_len = False else: self.src_lens = self.get_char_lens(self.src_file) self.used_char_len = True try: pickle_dump(self.src_lens, self.len_file) print("Saving dataset lens file to cache at ", self.len_file) except: print("Unable to save dataset lens file, will be recomputed every time") self.max_source_length = max_source_length self.max_target_length = max_target_length assert min(self.src_lens) > 0, f"found empty line in {self.src_file}" self.tokenizer = tokenizer self.prefix = prefix if prefix is not None else "" if n_obs is not None: self.src_lens = self.src_lens[:n_obs] self.pad_token_id = self.tokenizer.pad_token_id self.dataset_kwargs = dataset_kwargs dataset_kwargs.update({"add_prefix_space": True} if isinstance(self.tokenizer, BartTokenizer) else {}) def __len__(self): return len(self.src_lens) @staticmethod def get_char_lens(data_file): return [len(x) for x in Path(data_file).open().readlines()] @cached_property def tgt_lens(self): """Length in characters of target documents""" return self.get_char_lens(self.tgt_file) def make_sortish_sampler(self, batch_size, distributed=False, shuffle=True, **kwargs): if distributed: return DistributedSortishSampler(self, batch_size, shuffle=shuffle, **kwargs) else: return 
SortishSampler(self.src_lens, batch_size, shuffle=shuffle) def make_dynamic_sampler(self, max_tokens_per_batch=1024, **kwargs): assert FAIRSEQ_AVAILABLE, "Dynamic batch size requires `pip install fairseq`" assert not self.used_char_len, "You must call python make_len_file.py before calling make_dynamic_sampler" sorted_indices = list(self.make_sortish_sampler(1024, shuffle=False)) def num_tokens_in_example(i): return min(self.src_lens[i], self.max_target_length) # call fairseq cython function batch_sampler: List[List[int]] = batch_by_size( sorted_indices, num_tokens_fn=num_tokens_in_example, max_tokens=max_tokens_per_batch, required_batch_size_multiple=64, ) shuffled_batches = [batch_sampler[i] for i in np.random.permutation(range(len(batch_sampler)))] # move the largest batch to the front to OOM quickly (uses an approximation for padding) approximate_toks_per_batch = [max(self.src_lens[i] for i in batch) * len(batch) for batch in shuffled_batches] largest_batch_idx = np.argmax(approximate_toks_per_batch) shuffled_batches[0], shuffled_batches[largest_batch_idx] = ( shuffled_batches[largest_batch_idx], shuffled_batches[0], ) return shuffled_batches def __getitem__(self, item): raise NotImplementedError("You must implement this") def collate_fn(self, batch): raise NotImplementedError("You must implement this") class LegacySeq2SeqDataset(AbstractSeq2SeqDataset): def __getitem__(self, index) -> Dict[str, torch.Tensor]: """Call tokenizer on src and tgt_lines""" index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") # assert source_line, f"empty source line for index {index}" # assert tgt_line, f"empty tgt line for index {index}" # Some CNN/dm source lines are empty source_inputs = encode_line(self.tokenizer, source_line, self.max_source_length) target_inputs = encode_line(self.tokenizer, tgt_line, self.max_target_length) source_ids = source_inputs["input_ids"].squeeze() target_ids = target_inputs["input_ids"].squeeze() src_mask = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "labels": target_ids, } def collate_fn(self, batch) -> Dict[str, torch.Tensor]: input_ids = torch.stack([x["input_ids"] for x in batch]) masks = torch.stack([x["attention_mask"] for x in batch]) target_ids = torch.stack([x["labels"] for x in batch]) pad_token_id = self.pad_token_id y = trim_batch(target_ids, pad_token_id) source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks) batch = { "input_ids": source_ids, "attention_mask": source_mask, "labels": y, } return batch class PretrainingSeq2SeqDataset(ParquetDataset): def __init__( self, path, tokenizer, max_source_length, type_path="train", n_obs=None, #@TODO fix n_obs input, not used prefix="", log_dir=None, log_level=logging.INFO, **dataset_kwargs ): logger = DatasetLogger( log_dir=log_dir, node_rank=get_node_rank(nproc_per_node=get_nproc_per_node(dataset_kwargs["local_rank"])), local_rank=dataset_kwargs["local_rank"], log_level=log_level, ) super().__init__( path, transform=dataset_kwargs["transform"], local_rank=dataset_kwargs["local_rank"], shuffle_buffer_size=dataset_kwargs["shuffle_buffer_size"], shuffle_buffer_warmup_factor=dataset_kwargs["shuffle_buffer_warmup_factor"], base_seed=dataset_kwargs["base_seed"], logger=logger ) self.max_source_length = max_source_length self.tokenizer = tokenizer self.prefix = prefix if prefix is not None else "" 
self.pad_token_id = self.tokenizer.pad_token_id self.dataset_kwargs = dataset_kwargs dataset_kwargs.update({"add_prefix_space": True} if isinstance(self.tokenizer, BartTokenizer) else {}) def _decode_record_batch(self, batch): batch = batch.to_pydict() for source_line in batch["sentences"]: source_line = self.prefix + source_line.rstrip("\n") assert source_line, f"empty source line for index {index}" source_inputs = encode_line(self.tokenizer, source_line, self.max_source_length) source_ids = source_inputs["input_ids"].squeeze() src_mask = source_inputs["attention_mask"].squeeze() yield { "input_ids": source_ids, "attention_mask": src_mask, } class LegacySeq2SeqDataset(AbstractSeq2SeqDataset): def __getitem__(self, index) -> Dict[str, torch.Tensor]: """Call tokenizer on src and tgt_lines""" index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") # assert source_line, f"empty source line for index {index}" # assert tgt_line, f"empty tgt line for index {index}" # Some CNN/dm source lines are empty source_inputs = encode_line(self.tokenizer, source_line, self.max_source_length) target_inputs = encode_line(self.tokenizer, tgt_line, self.max_target_length) source_ids = source_inputs["input_ids"].squeeze() target_ids = target_inputs["input_ids"].squeeze() src_mask = source_inputs["attention_mask"].squeeze() return { "input_ids": source_ids, "attention_mask": src_mask, "labels": target_ids, } def collate_fn(self, batch) -> Dict[str, torch.Tensor]: input_ids = torch.stack([x["input_ids"] for x in batch]) masks = torch.stack([x["attention_mask"] for x in batch]) target_ids = torch.stack([x["labels"] for x in batch]) pad_token_id = self.pad_token_id y = trim_batch(target_ids, pad_token_id) source_ids, source_mask = trim_batch(input_ids, pad_token_id, attention_mask=masks) batch = { "input_ids": source_ids, "attention_mask": source_mask, "labels": y, } return batch class ShuffleAndChainDataset(IterableDataset): def __init__(self, datasets, buffer_size): super().__init__() self.datasets = datasets self.buffer_size = buffer_size def chain_iter(self): for i, d in enumerate(self.datasets): for j, x in enumerate(d): yield x def __iter__(self): shufbuf = [] try: dataset_iter = self.chain_iter() for i in range(self.buffer_size): shufbuf.append(next(dataset_iter)) except: self.buffer_size = len(shufbuf) try: while True: try: item = next(dataset_iter) evict_idx = random.randint(0, self.buffer_size - 1) yield shufbuf[evict_idx] shufbuf[evict_idx] = item except StopIteration: break while len(shufbuf) > 0: yield shufbuf.pop() except GeneratorExit: pass class Seq2SeqDataset(AbstractSeq2SeqDataset): """A dataset that calls prepare_seq2seq_batch.""" def __getitem__(self, index) -> Dict[str, str]: index = index + 1 # linecache starts at 1 source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n") tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n") assert source_line, f"empty source line for index {index}" assert tgt_line, f"empty tgt line for index {index}" return {"tgt_texts": tgt_line, "src_texts": source_line, "id": index - 1} def collate_fn(self, batch) -> Dict[str, torch.Tensor]: """Call prepare_seq2seq_batch.""" batch_encoding: Dict[str, torch.Tensor] = self.tokenizer.prepare_seq2seq_batch( [x["src_texts"] for x in batch], tgt_texts=[x["tgt_texts"] for x in batch], max_length=self.max_source_length, 
max_target_length=self.max_target_length, return_tensors="pt", **self.dataset_kwargs, ).data batch_encoding["ids"] = torch.tensor([x["id"] for x in batch]) return batch_encoding class SortishSampler(Sampler): "Go through the text data by order of src length with a bit of randomness. From fastai repo." def __init__(self, data, batch_size, shuffle=True): self.data, self.bs, self.shuffle = data, batch_size, shuffle def __len__(self) -> int: return len(self.data) def __iter__(self): return iter(sortish_sampler_indices(self.data, self.bs, shuffle=self.shuffle)) def sortish_sampler_indices(data: List, bs: int, shuffle=True) -> np.array: "Go through the text data by order of src length with a bit of randomness. From fastai repo." if not shuffle: return np.argsort(np.array(data) * -1) def key_fn(i): return data[i] idxs = np.random.permutation(len(data)) sz = bs * 50 ck_idx = [idxs[i : i + sz] for i in range(0, len(idxs), sz)] sort_idx = np.concatenate([sorted(s, key=key_fn, reverse=True) for s in ck_idx]) sz = bs ck_idx = [sort_idx[i : i + sz] for i in range(0, len(sort_idx), sz)] max_ck = np.argmax([key_fn(ck[0]) for ck in ck_idx]) # find the chunk with the largest key, ck_idx[0], ck_idx[max_ck] = ck_idx[max_ck], ck_idx[0] # then make sure it goes first. sort_idx = np.concatenate(np.random.permutation(ck_idx[1:])) if len(ck_idx) > 1 else np.array([], dtype=np.int) sort_idx = np.concatenate((ck_idx[0], sort_idx)) return sort_idx class DistributedSortishSampler(Sampler): """Copied from torch DistributedSampler""" def __init__(self, dataset, batch_size, num_replicas=None, rank=None, add_extra_examples=True, shuffle=True): if num_replicas is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") num_replicas = dist.get_world_size() if rank is None: if not dist.is_available(): raise RuntimeError("Requires distributed package to be available") rank = dist.get_rank() self.dataset = dataset self.num_replicas = num_replicas self.rank = rank self.epoch = 0 if add_extra_examples: self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas)) self.total_size = self.num_samples * self.num_replicas else: self.total_size = len(dataset) self.num_samples = len(self.available_indices) self.batch_size = batch_size self.add_extra_examples = add_extra_examples self.shuffle = shuffle def __iter__(self) -> Iterable: g = torch.Generator() g.manual_seed(self.epoch) sortish_data = [self.dataset.src_lens[i] for i in self.available_indices] sortish_indices = sortish_sampler_indices(sortish_data, self.batch_size, shuffle=self.shuffle) indices = [self.available_indices[i] for i in sortish_indices] assert len(indices) == self.num_samples return iter(indices) @cached_property def available_indices(self) -> np.array: indices = list(range(len(self.dataset))) # add extra samples to make it evenly divisible indices += indices[: (self.total_size - len(indices))] assert len(indices) == self.total_size # subsample available_indices = indices[self.rank : self.total_size : self.num_replicas] return available_indices def __len__(self): return self.num_samples def set_epoch(self, epoch): self.epoch = epoch logger = getLogger(__name__) def use_task_specific_params(model, task): """Update config with summarization specific params.""" task_specific_params = model.config.task_specific_params if task_specific_params is not None: pars = task_specific_params.get(task, {}) logger.info(f"using task specific params for {task}: {pars}") model.config.update(pars) def pickle_load(path): 
"""pickle.load(path)""" with open(path, "rb") as f: return pickle.load(f) def pickle_dump(var, path): """pickle.dump(var, path)""" with open(path, "wb") as f: return pickle.dump(var, f) def pickle_save(obj, path): """pickle.dump(obj, path)""" with open(path, "wb") as f: return pickle.dump(obj, f) def flatten_list(summary_ids: List[List]): return [x for x in itertools.chain.from_iterable(summary_ids)] def save_git_info(folder_path: str) -> None: """Save git information to output_dir/git_log.json""" repo_infos = get_git_info() save_json(repo_infos, os.path.join(folder_path, "git_log.json")) def save_json(content, path, indent=4, **json_dump_kwargs): with open(path, "w") as f: json.dump(content, f, indent=indent, **json_dump_kwargs) def load_json(path): with open(path) as f: return json.load(f) def get_git_info(): try: repo = git.Repo(search_parent_directories=True) repo_infos = { "repo_id": str(repo), "repo_sha": str(repo.head.object.hexsha), "repo_branch": str(repo.active_branch), "hostname": str(socket.gethostname()), } except: logger.info("Unable to provide git repository information from .git folder") repo_infos = { "repo_id": "N/A", "repo_sha": "N/A", "repo_branch": "N/A", "hostname": "N/A", } return repo_infos ROUGE_KEYS = ["rouge1", "rouge2", "rougeL", "rougeLsum"] def calculate_rouge(output_lns: List[str], reference_lns: List[str], cleaned_up_tokenization_spaces=False, use_stemmer=True) -> Dict: scorer = rouge_scorer.RougeScorer(ROUGE_KEYS, use_stemmer=use_stemmer) aggregator = scoring.BootstrapAggregator() split_txt = ". " if cleaned_up_tokenization_spaces else " . " for reference_ln, output_ln in zip(reference_lns, output_lns): # rouge_score expects \n separated sentences within a summary reference_ln_formatted = " . \n".join(reference_ln.split(". ")) output_ln_formatted = " . \n".join(output_ln.split(split_txt)) scores = scorer.score(reference_ln_formatted, output_ln_formatted) aggregator.add_scores(scores) result = aggregator.aggregate() return {k: round(v.mid.fmeasure * 100, 4) for k, v in result.items()} # Utilities for freezing parameters and checking whether they are frozen def freeze_params(model: nn.Module): """Set requires_grad=False for each of model.parameters()""" for par in model.parameters(): par.requires_grad = False def grad_status(model: nn.Module) -> Iterable: return (par.requires_grad for par in model.parameters()) def any_requires_grad(model: nn.Module) -> bool: return any(grad_status(model)) def assert_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) n_require_grad = sum(lmap(int, model_grads)) npars = len(model_grads) assert not any(model_grads), f"{n_require_grad/npars:.1%} of {npars} weights require grad" def assert_not_all_frozen(model): model_grads: List[bool] = list(grad_status(model)) npars = len(model_grads) assert any(model_grads), f"none of {npars} weights require grad" # CLI Parsing utils def parse_numeric_n_bool_cl_kwargs(unparsed_args: List[str]) -> Dict[str, Union[int, float, bool]]: """ Parse an argv list of unspecified command line args to a dict. Assumes all values are either numeric or boolean in the form of true/false. 
""" result = {} assert len(unparsed_args) % 2 == 0, f"got odd number of unparsed args: {unparsed_args}" num_pairs = len(unparsed_args) // 2 for pair_num in range(num_pairs): i = 2 * pair_num assert unparsed_args[i].startswith("--") if unparsed_args[i + 1].lower() == "true": value = True elif unparsed_args[i + 1].lower() == "false": value = False else: try: value = int(unparsed_args[i + 1]) except ValueError: value = float(unparsed_args[i + 1]) # this can raise another informative ValueError result[unparsed_args[i][2:]] = value return result def write_txt_file(ordered_tgt, path): f = Path(path).open("w") for ln in ordered_tgt: f.write(ln + "\n") f.flush() def chunks(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i : i + n] def format_step(step): if isinstance(step, str): return step s = "" if len(step) > 0: s += "Global Step : {} ".format(step[0]) return s def get_readable_time(elapsed): d, h, m, s = [int(x) for x in time.strftime("%d:%H:%M:%S", time.gmtime(elapsed)).split(':')] d -= 1 return '{:2d}h{:2d}m{:2d}s'.format(24*d + h, m, s) class Mean: def __init__(self, **kwargs): self.reset() def reset(self): self._total = 0.0 self._num_examples = 0 def update(self, values, sample_weight=None): if sample_weight is None: if not isinstance(values, torch.Tensor): values = torch.tensor(values) if len(values.shape) == 0: values = values.unsqueeze(-1) self._total += torch.sum(values).item() self._num_examples += values.shape[0] else: self._total += torch.sum(values * sample_weight).item() self._num_examples += torch.sum(sample_weight).item() def result(self): if self._num_examples == 0: return float("nan") return self._total / self._num_examples
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/utils.py
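# Illustrative sketch (not part of the file above): a standalone re-implementation of the
# bucketing idea behind sortish_sampler_indices, using made-up lengths, to show how the
# sampler groups sources of similar length into the same batch while keeping batch order
# randomized and putting the largest batch first. All names and numbers here are
# assumptions for demonstration only.
import numpy as np

def sortish_indices_sketch(lengths, batch_size, seed=0):
    rng = np.random.RandomState(seed)
    idxs = rng.permutation(len(lengths))
    # coarse chunks of batch_size * 50 examples, sorted by length inside each chunk
    coarse = batch_size * 50
    chunks = [idxs[i:i + coarse] for i in range(0, len(idxs), coarse)]
    sort_idx = np.concatenate(
        [sorted(c, key=lambda i: lengths[i], reverse=True) for c in chunks])
    # re-chunk into batches, move the largest batch to the front, shuffle the rest
    batches = [sort_idx[i:i + batch_size] for i in range(0, len(sort_idx), batch_size)]
    largest = int(np.argmax([lengths[b[0]] for b in batches]))
    batches[0], batches[largest] = batches[largest], batches[0]
    rest = rng.permutation(len(batches) - 1) + 1
    return np.concatenate([batches[0]] + [batches[i] for i in rest])

example_lengths = np.random.RandomState(1).randint(5, 200, size=2000).tolist()
order = sortish_indices_sketch(example_lengths, batch_size=8)
print([example_lengths[i] for i in order[:8]])  # the first batch groups the longest sources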
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

import os
from contextlib import contextmanager

import torch
import math
import pynvml

pynvml.nvmlInit()


def init_distributed(cuda):
    """
    Initializes distributed backend.

    :param cuda: (bool) if True initializes nccl backend, if False initializes
        gloo backend
    """
    world_size = int(os.environ.get('WORLD_SIZE', 1))
    distributed = (world_size > 1)
    if distributed:
        backend = 'nccl' if cuda else 'gloo'
        torch.distributed.init_process_group(backend=backend,
                                             init_method='env://')
        assert torch.distributed.is_initialized()
    return distributed


def barrier():
    """
    Call torch.distributed.barrier() if distributed is in use
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        torch.distributed.barrier()


def get_rank():
    """
    Gets distributed rank or returns zero if distributed is not initialized.
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        rank = torch.distributed.get_rank()
    else:
        rank = 0
    return rank


def get_world_size():
    """
    Gets total number of distributed workers or returns one if distributed is
    not initialized.
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        world_size = torch.distributed.get_world_size()
    else:
        world_size = 1
    return world_size


def get_device_count():
    """
    Gets total number of devices per node
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        nproc_per_node = torch.cuda.device_count()
    else:
        nproc_per_node = 1
    return nproc_per_node


def all_reduce_item(value, op='sum'):
    """
    All-reduces single scalar value if distributed is in use
    """
    if torch.distributed.is_available() and torch.distributed.is_initialized():
        if op == 'sum' or op == 'mean':
            dop = torch.distributed.ReduceOp.SUM
        elif op == 'min':
            dop = torch.distributed.ReduceOp.MIN
        elif op == 'max':
            dop = torch.distributed.ReduceOp.MAX
        elif op == 'product':
            dop = torch.distributed.ReduceOp.PRODUCT
        else:
            raise RuntimeError('Unsupported reduce op')

        backend = torch.distributed.get_backend()
        if backend == torch.distributed.Backend.NCCL:
            device = torch.device('cuda')
        elif backend == torch.distributed.Backend.GLOO:
            device = torch.device('cpu')
        else:
            raise RuntimeError('Unsupported distributed backend')

        tensor = torch.tensor(value, device=device)
        torch.distributed.all_reduce(tensor, dop)
        if op == 'mean':
            tensor /= get_world_size()
        ret = tensor.item()
    else:
        if torch.is_tensor(value):
            ret = value.item()
        else:
            ret = value
    return ret


@contextmanager
def sync_workers():
    """
    Yields distributed rank and synchronizes all workers on exit.
    """
    rank = get_rank()
    yield rank
    barrier()


def systemGetDriverVersion():
    return pynvml.nvmlSystemGetDriverVersion()


def deviceGetCount():
    return pynvml.nvmlDeviceGetCount()


class device:
    # assume nvml returns list of 64 bit ints
    _nvml_affinity_elements = math.ceil(os.cpu_count() / 64)

    def __init__(self, device_idx):
        super().__init__()
        self.handle = pynvml.nvmlDeviceGetHandleByIndex(device_idx)

    def getName(self):
        return pynvml.nvmlDeviceGetName(self.handle)

    def getCpuAffinity(self):
        affinity_string = ''
        for j in pynvml.nvmlDeviceGetCpuAffinity(
            self.handle, device._nvml_affinity_elements
        ):
            # assume nvml returns list of 64 bit ints
            affinity_string = '{:064b}'.format(j) + affinity_string
        affinity_list = [int(x) for x in affinity_string]
        affinity_list.reverse()  # so core 0 is in 0th element of list
        return [i for i, e in enumerate(affinity_list) if e != 0]


def set_affinity(gpu_id=None):
    if gpu_id is None:
        gpu_id = int(os.getenv('LOCAL_RANK', 0))

    dev = device(gpu_id)
    os.sched_setaffinity(0, dev.getCpuAffinity())

    # list of ints representing the logical cores this process is now affinitied with
    return os.sched_getaffinity(0)
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/distributed_utils.py
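# Illustrative sketch (not part of the file above): shows how the 64-bit affinity words
# returned by NVML are decoded into a list of logical CPU core ids, mirroring
# device.getCpuAffinity. The two affinity words below are made-up example values.
def decode_affinity(words):
    bits = ''
    for w in words:                          # each word covers 64 logical cores
        bits = '{:064b}'.format(w) + bits    # later words are more significant
    per_core = [int(b) for b in bits]
    per_core.reverse()                       # so core 0 sits at index 0
    return [core for core, is_set in enumerate(per_core) if is_set]

# cores 0-3 set in the first word, core 64 set in the second word
print(decode_affinity([0b1111, 0b1]))        # -> [0, 1, 2, 3, 64]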
# Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import logging import os from pathlib import Path import numpy as np import pytorch_lightning as pl import torch from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, ProgressBar from pytorch_lightning.utilities import rank_zero_only from utils.utils import save_json from utils.distributed_utils import all_reduce_item, get_world_size import time def count_trainable_parameters(model): model_parameters = filter(lambda p: p.requires_grad, model.parameters()) params = sum([np.prod(p.size()) for p in model_parameters]) return params logger = logging.getLogger(__name__) class Seq2SeqLoggingCallback(pl.Callback): @rank_zero_only def on_batch_end(self, trainer, pl_module): lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} pl_module.logger.log_metrics(lrs) @rank_zero_only def _write_logs( self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True ) -> None: logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") metrics = trainer.callback_metrics trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) # Log results od = Path(pl_module.hparams.output_dir) if type_path == "test": results_file = od / "test_results.txt" generations_file = od / "test_generations.txt" else: # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json # If people want this it will be easy enough to add back. 
results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" results_file.parent.mkdir(exist_ok=True) generations_file.parent.mkdir(exist_ok=True) with open(results_file, "a+") as writer: for key in sorted(metrics): if key in ["log", "progress_bar", "preds"]: continue val = metrics[key] if isinstance(val, torch.Tensor): val = val.item() msg = f"{key}: {val:.6f}\n" writer.write(msg) if not save_generations: return if "preds" in metrics: content = "\n".join(metrics["preds"]) generations_file.open("w+").write(content) def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): self.train_tob_list.append(outputs[0][0]["log"]["tpb"]) self.train_time_epoch_list.append(time.time() - self.t0) #Measures ~time for forward + backward + optimizer_step def on_train_batch_start(self, trainer, pl_module, batch, batch_idx, dataloader_idx): self.t0 = time.time() def on_train_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule): try: npars = pl_module.model.model.num_parameters() except AttributeError: npars = pl_module.model.num_parameters() n_trainable_pars = count_trainable_parameters(pl_module) # mp stands for million parameters trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) self.train_time_epoch_list = [] self.train_tob_list = [] self.tokens = 0 self.train_time = 0.0 self.avg_steps_per_sec = 0.0 self.epochs = 0 try: self.sync_dist = pl_module.sync_dist except: self.sync_dist = get_world_size() > 1 def process_stats(self, train_times, outputs, filter_p=0.8): index_list = np.argsort(train_times) #sort based on train_times best_n = int(len(outputs) * 0.8) train_time = 0.0 unpadded_tokens = 0 for i in index_list[:best_n]: train_time += train_times[i] unpadded_tokens += outputs[i] avg_steps_per_sec = train_time / best_n return train_time, unpadded_tokens, best_n, avg_steps_per_sec def on_train_epoch_end(self, trainer, pl_module, outputs): try: outputs = self.train_tob_list train_time, unpadded_tokens, train_batches, avg_steps_per_sec = self.process_stats(self.train_time_epoch_list, outputs) pl_module.log("train_throughput", unpadded_tokens/train_time, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=self.sync_dist) all_reduce_tokens = all_reduce_item(unpadded_tokens, "sum") all_reduce_time = all_reduce_item(train_time, "mean") all_reduce_avg_steps_per_sec = all_reduce_item(avg_steps_per_sec, "mean") #Accumulate self.tokens = ((self.tokens * self.epochs) + all_reduce_tokens) / (self.epochs + 1) self.train_time = ((self.train_time * self.epochs) + all_reduce_time) / (self.epochs + 1) self.avg_steps_per_sec = ((self.avg_steps_per_sec * self.epochs) + all_reduce_avg_steps_per_sec) / (self.epochs + 1.0) self.epochs +=1 #Reset self.train_time_epoch_list = [] self.train_tob_list = [] except ZeroDivisionError: print("Train time is reported as 0? 
It's possible training is already complete!") pass def on_train_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): if self.epochs < 1: outputs = self.train_tob_list train_time, unpadded_tokens, train_batches, avg_steps_per_sec = self.process_stats(self.train_time_epoch_list, outputs) pl_module.log("train_throughput", unpadded_tokens/train_time, on_step=False, on_epoch=True, prog_bar=True, logger=True, sync_dist=self.sync_dist) all_reduce_tokens = all_reduce_item(unpadded_tokens, "sum") all_reduce_time = all_reduce_item(train_time, "mean") all_reduce_avg_steps_per_sec = all_reduce_item(avg_steps_per_sec, "mean") #Accumulate self.tokens = ((self.tokens * self.epochs) + all_reduce_tokens) / (self.epochs + 1) self.train_time = ((self.train_time * self.epochs) + all_reduce_time) / (self.epochs + 1) self.avg_steps_per_sec = ((self.avg_steps_per_sec * self.epochs) + all_reduce_avg_steps_per_sec) / (self.epochs + 1.0) def get_checkpoint_callback(output_dir, metric, save_top_k=1): """Saves the best model by validation ROUGE2 score.""" monitor = f"val_{metric}" if metric == "rouge2": exp = "{val_avg_rouge2:.4f}-{step_count}" elif metric == "bleu": exp = "{val_avg_bleu:.4f}-{step_count}" elif metric == "loss": exp = "{loss:.4f}-{epoch}" monitor = metric else: raise NotImplementedError( f"seq2seq callbacks only support rouge2, bleu and loss, got {metric}, You can make your own by adding to this function." ) checkpoint_callback = ModelCheckpoint( filename=os.path.join(output_dir, exp), monitor=monitor, mode="min" if "loss" in metric else "max", save_top_k=save_top_k, period=1, # maybe save a checkpoint every time val is run, not just end of epoch. ) return checkpoint_callback class CheckpointEveryNSteps(pl.Callback): """ Save a checkpoint every N steps, instead of Lightning's default that checkpoints based on validation loss. """ def __init__( self, output_dir, save_step_frequency, prefix="", use_modelcheckpoint_filename=False, ): """ Args: save_step_frequency: how often to save in steps prefix: add a prefix to the name, only used if use_modelcheckpoint_filename=False use_modelcheckpoint_filename: just use the ModelCheckpoint callback's default filename, don't use ours. """ self.output_dir = output_dir self.save_step_frequency = save_step_frequency self.prefix = prefix self.use_modelcheckpoint_filename = use_modelcheckpoint_filename def on_batch_end(self, trainer: pl.Trainer, _): """ Check if we should save a checkpoint after every train batch """ epoch = trainer.current_epoch global_step = trainer.global_step if global_step % self.save_step_frequency == 0: if self.use_modelcheckpoint_filename: filename = trainer.checkpoint_callback.filename else: filename = f"{self.prefix}_epoch{epoch}_step{global_step}.ckpt" ckpt_path = os.path.join(self.output_dir, filename) trainer.save_checkpoint(ckpt_path) def on_train_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): epoch = trainer.current_epoch global_step = trainer.global_step if self.use_modelcheckpoint_filename: filename = trainer.checkpoint_callback.filename else: filename = f"{self.prefix}_epoch{epoch}_step{global_step}.ckpt" ckpt_path = os.path.join(self.output_dir, filename) trainer.save_checkpoint(ckpt_path) def get_early_stopping_callback(metric, patience): return EarlyStopping( monitor=metric, # does this need avg? mode="min" if "loss" in metric else "max", patience=patience, verbose=True, )
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/callbacks.py
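# Illustrative sketch (not part of the file above): mirrors the idea behind
# Seq2SeqLoggingCallback.process_stats -- keep only the fastest 80% of recorded step
# times (dropping warm-up and straggler steps) before computing tokens per second.
# The step times and token counts are made-up example values.
import numpy as np

step_times = [2.10, 0.51, 0.50, 0.52, 0.49, 0.50, 0.53, 0.50, 0.51, 0.50]  # seconds/step
tokens_per_step = [4096] * len(step_times)

fastest_first = np.argsort(step_times)
best_n = int(len(step_times) * 0.8)          # keep the fastest 80% of measurements
kept = fastest_first[:best_n]

train_time = sum(step_times[i] for i in kept)
unpadded_tokens = sum(tokens_per_step[i] for i in kept)
print(f"throughput ~ {unpadded_tokens / train_time:.0f} tokens/s over {best_n} steps")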
# Copyright (c) 2022, NVIDIA CORPORATION. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ Utilities for working with the local dataset cache. This file is adapted from the AllenNLP library at https://github.com/allenai/allennlp Copyright by the AllenNLP authors. """ import fnmatch import json import logging import os import re import shutil import sys import tarfile import tempfile from collections import OrderedDict from contextlib import contextmanager from dataclasses import fields from functools import partial, wraps from hashlib import sha256 from pathlib import Path from typing import Any, Dict, Optional, Tuple, Union from urllib.parse import urlparse from zipfile import ZipFile, is_zipfile import numpy as np from tqdm.auto import tqdm import requests from filelock import FileLock __version__ = "3.0.2" logger = logging.getLogger(__name__) # pylint: disable=invalid-name try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TORCH in ("1", "ON", "YES", "AUTO") and USE_TF not in ("1", "ON", "YES"): import torch _torch_available = True # pylint: disable=invalid-name logger.info("PyTorch version {} available.".format(torch.__version__)) else: logger.info("Disabling PyTorch because USE_TF is set") _torch_available = False except ImportError: _torch_available = False # pylint: disable=invalid-name try: USE_TF = os.environ.get("USE_TF", "AUTO").upper() USE_TORCH = os.environ.get("USE_TORCH", "AUTO").upper() if USE_TF in ("1", "ON", "YES", "AUTO") and USE_TORCH not in ("1", "ON", "YES"): import tensorflow as tf assert hasattr(tf, "__version__") and int(tf.__version__[0]) >= 2 _tf_available = True # pylint: disable=invalid-name logger.info("TensorFlow version {} available.".format(tf.__version__)) else: logger.info("Disabling Tensorflow because USE_TORCH is set") _tf_available = False except (ImportError, AssertionError): _tf_available = False # pylint: disable=invalid-name try: import nlp # noqa: F401 _nlp_available = True except ImportError: _nlp_available = False try: from torch.hub import _get_torch_home torch_cache_home = _get_torch_home() except ImportError: torch_cache_home = os.path.expanduser( os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch")) ) try: import torch_xla.core.xla_model as xm # noqa: F401 if _torch_available: _torch_tpu_available = True # pylint: disable= else: _torch_tpu_available = False except ImportError: _torch_tpu_available = False try: import psutil # noqa: F401 _psutil_available = True except ImportError: _psutil_available = False try: import py3nvml # noqa: F401 _py3nvml_available = True except ImportError: _py3nvml_available = False try: from apex import amp # noqa: F401 _has_apex = True except ImportError: _has_apex = False default_cache_path = os.path.join(torch_cache_home, "transformers") PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", 
default_cache_path) PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE) TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE) WEIGHTS_NAME = "pytorch_model.bin" TF2_WEIGHTS_NAME = "tf_model.h5" TF_WEIGHTS_NAME = "model.ckpt" CONFIG_NAME = "config.json" MODEL_CARD_NAME = "modelcard.json" MULTIPLE_CHOICE_DUMMY_INPUTS = [[[0], [1]], [[0], [1]]] DUMMY_INPUTS = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]] DUMMY_MASK = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]] S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert" CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co" def is_torch_available(): return _torch_available def is_tf_available(): return _tf_available def is_torch_tpu_available(): return _torch_tpu_available def is_nlp_available(): return _nlp_available def is_psutil_available(): return _psutil_available def is_py3nvml_available(): return _py3nvml_available def is_apex_available(): return _has_apex def add_start_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_start_docstrings_to_model_forward(*docstr): def docstring_decorator(fn): class_name = ":class:`~transformers.{}`".format(fn.__qualname__.split(".")[0]) intro = " The {} forward method, overrides the :func:`__call__` special method.".format(class_name) note = r""" .. note:: Although the recipe for forward pass needs to be defined within this function, one should call the :class:`Module` instance afterwards instead of this since the former takes care of running the pre and post processing steps while the latter silently ignores them. """ fn.__doc__ = intro + note + "".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else "") return fn return docstring_decorator def add_end_docstrings(*docstr): def docstring_decorator(fn): fn.__doc__ = fn.__doc__ + "".join(docstr) return fn return docstring_decorator PT_RETURN_INTRODUCTION = r""" Returns: :class:`~{full_output_type}` or :obj:`tuple(torch.FloatTensor)`: A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a tuple of :obj:`torch.FloatTensor` comprising various elements depending on the configuration (:class:`~transformers.{config_class}`) and inputs. """ TF_RETURN_INTRODUCTION = r""" Returns: :class:`~{full_output_type}` or :obj:`tuple(tf.Tensor)`: A :class:`~{full_output_type}` (if ``return_dict=True`` is passed or when ``config.return_dict=True``) or a tuple of :obj:`tf.Tensor` comprising various elements depending on the configuration (:class:`~transformers.{config_class}`) and inputs. """ def _get_indent(t): """Returns the indentation in the first line of t""" search = re.search(r"^(\s*)\S", t) return "" if search is None else search.groups()[0] def _convert_output_args_doc(output_args_doc): """Convert output_args_doc to display properly.""" # Split output_arg_doc in blocks argument/description indent = _get_indent(output_args_doc) blocks = [] current_block = "" for line in output_args_doc.split("\n"): # If the indent is the same as the beginning, the line is the name of new arg. if _get_indent(line) == indent: if len(current_block) > 0: blocks.append(current_block[:-1]) current_block = f"{line}\n" else: # Otherwise it's part of the description of the current arg. # We need to remove 2 spaces to the indentation. 
current_block += f"{line[2:]}\n" blocks.append(current_block[:-1]) # Format each block for proper rendering for i in range(len(blocks)): blocks[i] = re.sub(r"^(\s+)(\S+)(\s+)", r"\1- **\2**\3", blocks[i]) blocks[i] = re.sub(r":\s*\n\s*(\S)", r" -- \1", blocks[i]) return "\n".join(blocks) def _prepare_output_docstrings(output_type, config_class): """ Prepares the return part of the docstring using `output_type`. """ docstrings = output_type.__doc__ # Remove the head of the docstring to keep the list of args only lines = docstrings.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*(Args|Parameters):\s*$", lines[i]) is None: i += 1 if i < len(lines): docstrings = "\n".join(lines[(i + 1) :]) docstrings = _convert_output_args_doc(docstrings) # Add the return introduction full_output_type = f"{output_type.__module__}.{output_type.__name__}" intro = TF_RETURN_INTRODUCTION if output_type.__name__.startswith("TF") else PT_RETURN_INTRODUCTION intro = intro.format(full_output_type=full_output_type, config_class=config_class) return intro + docstrings PT_TOKEN_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor([1] * inputs["input_ids"].size(1)).unsqueeze(0) # Batch size 1 >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits """ PT_QUESTION_ANSWERING_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> start_positions = torch.tensor([1]) >>> end_positions = torch.tensor([3]) >>> outputs = model(**inputs, start_positions=start_positions, end_positions=end_positions) >>> loss = outputs.loss >>> start_scores = outputs.start_scores >>> end_scores = outputs.end_scores """ PT_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> labels = torch.tensor([1]).unsqueeze(0) # Batch size 1 >>> outputs = model(**inputs, labels=labels) >>> loss = outputs.loss >>> logits = outputs.logits """ PT_MASKED_LM_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> input_ids = tokenizer("Hello, my dog is cute", return_tensors="pt")["input_ids"] >>> outputs = model(input_ids, labels=input_ids) >>> loss = outputs.loss >>> prediction_logits = outputs.logits """ PT_BASE_MODEL_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs) >>> last_hidden_states = 
outputs.last_hidden_state """ PT_MULTIPLE_CHOICE_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import torch >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> labels = torch.tensor(0).unsqueeze(0) # choice0 is correct (according to Wikipedia ;)), batch size 1 >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='pt', padding=True) >>> outputs = model(**{{k: v.unsqueeze(0) for k,v in encoding.items()}}, labels=labels) # batch size is 1 >>> # the linear classifier still needs to be trained >>> loss = outputs.loss >>> logits = outputs.logits """ PT_CAUSAL_LM_SAMPLE = r""" Example:: >>> import torch >>> from transformers import {tokenizer_class}, {model_class} >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}', return_dict=True) >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") >>> outputs = model(**inputs, labels=inputs["input_ids"]) >>> loss = outputs.loss >>> logits = outputs.logits """ TF_TOKEN_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> input_ids = inputs["input_ids"] >>> inputs["labels"] = tf.reshape(tf.constant([1] * tf.size(input_ids).numpy()), (-1, tf.size(input_ids))) # Batch size 1 >>> outputs = model(inputs) >>> loss, scores = outputs[:2] """ TF_QUESTION_ANSWERING_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> question, text = "Who was Jim Henson?", "Jim Henson was a nice puppet" >>> input_dict = tokenizer(question, text, return_tensors='tf') >>> start_scores, end_scores = model(input_dict) >>> all_tokens = tokenizer.convert_ids_to_tokens(input_dict["input_ids"].numpy()[0]) >>> answer = ' '.join(all_tokens[tf.math.argmax(start_scores, 1)[0] : tf.math.argmax(end_scores, 1)[0]+1]) """ TF_SEQUENCE_CLASSIFICATION_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> inputs["labels"] = tf.reshape(tf.constant(1), (-1, 1)) # Batch size 1 >>> outputs = model(inputs) >>> loss, logits = outputs[:2] """ TF_MASKED_LM_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True))[None, :] # Batch size 1 >>> outputs = model(input_ids) >>> prediction_scores = outputs[0] """ TF_BASE_MODEL_SAMPLE = r""" Example:: >>> from transformers import 
{tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple """ TF_MULTIPLE_CHOICE_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced." >>> choice0 = "It is eaten with a fork and a knife." >>> choice1 = "It is eaten while held in the hand." >>> encoding = tokenizer([[prompt, prompt], [choice0, choice1]], return_tensors='tf', padding=True) >>> inputs = {{k: tf.expand_dims(v, 0) for k, v in encoding.items()}} >>> outputs = model(inputs) # batch size is 1 >>> # the linear classifier still needs to be trained >>> logits = outputs[0] """ TF_CAUSAL_LM_SAMPLE = r""" Example:: >>> from transformers import {tokenizer_class}, {model_class} >>> import tensorflow as tf >>> tokenizer = {tokenizer_class}.from_pretrained('{checkpoint}') >>> model = {model_class}.from_pretrained('{checkpoint}') >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") >>> outputs = model(inputs) >>> logits = outputs[0] """ def add_code_sample_docstrings(*docstr, tokenizer_class=None, checkpoint=None, output_type=None, config_class=None): def docstring_decorator(fn): model_class = fn.__qualname__.split(".")[0] is_tf_class = model_class[:2] == "TF" if "SequenceClassification" in model_class: code_sample = TF_SEQUENCE_CLASSIFICATION_SAMPLE if is_tf_class else PT_SEQUENCE_CLASSIFICATION_SAMPLE elif "QuestionAnswering" in model_class: code_sample = TF_QUESTION_ANSWERING_SAMPLE if is_tf_class else PT_QUESTION_ANSWERING_SAMPLE elif "TokenClassification" in model_class: code_sample = TF_TOKEN_CLASSIFICATION_SAMPLE if is_tf_class else PT_TOKEN_CLASSIFICATION_SAMPLE elif "MultipleChoice" in model_class: code_sample = TF_MULTIPLE_CHOICE_SAMPLE if is_tf_class else PT_MULTIPLE_CHOICE_SAMPLE elif "MaskedLM" in model_class: code_sample = TF_MASKED_LM_SAMPLE if is_tf_class else PT_MASKED_LM_SAMPLE elif "LMHead" in model_class: code_sample = TF_CAUSAL_LM_SAMPLE if is_tf_class else PT_CAUSAL_LM_SAMPLE elif "Model" in model_class: code_sample = TF_BASE_MODEL_SAMPLE if is_tf_class else PT_BASE_MODEL_SAMPLE else: raise ValueError(f"Docstring can't be built for model {model_class}") output_doc = _prepare_output_docstrings(output_type, config_class) if output_type is not None else "" built_doc = code_sample.format(model_class=model_class, tokenizer_class=tokenizer_class, checkpoint=checkpoint) fn.__doc__ = (fn.__doc__ or "") + "".join(docstr) + output_doc + built_doc return fn return docstring_decorator def replace_return_docstrings(output_type=None, config_class=None): def docstring_decorator(fn): docstrings = fn.__doc__ lines = docstrings.split("\n") i = 0 while i < len(lines) and re.search(r"^\s*Returns?:\s*$", lines[i]) is None: i += 1 if i < len(lines): lines[i] = _prepare_output_docstrings(output_type, config_class) docstrings = "\n".join(lines) else: raise ValueError( f"The function {fn} should have an empty 'Return:' or 'Returns:' in its docstring as placeholder, current docstring is:\n{docstrings}" ) fn.__doc__ = 
docstrings return fn return docstring_decorator def is_remote_url(url_or_filename): parsed = urlparse(url_or_filename) return parsed.scheme in ("http", "https") def hf_bucket_url(model_id: str, filename: str, use_cdn=True) -> str: """ Resolve a model identifier, and a file name, to a HF-hosted url on either S3 or Cloudfront (a Content Delivery Network, or CDN). Cloudfront is replicated over the globe so downloads are way faster for the end user (and it also lowers our bandwidth costs). However, it is more aggressively cached by default, so may not always reflect the latest changes to the underlying file (default TTL is 24 hours). In terms of client-side caching from this library, even though Cloudfront relays the ETags from S3, using one or the other (or switching from one to the other) will affect caching: cached files are not shared between the two because the cached file's name contains a hash of the url. """ endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX legacy_format = "/" not in model_id if legacy_format: return f"{endpoint}/{model_id}-{filename}" else: return f"{endpoint}/{model_id}/{filename}" def url_to_filename(url, etag=None): """ Convert `url` into a hashed filename in a repeatable way. If `etag` is specified, append its hash to the url's, delimited by a period. If the url ends with .h5 (Keras HDF5 weights) adds '.h5' to the name so that TF 2.0 can identify it as a HDF5 file (see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1380) """ url_bytes = url.encode("utf-8") url_hash = sha256(url_bytes) filename = url_hash.hexdigest() if etag: etag_bytes = etag.encode("utf-8") etag_hash = sha256(etag_bytes) filename += "." + etag_hash.hexdigest() if url.endswith(".h5"): filename += ".h5" return filename def filename_to_url(filename, cache_dir=None): """ Return the url and etag (which may be ``None``) stored for `filename`. Raise ``EnvironmentError`` if `filename` or its stored metadata do not exist. """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) cache_path = os.path.join(cache_dir, filename) if not os.path.exists(cache_path): raise EnvironmentError("file {} not found".format(cache_path)) meta_path = cache_path + ".json" if not os.path.exists(meta_path): raise EnvironmentError("file {} not found".format(meta_path)) with open(meta_path, encoding="utf-8") as meta_file: metadata = json.load(meta_file) url = metadata["url"] etag = metadata["etag"] return url, etag def cached_path( url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent: Union[Dict, str, None] = None, extract_compressed_file=False, force_extract=False, local_files_only=False, ) -> Optional[str]: """ Given something that might be a URL (or might be a local path), determine which. If it's a URL, download the file and cache it, and return the path to the cached file. If it's already a local path, make sure the file exists and then return the path. Args: cache_dir: specify a cache directory to save the file to (overwrite the default cache dir). force_download: if True, re-dowload the file even if it's already cached in the cache dir. resume_download: if True, resume the download if incompletly recieved file is found. user_agent: Optional string or dict that will be appended to the user-agent on remote requests. 
extract_compressed_file: if True and the path point to a zip or tar file, extract the compressed file in a folder along the archive. force_extract: if True when extract_compressed_file is True and the archive was already extracted, re-extract the archive and overide the folder where it was extracted. Return: None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk). Local path (string) otherwise """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(url_or_filename, Path): url_or_filename = str(url_or_filename) if isinstance(cache_dir, Path): cache_dir = str(cache_dir) if is_remote_url(url_or_filename): # URL, so get it from the cache (downloading if necessary) output_path = get_from_cache( url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only, ) elif os.path.exists(url_or_filename): # File, and it exists. output_path = url_or_filename elif urlparse(url_or_filename).scheme == "": # File, but it doesn't exist. raise EnvironmentError("file {} not found".format(url_or_filename)) else: # Something unknown raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename)) if extract_compressed_file: if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path): return output_path # Path where we extract compressed archives # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/" output_dir, output_file = os.path.split(output_path) output_extract_dir_name = output_file.replace(".", "-") + "-extracted" output_path_extracted = os.path.join(output_dir, output_extract_dir_name) if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract: return output_path_extracted # Prevent parallel extractions lock_path = output_path + ".lock" with FileLock(lock_path): shutil.rmtree(output_path_extracted, ignore_errors=True) os.makedirs(output_path_extracted) if is_zipfile(output_path): with ZipFile(output_path, "r") as zip_file: zip_file.extractall(output_path_extracted) zip_file.close() elif tarfile.is_tarfile(output_path): tar_file = tarfile.open(output_path) tar_file.extractall(output_path_extracted) tar_file.close() else: raise EnvironmentError("Archive format of {} could not be identified".format(output_path)) return output_path_extracted return output_path def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[Dict, str, None] = None): ua = "transformers/{}; python/{}".format(__version__, sys.version.split()[0]) if is_torch_available(): ua += "; torch/{}".format(torch.__version__) if is_tf_available(): ua += "; tensorflow/{}".format(tf.__version__) if isinstance(user_agent, dict): ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items()) elif isinstance(user_agent, str): ua += "; " + user_agent headers = {"user-agent": ua} if resume_size > 0: headers["Range"] = "bytes=%d-" % (resume_size,) response = requests.get(url, stream=True, proxies=proxies, headers=headers) if response.status_code == 416: # Range not satisfiable return content_length = response.headers.get("Content-Length") total = resume_size + int(content_length) if content_length is not None else None progress = tqdm( unit="B", unit_scale=True, total=total, initial=resume_size, desc="Downloading", disable=bool(logger.getEffectiveLevel() == logging.NOTSET), ) for chunk in response.iter_content(chunk_size=1024): if chunk: # 
filter out keep-alive new chunks progress.update(len(chunk)) temp_file.write(chunk) progress.close() def get_from_cache( url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent: Union[Dict, str, None] = None, local_files_only=False, ) -> Optional[str]: """ Given a URL, look for the corresponding file in the local cache. If it's not there, download it. Then return the path to the cached file. Return: None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk). Local path (string) otherwise """ if cache_dir is None: cache_dir = TRANSFORMERS_CACHE if isinstance(cache_dir, Path): cache_dir = str(cache_dir) os.makedirs(cache_dir, exist_ok=True) etag = None if not local_files_only: try: response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout) if response.status_code == 200: etag = response.headers.get("ETag") except (EnvironmentError, requests.exceptions.Timeout): # etag is already None pass filename = url_to_filename(url, etag) # get cache path to put the file cache_path = os.path.join(cache_dir, filename) # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible. # try to get the last downloaded one if etag is None: if os.path.exists(cache_path): return cache_path else: matching_files = [ file for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*") if not file.endswith(".json") and not file.endswith(".lock") ] if len(matching_files) > 0: return os.path.join(cache_dir, matching_files[-1]) else: # If files cannot be found and local_files_only=True, # the models might've been found if local_files_only=False # Notify the user about that if local_files_only: raise ValueError( "Cannot find the requested files in the cached path and outgoing traffic has been" " disabled. To enable model look-ups and downloads online, set 'local_files_only'" " to False." ) return None # From now on, etag is not None. if os.path.exists(cache_path) and not force_download: return cache_path # Prevent parallel downloads of the same file with a lock. lock_path = cache_path + ".lock" with FileLock(lock_path): # If the download just completed while the lock was activated. if os.path.exists(cache_path) and not force_download: # Even if returning early like here, the lock will be released. return cache_path if resume_download: incomplete_path = cache_path + ".incomplete" @contextmanager def _resumable_file_manager(): with open(incomplete_path, "a+b") as f: yield f temp_file_manager = _resumable_file_manager if os.path.exists(incomplete_path): resume_size = os.stat(incomplete_path).st_size else: resume_size = 0 else: temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False) resume_size = 0 # Download to temporary file, then copy to cache dir once finished. # Otherwise you get corrupt cache entries if the download gets interrupted. 
with temp_file_manager() as temp_file: logger.info("%s not found in cache or force_download set to True, downloading to %s", url, temp_file.name) http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent) logger.info("storing %s in cache at %s", url, cache_path) os.replace(temp_file.name, cache_path) logger.info("creating metadata file for %s", cache_path) meta = {"url": url, "etag": etag} meta_path = cache_path + ".json" with open(meta_path, "w") as meta_file: json.dump(meta, meta_file) return cache_path class cached_property(property): """ Descriptor that mimics @property but caches output in member variable. From tensorflow_datasets Built-in in functools from Python 3.8. """ def __get__(self, obj, objtype=None): # See docs.python.org/3/howto/descriptor.html#properties if obj is None: return self if self.fget is None: raise AttributeError("unreadable attribute") attr = "__cached_" + self.fget.__name__ cached = getattr(obj, attr, None) if cached is None: cached = self.fget(obj) setattr(obj, attr, cached) return cached def torch_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_torch_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires PyTorch.") return wrapper def tf_required(func): # Chose a different decorator name than in tests so it's clear they are not the same. @wraps(func) def wrapper(*args, **kwargs): if is_tf_available(): return func(*args, **kwargs) else: raise ImportError(f"Method `{func.__name__}` requires TF.") return wrapper def is_tensor(x): """ Tests if ``x`` is a :obj:`torch.Tensor`, :obj:`tf.Tensor` or :obj:`np.ndarray`. """ if is_torch_available(): import torch if isinstance(x, torch.Tensor): return True if is_tf_available(): import tensorflow as tf if isinstance(x, tf.Tensor): return True return isinstance(x, np.ndarray) class ModelOutput(OrderedDict): """ Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by integer or slice (like a tuple) or strings (like a dictionnary) that will ignore the ``None`` attributes. Otherwise behaves like a regular python dictionary. .. warning:: You can't unpack a :obj:`ModelOutput` directly. Use the :meth:`~transformers.file_utils.ModelOutput.to_tuple` method to convert it to a tuple before. """ def __post_init__(self): class_fields = fields(self) # Safety and consistency checks assert len(class_fields), f"{self.__class__.__name__} has no fields." assert all( field.default is None for field in class_fields[1:] ), f"{self.__class__.__name__} should not have more than one required field." 
first_field = getattr(self, class_fields[0].name) other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:]) if other_fields_are_none and not is_tensor(first_field): try: iterator = iter(first_field) first_field_iterator = True except TypeError: first_field_iterator = False # if we provided an iterator as first field and the iterator is a (key, value) iterator # set the associated fields if first_field_iterator: for element in iterator: if ( not isinstance(element, (list, tuple)) or not len(element) == 2 or not isinstance(element[0], str) ): break setattr(self, element[0], element[1]) if element[1] is not None: self[element[0]] = element[1] else: for field in class_fields: v = getattr(self, field.name) if v is not None: self[field.name] = v def __delitem__(self, *args, **kwargs): raise Exception(f"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.") def setdefault(self, *args, **kwargs): raise Exception(f"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.") def pop(self, *args, **kwargs): raise Exception(f"You cannot use ``pop`` on a {self.__class__.__name__} instance.") def update(self, *args, **kwargs): raise Exception(f"You cannot use ``update`` on a {self.__class__.__name__} instance.") def __getitem__(self, k): if isinstance(k, str): inner_dict = {k: v for (k, v) in self.items()} return inner_dict[k] else: return self.to_tuple()[k] def to_tuple(self) -> Tuple[Any]: """ Convert self to a tuple containing all the attributes/keys that are not ``None``. """ return tuple(self[k] for k in self.keys())
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/file_utils.py
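# Illustrative sketch (not part of the file above): shows the cache-file naming scheme
# implemented by url_to_filename -- sha256 of the URL, an optional "." + sha256 of the
# ETag, and a trailing ".h5" for Keras weight files so TF can recognize them. The URL
# and ETag values are made-up examples.
from hashlib import sha256

def cache_filename(url, etag=None):
    name = sha256(url.encode("utf-8")).hexdigest()
    if etag is not None:
        name += "." + sha256(etag.encode("utf-8")).hexdigest()
    if url.endswith(".h5"):
        name += ".h5"
    return name

example_url = "https://cdn.huggingface.co/facebook/bart-large/pytorch_model.bin"
print(cache_filename(example_url))               # filename with no ETag hash
print(cache_filename(example_url, etag='"abc"')) # a new ETag yields a new cache entry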
# Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import random import math import warnings from dataclasses import dataclass from typing import Any, Callable, Dict, List, NewType, Optional, Tuple, Union import torch from torch.nn.utils.rnn import pad_sequence from bart.tokenization.tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTrainedTokenizerBase from bart.modeling.modeling_bart import shift_tokens_right InputDataClass = NewType("InputDataClass", Any) """ A DataCollator is a function that takes a list of samples from a Dataset and collate them into a batch, as a dictionary of Tensors. """ DataCollator = NewType("DataCollator", Callable[[List[InputDataClass]], Dict[str, torch.Tensor]]) def default_data_collator(features: List[InputDataClass]) -> Dict[str, torch.Tensor]: """ Very simple data collator that simply collates batches of dict-like objects and performs special handling for potential keys named: - ``label``: handles a single value (int or float) per object - ``label_ids``: handles a list of values per object Does not do any additional preprocessing: property names of the input object will be used as corresponding inputs to the model. See glue and ner for example of how it's useful. """ # In this function we'll make the assumption that all `features` in the batch # have the same attributes. # So we will look at the first element as a proxy for what attributes exist # on the whole batch. if not isinstance(features[0], (dict, BatchEncoding)): features = [vars(f) for f in features] first = features[0] batch = {} # Special handling for labels. # Ensure that tensor is created with the correct type # (it should be automatically the case, but let's make sure of it.) if "label" in first and first["label"] is not None: label = first["label"].item() if isinstance(first["label"], torch.Tensor) else first["label"] dtype = torch.long if isinstance(label, int) else torch.float batch["labels"] = torch.tensor([f["label"] for f in features], dtype=dtype) elif "label_ids" in first and first["label_ids"] is not None: if isinstance(first["label_ids"], torch.Tensor): batch["labels"] = torch.stack([f["label_ids"] for f in features]) else: dtype = torch.long if type(first["label_ids"][0]) is int else torch.float batch["labels"] = torch.tensor([f["label_ids"] for f in features], dtype=dtype) # Handling of all other possible keys. # Again, we will use the first element to figure out which key/values are not None for this model. for k, v in first.items(): if k not in ("label", "label_ids") and v is not None and not isinstance(v, str): if isinstance(v, torch.Tensor): batch[k] = torch.stack([f[k] for f in features]) else: batch[k] = torch.tensor([f[k] for f in features]) return batch @dataclass class DataCollatorWithPadding: """ Data collator that will dynamically pad the inputs received. 
Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). """ tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]: batch = self.tokenizer.pad( features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors="pt", ) if "label" in batch: batch["labels"] = batch["label"] del batch["label"] if "label_ids" in batch: batch["labels"] = batch["label_ids"] del batch["label_ids"] return batch @dataclass class DataCollatorForTokenClassification: """ Data collator that will dynamically pad the inputs received, as well as the labels. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. padding (:obj:`bool`, :obj:`str` or :class:`~transformers.tokenization_utils_base.PaddingStrategy`, `optional`, defaults to :obj:`True`): Select a strategy to pad the returned sequences (according to the model's padding side and padding index) among: * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence if provided). * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the maximum acceptable input length for the model if that argument is not provided. * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different lengths). max_length (:obj:`int`, `optional`): Maximum length of the returned list and optionally padding length (see above). pad_to_multiple_of (:obj:`int`, `optional`): If set will pad the sequence to a multiple of the provided value. This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >= 7.5 (Volta). label_pad_token_id (:obj:`int`, `optional`, defaults to -100): The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions). 
""" tokenizer: PreTrainedTokenizerBase padding: Union[bool, str, PaddingStrategy] = True max_length: Optional[int] = None pad_to_multiple_of: Optional[int] = None label_pad_token_id: int = -100 def __call__(self, features): label_name = "label" if "label" in features[0].keys() else "labels" labels = [feature[label_name] for feature in features] if label_name in features[0].keys() else None batch = self.tokenizer.pad( features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, # Conversion to tensors will fail if we have labels as they are not of the same length yet. return_tensors="pt" if labels is None else None, ) if labels is None: return batch sequence_length = torch.tensor(batch["input_ids"]).shape[1] padding_side = self.tokenizer.padding_side if padding_side == "right": batch["labels"] = [label + [self.label_pad_token_id] * (sequence_length - len(label)) for label in labels] else: batch["labels"] = [[self.label_pad_token_id] * (sequence_length - len(label)) + label for label in labels] batch = {k: torch.tensor(v, dtype=torch.int64) for k, v in batch.items()} return batch def _collate_batch(examples, tokenizer, masks=None, max_length=None): """Collate `examples` into a batch, using the information in `tokenizer` for padding if necessary.""" # Tensorize if necessary. if isinstance(examples[0], (list, tuple)): examples = [torch.tensor(e, dtype=torch.long) for e in examples] # Check if padding is necessary. length_of_first = examples[0].size(0) are_tensors_same_length = ( all(x.size(0) == length_of_first for x in examples) and (max_length is None or max_length == length_of_first)) if are_tensors_same_length: if masks is None: return torch.stack(examples, dim=0) else: return torch.stack(examples, dim=0), torch.stack(masks, dim=0) # If yes, check if we have a `pad_token`. if tokenizer._pad_token is None: raise ValueError( "You are attempting to pad samples but the tokenizer you are using" f" ({tokenizer.__class__.__name__}) does not have a pad token." ) # Creating the full tensor and filling it with our data. max_length = max_length if max_length is not None else max(x.size(0) for x in examples) result = examples[0].new_full([len(examples), max_length], tokenizer.pad_token_id) for i, example in enumerate(examples): if tokenizer.padding_side == "right": result[i, : example.shape[0]] = example else: result[i, -example.shape[0] :] = example if masks is not None: result_mask = masks[0].new_full([len(masks), max_length], 0) for i, mask in enumerate(masks): if tokenizer.padding_side == "right": result_mask[i, : mask.shape[0]] = mask else: result_mask[i, -mask.shape[0] :] = mask return result, result_mask return result def tolist(x: Union[List[Any], torch.Tensor]): return x.tolist() if isinstance(x, torch.Tensor) else x @dataclass class DataCollatorForLanguageModeling: """ Data collator used for language modeling. Inputs are dynamically padded to the maximum length of a batch if they are not all of the same length. Args: tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`): The tokenizer used for encoding the data. mlm (:obj:`bool`, `optional`, defaults to :obj:`True`): Whether or not to use masked language modeling. If set to :obj:`False`, the labels are the same as the inputs with the padding tokens ignored (by setting them to -100). Otherwise, the labels are -100 for non-masked tokens and the value to predict for the masked token. 
mlm_probability (:obj:`float`, `optional`, defaults to 0.15): The probability with which to (randomly) mask tokens in the input, when :obj:`mlm` is set to :obj:`True`. .. note:: For best performance, this data collator should be used with a dataset having items that are dictionaries or BatchEncoding, with the :obj:`"special_tokens_mask"` key, as returned by a :class:`~transformers.PreTrainedTokenizer` or a :class:`~transformers.PreTrainedTokenizerFast` with the argument :obj:`return_special_tokens_mask=True`. """ tokenizer: PreTrainedTokenizerBase mlm: bool = True mlm_probability: float = 0.15 def __post_init__(self): if self.mlm and self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. " "You should pass `mlm=False` to train on causal language modeling instead." ) def __call__( self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]] ) -> Dict[str, torch.Tensor]: # Handle dict or lists with proper padding and conversion to tensor. if isinstance(examples[0], (dict, BatchEncoding)): batch = self.tokenizer.pad(examples, return_tensors="pt") else: batch = {"input_ids": _collate_batch(examples, self.tokenizer)} # If special token mask has been preprocessed, pop it from the dict. special_tokens_mask = batch.pop("special_tokens_mask", None) if self.mlm: batch["input_ids"], batch["labels"] = self.mask_tokens( batch["input_ids"], special_tokens_mask=special_tokens_mask ) else: labels = batch["input_ids"].clone() if self.tokenizer.pad_token_id is not None: labels[labels == self.tokenizer.pad_token_id] = -100 batch["labels"] = labels return batch def mask_tokens( self, inputs: torch.Tensor, special_tokens_mask: Optional[torch.Tensor] = None ) -> Tuple[torch.Tensor, torch.Tensor]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. """ labels = inputs.clone() # We sample a few tokens in each sequence for MLM training (with probability `self.mlm_probability`) probability_matrix = torch.full(labels.shape, self.mlm_probability) if special_tokens_mask is None: special_tokens_mask = [ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist() ] special_tokens_mask = torch.tensor(special_tokens_mask, dtype=torch.bool) else: special_tokens_mask = special_tokens_mask.bool() probability_matrix.masked_fill_(special_tokens_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels @dataclass class DataCollatorForWholeWordMask(DataCollatorForLanguageModeling): """ Data collator used for language modeling. 
- collates batches of tensors, honoring their tokenizer's pad_token - preprocesses batches for masked language modeling """ def __call__( self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]] ) -> Dict[str, torch.Tensor]: if isinstance(examples[0], (dict, BatchEncoding)): input_ids = [e["input_ids"] for e in examples] else: input_ids = examples examples = [{"input_ids": e} for e in examples] batch_input = _collate_batch(input_ids, self.tokenizer) mask_labels = [] for e in examples: ref_tokens = [] for id in tolist(e["input_ids"]): token = self.tokenizer._convert_id_to_token(id) ref_tokens.append(token) # For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢] if "chinese_ref" in e: ref_pos = tolist(e["chinese_ref"]) len_seq = e["input_ids"].size(0) for i in range(len_seq): if i in ref_pos: ref_tokens[i] = "##" + ref_tokens[i] mask_labels.append(self._whole_word_mask(ref_tokens)) batch_mask = _collate_batch(mask_labels, self.tokenizer) inputs, labels = self.mask_tokens(batch_input, batch_mask) return {"input_ids": inputs, "labels": labels} def _whole_word_mask(self, input_tokens: List[str], max_predictions=512): """ Get 0/1 labels for masked tokens with whole word mask proxy """ cand_indexes = [] for (i, token) in enumerate(input_tokens): if token == "[CLS]" or token == "[SEP]": continue if len(cand_indexes) >= 1 and token.startswith("##"): cand_indexes[-1].append(i) else: cand_indexes.append([i]) random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if len(masked_lms) >= num_to_predict: break # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))] return mask_labels def mask_tokens(self, inputs: torch.Tensor, mask_labels: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref. """ if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer." 
) labels = inputs.clone() # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix = mask_labels special_tokens_mask = [ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist() ] probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0) if self.tokenizer._pad_token is not None: padding_mask = labels.eq(self.tokenizer.pad_token_id) probability_matrix.masked_fill_(padding_mask, value=0.0) masked_indices = probability_matrix.bool() labels[~masked_indices] = -100 # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels @dataclass class DataCollatorForSOP(DataCollatorForLanguageModeling): """ Data collator used for sentence order prediction task. - collates batches of tensors, honoring their tokenizer's pad_token - preprocesses batches for both masked language modeling and sentence order prediction """ def __init__(self, *args, **kwargs): warnings.warn( "DataCollatorForSOP is deprecated and will be removed in a future version, you can now use " "DataCollatorForLanguageModeling instead.", FutureWarning, ) def __call__(self, examples: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]: input_ids = [example["input_ids"] for example in examples] input_ids = _collate_batch(input_ids, self.tokenizer) input_ids, labels, attention_mask = self.mask_tokens(input_ids) token_type_ids = [example["token_type_ids"] for example in examples] # size of segment_ids varied because randomness, padding zero to the end as the original implementation token_type_ids = pad_sequence(token_type_ids, batch_first=True, padding_value=self.tokenizer.pad_token_id) sop_label_list = [example["sentence_order_label"] for example in examples] sentence_order_label = torch.stack(sop_label_list) return { "input_ids": input_ids, "labels": labels, "attention_mask": attention_mask, "token_type_ids": token_type_ids, "sentence_order_label": sentence_order_label, } def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Prepare masked tokens inputs/labels/attention_mask for masked language modeling: 80% MASK, 10% random, 10% original. N-gram not applied yet. """ if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer." 
) labels = inputs.clone() # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix = torch.full(labels.shape, self.mlm_probability) special_tokens_mask = [ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist() ] probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0) if self.tokenizer._pad_token is not None: padding_mask = labels.eq(self.tokenizer.pad_token_id) probability_matrix.masked_fill_(padding_mask, value=0.0) masked_indices = torch.bernoulli(probability_matrix).bool() # probability be `1` (masked), however in albert model attention mask `0` means masked, revert the value attention_mask = (~masked_indices).float() if self.tokenizer._pad_token is not None: attention_padding_mask = labels.eq(self.tokenizer.pad_token_id) attention_mask.masked_fill_(attention_padding_mask, value=1.0) labels[~masked_indices] = -100 # We only compute loss on masked tokens, -100 is default for CE compute # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices inputs[indices_replaced] = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(len(self.tokenizer), labels.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged return inputs, labels, attention_mask @dataclass class DataCollatorForPermutationLanguageModeling: """ Data collator used for permutation language modeling. - collates batches of tensors, honoring their tokenizer's pad_token - preprocesses batches for permutation language modeling with procedures specific to XLNet """ tokenizer: PreTrainedTokenizerBase plm_probability: float = 1 / 6 max_span_length: int = 5 # maximum length of a span of masked tokens def __call__( self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]] ) -> Dict[str, torch.Tensor]: if isinstance(examples[0], (dict, BatchEncoding)): examples = [e["input_ids"] for e in examples] batch = _collate_batch(examples, self.tokenizer) inputs, perm_mask, target_mapping, labels = self.mask_tokens(batch) return {"input_ids": inputs, "perm_mask": perm_mask, "target_mapping": target_mapping, "labels": labels} def mask_tokens(self, inputs: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: """ The masked tokens to be predicted for a particular sequence are determined by the following algorithm: 0. Start from the beginning of the sequence by setting ``cur_len = 0`` (number of tokens processed so far). 1. Sample a ``span_length`` from the interval ``[1, max_span_length]`` (length of span of tokens to be masked) 2. Reserve a context of length ``context_length = span_length / plm_probability`` to surround span to be masked 3. Sample a starting point ``start_index`` from the interval ``[cur_len, cur_len + context_length - span_length]`` and mask tokens ``start_index:start_index + span_length`` 4. Set ``cur_len = cur_len + context_length``. If ``cur_len < max_len`` (i.e. there are tokens remaining in the sequence to be processed), repeat from Step 1. 
""" if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for permutation language modeling. Please add a mask token if you want to use this tokenizer." ) if inputs.size(1) % 2 != 0: raise ValueError( "This collator requires that sequence lengths be even to create a leakage-free perm_mask. Please see relevant comments in source code for details." ) labels = inputs.clone() # Creating the mask and target_mapping tensors masked_indices = torch.full(labels.shape, 0, dtype=torch.bool) target_mapping = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32) for i in range(labels.size(0)): # Start from the beginning of the sequence by setting `cur_len = 0` (number of tokens processed so far). cur_len = 0 max_len = labels.size(1) while cur_len < max_len: # Sample a `span_length` from the interval `[1, max_span_length]` (length of span of tokens to be masked) span_length = torch.randint(1, self.max_span_length + 1, (1,)).item() # Reserve a context of length `context_length = span_length / plm_probability` to surround the span to be masked context_length = int(span_length / self.plm_probability) # Sample a starting point `start_index` from the interval `[cur_len, cur_len + context_length - span_length]` and mask tokens `start_index:start_index + span_length` start_index = cur_len + torch.randint(context_length - span_length + 1, (1,)).item() masked_indices[i, start_index : start_index + span_length] = 1 # Set `cur_len = cur_len + context_length` cur_len += context_length # Since we're replacing non-masked tokens with -100 in the labels tensor instead of skipping them altogether, # the i-th predict corresponds to the i-th token. target_mapping[i] = torch.eye(labels.size(1)) special_tokens_mask = torch.tensor( [self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in labels.tolist()], dtype=torch.bool, ) masked_indices.masked_fill_(special_tokens_mask, value=0.0) if self.tokenizer._pad_token is not None: padding_mask = labels.eq(self.tokenizer.pad_token_id) masked_indices.masked_fill_(padding_mask, value=0.0) # Mask indicating non-functional tokens, where functional tokens are [SEP], [CLS], padding, etc. non_func_mask = ~(padding_mask | special_tokens_mask) inputs[masked_indices] = self.tokenizer.mask_token_id labels[~masked_indices] = -100 # We only compute loss on masked tokens perm_mask = torch.zeros((labels.size(0), labels.size(1), labels.size(1)), dtype=torch.float32) for i in range(labels.size(0)): # Generate permutation indices i.e. sample a random factorisation order for the sequence. This will # determine which tokens a given token can attend to (encoded in `perm_mask`). # Note: Length of token sequence being permuted has to be less than or equal to reused sequence length # (see documentation for `mems`), otherwise information may leak through due to reuse. In this implementation, # we assume that reused length is half of sequence length and permutation length is equal to reused length. # This requires that the sequence length be even. 
# Create a linear factorisation order perm_index = torch.arange(labels.size(1)) # Split this into two halves, assuming that half the sequence is reused each time perm_index = perm_index.reshape((-1, labels.size(1) // 2)).transpose(0, 1) # Permute the two halves such that they do not cross over perm_index = perm_index[torch.randperm(labels.size(1) // 2)] # Flatten this out into the desired permuted factorisation order perm_index = torch.flatten(perm_index.transpose(0, 1)) # Set the permutation indices of non-masked (non-functional) tokens to the # smallest index (-1) so that: # (1) They can be seen by all other positions # (2) They cannot see masked positions, so there won't be information leak perm_index.masked_fill_(~masked_indices[i] & non_func_mask[i], -1) # The logic for whether the i-th token can attend on the j-th token based on the factorisation order: # 0 (can attend): If perm_index[i] > perm_index[j] or j is neither masked nor a functional token # 1 (cannot attend): If perm_index[i] <= perm_index[j] and j is either masked or a functional token perm_mask[i] = ( perm_index.reshape((labels.size(1), 1)) <= perm_index.reshape((1, labels.size(1))) ) & masked_indices[i] return inputs.long(), perm_mask, target_mapping, labels.long() @dataclass class DataCollatorForBART(DataCollatorForLanguageModeling): """ Data collator used for language modeling. - collates batches of tensors, honoring their tokenizer's pad_token - preprocesses batches for masked language modeling - includes sentence permutation and whole work masking """ permute_sentence_ratio: float = 1.0 decoder_start_token_id: int = None def __call__( self, examples: List[Union[List[int], torch.Tensor, Dict[str, torch.Tensor]]] ) -> Dict[str, torch.Tensor]: assert (self.decoder_start_token_id is not None, "This collator requires that decoder_start_token_id to be defined!") input_attention_mask = None batch = {} if isinstance(examples[0], (dict, BatchEncoding)): input_ids = [e["input_ids"] for e in examples] input_attention_mask = [e["attention_mask"] for e in examples] else: input_ids = examples examples = [{"input_ids": e} for e in examples] if input_attention_mask is None: batch_input = _collate_batch(input_ids, self.tokenizer) else: batch_input, input_attention_mask = _collate_batch(input_ids, self.tokenizer, input_attention_mask) batch["attention_mask"] = input_attention_mask max_length = batch_input.shape[1] batch["labels"] = batch_input.clone() batch["decoder_input_ids"] = shift_tokens_right(batch_input, self.tokenizer.pad_token_id, self.decoder_start_token_id) if self.permute_sentence_ratio > 0.0: batch_input = torch.stack([ self._permute_sentences( input_id, self.tokenizer._convert_token_to_id("."), self.permute_sentence_ratio) for input_id in batch_input]) mask_labels = [] for i, input_id in enumerate(input_ids): ref_tokens = [] for id in tolist(input_id): token = self.tokenizer._convert_id_to_token(id) ref_tokens.append(token) #@TODO need to permute examples[i]["chinese"] according to sentence permutation above # # For Chinese tokens, we need extra inf to mark sub-word, e.g [喜,欢]-> [喜,##欢] # if "chinese_ref" in examples[i]: # ref_pos = tolist(examples[i]["chinese_ref"]) # len_seq = input_id.size(0) # for i in range(len_seq): # if i in ref_pos: # ref_tokens[i] = "##" + ref_tokens[i] mask_labels.append(self._whole_word_mask(ref_tokens)) batch_mask = _collate_batch(mask_labels, self.tokenizer) batch_input, input_attention_mask = self.mask_tokens_span(batch_input, batch_mask, input_attention_mask) # Collate to max_length to 
match decoder inputs and labels if input_attention_mask is None: batch["input_ids"] = _collate_batch(batch_input, self.tokenizer, max_length=max_length) else: batch["input_ids"], batch["attention_mask"] = _collate_batch( batch_input, self.tokenizer, input_attention_mask, max_length) return batch def _whole_word_mask(self, input_tokens: List[str], max_predictions=512): """ Get 0/1 labels for masked tokens with whole word mask proxy """ cand_indexes = [] for (i, token) in enumerate(input_tokens): if token == "[CLS]" or token == "[SEP]": continue if len(cand_indexes) >= 1 and (not token.startswith("Ġ") or token.startswith("##")): #@TODO hf error in start with token? cand_indexes[-1].append(i) else: cand_indexes.append([i]) random.shuffle(cand_indexes) num_to_predict = min(max_predictions, max(1, int(round(len(input_tokens) * self.mlm_probability)))) masked_lms = [] covered_indexes = set() for index_set in cand_indexes: if len(masked_lms) >= num_to_predict: break # If adding a whole-word mask would exceed the maximum number of # predictions, then just skip this candidate. if len(masked_lms) + len(index_set) > num_to_predict: continue is_any_index_covered = False for index in index_set: if index in covered_indexes: is_any_index_covered = True break if is_any_index_covered: continue for index in index_set: covered_indexes.add(index) masked_lms.append(index) assert len(covered_indexes) == len(masked_lms) mask_labels = [1 if i in covered_indexes else 0 for i in range(len(input_tokens))] return mask_labels def mask_tokens_span(self, inputs: torch.Tensor, mask_labels: torch.Tensor, attention_mask) -> Tuple[torch.Tensor, torch.Tensor]: """ Prepare masked tokens inputs/labels for masked language modeling: 80% MASK, 10% random, 10% original. Set 'mask_labels' means we use whole word mask (wwm), we directly mask idxs according to it's ref. """ if self.tokenizer.mask_token is None: raise ValueError( "This tokenizer does not have a mask token which is necessary for masked language modeling. Remove the --mlm flag if you want to use this tokenizer." 
) # We sample a few tokens in each sequence for masked-LM training (with probability args.mlm_probability defaults to 0.15 in Bert/RoBERTa) probability_matrix = mask_labels special_tokens_mask = [ self.tokenizer.get_special_tokens_mask(val, already_has_special_tokens=True) for val in inputs.tolist() ] probability_matrix.masked_fill_(torch.tensor(special_tokens_mask, dtype=torch.bool), value=0.0) if self.tokenizer._pad_token is not None: padding_mask = inputs.eq(self.tokenizer.pad_token_id) probability_matrix.masked_fill_(padding_mask, value=0.0) masked_indices = probability_matrix.bool() #@Todo we are now computing loss on all labels # labels[~masked_indices] = self.tokenizer.pad_token_id # We only compute loss on masked tokens # 80% of the time, we replace masked input tokens with tokenizer.mask_token ([MASK]) indices_replaced = torch.bernoulli(torch.full(inputs.shape, 0.8)).bool() & masked_indices mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.mask_token) inputs[indices_replaced] = mask_token_id # 10% of the time, we replace masked input tokens with random word indices_random = torch.bernoulli(torch.full(inputs.shape, 0.5)).bool() & masked_indices & ~indices_replaced random_words = torch.randint(len(self.tokenizer), inputs.shape, dtype=torch.long) inputs[indices_random] = random_words[indices_random] # The rest of the time (10% of the time) we keep the masked input tokens unchanged # return inputs, labels #Remove consecutive duplicate mask tokens. One mask token represents whole span inputs_left_shift = torch.cat((inputs[:, 1:], torch.zeros(inputs.shape[0],1)), dim=-1) mask_left_shift = torch.not_equal((inputs - inputs_left_shift), 0) mask = torch.cat((torch.full((inputs.shape[0],1),True), mask_left_shift[:, :-1]), dim=-1) | torch.not_equal(inputs, mask_token_id) inputs = [torch.masked_select(inputs[i,:], mask[i,:]) for i in range(inputs.shape[0])] if attention_mask is not None: attention_mask = [torch.masked_select(attention_mask[i, :], mask[i,:]) for i in range(attention_mask.shape[0])] return inputs, attention_mask def _permute_sentences(self, source, full_stop_index, p=1.0): # Pretend it ends with a full stop so last span is a sentence span_end = self.tokenizer.convert_tokens_to_ids(self.tokenizer.eos_token) source[source == span_end] = full_stop_index full_stops = source == full_stop_index # Tokens that are full stops, where the previous token is not sentence_ends = (full_stops[1:] * ~full_stops[:-1]).nonzero(as_tuple=False) + 2 result = source.clone() num_sentences = sentence_ends.size(0) num_to_permute = math.ceil((num_sentences * 2 * p) / 2.0) substitutions = torch.randperm(num_sentences)[:num_to_permute] ordering = torch.arange(0, num_sentences) ordering[substitutions] = substitutions[torch.randperm(num_to_permute)] # Ignore <bos> at start index = 1 for i in ordering: sentence = source[(sentence_ends[i - 1] if i > 0 else 1) : sentence_ends[i]] result[index : index + sentence.size(0)] = sentence index += sentence.size(0) last_fullstop = (source == full_stop_index).nonzero(as_tuple=False)[-1] #Convert last full stop to span end source[last_fullstop] = span_end result[last_fullstop] = span_end return result
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/data_collator.py
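The collators in data_collator.py above share one core step: sampling prediction targets and then applying the 80% mask / 10% random / 10% unchanged replacement rule described in DataCollatorForLanguageModeling.mask_tokens. Below is a minimal, self-contained sketch of just that recipe on a toy batch, using only torch; the function name, the mask id 103, and the vocabulary size are made-up values for demonstration and do not account for special-token or padding masks.

import torch


def toy_mask_tokens(inputs: torch.Tensor, mask_id: int, vocab_size: int, mlm_probability: float = 0.15):
    """Return (masked_inputs, labels) using the 80/10/10 recipe on a batch of token ids."""
    labels = inputs.clone()

    # Choose ~mlm_probability of all positions as prediction targets.
    masked_indices = torch.bernoulli(torch.full(labels.shape, mlm_probability)).bool()
    labels[~masked_indices] = -100  # loss is only computed on masked positions

    # 80% of the chosen positions are replaced with the mask token.
    indices_replaced = torch.bernoulli(torch.full(labels.shape, 0.8)).bool() & masked_indices
    inputs[indices_replaced] = mask_id

    # 10% (half of the remaining 20%) are replaced with a random token.
    indices_random = torch.bernoulli(torch.full(labels.shape, 0.5)).bool() & masked_indices & ~indices_replaced
    random_words = torch.randint(vocab_size, labels.shape, dtype=torch.long)
    inputs[indices_random] = random_words[indices_random]

    # The final 10% keep their original token, so the model cannot rely on always seeing [MASK].
    return inputs, labels


batch = torch.randint(5, 100, (2, 8))  # fake token ids
masked, labels = toy_mask_tokens(batch.clone(), mask_id=103, vocab_size=100)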
# coding=utf-8 # Copyright (c) 2022 NVIDIA CORPORATION. All rights reserved. # Copyright 2020 The HuggingFace Inc. team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from abc import ABC, abstractmethod from collections import UserDict from typing import Optional, Tuple import torch from .file_utils import add_start_docstrings PROCESS_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from :class:`~transformers.PretrainedTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ next_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, 2 * num_beams)`): Current scores of the top :obj:`2 * num_beams` non-finished beam hypotheses. next_tokens (:obj:`torch.LongTensor` of shape :obj:`(batch_size, 2 * num_beams)`): :obj:`input_ids` of the tokens corresponding to the top :obj:`2 * num_beams` non-finished beam hypotheses. next_indices (:obj:`torch.LongTensor` of shape :obj:`(batch_size, 2 * num_beams)`): Beam indices indicating to which beam hypothesis the :obj:`next_tokens` correspond. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. Return: :obj:`UserDict`: A dictionary composed of the fields as defined above: - **next_beam_scores** (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`) -- Updated scores of all non-finished beams. - **next_beam_tokens** (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`) -- Next tokens to be added to the non-finished beam_hypotheses. - **next_beam_indices** (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`) -- Beam indices indicating to which beam the next tokens shall be added. """ FINALIZE_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size * num_beams, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using any class inheriting from :class:`~transformers.PretrainedTokenizer`. See :meth:`transformers.PreTrainedTokenizer.encode` and :meth:`transformers.PreTrainedTokenizer.__call__` for details. `What are input IDs? <../glossary.html#input-ids>`__ final_beam_scores (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`): The final scores of all non-finished beams. final_beam_tokens (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`): The last tokens to be added to the non-finished beam_hypotheses. final_beam_indices (:obj:`torch.FloatTensor` of shape :obj:`(batch_size * num_beams)`): The beam indices indicating to which beam the :obj:`final_beam_tokens` shall be added. pad_token_id (:obj:`int`, `optional`): The id of the `padding` token. eos_token_id (:obj:`int`, `optional`): The id of the `end-of-sequence` token. 
Return: :obj:`torch.LongTensor` of shape :obj:`(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The second dimension (sequence_length) is either equal to :obj:`max_length` or shorter if all batches finished early due to the :obj:`eos_token_id`. """ class BeamScorer(ABC): """ Abstract base class for all beam scorers that are used for :meth:`~transformers.PretrainedModel.beam_search` and :meth:`~transformers.PretrainedModel.beam_sample`. """ @abstractmethod @add_start_docstrings(PROCESS_INPUTS_DOCSTRING) def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, **kwargs ) -> Tuple[torch.Tensor]: raise NotImplementedError("This is an abstract method.") @abstractmethod @add_start_docstrings(FINALIZE_INPUTS_DOCSTRING) def finalize( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, **kwargs ) -> torch.LongTensor: raise NotImplementedError("This is an abstract method.") class BeamSearchScorer(BeamScorer): r""" :class:`transformers.BeamScorer` implementing standard beam search decoding. Adapted in part from `Facebook's XLM beam search code <https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529>`__. Reference for the diverse beam search algorithm and implementation `Ashwin Kalyan's DBS implementation <https://github.com/ashwinkalyan/dbs/blob/master/dbs/beam_utils.lua>`__ Args: batch_size (:obj:`int`): Batch Size of :obj:`input_ids` for which standard beam search decoding is run in parallel. max_length (:obj:`int`): The maximum length of the sequence to be generated. num_beams (:obj:`int`): Number of beams for beam search. device (:obj:`torch.device`): Defines the device type (*e.g.*, :obj:`"cpu"` or :obj:`"cuda"`) on which this instance of :obj:`BeamSearchScorer` will be allocated. length_penalty (:obj:`float`, `optional`, defaults to 1.0): Exponential penalty to the length. 1.0 means no penalty. Set to values < 1.0 in order to encourage the model to generate shorter sequences, to a value > 1.0 in order to encourage the model to produce longer sequences. do_early_stopping (:obj:`bool`, `optional`, defaults to :obj:`False`): Whether to stop the beam search when at least ``num_beams`` sentences are finished per batch or not. num_beam_hyps_to_keep (:obj:`int`, `optional`, defaults to 1): The number of beam hypotheses that shall be returned upon calling :meth:`~transformer.BeamSearchScorer.finalize`. num_beam_groups (:obj:`int`): Number of groups to divide :obj:`num_beams` into in order to ensure diversity among different groups of beams. See `this paper <https://arxiv.org/pdf/1610.02424.pdf>`__ for more details. 
""" def __init__( self, batch_size: int, max_length: int, num_beams: int, device: torch.device, length_penalty: Optional[float] = 1.0, do_early_stopping: Optional[bool] = False, num_beam_hyps_to_keep: Optional[int] = 1, num_beam_groups: Optional[int] = 1, ): self.max_length = max_length self.num_beams = num_beams self.device = device self.length_penalty = length_penalty self.do_early_stopping = do_early_stopping self.num_beam_hyps_to_keep = num_beam_hyps_to_keep self.num_beam_groups = num_beam_groups self.group_size = self.num_beams // self.num_beam_groups self._is_init = False self._beam_hyps = [ BeamHypotheses( num_beams=self.num_beams, max_length=self.max_length, length_penalty=self.length_penalty, early_stopping=self.do_early_stopping, ) for _ in range(batch_size) ] self._done = torch.tensor([False for _ in range(batch_size)], dtype=torch.bool, device=self.device) if not isinstance(num_beams, int) or num_beams <= 1: raise ValueError( f"`num_beams` has to be an integer strictly greater than 1, but is {num_beams}. For `num_beams` == 1, one should make use of `greedy_search` instead." ) if not isinstance(num_beam_groups, int) or (num_beam_groups > num_beams) or (num_beams % num_beam_groups != 0): raise ValueError( f"`num_beam_groups` has to be an integer smaller or equal than `num_beams` and `num_beams` " f"has to be divisible by `num_beam_groups`, but is {num_beam_groups} with `num_beams` being {num_beams}." ) @property def is_done(self) -> bool: return self._done.all() def process( self, input_ids: torch.LongTensor, next_scores: torch.FloatTensor, next_tokens: torch.LongTensor, next_indices: torch.LongTensor, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, ) -> Tuple[torch.Tensor]: cur_len = input_ids.shape[-1] batch_size = len(self._beam_hyps) assert batch_size == (input_ids.shape[0] // self.group_size) device = input_ids.device next_beam_scores = torch.zeros((batch_size, self.group_size), dtype=next_scores.dtype, device=device) next_beam_tokens = torch.zeros((batch_size, self.group_size), dtype=next_tokens.dtype, device=device) next_beam_indices = torch.zeros((batch_size, self.group_size), dtype=next_indices.dtype, device=device) for batch_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_idx]: assert ( len(beam_hyp) >= self.num_beams ), "Batch can only be done if at least {} beams have been generated".format(self.num_beams) assert ( eos_token_id is not None and pad_token_id is not None ), "generated beams >= num_beams -> eos_token_id and pad_token have to be defined" # pad the batch next_beam_scores[batch_idx, :] = 0 next_beam_tokens[batch_idx, :] = pad_token_id next_beam_indices[batch_idx, :] = 0 continue # next tokens for this sentence beam_idx = 0 for beam_token_rank, (next_token, next_score, next_index) in enumerate( zip(next_tokens[batch_idx], next_scores[batch_idx], next_indices[batch_idx]) ): batch_beam_idx = batch_idx * self.group_size + next_index # add to generated hypotheses if end of sentence if (eos_token_id is not None) and (next_token.item() == eos_token_id): # if beam_token does not belong to top num_beams tokens, it should not be added is_beam_token_worse_than_top_num_beams = beam_token_rank >= self.group_size if is_beam_token_worse_than_top_num_beams: continue beam_hyp.add( input_ids[batch_beam_idx].clone(), next_score.item(), ) else: # add next predicted token since it is not eos_token next_beam_scores[batch_idx, beam_idx] = next_score next_beam_tokens[batch_idx, beam_idx] = next_token next_beam_indices[batch_idx, beam_idx] = 
batch_beam_idx beam_idx += 1 # once the beam for next step is full, don't add more tokens to it. if beam_idx == self.group_size: break if beam_idx < self.group_size: raise ValueError( f"At most {self.group_size} tokens in {next_tokens[batch_idx]} can be equal to `eos_token_id: {eos_token_id}`. Make sure {next_tokens[batch_idx]} are corrected." ) # Check if we are done so that we can save a pad step if all(done) self._done[batch_idx] = self._done[batch_idx] or beam_hyp.is_done( next_scores[batch_idx].max().item(), cur_len ) return UserDict( { "next_beam_scores": next_beam_scores.view(-1), "next_beam_tokens": next_beam_tokens.view(-1), "next_beam_indices": next_beam_indices.view(-1), } ) def finalize( self, input_ids: torch.LongTensor, final_beam_scores: torch.FloatTensor, final_beam_tokens: torch.LongTensor, final_beam_indices: torch.LongTensor, pad_token_id: Optional[int] = None, eos_token_id: Optional[int] = None, ) -> Tuple[torch.LongTensor]: batch_size = len(self._beam_hyps) # finalize all open beam hypotheses and add to generated hypotheses for batch_idx, beam_hyp in enumerate(self._beam_hyps): if self._done[batch_idx]: continue # all open beam hypotheses are added to the beam hypothesis # beam hypothesis class automatically keeps the best beams for beam_id in range(self.num_beams): batch_beam_idx = batch_idx * self.num_beams + beam_id final_score = final_beam_scores[batch_beam_idx].item() final_tokens = input_ids[batch_beam_idx] beam_hyp.add(final_tokens, final_score) # select the best hypotheses sent_lengths = input_ids.new(batch_size * self.num_beam_hyps_to_keep) best = [] best_scores = torch.zeros(batch_size * self.num_beam_hyps_to_keep, device=self.device, dtype=torch.float32) # retrieve best hypotheses for i, beam_hyp in enumerate(self._beam_hyps): sorted_hyps = sorted(beam_hyp.beams, key=lambda x: x[0]) for j in range(self.num_beam_hyps_to_keep): best_hyp_tuple = sorted_hyps.pop() best_score = best_hyp_tuple[0] best_hyp = best_hyp_tuple[1] sent_lengths[self.num_beam_hyps_to_keep * i + j] = len(best_hyp) # append to lists best.append(best_hyp) best_scores[i * self.num_beam_hyps_to_keep + j] = best_score # prepare for adding eos sent_max_len = min(sent_lengths.max().item() + 1, self.max_length) decoded: torch.LongTensor = input_ids.new(batch_size * self.num_beam_hyps_to_keep, sent_max_len) # shorter batches are padded if needed if sent_lengths.min().item() != sent_lengths.max().item(): assert pad_token_id is not None, "`pad_token_id` has to be defined" decoded.fill_(pad_token_id) # fill with hypotheses and eos_token_id if the latter fits in for i, hypo in enumerate(best): decoded[i, : sent_lengths[i]] = hypo if sent_lengths[i] < self.max_length: decoded[i, sent_lengths[i]] = eos_token_id return UserDict( { "sequences": decoded, "sequence_scores": best_scores, } ) class BeamHypotheses: def __init__(self, num_beams: int, max_length: int, length_penalty: float, early_stopping: bool): """ Initialize n-best list of hypotheses. """ self.max_length = max_length - 1 # ignoring bos_token self.length_penalty = length_penalty self.early_stopping = early_stopping self.num_beams = num_beams self.beams = [] self.worst_score = 1e9 def __len__(self): """ Number of hypotheses in the list. """ return len(self.beams) def add(self, hyp: torch.LongTensor, sum_logprobs: float): """ Add a new hypothesis to the list. 
""" score = sum_logprobs / (hyp.shape[-1] ** self.length_penalty) if len(self) < self.num_beams or score > self.worst_score: self.beams.append((score, hyp)) if len(self) > self.num_beams: sorted_next_scores = sorted([(s, idx) for idx, (s, _) in enumerate(self.beams)]) del self.beams[sorted_next_scores[0][1]] self.worst_score = sorted_next_scores[1][0] else: self.worst_score = min(score, self.worst_score) def is_done(self, best_sum_logprobs: float, cur_len: int) -> bool: """ If there are enough hypotheses and that none of the hypotheses being generated can become better than the worst one in the heap, then we are done with this sentence. """ if len(self) < self.num_beams: return False elif self.early_stopping: return True else: cur_score = best_sum_logprobs / cur_len ** self.length_penalty ret = self.worst_score >= cur_score return ret
DeepLearningExamples-master
PyTorch/LanguageModeling/BART/utils/generation_beam_search.py
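BeamHypotheses above keeps the n best finished hypotheses ranked by a length-penalised score. The sketch below isolates that scoring rule in a few self-contained lines; the token ids and summed log-probabilities are invented for illustration, and only the formula matches the class.

import torch


def beam_score(sum_logprobs: float, length: int, length_penalty: float = 1.0) -> float:
    # Same formula as BeamHypotheses.add: penalties > 1.0 favour longer sequences,
    # penalties < 1.0 favour shorter ones.
    return sum_logprobs / (length ** length_penalty)


# Two finished hypotheses as (token ids, summed log-probability); the ids are arbitrary.
hyps = [
    (torch.tensor([0, 11, 12, 2]), -1.8),
    (torch.tensor([0, 11, 13, 14, 2]), -2.0),
]
ranked = sorted(hyps, key=lambda h: beam_score(h[1], h[0].shape[-1]), reverse=True)
best_tokens, best_sum_logprobs = ranked[0]
# With length_penalty=1.0: -1.8 / 4 = -0.45 vs -2.0 / 5 = -0.40, so the longer hypothesis ranks first.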