'''
USAGE:
python create_csv.py
'''
import pandas as pd
import numpy as np
import os
import joblib
from sklearn.preprocessing import LabelBinarizer
from tqdm import tqdm
from imutils import paths

# get all the image paths
image_paths = list(paths.list_images('preprocessed_image'))
# create a DataFrame
data = pd.DataFrame()
labels = []
for i, image_path in tqdm(enumerate(image_paths), total=len(image_paths)):
    label = image_path.split(os.path.sep)[-2]
    # save the relative path for mapping image to target
    data.loc[i, 'image_path'] = image_path
    labels.append(label)

labels = np.array(labels)
# one hot encode the labels
lb = LabelBinarizer()
labels = lb.fit_transform(labels)

print(f"The first one hot encoded labels: {labels[0]}")
print(f"Mapping the first one hot encoded label to its category: {lb.classes_[0]}")
print(f"Total instances: {len(labels)}")

for i in range(len(labels)):
    index = np.argmax(labels[i])
    data.loc[i, 'target'] = int(index)

# shuffle the dataset
data = data.sample(frac=1).reset_index(drop=True)

# save as CSV file
data.to_csv('data.csv', index=False)

# pickle the binarized labels
print('Saving the binarized labels as pickled file')
joblib.dump(lb, 'lb.pkl')

print(data.head(5))
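# --- Illustrative check (not part of the original scripts): a minimal sketch of how the
# --- data.csv and lb.pkl produced above can be read back to map an integer target to its
# --- class name. Assumes both files already exist in the working directory.
import joblib
import pandas as pd

df = pd.read_csv('data.csv')
lb = joblib.load('lb.pkl')
# 'target' stores the argmax index of the one-hot vector, so lb.classes_ maps it back
sample = df.sample(1).iloc[0]
print(sample['image_path'], '->', lb.classes_[int(sample['target'])])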
'''
USAGE:
python preprocess_image.py --num-images 1200
'''
import os
import cv2
import random
import argparse
from tqdm import tqdm

parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num-images', default=1200, type=int,
                    help='number of images to preprocess for each category')
args = vars(parser.parse_args())
print(f"Preprocessing {args['num_images']} images from each category...")

# get all the directory paths
root_path = 'asl_alphabet_train/asl_alphabet_train'
dir_paths = os.listdir(root_path)
dir_paths.sort()

# get --num-images images from each category
for idx, dir_path in tqdm(enumerate(dir_paths), total=len(dir_paths)):
    all_images = os.listdir(f"{root_path}/{dir_path}")
    os.makedirs(f"preprocessed_image/{dir_path}", exist_ok=True)
    for i in range(args['num_images']):  # how many images to preprocess for each category
        # pick a random image from this category (the original ASL dataset has 3000 per class,
        # but using the actual directory length avoids an index error on smaller folders)
        rand_id = random.randint(0, len(all_images) - 1)
        image = cv2.imread(f"{root_path}/{dir_path}/{all_images[rand_id]}")
        image = cv2.resize(image, (224, 224))
        cv2.imwrite(f"preprocessed_image/{dir_path}/{dir_path}{i}.jpg", image)

print('DONE')
import torch.nn as nn
import torch.nn.functional as F
import joblib

# load the binarized labels
print('Loading label binarizer...')
lb = joblib.load('lb.pkl')


class CustomCNN(nn.Module):
    def __init__(self):
        super(CustomCNN, self).__init__()
        self.conv1 = nn.Conv2d(3, 16, 5)
        self.conv2 = nn.Conv2d(16, 32, 5)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.conv4 = nn.Conv2d(64, 128, 5)
        self.fc1 = nn.Linear(128, 256)
        self.fc2 = nn.Linear(256, len(lb.classes_))
        self.pool = nn.MaxPool2d(2, 2)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = self.pool(F.relu(self.conv3(x)))
        x = self.pool(F.relu(self.conv4(x)))
        bs, _, _, _ = x.shape
        x = F.adaptive_avg_pool2d(x, 1).reshape(bs, -1)
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return x
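# --- Illustrative shape check (not part of the original project): a minimal sketch that
# --- runs a dummy 224x224 batch through CustomCNN to confirm the classifier head matches
# --- the number of binarized classes. Assumes lb.pkl has been created by create_csv.py,
# --- since cnn_models loads it at import time.
import torch
import cnn_models

model = cnn_models.CustomCNN()
dummy = torch.randn(2, 3, 224, 224)  # batch of two RGB images
with torch.no_grad():
    out = model(dummy)
print(out.shape)  # expected: torch.Size([2, number of classes in lb.classes_])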
import torch
import joblib
import cnn_models
from torch.utils.mobile_optimizer import optimize_for_mobile

lb = joblib.load('lb.pkl')
model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))

scripted_module = torch.jit.script(model)
optimized_scripted_module = optimize_for_mobile(scripted_module)
optimized_scripted_module._save_for_lite_interpreter("asl.ptl")
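# --- Illustrative parity check (an assumption, not part of the original conversion script):
# --- before relying on the mobile artifact on-device, one way to gain confidence is to
# --- compare the eager model against the TorchScript module on a dummy input.
# --- Assumes asl.pth and lb.pkl exist in the working directory.
import torch
import cnn_models

model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth', map_location='cpu'))
model.eval()

scripted = torch.jit.script(model)
dummy = torch.randn(1, 3, 224, 224)
with torch.no_grad():
    assert torch.allclose(model(dummy), scripted(dummy), atol=1e-6)
print('Eager and scripted outputs match')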
'''
USAGE:
python test.py --img A_test.jpg
'''
import torch
import joblib
import torch.nn as nn
import numpy as np
import cv2
import argparse
import torchvision.transforms as transforms
import torch.nn.functional as F
import time
import cnn_models
from PIL import Image

# construct the argument parser and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--img', default='../app/src/main/assets/C1.jpg', type=str,
                    help='path for the image to test on')
args = vars(parser.parse_args())

aug = transforms.Compose([
    transforms.Resize((224, 224)),
])

# load label binarizer
lb = joblib.load('lb.pkl')

model = cnn_models.CustomCNN()
model.load_state_dict(torch.load('asl.pth'))
print(model)
print('Model loaded')

image = Image.open(f"{args['img']}")
image = aug(image)
image = np.transpose(image, (2, 0, 1)).astype(np.float32)
image = torch.tensor(image, dtype=torch.float)
image = image.unsqueeze(0)
print(image.shape)

start = time.time()
outputs = model(image)
_, preds = torch.max(outputs.data, 1)
print('PREDS', preds)
print(f"Predicted output: {lb.classes_[preds]}")
end = time.time()
print(f"{(end-start):.3f} seconds")
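# --- Illustrative extension (an assumption, not part of the original test.py): cv2 is
# --- imported above but unused, and a common follow-up is single-frame webcam inference.
# --- This sketch reuses `aug`, `model`, and `lb` from test.py and grabs one frame from
# --- the default camera; camera index 0 is an assumption.
cap = cv2.VideoCapture(0)
ret, frame = cap.read()
cap.release()
if ret:
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    pil_img = Image.fromarray(rgb)
    tensor = np.transpose(np.array(aug(pil_img)), (2, 0, 1)).astype(np.float32)
    tensor = torch.tensor(tensor).unsqueeze(0)
    with torch.no_grad():
        pred = torch.argmax(model(tensor), dim=1)
    print(f"Webcam frame prediction: {lb.classes_[pred]}")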
'''
USAGE:
python train.py --epochs 10
'''
import pandas as pd
import joblib
import numpy as np
import torch
import random
from PIL import Image
import matplotlib.pyplot as plt
import argparse
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision.transforms as transforms
import time
import cnn_models
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from torch.utils.data import Dataset, DataLoader

# construct the argument parser and parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--epochs', default=10, type=int,
                    help='number of epochs to train the model for')
args = vars(parser.parse_args())

''' SEED Everything '''
def seed_everything(SEED=42):
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    torch.cuda.manual_seed_all(SEED)
    torch.backends.cudnn.benchmark = True
SEED = 42
seed_everything(SEED=SEED)
''' SEED Everything '''

# set computation device
device = ('cuda:0' if torch.cuda.is_available() else 'cpu')
print(f"Computation device: {device}")

# read the data.csv file and get the image paths and labels
df = pd.read_csv('data.csv')
X = df.image_path.values
y = df.target.values
(xtrain, xtest, ytrain, ytest) = train_test_split(X, y, test_size=0.15, random_state=42)
print(f"Training on {len(xtrain)} images")
print(f"Validating on {len(xtest)} images")

# image dataset module
class ASLImageDataset(Dataset):
    def __init__(self, path, labels):
        self.X = path
        self.y = labels
        # apply augmentations
        self.aug = transforms.Compose([
            transforms.Resize((224, 224))
        ])

    def __len__(self):
        return (len(self.X))

    def __getitem__(self, i):
        image = Image.open(self.X[i])
        image = self.aug(image)
        image = np.transpose(image, (2, 0, 1)).astype(np.float32)
        label = self.y[i]
        return torch.tensor(image, dtype=torch.float), torch.tensor(label, dtype=torch.long)

train_data = ASLImageDataset(xtrain, ytrain)
test_data = ASLImageDataset(xtest, ytest)

# dataloaders
trainloader = DataLoader(train_data, batch_size=32, shuffle=True)
testloader = DataLoader(test_data, batch_size=32, shuffle=False)

# model = models.MobileNetV2(pretrained=True, requires_grad=False)
model = cnn_models.CustomCNN().to(device)
print(model)

# total parameters and trainable parameters
total_params = sum(p.numel() for p in model.parameters())
print(f"{total_params:,} total parameters.")
total_trainable_params = sum(
    p.numel() for p in model.parameters() if p.requires_grad)
print(f"{total_trainable_params:,} training parameters.")

# optimizer
optimizer = optim.Adam(model.parameters(), lr=0.001)
# loss function
criterion = nn.CrossEntropyLoss()

# training function
def fit(model, dataloader):
    print('Training')
    model.train()
    running_loss = 0.0
    running_correct = 0
    for i, data in tqdm(enumerate(dataloader), total=int(len(train_data)/dataloader.batch_size)):
        data, target = data[0].to(device), data[1].to(device)
        optimizer.zero_grad()
        outputs = model(data)
        loss = criterion(outputs, target)
        running_loss += loss.item()
        _, preds = torch.max(outputs.data, 1)
        running_correct += (preds == target).sum().item()
        loss.backward()
        optimizer.step()

    train_loss = running_loss/len(dataloader.dataset)
    train_accuracy = 100. * running_correct/len(dataloader.dataset)
    print(f"Train Loss: {train_loss:.4f}, Train Acc: {train_accuracy:.2f}")
    return train_loss, train_accuracy

# validation function
def validate(model, dataloader):
    print('Validating')
    model.eval()
    running_loss = 0.0
    running_correct = 0
    with torch.no_grad():
        for i, data in tqdm(enumerate(dataloader), total=int(len(test_data)/dataloader.batch_size)):
            data, target = data[0].to(device), data[1].to(device)
            outputs = model(data)
            loss = criterion(outputs, target)
            running_loss += loss.item()
            _, preds = torch.max(outputs.data, 1)
            running_correct += (preds == target).sum().item()

        val_loss = running_loss/len(dataloader.dataset)
        val_accuracy = 100. * running_correct/len(dataloader.dataset)
        print(f'Val Loss: {val_loss:.4f}, Val Acc: {val_accuracy:.2f}')
        return val_loss, val_accuracy

train_loss, train_accuracy = [], []
val_loss, val_accuracy = [], []
start = time.time()
for epoch in range(args['epochs']):
    print(f"Epoch {epoch+1} of {args['epochs']}")
    train_epoch_loss, train_epoch_accuracy = fit(model, trainloader)
    val_epoch_loss, val_epoch_accuracy = validate(model, testloader)
    train_loss.append(train_epoch_loss)
    train_accuracy.append(train_epoch_accuracy)
    val_loss.append(val_epoch_loss)
    val_accuracy.append(val_epoch_accuracy)
    print(f"loss: {val_epoch_loss}, accuracy: {val_epoch_accuracy}")
end = time.time()
print(f"Training took {(end - start)/60:.3f} minutes")

print('Saving model...')
torch.save(model.state_dict(), 'asl.pth')
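# --- Illustrative follow-up (an assumption, not in the original train.py): matplotlib is
# --- imported above but never used, so a natural addition at the end of training is to
# --- plot the accumulated loss/accuracy lists. The output filenames are hypothetical.
plt.figure(figsize=(10, 7))
plt.plot(train_accuracy, color='green', label='train accuracy')
plt.plot(val_accuracy, color='blue', label='validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.savefig('accuracy.png')

plt.figure(figsize=(10, 7))
plt.plot(train_loss, color='orange', label='train loss')
plt.plot(val_loss, color='red', label='validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.savefig('loss.png')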
"""test_bench.py Runs hub models in benchmark mode using pytest-benchmark. Run setup separately first. Usage: python install.py pytest test_bench.py See pytest-benchmark help (pytest test_bench.py -h) for additional options e.g. --benchmark-autosave --benchmark-compare -k <filter expression> ... """ import os import pytest import time import torch from components._impl.workers import subprocess_worker from torchbenchmark import _list_model_paths, ModelTask, get_metadata_from_yaml from torchbenchmark.util.machine_config import get_machine_state from torchbenchmark.util.metadata_utils import skip_by_metadata def pytest_generate_tests(metafunc): # This is where the list of models to test can be configured # e.g. by using info in metafunc.config devices = ['cpu', 'cuda'] if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): devices.append('mps') if metafunc.config.option.cpu_only: devices = ['cpu'] if metafunc.config.option.cuda_only: devices = ['cuda'] if metafunc.config.option.mps_only: devices = ['mps'] if metafunc.cls and metafunc.cls.__name__ == "TestBenchNetwork": paths = _list_model_paths() metafunc.parametrize( 'model_path', paths, ids=[os.path.basename(path) for path in paths], scope="class") metafunc.parametrize('device', devices, scope='class') @pytest.mark.benchmark( warmup=True, warmup_iterations=3, disable_gc=False, timer=time.perf_counter, group='hub', ) class TestBenchNetwork: def test_train(self, model_path, device, compiler, benchmark): try: if skip_by_metadata(test="train", device=device, extra_args=[], \ metadata=get_metadata_from_yaml(model_path)): raise NotImplementedError("Test skipped by its metadata.") # TODO: skipping quantized tests for now due to BC-breaking changes for prepare # api, enable after PyTorch 1.13 release if "quantized" in model_path: return task = ModelTask(model_path) if not task.model_details.exists: return # Model is not supported. task.make_model_instance(test="train", device=device) benchmark(task.invoke) benchmark.extra_info['machine_state'] = get_machine_state() benchmark.extra_info['batch_size'] = task.get_model_attribute('batch_size') benchmark.extra_info['precision'] = task.get_model_attribute("dargs", "precision") benchmark.extra_info['test'] = 'train' except NotImplementedError: print(f'Test train on {device} is not implemented, skipping...') def test_eval(self, model_path, device, compiler, benchmark, pytestconfig): try: if skip_by_metadata(test="eval", device=device, extra_args=[], \ metadata=get_metadata_from_yaml(model_path)): raise NotImplementedError("Test skipped by its metadata.") # TODO: skipping quantized tests for now due to BC-breaking changes for prepare # api, enable after PyTorch 1.13 release if "quantized" in model_path: return task = ModelTask(model_path) if not task.model_details.exists: return # Model is not supported. 
task.make_model_instance(test="eval", device=device) with task.no_grad(disable_nograd=pytestconfig.getoption("disable_nograd")): benchmark(task.invoke) benchmark.extra_info['machine_state'] = get_machine_state() benchmark.extra_info['batch_size'] = task.get_model_attribute('batch_size') benchmark.extra_info['precision'] = task.get_model_attribute("dargs", "precision") benchmark.extra_info['test'] = 'eval' except NotImplementedError: print(f'Test eval on {device} is not implemented, skipping...') @pytest.mark.benchmark( warmup=True, warmup_iterations=3, disable_gc=False, timer=time.perf_counter, group='hub', ) class TestWorker: """Benchmark SubprocessWorker to make sure we aren't skewing results.""" def test_worker_noop(self, benchmark): worker = subprocess_worker.SubprocessWorker() benchmark(lambda: worker.run("pass")) def test_worker_store(self, benchmark): worker = subprocess_worker.SubprocessWorker() benchmark(lambda: worker.store("x", 1)) def test_worker_load(self, benchmark): worker = subprocess_worker.SubprocessWorker() worker.store("x", 1) benchmark(lambda: worker.load("x"))
import os
import traceback
import argparse
import importlib
from pathlib import Path
from typing import Dict

CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))


def list_benchmarks() -> Dict[str, str]:
    benchmarks = {}
    import userbenchmark
    bdir = Path(userbenchmark.__file__).parent.resolve()
    fb_bdir = bdir.joinpath("fb")
    if fb_bdir.exists():
        for fb_bm in filter(lambda x: x.is_dir(), fb_bdir.iterdir()):
            benchmarks[fb_bm.name] = f"fb.{fb_bm.name}"
    for bm in filter(lambda x: x.is_dir() and not x.name == "fb", bdir.iterdir()):
        benchmarks[bm.name] = bm.name
    return benchmarks


def run():
    available_benchmarks = list_benchmarks()
    parser = argparse.ArgumentParser(description="Run a TorchBench user benchmark")
    parser.add_argument("bm_name", choices=available_benchmarks.keys(), help='name of the user benchmark')
    args, bm_args = parser.parse_known_args()
    try:
        benchmark = importlib.import_module(f"userbenchmark.{available_benchmarks[args.bm_name]}.run")
        benchmark.run(bm_args)
    except ImportError as e:
        print(f"Failed to import user benchmark module {args.bm_name}, error: {str(e)}")
        traceback.print_exc()


if __name__ == "__main__":
    run()
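# --- Illustrative usage (a sketch, not part of the original script): list_benchmarks()
# --- maps a benchmark name to its module path under userbenchmark/, so the available names
# --- can be inspected before dispatching one. Assumes the script above is saved as
# --- run_benchmark.py (its name in the TorchBench repo) and is run from the repo root so
# --- that the userbenchmark package is importable.
from run_benchmark import list_benchmarks

for name, module_path in sorted(list_benchmarks().items()):
    print(f"{name} -> userbenchmark.{module_path}.run")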
""" A lightweight runner that just sets up a model and runs one of its functions in a particular configuration. Intended for debugging/exploration/profiling use cases, where the test/measurement harness is overhead. DANGER: make sure to `python install.py` first or otherwise make sure the benchmark you are going to run has been installed. This script intentionally does not automate or enforce setup steps. Wall time provided for sanity but is not a sane benchmark measurement. """ import argparse import logging import random import string import time import traceback from datetime import datetime from functools import partial import numpy as np import torch import torch.profiler as profiler from torchbenchmark import ( load_canary_model_by_name, load_model_by_name, ModelNotFoundError, ) from torchbenchmark.util.experiment.metrics import get_model_flops, get_peak_memory if not hasattr(torch.version, "git_version"): from pytorch.benchmark.fb.run_utils import trace_handler, usage_report_logger else: usage_report_logger = lambda: None WARMUP_ROUNDS = 3 SUPPORT_DEVICE_LIST = ["cpu", "cuda"] if hasattr(torch.backends, "mps") and torch.backends.mps.is_available(): SUPPORT_DEVICE_LIST.append("mps") SUPPORT_PROFILE_LIST = [ "record_shapes", "profile_memory", "with_stack", "with_flops", "with_modules", ] def run_one_step_with_cudastreams(func, streamcount): print("Running Utilization Scaling Using Cuda Streams") streamlist = [] for i in range(1, streamcount + 1, 1): # create additional streams and prime with load while len(streamlist) < i : s = torch.cuda.Stream() streamlist.append(s) for s in streamlist: with torch.cuda.stream(s): func() torch.cuda.synchronize() # Wait for the events to be recorded! # now run benchmark using streams start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) start_event.record() for s in streamlist: with torch.cuda.stream(s): func() end_event.record() torch.cuda.synchronize() print(f"Cuda StreamCount:{len(streamlist)}") print('{:<20} {:>20}'.format("GPU Time:", "%.3f milliseconds" % start_event.elapsed_time(end_event)), sep='') def printResultSummaryTime(result_summary, metrics_needed=[], model=None, flops_model_analyzer=None, model_flops=None, cpu_peak_mem=None, mem_device_id=None, gpu_peak_mem=None): if args.device == "cuda": gpu_time = np.median(list(map(lambda x: x[0], result_summary))) cpu_walltime = np.median(list(map(lambda x: x[1], result_summary))) if hasattr(model, "NUM_BATCHES"): print('{:<20} {:>20}'.format("GPU Time per batch:", "%.3f milliseconds" % (gpu_time / model.NUM_BATCHES), sep='')) print('{:<20} {:>20}'.format("CPU Wall Time per batch:", "%.3f milliseconds" % (cpu_walltime / model.NUM_BATCHES), sep='')) else: print('{:<20} {:>20}'.format("GPU Time:", "%.3f milliseconds" % gpu_time, sep='')) print('{:<20} {:>20}'.format("CPU Total Wall Time:", "%.3f milliseconds" % cpu_walltime, sep='')) else: cpu_walltime = np.median(list(map(lambda x: x[0], result_summary))) print('{:<20} {:>20}'.format("CPU Total Wall Time:", "%.3f milliseconds" % cpu_walltime, sep='')) # if model_flops is not None, output the TFLOPs per sec if 'flops' in metrics_needed: if flops_model_analyzer.metrics_backend_mapping['flops'] == 'dcgm': tflops_device_id, tflops = flops_model_analyzer.calculate_flops() else: flops = model.get_flops() tflops = flops / (cpu_walltime / 1.0e3) / 1.0e12 print('{:<20} {:>20}'.format("GPU FLOPS:", "%.4f TFLOPs per second" % tflops, sep='')) if model_flops is not None: tflops = model_flops / (cpu_walltime / 
1.0e3) / 1.0e12 print('{:<20} {:>20}'.format("Model Flops:", "%.4f TFLOPs per second" % tflops, sep='')) if gpu_peak_mem is not None: print('{:<20} {:>20}'.format("GPU %d Peak Memory:" % mem_device_id, "%.4f GB" % gpu_peak_mem, sep='')) if cpu_peak_mem is not None: print('{:<20} {:>20}'.format("CPU Peak Memory:", "%.4f GB" % cpu_peak_mem, sep='')) def run_one_step(func, nwarmup=WARMUP_ROUNDS, num_iter=10, model=None, export_metrics_file=None, stress=0, metrics_needed=[], metrics_gpu_backend=None): # Warm-up `nwarmup` rounds for _i in range(nwarmup): func() result_summary = [] flops_model_analyzer = None if 'flops' in metrics_needed: from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer flops_model_analyzer = ModelAnalyzer(export_metrics_file, ['flops'], metrics_gpu_backend) flops_model_analyzer.start_monitor() if stress: cur_time = time.time_ns() start_time = cur_time target_time = stress * 1e9 + start_time num_iter = -1 last_time = start_time _i = 0 last_it = 0 first_print_out = True while (not stress and _i < num_iter) or (stress and cur_time < target_time) : if args.device == "cuda": torch.cuda.synchronize() start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) # Collect time_ns() instead of time() which does not provide better precision than 1 # second according to https://docs.python.org/3/library/time.html#time.time. t0 = time.time_ns() start_event.record() func() end_event.record() torch.cuda.synchronize() t1 = time.time_ns() result_summary.append((start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000)) elif args.device == "mps": t0 = time.time_ns() func() t1 = time.time_ns() wall_latency = t1 - t0 # TODO: modify this to add GPU time as well result_summary.append([(t1 - t0) / 1_000_000]) else: t0 = time.time_ns() func() t1 = time.time_ns() result_summary.append([(t1 - t0) / 1_000_000]) if stress: cur_time = time.time_ns() # print out the status every 10s. 
if (cur_time - last_time) >= 10 * 1e9: if first_print_out: print('|{:^20}|{:^20}|{:^20}|'.format("Iterations", "Time/Iteration(ms)", "Rest Time(s)")) first_print_out = False est = (target_time - cur_time) / 1e9 time_per_it = (cur_time - last_time) / (_i - last_it) / 1e6 print('|{:^20}|{:^20}|{:^20}|'.format("%d" % _i, "%.2f" % time_per_it , "%d" % int(est))) last_time = cur_time last_it = _i _i += 1 if flops_model_analyzer is not None: flops_model_analyzer.stop_monitor() flops_model_analyzer.aggregate() cpu_peak_mem = None gpu_peak_mem = None mem_device_id = None model_flops = None if 'cpu_peak_mem' in metrics_needed or 'gpu_peak_mem' in metrics_needed: cpu_peak_mem, mem_device_id, gpu_peak_mem = get_peak_memory(func, model.device, export_metrics_file=export_metrics_file, metrics_needed=metrics_needed, metrics_gpu_backend=metrics_gpu_backend) if 'model_flops' in metrics_needed: model_flops = get_model_flops(model) printResultSummaryTime(result_summary, metrics_needed, model, flops_model_analyzer, model_flops, cpu_peak_mem, mem_device_id, gpu_peak_mem) def profile_one_step(func, nwarmup=WARMUP_ROUNDS): activity_groups = [] result_summary = [] device_to_activity = {'cuda': profiler.ProfilerActivity.CUDA, 'cpu': profiler.ProfilerActivity.CPU} if args.profile_devices: activity_groups = [ device_to_activity[device] for device in args.profile_devices if (device in device_to_activity) ] else: if args.device == 'cuda': activity_groups = [ profiler.ProfilerActivity.CUDA, profiler.ProfilerActivity.CPU, ] elif args.device == 'cpu': activity_groups = [profiler.ProfilerActivity.CPU] profile_opts = {} for opt in SUPPORT_PROFILE_LIST: profile_opts[opt] = True if args.profile_options is not None and opt in args.profile_options else False if args.profile_eg: from datetime import datetime import os from torch.profiler import ExecutionTraceObserver start_time = datetime.now() timestamp = int(datetime.timestamp(start_time)) eg_file = f"{args.model}_{timestamp}_eg.json" eg = ExecutionTraceObserver() if not os.path.exists(args.profile_eg_folder): os.makedirs(args.profile_eg_folder) eg.register_callback(f"{args.profile_eg_folder}/{eg_file}") nwarmup = 0 eg.start() with profiler.profile( schedule=profiler.schedule(wait=0, warmup=nwarmup, active=1, repeat=1), activities=activity_groups, record_shapes=args.profile_detailed if args.profile_detailed else profile_opts["record_shapes"], profile_memory=args.profile_detailed if args.profile_detailed else profile_opts["profile_memory"], with_stack=args.profile_detailed if args.profile_detailed else profile_opts["with_stack"], with_flops=args.profile_detailed if args.profile_detailed else profile_opts["with_flops"], with_modules=args.profile_detailed if args.profile_detailed else profile_opts["with_modules"], on_trace_ready= partial(trace_handler, f"torchbench_{args.model}") if (not hasattr(torch.version, "git_version") and args.profile_export_chrome_trace) else profiler.tensorboard_trace_handler(args.profile_folder), ) as prof: if args.device == "cuda": start_event = torch.cuda.Event(enable_timing=True) end_event = torch.cuda.Event(enable_timing=True) for i in range(nwarmup + 1): t0 = time.time_ns() start_event.record() func() torch.cuda.synchronize() # Need to sync here to match run_one_step()'s timed run. 
end_event.record() t1 = time.time_ns() if i >= nwarmup: result_summary.append((start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000)) prof.step() else: for i in range(nwarmup + 1): t0 = time.time_ns() func() t1 = time.time_ns() if i >= nwarmup: result_summary.append([(t1 - t0) / 1_000_000]) prof.step() if args.profile_eg and eg: eg.stop() eg.unregister_callback() print(f"Save Exeution Trace to : {args.profile_eg_folder}/{eg_file}") print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=30)) print(f"Saved TensorBoard Profiler traces to {args.profile_folder}.") printResultSummaryTime(result_summary) def _validate_devices(devices: str): devices_list = devices.split(",") valid_devices = SUPPORT_DEVICE_LIST for d in devices_list: if d not in valid_devices: raise ValueError(f'Invalid device {d} passed into --profile-devices. Expected devices: {valid_devices}.') return devices_list def _validate_profile_options(profile_options: str): profile_options_list = profile_options.split(",") for opt in profile_options_list: if opt not in SUPPORT_PROFILE_LIST: raise ValueError(f'Invalid profile option {opt} passed into --profile-options. Expected options: {SUPPORT_PROFILE_LIST}.') return profile_options_list if __name__ == "__main__": parser = argparse.ArgumentParser(__doc__) parser.add_argument( "model", help="Full or partial name of a model to run. If partial, picks the first match.", ) parser.add_argument( "-d", "--device", choices=SUPPORT_DEVICE_LIST, default="cpu", help="Which device to use.", ) parser.add_argument( "-t", "--test", choices=["eval", "train"], default="eval", help="Which test to run.", ) parser.add_argument( "--profile", action="store_true", help="Run the profiler around the function" ) parser.add_argument( "--profile-options", type=_validate_profile_options, help=f"Select which profile options to enable. Valid options: {SUPPORT_PROFILE_LIST}.", ) parser.add_argument("--amp", action="store_true", help="enable torch.autocast()") parser.add_argument( "--profile-folder", default="./logs", help="Save profiling model traces to this directory.", ) parser.add_argument( "--profile-detailed", action="store_true", help=f"Enable all profile options, including {SUPPORT_PROFILE_LIST}. Overrides --profile-options.", ) parser.add_argument( "--profile-export-chrome-trace", action="store_true", help="Export Chrome tracing files. (internal only)", ) parser.add_argument( "--profile-devices", type=_validate_devices, help="Profile comma separated list of activities such as cpu,cuda.", ) parser.add_argument( "--profile-eg", action="store_true", help="Collect execution trace by PARAM" ) parser.add_argument( "--profile-eg-folder", default="./eg_logs", help="Save execution traces to this directory.", ) parser.add_argument( "--cudastreams", action="store_true", help="Utilization test using increasing number of cuda streams.", ) parser.add_argument("--bs", type=int, help="Specify batch size to the test.") parser.add_argument( "--export-metrics", action="store_true", help="Export all specified metrics records to a csv file. The default csv file name is [model_name]_all_metrics.csv.", ) parser.add_argument( "--stress", type=float, default=0, help="Specify execution time (seconds) to stress devices.", ) parser.add_argument( "--metrics", type=str, default="cpu_peak_mem,gpu_peak_mem", help="Specify metrics [cpu_peak_mem,gpu_peak_mem,flops,model_flops]to be collected. You can also set `none` to disable all metrics. 
The metrics are separated by comma such as cpu_peak_mem,gpu_peak_mem.", ) parser.add_argument( "--metrics-gpu-backend", choices=["dcgm", "default"], default="default", help="""Specify the backend [dcgm, default] to collect metrics. \nIn default mode, the latency(execution time) is collected by time.time_ns() and it is always enabled. Optionally, \n - you can specify cpu peak memory usage by --metrics cpu_peak_mem, and it is collected by psutil.Process(). \n - you can specify gpu peak memory usage by --metrics gpu_peak_mem, and it is collected by nvml library.\n - you can specify flops by --metrics flops, and it is collected by fvcore.\nIn dcgm mode, the latency(execution time) is collected by time.time_ns() and it is always enabled. Optionally,\n - you can specify cpu peak memory usage by --metrics cpu_peak_mem, and it is collected by psutil.Process().\n - you can specify cpu and gpu peak memory usage by --metrics cpu_peak_mem,gpu_peak_mem, and they are collected by dcgm library.""", ) parser.add_argument( "--channels-last", action="store_true", help="enable torch.channels_last()" ) args, extra_args = parser.parse_known_args() if args.cudastreams and not args.device == "cuda": print("cuda device required to use --cudastreams option!") exit(-1) # Log the tool usage usage_report_logger() found = False Model = None try: Model = load_model_by_name(args.model) except ModuleNotFoundError: traceback.print_exc() exit(-1) except ModelNotFoundError: print(f"Warning: The model {args.model} cannot be found at core set.") if not Model: try: Model = load_canary_model_by_name(args.model) except ModuleNotFoundError: traceback.print_exc() exit(-1) except ModelNotFoundError: print( f"Error: The model {args.model} cannot be found at either core or canary model set." ) exit(-1) m = Model( device=args.device, test=args.test, batch_size=args.bs, extra_args=extra_args, ) if m.dynamo: mode = f"dynamo {m.opt_args.torchdynamo}" elif m.opt_args.backend: mode = f"{m.opt_args.backend}" else: mode = "eager" print( f"Running {args.test} method from {Model.name} on {args.device} in {mode} mode with input batch size {m.batch_size} and precision {m.dargs.precision}." ) if "--accuracy" in extra_args: print("{:<20} {:>20}".format("Accuracy: ", str(m.accuracy)), sep="") exit(0) if args.channels_last: m.enable_channels_last() test = m.invoke if args.amp: test = torch.autocast(m.device)(test) metrics_needed = ( [_ for _ in args.metrics.split(",") if _.strip()] if args.metrics else [] ) if "none" in metrics_needed: metrics_needed = [] # only enabled gpu_peak_mem for cuda device if args.device != "cuda" and "gpu_peak_mem" in metrics_needed: metrics_needed.remove("gpu_peak_mem") metrics_needed = list(set(metrics_needed)) metrics_gpu_backend = args.metrics_gpu_backend if metrics_needed: if metrics_gpu_backend == "dcgm": from components.model_analyzer.TorchBenchAnalyzer import check_dcgm check_dcgm() elif "gpu_peak_mem" in metrics_needed: from components.model_analyzer.TorchBenchAnalyzer import check_nvml check_nvml() if "gpu_peak_mem" in metrics_needed or ( "flops" in metrics_needed and metrics_gpu_backend == "dcgm" ): assert ( args.device == "cuda" ), "gpu_peak_mem and flops:dcgm are only available for cuda device." if "flops" in metrics_needed and metrics_gpu_backend == "default": assert hasattr( m, "get_flops" ), f"The model {args.model} does not support calculating flops." 
m.get_flops() if args.export_metrics: if not args.metrics: print("You have to specifiy at least one metrics to export.") exit(-1) export_metrics_file = "%s_all_metrics.csv" % args.model else: export_metrics_file = None if args.profile: profile_one_step(test) elif args.cudastreams: run_one_step_with_cudastreams(test, 10) else: run_one_step( test, model=m, export_metrics_file=export_metrics_file, stress=args.stress, metrics_needed=metrics_needed, metrics_gpu_backend=args.metrics_gpu_backend, ) # Print dynamo compilation metrics, if there are any. try: if m.pt2_compilation_time: print( "{:<20} {:>18}".format( "PT2 Compilation time: ", "%.3f seconds" % m.pt2_compilation_time ), sep="", ) if m.pt2_graph_breaks: print( "{:<20} {:>18}".format( "PT2 Graph Breaks: ", "%.3f" % m.pt2_graph_breaks ), sep="", ) except: pass
import os
import pytest
import torch
from torchbenchmark.util.machine_config import get_machine_config, check_machine_configured


def pytest_addoption(parser):
    parser.addoption("--fuser", help="Use one of the available fusers: te, old, nvfuser",
                     default="te", choices=["te", "old", "nvfuser"])
    parser.addoption("--ignore_machine_config", action='store_true',
                     help="Disable checks/assertions for machine configuration for stable benchmarks")
    parser.addoption("--disable_nograd", action='store_true',
                     help="Disable no_grad for eval() runs")
    parser.addoption("--cpu_only", action='store_true',
                     help="Run benchmarks on cpu only and ignore machine configuration checks")
    parser.addoption("--cuda_only", action='store_true',
                     help="Run benchmarks on cuda only and ignore machine configuration checks")
    parser.addoption("--mps_only", action='store_true',
                     help="Run benchmarks on mps only and ignore machine configuration checks")


def set_fuser(fuser):
    if fuser == "te":
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_override_can_fuse_on_cpu(True)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(True)
    elif fuser == "old":
        torch._C._jit_set_profiling_executor(False)
        torch._C._jit_set_profiling_mode(False)
        torch._C._jit_override_can_fuse_on_gpu(True)
        torch._C._jit_set_texpr_fuser_enabled(False)
    elif fuser == "nvfuser":
        os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
        torch._C._jit_set_texpr_fuser_enabled(False)
        torch._C._jit_set_profiling_executor(True)
        torch._C._jit_set_profiling_mode(True)
        torch._C._jit_can_fuse_on_cpu()
        torch._C._jit_can_fuse_on_gpu()
        torch._C._jit_override_can_fuse_on_cpu(False)
        torch._C._jit_override_can_fuse_on_gpu(False)
        torch._C._jit_set_nvfuser_guard_mode(True)
        torch._C._jit_set_nvfuser_enabled(True)
    else:
        # pytest_addoption should always set the default fuser
        assert(False)


def pytest_sessionstart(session):
    try:
        check_machine_configured()
    except Exception as e:
        if not session.config.getoption('ignore_machine_config'):
            pytest.exit(f"{e}\nSee README.md for machine tuning script usage, or use --ignore_machine_config")


def pytest_configure(config):
    set_fuser(config.getoption("fuser"))


def pytest_benchmark_update_machine_info(config, machine_info):
    machine_info['pytorch_version'] = torch.__version__
    machine_info['pytorch_git_version'] = torch.version.git_version
    machine_info['cuda_version'] = torch.version.cuda

    try:
        import torchvision
        machine_info['torchvision_version'] = torchvision.__version__
    except ImportError:
        machine_info['torchvision_version'] = '*not-installed*'

    machine_info['github_run_id'] = os.environ.get("GITHUB_RUN_ID")
    machine_info['torchbench_score_version'] = os.environ.get("TORCHBENCH_VER")

    try:
        # if running on unexpected machine/os, get_machine_config _may_ not work
        machine_info['torchbench_machine_config'] = get_machine_config()
    except Exception:
        if not config.getoption('ignore_machine_config'):
            raise
import time
import torch
import argparse
import json
from dataclasses import asdict
from torchbenchmark.e2e import E2EBenchmarkResult, load_e2e_model_by_name
from typing import Dict

SUPPORT_DEVICE_LIST = ["cpu", "cuda"]


def run(func) -> Dict[str, float]:
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    result = {}
    # Collect time_ns() instead of time() which does not provide better precision than 1
    # second according to https://docs.python.org/3/library/time.html#time.time.
    t0 = time.time_ns()
    func()
    if torch.cuda.is_available():
        torch.cuda.synchronize()
    t2 = time.time_ns()
    result["latency_ms"] = (t2 - t0) / 1_000_000.0
    return result


def gen_result(m, run_result):
    num_epochs = getattr(m, "num_epochs", 1)
    r = E2EBenchmarkResult(device=m.device, device_num=m.device_num,
                           test=m.test, num_examples=m.num_examples,
                           num_epochs=num_epochs, batch_size=m.batch_size, result=dict())
    r.result["latency"] = run_result["latency_ms"] / 1000.0
    r.result["qps"] = r.num_examples / r.result["latency"] * r.num_epochs
    # add accuracy result if available
    if hasattr(m, "accuracy"):
        r.result["accuracy"] = m.accuracy
    return r


if __name__ == "__main__":
    parser = argparse.ArgumentParser(__doc__)
    parser.add_argument("model", help="Full name of the end-to-end model.")
    parser.add_argument("-t", "--test", choices=["eval", "train"], default="eval", help="Which test to run.")
    parser.add_argument("--bs", type=int, help="Specify batch size.")
    args, extra_args = parser.parse_known_args()

    found = False
    Model = load_e2e_model_by_name(args.model)
    if not Model:
        print(f"Unable to find model matching {args.model}.")
        exit(-1)
    m = Model(test=args.test, batch_size=args.bs, extra_args=extra_args)
    test = getattr(m, args.test)
    result = gen_result(m, run(test))
    result_json = json.dumps(asdict(result))
    print(result_json)
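# --- Illustrative arithmetic (a sketch, not part of the e2e runner): how gen_result()
# --- derives throughput from a measured wall-clock latency. The numbers below are made up.
latency_ms = 2_500.0          # as measured by run(): (t2 - t0) / 1e6
num_examples = 10_000
num_epochs = 1

latency_s = latency_ms / 1000.0               # 2.5 s
qps = num_examples / latency_s * num_epochs   # 4000.0 examples per second
print(f"latency={latency_s:.3f}s qps={qps:.1f}")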
""" A Benchmark Summary Metadata tool to extract and generate metadata from models at runtime. """ import argparse from copy import deepcopy import os import yaml from typing import Any, Dict, List, Tuple import torch from torchbenchmark import list_models, load_model_by_name, _list_model_paths, ModelTask, ModelDetails, str_to_bool TIMEOUT = 300 # seconds torchbench_dir = 'torchbenchmark' model_dir = 'models' _DEFAULT_METADATA_ = { 'train_benchmark': True, 'train_deterministic': False, 'eval_benchmark': True, 'eval_deterministic': False, 'eval_nograd': True, # 'origin': None, # 'train_dtype': 'float32', # 'eval_dtype': 'float32', } def _parser_helper(input): return None if input is None else str_to_bool(str(input)) def _process_model_details_to_metadata(train_detail: ModelDetails, eval_detail: ModelDetails) -> Dict[str, Any]: metadata = {} for k, v in _DEFAULT_METADATA_.items(): if hasattr(train_detail, k): metadata[k] = getattr(train_detail, k) elif train_detail and k in train_detail.metadata: metadata[k] = train_detail.metadata[k] elif eval_detail and k in eval_detail.metadata: metadata[k] = eval_detail.metadata[k] else: metadata[k] = v return metadata def _extract_detail(path: str) -> Dict[str, Any]: name = os.path.basename(path) device = "cuda" t_detail = None e_detail = None # Separate train and eval to isolated processes. task_t = ModelTask(path, timeout=TIMEOUT) try: task_t.make_model_instance(device=device) task_t.set_train() task_t.train() task_t.extract_details_train() task_t.del_model_instance() t_detail = deepcopy(task_t._details) except NotImplementedError: print(f'Model {name} train is not fully implemented. skipping...') del task_t task_e = ModelTask(path, timeout=TIMEOUT) try: task_e.make_model_instance(device=device) task_e.set_eval() task_e.eval() task_e.extract_details_eval() task_e.del_model_instance() e_detail = deepcopy(task_e._details) except NotImplementedError: print(f'Model {name} eval is not fully implemented. 
skipping...') del task_e return _process_model_details_to_metadata(t_detail, e_detail) def _extract_all_details(model_names: List[str]) -> List[Tuple[str, Dict[str, Any]]]: details = [] for model_path in _list_model_paths(): model_name = os.path.basename(model_path) if model_name not in model_names: continue ed = _extract_detail(model_path) details.append((model_path, ed)) return details def _print_extracted_details(extracted_details: List[Tuple[str, Dict[str, Any]]]): for path, ex_detail in extracted_details: name = os.path.basename(path) print(f'Model: {name} , Details: {ex_detail}') def _maybe_override_extracted_details(args, extracted_details: List[Tuple[str, Dict[str, Any]]]): for _path, ex_detail in extracted_details: if args.train_benchmark is not None: ex_detail['train_benchmark'] = args.train_benchmark elif args.train_deterministic is not None: ex_detail['train_deterministic'] = args.train_deterministic elif args.eval_benchmark is not None: ex_detail['eval_benchmark'] = args.eval_benchmark elif args.eval_deterministic is not None: ex_detail['eval_deterministic'] = args.eval_deterministic elif args.eval_nograd is not None: ex_detail['eval_nograd'] = args.eval_nograd def _write_metadata_yaml_files(extracted_details: List[Tuple[str, Dict[str, Any]]]): for path, ex_detail in extracted_details: metadata_path = path + "/metadata.yaml" with open(metadata_path, 'w') as file: yaml.dump(ex_detail, file) print(f"Processed file: {metadata_path}") if __name__ == "__main__": parser = argparse.ArgumentParser(__doc__) parser.add_argument("--model", default=None, help="Full name of a model to update. If absent, applies to all models.") parser.add_argument("--extract-only", default=False, action="store_true", help="Only extract model details.") parser.add_argument("--train-benchmark", default=None, type=_parser_helper, help="Whether to enable PyTorch benchmark mode during train.") parser.add_argument("--train-deterministic", default=None, type=_parser_helper, help="Whether to enable deterministic during train.") parser.add_argument("--eval-benchmark", default=None, type=_parser_helper, help="Whether to enable PyTorch benchmark mode during eval.") parser.add_argument("--eval-deterministic", default=None, type=_parser_helper, help="Whether to enable deterministic during eval.") parser.add_argument("--eval-nograd", default=None, type=_parser_helper, help="Whether to enable no_grad during eval.") # parser.add_argument("--origin", default=None, # help="Location of benchmark's origin. Such as torchaudio or torchvision.") # parser.add_argument("--train-dtype", default=None, # choices=['float32', 'float16', 'bfloat16', 'amp'], help="Which fp type to perform training.") # parser.add_argument("--eval-dtype", default=None, # choices=['float32', 'float16', 'bfloat16', 'amp'], help="Which fp type to perform eval.") args = parser.parse_args() # Only allow this script for cuda for now. if not torch.cuda.is_available(): print("This tool is currently only supported when the system has a cuda device.") exit(1) # Find the matching model, or use all models. 
models = [] model_names = [] if args.model is not None: Model = load_model_by_name(args.model) if not Model: print(f"Unable to find model matching: {args.model}.") exit(-1) models.append(Model) model_names.append(Model.name) print(f"Generating metadata to select model: {model_names}.") else: models.extend(list_models(model_match=args.model)) model_names.extend([m.name for m in models]) print("Generating metadata to all models.") # Extract all model details from models. extracted_details = _extract_all_details(model_names) print("Printing extracted metadata.") _print_extracted_details(extracted_details) # Stop here for extract-only. if args.extract_only: print("--extract-only is set. Stop here.") exit(0) # Apply details passed in by flags. _maybe_override_extracted_details(args, extracted_details) print("Printing metadata after applying any modifications.") _print_extracted_details(extracted_details) # TODO: Modify and update the model to apply metadata changes by the user. # Generate metadata files for each matching models. _write_metadata_yaml_files(extracted_details)
"""test.py Setup and Run hub models. Make sure to enable an https proxy if necessary, or the setup steps may hang. """ # This file shows how to use the benchmark suite from user end. import gc import functools import os import traceback import unittest from unittest.mock import patch import yaml import torch from torchbenchmark import _list_model_paths, ModelTask, get_metadata_from_yaml from torchbenchmark.util.metadata_utils import skip_by_metadata # Some of the models have very heavyweight setup, so we have to set a very # generous limit. That said, we don't want the entire test suite to hang if # a single test encounters an extreme failure, so we give up after a test is # unresponsive to 5 minutes by default. (Note: this does not require that the # entire test case completes in 5 minutes. It requires that if the worker is # unresponsive for 5 minutes the parent will presume it dead / incapacitated.) TIMEOUT = int(os.getenv("TIMEOUT", 300)) # Seconds class TestBenchmark(unittest.TestCase): def setUp(self): gc.collect() def tearDown(self): gc.collect() def _create_example_model_instance(task: ModelTask, device: str): skip = False try: task.make_model_instance(test="eval", device=device, extra_args=["--accuracy"]) except NotImplementedError: try: task.make_model_instance(test="train", device=device, extra_args=["--accuracy"]) except NotImplementedError: skip = True finally: if skip: raise NotImplementedError(f"Model is not implemented on the device {device}") def _load_test(path, device): def _skip_cuda_memory_check_p(metadata): if device != "cuda": return True if "skip_cuda_memory_leak" in metadata and metadata["skip_cuda_memory_leak"]: return True return False def example_fn(self): task = ModelTask(path, timeout=TIMEOUT) with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual): try: _create_example_model_instance(task, device) accuracy = task.get_model_attribute("accuracy") assert accuracy == "pass" or accuracy == "eager_1st_run_OOM", f"Expected accuracy pass, get {accuracy}" task.del_model_instance() except NotImplementedError: self.skipTest(f'Method `get_module()` on {device} is not implemented, skipping...') def train_fn(self): metadata = get_metadata_from_yaml(path) task = ModelTask(path, timeout=TIMEOUT) allow_customize_batch_size = task.get_model_attribute("ALLOW_CUSTOMIZE_BSIZE", classattr=True) # to speedup test, use batch size 1 if possible batch_size = 1 if allow_customize_batch_size else None with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual): try: task.make_model_instance(test="train", device=device, batch_size=batch_size) task.invoke() task.check_details_train(device=device, md=metadata) task.del_model_instance() except NotImplementedError: self.skipTest(f'Method train on {device} is not implemented, skipping...') def eval_fn(self): metadata = get_metadata_from_yaml(path) task = ModelTask(path, timeout=TIMEOUT) allow_customize_batch_size = task.get_model_attribute("ALLOW_CUSTOMIZE_BSIZE", classattr=True) # to speedup test, use batch size 1 if possible batch_size = 1 if allow_customize_batch_size else None with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual): try: task.make_model_instance(test="eval", device=device, batch_size=batch_size) task.invoke() task.check_details_eval(device=device, md=metadata) task.check_eval_output() task.del_model_instance() except NotImplementedError: self.skipTest(f'Method eval on {device} is not 
implemented, skipping...') def check_device_fn(self): task = ModelTask(path, timeout=TIMEOUT) with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual): try: task.make_model_instance(test="eval", device=device) task.check_device() task.del_model_instance() except NotImplementedError: self.skipTest(f'Method check_device on {device} is not implemented, skipping...') name = os.path.basename(path) metadata = get_metadata_from_yaml(path) for fn, fn_name in zip([example_fn, train_fn, eval_fn, check_device_fn], ["example", "train", "eval", "check_device"]): # set exclude list based on metadata setattr(TestBenchmark, f'test_{name}_{fn_name}_{device}', (unittest.skipIf(skip_by_metadata(test=fn_name, device=device, extra_args=[], metadata=metadata), \ "This test is skipped by its metadata")(fn))) def _load_tests(): devices = ['cpu'] if torch.cuda.is_available(): devices.append('cuda') if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available(): devices.append('mps') if device := os.getenv('ACCELERATOR'): devices.append(device) for path in _list_model_paths(): # TODO: skipping quantized tests for now due to BC-breaking changes for prepare # api, enable after PyTorch 1.13 release if "quantized" in path: continue for device in devices: _load_test(path, device) _load_tests() if __name__ == '__main__': unittest.main()
import argparse
import subprocess
import os
import sys
from utils import TORCH_DEPS, proxy_suggestion, get_pkg_versions, _test_https
from userbenchmark import list_userbenchmarks
from pathlib import Path

REPO_ROOT = Path(__file__).parent


def pip_install_requirements(requirements_txt="requirements.txt"):
    if not _test_https():
        print(proxy_suggestion)
        sys.exit(-1)
    try:
        subprocess.run([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_txt],
                       check=True,
                       stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        return (False, e.output)
    except Exception as e:
        return (False, e)
    return True, None


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("models", nargs='*', default=[],
                        help="Specify one or more models to install. If not set, install all models.")
    parser.add_argument("--test-mode", action="store_true", help="Run in test mode and check package versions")
    parser.add_argument("--canary", action="store_true", help="Install canary model.")
    parser.add_argument("--continue_on_fail", action="store_true")
    parser.add_argument("--verbose", "-v", action="store_true")
    parser.add_argument("--userbenchmark", choices=list_userbenchmarks(),
                        help="Install requirements for optional components.")
    args = parser.parse_args()

    os.chdir(os.path.realpath(os.path.dirname(__file__)))

    print(f"checking packages {', '.join(TORCH_DEPS)} are installed...", end="", flush=True)
    try:
        versions = get_pkg_versions(TORCH_DEPS)
    except ModuleNotFoundError as e:
        print("FAIL")
        print(f"Error: Users must first manually install packages {TORCH_DEPS} before installing the benchmark.")
        sys.exit(-1)
    print("OK")

    if args.userbenchmark:
        # Install userbenchmark dependencies if exists
        userbenchmark_dir = REPO_ROOT.joinpath("userbenchmark", args.userbenchmark)
        if userbenchmark_dir.joinpath("install.py").is_file():
            subprocess.check_call([sys.executable, "install.py"], cwd=userbenchmark_dir.absolute())
        sys.exit(0)

    success, errmsg = pip_install_requirements()
    if not success:
        print("Failed to install torchbenchmark requirements:")
        print(errmsg)
        if not args.continue_on_fail:
            sys.exit(-1)
    from torchbenchmark import setup
    success &= setup(models=args.models, verbose=args.verbose, continue_on_fail=args.continue_on_fail,
                     test_mode=args.test_mode, allow_canary=args.canary)
    if not success:
        if args.continue_on_fail:
            print("Warning: some benchmarks were not installed due to failure")
        else:
            raise RuntimeError("Failed to complete setup")
    new_versions = get_pkg_versions(TORCH_DEPS)
    if versions != new_versions:
        print(f"The torch packages are re-installed after installing the benchmark deps. \
                Before: {versions}, after: {new_versions}")
        sys.exit(-1)
""" The regression detector of TorchBench Userbenchmark. """ import json import argparse import importlib from dataclasses import asdict import os import yaml from pathlib import Path import time from datetime import datetime from typing import Any, List, Dict, Optional from userbenchmark.utils import PLATFORMS, USERBENCHMARK_OUTPUT_PREFIX, REPO_PATH, \ TorchBenchABTestResult, get_date_from_metrics, \ get_ub_name, get_latest_files_in_s3_from_last_n_days, get_date_from_metrics_s3_key from utils.s3_utils import S3Client, USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT GITHUB_ISSUE_TEMPLATE = """ TorchBench CI has detected a performance signal or runtime regression. Base PyTorch commit: {start} Affected PyTorch commit: {end} Affected Tests: {test_details} Tests that were no longer run on affected commit: {control_only_tests} Tests that were newly added on affected commit: {treatment_only_tests} Runtime regressions found? {runtime_regressions_msg} GitHub workflow that triggered this issue: {github_run_url} cc {owner} """ DEFAULT_GH_ISSUE_OWNER = "@xuzhao9" def get_default_output_path(bm_name: str) -> str: # By default, write result to $REPO_DIR/.userbenchmark/<userbenchmark-name>/regression-<time>.json output_path = os.path.join(REPO_PATH, USERBENCHMARK_OUTPUT_PREFIX, bm_name) fname = "regression-{}.yaml".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")) return os.path.join(output_path, fname) def generate_regression_result(control: Dict[str, Any], treatment: Dict[str, Any]) -> TorchBenchABTestResult: def _call_userbenchmark_detector(detector, control: Dict[str, Any], treatment: Dict[str, Any]) -> TorchBenchABTestResult: return detector(control, treatment) assert control["name"] == treatment["name"], f'Expected the same userbenchmark name from metrics files, \ but getting {control["name"]} and {treatment["name"]}.' 
bm_name = control["name"] detector = importlib.import_module(f"userbenchmark.{bm_name}.regression_detector").run # Process control and treatment to include only shared keys filtered_control_metrics = {} control_only_metrics = {} filtered_treatment_metrics = {} treatment_only_metrics = {} for control_name, control_metric in control["metrics"].items(): if control_name in treatment["metrics"]: filtered_control_metrics[control_name] = control_metric else: control_only_metrics[control_name] = control_metric for treatment_name, treatment_metric in treatment["metrics"].items(): if treatment_name in control["metrics"]: filtered_treatment_metrics[treatment_name] = treatment_metric else: treatment_only_metrics[treatment_name] = treatment_metric control["metrics"] = filtered_control_metrics treatment["metrics"] = filtered_treatment_metrics assert filtered_control_metrics.keys() == filtered_treatment_metrics.keys() # Local file comparison, return the regression detection result object result = _call_userbenchmark_detector(detector, control, treatment) result.control_only_metrics = control_only_metrics result.treatment_only_metrics = treatment_only_metrics return result def process_regressions_into_yaml(regression_result: TorchBenchABTestResult, output_path: str, control_file: str, treatment_file: str) -> None: if not len(regression_result.details) and \ not len(regression_result.control_only_metrics) and \ not len(regression_result.treatment_only_metrics): print(f"No performance signal detected between file {control_file} and {treatment_file}.") return # create the output directory if doesn't exist output_dir = Path(os.path.dirname(output_path)) output_dir.mkdir(parents=True, exist_ok=True) output_yaml_str = yaml.safe_dump(asdict(regression_result), sort_keys=False) print(output_yaml_str) with open(output_path, "w") as ofptr: ofptr.write(output_yaml_str) print(f"Wrote above yaml to {output_path}.") def process_regressions_into_gh_issue(regression_result: TorchBenchABTestResult, owner: str, output_path: str, errors_path: str) -> None: regressions_dict = asdict(regression_result) troubled_tests = "" for test, stats in regressions_dict["details"].items(): delta = stats["delta"] if delta != 0: sign = "+" if delta > 0 else "" troubled_tests += f"- {test}: {sign}{delta:.5%}\n" control_only_tests = "" for test, stat in regressions_dict["control_only_metrics"].items(): control_only_tests += f"- {test}: {stat}\n" treatment_only_tests = "" for test, stat in regressions_dict["treatment_only_metrics"].items(): treatment_only_tests += f"- {test}: {stat}\n" control_commit = regressions_dict["control_env"]["pytorch_git_version"] treatment_commit = regressions_dict["treatment_env"]["pytorch_git_version"] runtime_regressions_msg = "No runtime errors were found in the " + \ "new benchmarks run--you are all good there!" errors_log_exists = Path(errors_path).exists() if errors_log_exists: runtime_regressions_msg = "An errors log was found. Please investigate runtime " + \ "errors by looking into the logs of the workflow linked." 
if troubled_tests == "" and control_only_tests == "" and treatment_only_tests == "" and not errors_log_exists: print(f"No regressions found between {control_commit} and {treatment_commit}.") return if "GITHUB_ENV" in os.environ: fname = os.environ["GITHUB_ENV"] content = f"TORCHBENCH_REGRESSION_DETECTED='{treatment_commit}'\n" with open(fname, 'a') as fo: fo.write(content) github_run_id = os.environ.get("GITHUB_RUN_ID", None) github_run_url = "No URL found, please look for the failing action in " + \ "https://github.com/pytorch/benchmark/actions" if github_run_id is not None: github_run_url = f"https://github.com/pytorch/benchmark/actions/runs/{github_run_id}" issue_config: Dict[str, str] = { "start": control_commit, "end": treatment_commit, "test_details": troubled_tests, "control_only_tests": control_only_tests, "treatment_only_tests": treatment_only_tests, "runtime_regressions_msg": runtime_regressions_msg, "github_run_url": github_run_url, "owner": owner } issue_body = GITHUB_ISSUE_TEMPLATE.format(**issue_config) print(issue_body) with open(output_path, "w") as f: f.write(issue_body) def get_best_start_date(latest_metrics_jsons: List[str], end_date: datetime) -> Optional[datetime]: """Get the date closest to `end_date` from `latest_metrics_jsons`""" for metrics_json in latest_metrics_jsons: start_datetime = get_date_from_metrics_s3_key(metrics_json) if start_datetime < end_date: return start_datetime return None def get_metrics_by_date(latest_metrics_jsons: List[str], pick_date: datetime): pick_metrics_json_key: Optional[str] = None for metrics_json_key in latest_metrics_jsons: metric_datetime = get_date_from_metrics_s3_key(metrics_json_key) # Use the latest metric file on on the same day if metric_datetime.date() == pick_date.date(): pick_metrics_json_key = metrics_json_key break assert pick_metrics_json_key, f"Selected date {pick_date} is not found in the latest_metrics_jsons: {latest_metrics_jsons}" s3 = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT) metrics_json = s3.get_file_as_json(pick_metrics_json_key) return (metrics_json, pick_metrics_json_key) if __name__ == "__main__": parser = argparse.ArgumentParser() # Local metrics file comparison parser.add_argument("--control", default=None, help="The control group metrics file for comparison. " "If unprovided, will attempt to download and compare the previous JSON from S3 " "within the past week. The platform flag must be specified in this case.") parser.add_argument("--treatment", default=None, help="The treatment metrics file for comparison.") # S3 metrics file comparison parser.add_argument("--name", help="Name of the userbenchmark to detect regression.") parser.add_argument("--platform", choices=PLATFORMS, default=None, help="The name of platform of the regression.") parser.add_argument("--start-date", default=None, help="The start date to detect regression.") parser.add_argument("--end-date", default=None, help="The latest date to detect regression.") # download from S3 parser.add_argument("--download-from-s3", action='store_true', help="Only download the existing regression yaml file from S3." 
\ "The regression yaml file can be used for bisection.") # output file path parser.add_argument("--output", default=None, help="Output path to print the regression detection file.") # GitHub issue details parser.add_argument("--owner", nargs="*", default=[DEFAULT_GH_ISSUE_OWNER], help="Owner(s) to cc on regression issues, e.g., @janeyx99.") parser.add_argument("--gh-issue-path", default="gh-issue.md", help="Output path to print the issue body") parser.add_argument("--errors-path", default="errors.txt", help="Path to errors log generated by the benchmarks run. " + "Its existence ONLY is used to detect whether runtime regressions occurred.") args = parser.parse_args() owner = " ".join(args.owner) if args.owner else DEFAULT_GH_ISSUE_OWNER # User provided both control and treatment files if args.control and args.treatment: with open(args.control, "r") as cfptr: control = json.load(cfptr) with open(args.treatment, "r") as tfptr: treatment = json.load(tfptr) output_path = args.output if args.output else get_default_output_path(control["name"]) regression_result = generate_regression_result(control, treatment) process_regressions_into_yaml(regression_result, output_path, args.control, args.treatment) process_regressions_into_gh_issue(regression_result, owner, args.gh_issue_path, args.errors_path) exit(0) # Query S3 to get control and treatment json files if not args.platform: raise ValueError("A platform must be specified with the --platform flag to retrieve the " "previous metrics JSONs as control from S3.") # User only provide the treatement file, and expect us to download from S3 control, treatment = None, None if not args.control and args.treatment: json_path = Path(args.treatment) assert json_path.exists(), f"Specified result json path {args.treatment} does not exist." end_date: datetime = datetime.strptime(get_date_from_metrics(json_path.stem), "%Y-%m-%d") userbenchmark_name: str = get_ub_name(args.treatment) with open(json_path, "r") as cfptr: treatment = json.load(cfptr) else: assert args.name, f"To detect regression with S3, you must specify a userbenchmark name." userbenchmark_name = args.name end_date = datetime.strptime(args.end_date, "%Y-%m-%d") # Only download the existing regression YAML file from S3 if args.download_from_s3: assert args.output, f"You must specify a regression output file path for S3 download." regression_yaml_cond = lambda x: x.endswith('.yaml') and 'regression' in x available_regression_yamls = get_latest_files_in_s3_from_last_n_days(userbenchmark_name, args.platform, end_date, regression_yaml_cond, ndays=1) if not len(available_regression_yamls): raise RuntimeError(f"No regression yaml found on S3 for end date {end_date}, userbenchmark {userbenchmark_name}, and platform {args.platform}") latest_regression_yaml = available_regression_yamls[0] s3 = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT) regression_yaml = s3.get_file_as_yaml(latest_regression_yaml) with open(args.output, "w") as rf: yaml.safe_dump(regression_yaml, rf) print(f"Downloaded the regression yaml file to path {args.output}") exit(0) metrics_json_cond = lambda x: x.endswith('.json') and 'metrics' in x available_metrics_jsons = get_latest_files_in_s3_from_last_n_days(userbenchmark_name, args.platform, end_date, metrics_json_cond, ndays=7) # Download control from S3 if len(available_metrics_jsons) == 0: raise RuntimeError(f"No previous JSONS in a week found to compare towards the end date {end_date}. 
No regression info has been generated.") print(f"Found metrics json files on S3: {available_metrics_jsons}") start_date = args.start_date if args.start_date else get_best_start_date(available_metrics_jsons, end_date) if not start_date: raise RuntimeError(f"No start date in previous JSONS found to compare towards the end date {end_date}. User specified start date: {args.start_date}. " + f"Available JSON dates: {available_metrics_jsons.keys()}. No regression info has been generated.") print(f"[TorchBench Regression Detector] Detecting regression of {userbenchmark_name} on platform {args.platform}, start date: {start_date}, end date: {end_date}.") (control, control_file) = get_metrics_by_date(available_metrics_jsons, start_date) if not control else (control, args.control) (treatment, treatment_file) = get_metrics_by_date(available_metrics_jsons, end_date) if not treatment else (treatment, args.treatment) regression_result = generate_regression_result(control, treatment) output_path = args.output if args.output else get_default_output_path(control["name"]) process_regressions_into_yaml(regression_result, output_path, control_file, treatment_file) process_regressions_into_gh_issue(regression_result, owner, args.gh_issue_path, args.errors_path)
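# --------------------------------------------------------------------------
# Illustrative sketch (not part of the benchmark suite): the detector above
# splits control/treatment metrics into shared and exclusive keys, then asks a
# userbenchmark-specific detector for per-metric deltas. The helper below is a
# minimal, self-contained version of that shared-key split plus a naive
# relative-delta rule; `simple_detect_regressions` and `threshold` are
# hypothetical names, not the real detector API.
from typing import Dict, Tuple


def simple_detect_regressions(control: Dict[str, float],
                              treatment: Dict[str, float],
                              threshold: float = 0.07) -> Tuple[Dict[str, float], Dict[str, float], Dict[str, float]]:
    shared = control.keys() & treatment.keys()
    control_only = {k: v for k, v in control.items() if k not in shared}
    treatment_only = {k: v for k, v in treatment.items() if k not in shared}
    # Relative delta for every shared metric; keep only the ones above the threshold.
    deltas = {}
    for key in shared:
        base = control[key]
        delta = (treatment[key] - base) / base if base else 0.0
        if abs(delta) >= threshold:
            deltas[key] = delta
    return deltas, control_only, treatment_only

# Example:
#   simple_detect_regressions({"m-latency": 10.0}, {"m-latency": 11.0})
#   -> ({'m-latency': 0.1}, {}, {})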
"""bisection.py Runs bisection to determine PRs that trigger performance signals. It assumes that the pytorch, torchbench, torchvision, and torchaudio repositories provided are all clean with the latest code. By default, the torchaudio and torchvision packages will be fixed to the latest commit on the same pytorch commit date. Usage: python bisection.py --work-dir <WORK_DIR> \ --torch-repos-path <PYTORCH_REPOS_PATH> \ --torchbench-repo-path <TORCHBENCH_SRC_DIR> \ --config <BISECT_CONFIG> --output <OUTPUT_FILE_PATH> """ import argparse import os import sys import json import time import shutil import yaml from pathlib import Path import subprocess from datetime import datetime from dataclasses import asdict from typing import Optional, List, Dict, Tuple, Any, Callable from userbenchmark.utils import ( TorchBenchABTestResult, parse_abtest_result_from_regression_file_for_bisect ) from regression_detector import generate_regression_result from utils import gitutils from utils.build_utils import ( setup_bisection_build_env, build_repo, cleanup_torch_packages, TorchRepo, ) from utils.cuda_utils import prepare_cuda_env, DEFAULT_CUDA_VERSION TORCHBENCH_BISECTION_TARGETS = { "pytorch": { "name": "pytorch", "url": "https://github.com/pytorch/pytorch.git", "build_command": [sys.executable, "setup.py", "install"], }, "torchdata": { "name": "data", "url": "https://github.com/pytorch/data.git", "build_command": [sys.executable, "setup.py", "install"], }, "torchvision": { "name": "vision", "url": "https://github.com/pytorch/vision.git", "build_command": [sys.executable, "setup.py", "install"], }, "torchaudio": { "name": "audio", "url": "https://github.com/pytorch/audio.git", "build_command": [sys.executable, "setup.py", "clean", "develop"], }, "torchbench": { "name": "benchmark", "url": "https://github.com/pytorch/benchmark.git", "build_command": [sys.executable, "install.py"], }, } SKIP_INSTALL_TORCHBENCH = False def exist_dir_path(string): if os.path.isdir(string): return string else: raise NotADirectoryError(string) def exist_file_path(string): if not os.path.exists(string): raise FileNotFoundError(string) elif os.path.isdir(string): return IsADirectoryError(string) else: return string def get_latest_non_empty_file(directory: str, cond: Callable) -> Optional[str]: if os.path.isdir(directory): filelist = [ os.path.join(directory, f) for f in os.listdir(directory) ] non_empty_filelist = [ f for f in filelist if os.path.getsize(f) and cond(f) ] if len(non_empty_filelist): return max(non_empty_filelist, key=os.path.getctime) return None def get_updated_clean_torch_repos(pytorch_repos_path: str, torchbench_repo_path: Optional[str]=None, skip_update_repos: Optional[List[str]]=None) -> Dict[str, TorchRepo]: all_repos = {} def _gen_torch_repo(repo_name: str, repo_path: str): assert repo_path.exists() and repo_path.is_dir(), f"{str(repo_path)} is not an existing directory." 
main_branch = "main" if not "main_branch" in TORCHBENCH_BISECTION_TARGETS[repo_name] else \ TORCHBENCH_BISECTION_TARGETS[repo_name]["main_branch"] if not skip_update_repos or not repo_name in skip_update_repos: gitutils.cleanup_local_changes(repo_path.absolute()) assert gitutils.update_git_repo(repo_path.absolute(), main_branch) assert gitutils.clean_git_repo(repo_path.absolute()) cur_commit = gitutils.get_current_commit(repo_path.absolute()) return TorchRepo(name=repo_name, origin_url=TORCHBENCH_BISECTION_TARGETS[repo_name]["url"], main_branch=main_branch, src_path=repo_path, cur_commit=cur_commit, build_command=TORCHBENCH_BISECTION_TARGETS[repo_name]["build_command"]) for repo_name in TORCHBENCH_BISECTION_TARGETS.keys(): repo_subdir_name = TORCHBENCH_BISECTION_TARGETS[repo_name]["name"] repo_path = Path(pytorch_repos_path).joinpath(repo_subdir_name) if not (torchbench_repo_path and repo_name == "torchbench") \ else Path(torchbench_repo_path) all_repos[repo_name] = _gen_torch_repo(repo_name, repo_path) return all_repos class Commit: sha: str ctime: str digest: Optional[Dict[str, Any]] def __init__(self, sha, ctime): self.sha = sha self.ctime = ctime self.digest = None def __str__(self): return self.sha class BisectionTargetRepo: repo: TorchRepo start: str end: str non_target_repos: List[TorchRepo] # generated in prep() bisection_env: os._Environ commits: List[Commit] # Map from commit SHA to its index in commits commit_dict: Dict[str, int] def __init__(self, repo: TorchRepo, start: str, end: str, non_target_repos: List[TorchRepo]): self.repo = repo self.start = start self.end = end self.non_target_repos = non_target_repos self.commits = [] self.commit_dict = dict() # Checkout the last commit of non-target repos on date def _checkout_non_target_repos(self, cdate: datetime): for repo in self.non_target_repos: gitutils.checkout_git_branch(repo.src_path.absolute(), repo.main_branch) dep_commit = gitutils.get_git_commit_on_date(repo.src_path.absolute(), cdate) assert dep_commit, f"Failed to find the commit on {cdate} of {repo.name}" print(f"Checking out {repo.name} commit {dep_commit} ...", end="", flush=True) assert gitutils.checkout_git_commit(repo.src_path.absolute(), dep_commit), \ f"Failed to checkout commit {dep_commit} of {repo.name}" print("done.") def prep(self) -> bool: base_build_env = prepare_cuda_env(cuda_version=DEFAULT_CUDA_VERSION) self.bisection_env = setup_bisection_build_env(base_build_env) commits = gitutils.get_git_commits(self.repo.src_path, self.start, self.end) if not commits or len(commits) < 2: print(f"Failed to retrieve commits from {self.start} to {self.end} in {self.repo.src_path}.") return False for count, commit in enumerate(commits): ctime = gitutils.get_git_commit_date(self.repo.src_path, commit) self.commits.append(Commit(sha=commit, ctime=ctime)) self.commit_dict[commit] = count return True def get_mid_commit(self, left: Commit, right: Commit) -> Optional[Commit]: left_index = self.commit_dict[left.sha] right_index = self.commit_dict[right.sha] if right_index == left_index + 1: return None else: return self.commits[int((left_index + right_index) / 2)] def build(self, commit: Commit): # checkout target repo commit print(f"====================== [TORCHBENCH] Checking out target repo {self.repo.name} commit {commit.sha} " \ "=======================", flush=True) assert gitutils.checkout_git_commit(self.repo.src_path.absolute(), commit.sha) # checkout non-target repos commit ctime = datetime.strptime(commit.ctime.split(" ")[0], "%Y-%m-%d") 
self._checkout_non_target_repos(ctime) # build target repo build_repo(self.repo, self.bisection_env) # build non target repos for repo in self.non_target_repos: build_repo(repo, self.bisection_env) class TorchBenchRepo: repo: TorchRepo target_repo: BisectionTargetRepo workdir: Path bisection_env: os._Environ timelimit: int # timeout limit in minutes first_time: bool def __init__(self, repo: TorchRepo, target_repo: BisectionTargetRepo, workdir: Path): self.repo = repo self.target_repo = target_repo self.workdir = workdir self.first_time = True def prep(self, bisection_env: os._Environ) -> bool: self.bisection_env = bisection_env return True def _install_benchmark(self): "Install and build TorchBench dependencies" command = [sys.executable, "install.py"] subprocess.check_call(command, cwd=self.repo.src_path.absolute(), env=self.bisection_env) def _run_benchmark_for_commit(self, commit: Commit, bisect_config: TorchBenchABTestResult) -> str: # Return the result json file path output_dir = os.path.join(self.workdir.absolute(), commit.sha) # If the directory already exists, clear its contents if os.path.exists(output_dir): assert os.path.isdir(output_dir), "Must specify output directory: {output_dir}" shutil.rmtree(output_dir) os.mkdir(output_dir) # If the first time to run benchmark, install the dependencies first if self.first_time and not SKIP_INSTALL_TORCHBENCH: self._install_benchmark() self.first_time = False bm_name = bisect_config.name output_file = "metrics-{}.json".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S")) output_file_path = os.path.join(output_dir, output_file) print(f"===================== [TORCHBENCH] Running TorchBench for commit: {commit.sha} START =====================", flush=True) command = [sys.executable, "run_benchmark.py", bm_name, "--run-bisect", bisect_config.bisection_config_file_path, "--output", output_file_path] subprocess.check_call(command, cwd=self.repo.src_path, env=self.bisection_env) print(f"===================== [TORCHBENCH] Running TorchBench for commit: {commit.sha} END. OUTPUT: {output_file_path} =====================", flush=True) return output_file_path def _gen_digest(self, result_json: str) -> Dict[str, float]: out = {} if not os.path.getsize(result_json): print(f"Empty json file {result_json}. 
Return empty digest.") return out with open(result_json, "r") as df: data = json.load(df) return data def get_digest_for_commit(self, commit: Commit, abtest_result: Dict[str, Any], debug: bool) -> Dict[str, float]: # digest is cached before if commit.digest: return commit.digest # if in debug mode, load from the benchmark file if it exists if debug: result_dir = os.path.join(self.workdir, commit.sha) result_json = get_latest_non_empty_file(result_dir, lambda x: x.endswith(".json")) if result_json: commit.digest = self._gen_digest(result_json) return commit.digest # Build all torch packages self.target_repo.build(commit) # Run benchmark, return the output json file result_json = self._run_benchmark_for_commit(commit, abtest_result) commit.digest = self._gen_digest(result_json) print(f"================== [TORCHBENCH] Cleaning up packages for commit {commit.sha} ==================", flush=True) cleanup_torch_packages() return commit.digest class TorchBenchBisection: workdir: Path torch_repos: Dict[str, TorchRepo] target_repo: BisectionTargetRepo torchbench: TorchBenchRepo bisect_config: TorchBenchABTestResult output_json: str debug: bool # left commit, right commit, TorchBenchABTestResult to test bisectq: List[Tuple[Commit, Commit, TorchBenchABTestResult]] result: List[Tuple[Commit, Commit]] def __init__(self, workdir: str, torch_repos: List[TorchRepo], target_repo: TorchRepo, start: str, end: str, bisect_config: TorchBenchABTestResult, output_json: str, debug: bool = False): self.workdir = Path(workdir) self.torch_repos = torch_repos non_target_repos = list(filter(lambda x: not x.name == target_repo.name and not x.name == "torchbench", torch_repos.values())) self.target_repo = BisectionTargetRepo(repo=target_repo, start=start, end=end, non_target_repos=non_target_repos) self.torchbench = TorchBenchRepo(repo=torch_repos["torchbench"], target_repo=self.target_repo, workdir=self.workdir) self.bisect_config = bisect_config self.bisectq = list() self.result = list() self.output_json = output_json self.debug = debug def prep(self) -> bool: cleanup_torch_packages() if not self.target_repo.prep(): return False if not self.torchbench.prep(self.target_repo.bisection_env): return False left_commit = self.target_repo.commits[0] right_commit = self.target_repo.commits[-1] self.bisectq.append((left_commit, right_commit, self.bisect_config)) return True # Left: older commit, right: newer commit, target: TorchBenchABTestResult # Return: List of [left, right, TorchBenchABTestResult] that satisfy the regression rule def regression_detection(self, left: Commit, right: Commit) -> TorchBenchABTestResult: # If uncalculated, commit.digest will be None assert left.digest, "Commit {left.sha} must have a digest" assert right.digest, "Commit {right.sha} must have a digest" regression_result = generate_regression_result(left.digest, right.digest) regression_file = f"regression-{left.sha}-{right.sha}.yaml" regression_file_full_path = os.path.join(self.workdir.absolute(), regression_file) with open(regression_file_full_path, "w") as rf: rf.write(yaml.safe_dump(asdict(regression_result))) regression_result.bisection_config_file_path = regression_file_full_path return regression_result def run(self): while len(self.bisectq): (left, right, abtest_result) = self.bisectq.pop(0) self.torchbench.get_digest_for_commit(left, abtest_result, self.debug) self.torchbench.get_digest_for_commit(right, abtest_result, self.debug) updated_abtest_result = self.regression_detection(left, right) if len(updated_abtest_result.details) or \ 
len(updated_abtest_result.control_only_metrics) or \ len(updated_abtest_result.treatment_only_metrics): mid = self.target_repo.get_mid_commit(left, right) if mid == None: self.result.append((left, right)) else: self.bisectq.append((left, mid, updated_abtest_result)) self.bisectq.append((mid, right, updated_abtest_result)) def output(self): json_obj = dict() json_obj["target_repo"] = self.target_repo.repo.name json_obj["start"] = self.target_repo.start json_obj["end"] = self.target_repo.end json_obj["result"] = [] for res in self.result: r = dict() r["commit1"] = res[0].sha r["commit1_time"] = res[0].ctime r["commit1_digest"] = res[0].digest r["commit2"] = res[1].sha r["commit2_time"] = res[1].ctime r["commit2_digest"] = res[1].digest json_obj["result"].append(r) with open(self.output_json, 'w') as outfile: json.dump(json_obj, outfile, indent=2) print(f"Bisection successful. Result saved to {self.output_json}:") print(json_obj) if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--work-dir", required=True, help="bisection working directory for logs and results", type=exist_dir_path) parser.add_argument("--torch-repos-path", required=True, help="the directory of pytorch/* source code repositories", type=exist_dir_path) parser.add_argument("--torchbench-repo-path", default=None, help="the directory of torchbench source code git repository, if None, use `args.torch_repo_path/benchmark`.", type=exist_dir_path) parser.add_argument("--config", required=True, help="the regression dict output of regression_detector.py in YAML", type=exist_file_path) parser.add_argument("--skip-install-torchbench", action="store_true", help="Skip installing torchbench") parser.add_argument("--output", required=True, help="the output json file") parser.add_argument("--skip-update", type=str, default="torchbench", help="Repositories to skip update.") # by default, debug mode is disabled parser.add_argument("--debug", help="run in debug mode, if the result json exists, use it directly", action='store_true') args = parser.parse_args() bisect_config = parse_abtest_result_from_regression_file_for_bisect(args.config) # sanity checks assert bisect_config.name, "Invalid bisection config, must specify userbenchmark name." assert bisect_config.control_env["git_commit_hash"], "Invalid bisection config, must specify control group commit hash." assert bisect_config.treatment_env["git_commit_hash"], "Invalid bisection config, must specify treatment group commit hash." assert bisect_config.bisection in TORCHBENCH_BISECTION_TARGETS.keys(), f"Invalid bisection config, " \ f"get bisection target repo {bisect_config.bisection}, " \ f"available target repos: {TORCHBENCH_BISECTION_TARGETS.keys()}" assert bisect_config.bisection_mode == "bisect", "Abtest mode is not supported yet." assert len(bisect_config.details), "The bisection target metrics must not be empty." 
if args.skip_update: skip_update_repos = list(map(lambda x: x.strip(), args.skip_update.split(","))) for repo in skip_update_repos: assert repo in list(TORCHBENCH_BISECTION_TARGETS.keys()), f"User specified skip update repo {repo} not in list: {TORCHBENCH_BISECTION_TARGETS.keys()}" else: skip_update_repos = None if args.skip_install_torchbench: SKIP_INSTALL_TORCHBENCH = True # load, update, and clean the repo directories torch_repos: Dict[str, TorchRepo] = get_updated_clean_torch_repos(args.torch_repos_path, args.torchbench_repo_path, skip_update_repos) target_repo = torch_repos[bisect_config.bisection] start_hash = gitutils.get_torch_main_commit(target_repo.src_path.absolute(), bisect_config.control_env["git_commit_hash"]) end_hash = gitutils.get_torch_main_commit(target_repo.src_path.absolute(), bisect_config.treatment_env["git_commit_hash"]) bisection = TorchBenchBisection(workdir=args.work_dir, torch_repos=torch_repos, target_repo=torch_repos[bisect_config.bisection], start=start_hash, end=end_hash, bisect_config=bisect_config, output_json=args.output, debug=args.debug) assert bisection.prep(), "The working condition of bisection is not satisfied." print("Preparation steps ok. Commit to bisect: " + " ".join([str(x) for x in bisection.target_repo.commits])) bisection.run() bisection.output()
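# --------------------------------------------------------------------------
# Illustrative sketch (not part of the benchmark suite): the bisection loop
# above repeatedly splits a (left, right) commit range at the midpoint and
# keeps only the halves in which the performance signal still reproduces. The
# stand-alone version below works on a plain list of commit SHAs, with a
# hypothetical `signal_between(left, right)` predicate standing in for the
# build-run-compare step.
from typing import Callable, List, Tuple


def bisect_commits(commits: List[str],
                   signal_between: Callable[[str, str], bool]) -> List[Tuple[str, str]]:
    """Return adjacent (left, right) commit pairs that still show the signal."""
    culprits = []
    queue = [(0, len(commits) - 1)]
    while queue:
        left, right = queue.pop(0)
        if not signal_between(commits[left], commits[right]):
            continue
        if right == left + 1:
            # No commit in between: the regression landed between these two.
            culprits.append((commits[left], commits[right]))
            continue
        mid = (left + right) // 2
        queue.append((left, mid))
        queue.append((mid, right))
    return culprits

# Example: with commits c0..c9 and a regression introduced by c6,
#   bisect_commits([f"c{i}" for i in range(10)],
#                  lambda l, r: int(l[1:]) < 6 <= int(r[1:]))
#   -> [('c5', 'c6')]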
from enum import Enum

# Enum classes for each domain of the models and the respective tasks
# that are available in that domain.


class COMPUTER_VISION(Enum):
    SEGMENTATION = "segmentation"
    CLASSIFICATION = "classification"
    DETECTION = "detection"
    GENERATION = "generation"
    PATTERN_RECOGNITION = "pattern recognition"
    VIDEO_INTERPOLATION = "video interpolation"
    OTHER_COMPUTER_VISION = "other computer vision"


class NLP(Enum):
    TRANSLATION = "translation"
    LANGUAGE_MODELING = "language modeling"
    GENERATION = "generation"
    OTHER_NLP = "other nlp"


class SPEECH(Enum):
    SYNTHESIS = "synthesis"
    RECOGNITION = "recognition"


class RECOMMENDATION(Enum):
    RECOMMENDATION = "recommendation"


class REINFORCEMENT_LEARNING(Enum):
    OTHER_RL = "other rl"


class OTHER(Enum):
    OTHER_TASKS = "other tasks"


class GNN(Enum):
    CLASSIFICATION = "classification"
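# --------------------------------------------------------------------------
# Illustrative sketch: every benchmark model tags itself with one of the task
# enums above (for example, `task = GNN.CLASSIFICATION` in the gat model later
# in this document), which makes it easy to group models by domain. The
# grouping helper here is a hypothetical example, not part of the suite.
from collections import defaultdict


def group_by_domain(model_classes):
    grouped = defaultdict(list)
    for cls in model_classes:
        task = getattr(cls, "task", None)
        domain = type(task).__name__ if task is not None else "UNKNOWN"
        grouped[domain].append(cls.__name__)
    return dict(grouped)


if __name__ == "__main__":
    class _DemoModel:
        task = GNN.CLASSIFICATION

    print(group_by_domain([_DemoModel]))  # {'GNN': ['_DemoModel']}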
import contextlib import dataclasses import gc import importlib import io import os import pathlib import subprocess import sys import tempfile import threading from pathlib import Path from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple from urllib import request import torch from components._impl.tasks import base as base_task from components._impl.workers import subprocess_worker class ModelNotFoundError(RuntimeError): pass REPO_PATH = Path(os.path.abspath(__file__)).parent.parent DATA_PATH = os.path.join(REPO_PATH, "torchbenchmark", "data", ".data") class add_path(): def __init__(self, path): self.path = path def __enter__(self): sys.path.insert(0, self.path) def __exit__(self, exc_type, exc_value, traceback): try: sys.path.remove(self.path) except ValueError: pass with add_path(str(REPO_PATH)): from utils import TORCH_DEPS, get_pkg_versions, proxy_suggestion this_dir = pathlib.Path(__file__).parent.absolute() model_dir = 'models' internal_model_dir = "fb" canary_model_dir = "canary_models" install_file = 'install.py' def _test_https(test_url: str = 'https://github.com', timeout: float = 0.5) -> bool: try: request.urlopen(test_url, timeout=timeout) except OSError: return False return True def _install_deps(model_path: str, verbose: bool = True) -> Tuple[bool, Any]: from .util.env_check import get_pkg_versions run_args = [ [sys.executable, install_file], ] run_env = os.environ.copy() run_env["PYTHONPATH"] = this_dir.parent run_kwargs = { 'cwd': model_path, 'check': True, 'env': run_env, } output_buffer = None _, stdout_fpath = tempfile.mkstemp() try: output_buffer = io.FileIO(stdout_fpath, mode="w") if os.path.exists(os.path.join(model_path, install_file)): if not verbose: run_kwargs['stderr'] = subprocess.STDOUT run_kwargs['stdout'] = output_buffer versions = get_pkg_versions(TORCH_DEPS) subprocess.run(*run_args, **run_kwargs) # type: ignore new_versions = get_pkg_versions(TORCH_DEPS) if versions != new_versions: errmsg = f"The torch packages are re-installed after installing the benchmark deps. \ Before: {versions}, after: {new_versions}" return (False, errmsg, None) else: return (True, f"No install.py is found in {model_path}. 
Skip.", None) except subprocess.CalledProcessError as e: return (False, e.output, io.FileIO(stdout_fpath, mode="r").read().decode()) except Exception as e: return (False, e, io.FileIO(stdout_fpath, mode="r").read().decode()) finally: del output_buffer os.remove(stdout_fpath) return (True, None, None) def dir_contains_file(dir, file_name) -> bool: names = map(lambda x: x.name, filter(lambda x: x.is_file(), dir.iterdir())) return file_name in names def _list_model_paths() -> List[str]: p = pathlib.Path(__file__).parent.joinpath(model_dir) # Only load the model directories that contain a "__init.py__" file models = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and \ (not child.name == internal_model_dir) and dir_contains_file(child, "__init__.py")) p = p.joinpath(internal_model_dir) if p.exists(): m = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and dir_contains_file(child, "__init__.py")) models.extend(m) return models def _list_canary_model_paths() -> List[str]: p = pathlib.Path(__file__).parent.joinpath(canary_model_dir) # Only load the model directories that contain a "__init.py__" file models = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and \ (not child.name == internal_model_dir) and dir_contains_file(child, "__init__.py")) return models def _is_internal_model(model_name: str) -> bool: p = pathlib.Path(__file__).parent.joinpath(model_dir).joinpath(internal_model_dir).joinpath(model_name) if p.exists() and p.joinpath("__init__.py").exists(): return True return False def _is_canary_model(model_name: str) -> bool: p = pathlib.Path(__file__).parent.joinpath(canary_model_dir).joinpath(model_name) if p.exists() and p.joinpath("__init__.py").exists(): return True return False def setup(models: List[str] = [], verbose: bool = True, continue_on_fail: bool = False, test_mode: bool = False, allow_canary: bool = False) -> bool: if not _test_https(): print(proxy_suggestion) sys.exit(-1) failures = {} models = list(map(lambda p: p.lower(), models)) model_paths = filter(lambda p: True if not models else os.path.basename(p).lower() in models, _list_model_paths()) if allow_canary: canary_model_paths = filter(lambda p: os.path.basename(p).lower() in models, _list_canary_model_paths()) model_paths = list(model_paths) model_paths.extend(canary_model_paths) for model_path in model_paths: print(f"running setup for {model_path}...", end="", flush=True) if test_mode: versions = get_pkg_versions(TORCH_DEPS) success, errmsg, stdout_stderr = _install_deps(model_path, verbose=verbose) if test_mode: new_versions = get_pkg_versions(TORCH_DEPS, reload=True) if versions != new_versions: print(f"The torch packages are re-installed after installing the benchmark model {model_path}. \ Before: {versions}, after: {new_versions}") sys.exit(-1) if success and errmsg and "No install.py is found" in errmsg: print("SKIP - No install.py is found") elif success: print("OK") else: print("FAIL") try: errmsg = errmsg.decode() except Exception: pass # If the install was very chatty, we don't want to overwhelm. # This will not affect verbose mode, which does not catch stdout # and stderr. 
log_lines = (stdout_stderr or "").splitlines(keepends=False) if len(log_lines) > 40: log_lines = log_lines[:20] + ["..."] + log_lines[-20:] stdout_stderr = "\n".join(log_lines) if stdout_stderr: errmsg = f"{stdout_stderr}\n\n{errmsg or ''}" failures[model_path] = errmsg if not continue_on_fail: break for model_path in failures: print(f"Error for {model_path}:") print("---------------------------------------------------------------------------") print(failures[model_path]) print("---------------------------------------------------------------------------") print() return len(failures) == 0 @dataclasses.dataclass(frozen=True) class ModelDetails: """Static description of what a particular TorchBench model supports. When parameterizing tests, we only want to generate sensible ones. (e.g. Those where a model can be imported and supports the feature to be tested or benchmarked.) This requires us to import the model; however many of the models are EXTREMELY stateful, and even importing them consumes significant system resources. As a result, we only want one (or a few) alive at any given time. Note that affinity cannot be solved by simply calling `torch.set_num_threads` in the child process; this will cause PyTorch to use all of the cores but at a much lower efficiency. This class describes what a particular model does and does not support, so that we can release the underlying subprocess but retain any pertinent metadata. """ path: str exists: bool _diagnostic_msg: str metadata: Dict[str, Any] @property def name(self) -> str: return os.path.basename(self.path) class Worker(subprocess_worker.SubprocessWorker): """Run subprocess using taskset if CPU affinity is set. When GOMP_CPU_AFFINITY is set, importing `torch` in the main process has the very surprising effect of changing the threading behavior in the subprocess. (See https://github.com/pytorch/pytorch/issues/49971 for details.) This is a problem, because it means that the worker is not hermetic and also tends to force the subprocess torch to run in single threaded mode which drastically skews results. This can be ameliorated by calling the subprocess using `taskset`, which allows the subprocess PyTorch to properly bind threads. """ @property def args(self) -> List[str]: affinity = os.environ.get("GOMP_CPU_AFFINITY", "") return ( ["taskset", "--cpu-list", affinity] if affinity else [] ) + super().args class ModelTask(base_task.TaskBase): # The worker may (and often does) consume significant system resources. # In order to ensure that runs do not interfere with each other, we only # allow a single ModelTask to exist at a time. _lock = threading.Lock() def __init__( self, model_path: str, timeout: Optional[float] = None, extra_env: Optional[Dict[str, str]] = None, ) -> None: gc.collect() # Make sure previous task has a chance to release the lock assert self._lock.acquire(blocking=False), "Failed to acquire lock." 
self._model_path = model_path if _is_internal_model(model_path): model_path = f"{internal_model_dir}.{model_path}" self._worker = Worker(timeout=timeout, extra_env=extra_env) self.worker.run("import torch") self._details: ModelDetails = ModelDetails( **self._maybe_import_model( package=__name__, model_path=model_path, ) ) def __del__(self) -> None: self._lock.release() @property def worker(self) -> subprocess_worker.SubprocessWorker: return self._worker @property def model_details(self) -> bool: return self._details # ========================================================================= # == Import Model in the child process ==================================== # ========================================================================= @base_task.run_in_worker(scoped=True) @staticmethod def _maybe_import_model(package: str, model_path: str) -> Dict[str, Any]: import importlib import os import traceback model_name = os.path.basename(model_path) diagnostic_msg = "" try: module = importlib.import_module(f'.models.{model_name}', package=package) if accelerator_backend := os.getenv("ACCELERATOR_BACKEND"): setattr(module, accelerator_backend, importlib.import_module(accelerator_backend)) Model = getattr(module, 'Model', None) if Model is None: diagnostic_msg = f"Warning: {module} does not define attribute Model, skip it" elif not hasattr(Model, 'name'): Model.name = model_name except ModuleNotFoundError as e: traceback.print_exc() exit(-1) # Populate global namespace so subsequent calls to worker.run can access `Model` globals()["Model"] = Model # This will be used to populate a `ModelDetails` instance in the parent. return { "path": model_path, "exists": Model is not None, "_diagnostic_msg": diagnostic_msg, "metadata": {} } # ========================================================================= # == Instantiate a concrete `model` instance ============================== # ========================================================================= @base_task.run_in_worker(scoped=True) @staticmethod def make_model_instance(test: str, device: str, batch_size: Optional[int]=None, extra_args: List[str]=[]) -> None: Model = globals()["Model"] model = Model(test=test, device=device, batch_size=batch_size, extra_args=extra_args) import gc gc.collect() if device == 'cuda': torch.cuda.empty_cache() maybe_sync = torch.cuda.synchronize else: maybe_sync = lambda: None globals().update({ "model": model, "maybe_sync": maybe_sync, }) # ========================================================================= # == Replace the `invoke()` function in `model` instance ================== # ========================================================================= @base_task.run_in_worker(scoped=True) @staticmethod def replace_invoke(module_name: str, func_name: str) -> None: import importlib # import function from pkg model = globals()["model"] try: module = importlib.import_module(module_name) inject_func = getattr(module, func_name, None) if inject_func is None: diagnostic_msg = f"Warning: {module} does not define attribute {func_name}, skip it" except ModuleNotFoundError as e: diagnostic_msg = f"Warning: Could not find dependent module {e.name} for Model {model.name}, skip it" model.invoke = inject_func.__get__(model) # ========================================================================= # == Get Model attribute in the child process ============================= # ========================================================================= @base_task.run_in_worker(scoped=True) @staticmethod def 
get_model_attribute(attr: str, field: str=None, classattr: bool=False) -> Any: if classattr: model = globals()["Model"] else: model = globals()["model"] if hasattr(model, attr): if field: model_attr = getattr(model, attr) return getattr(model_attr, field) else: return getattr(model, attr) else: return None def gc_collect(self) -> None: self.worker.run(""" import gc gc.collect() """) def del_model_instance(self): self.worker.run(""" del model del maybe_sync """) self.gc_collect() # ========================================================================= # == Forward calls to `model` from parent to worker ======================= # ========================================================================= def set_train(self) -> None: self.worker.run("model.set_train()") def invoke(self) -> None: self.worker.run(""" model.invoke() maybe_sync() """) def set_eval(self) -> None: self.worker.run("model.set_eval()") def extract_details_train(self) -> None: self._details.metadata["train_benchmark"] = self.worker.load_stmt("torch.backends.cudnn.benchmark") self._details.metadata["train_deterministic"] = self.worker.load_stmt("torch.backends.cudnn.deterministic") def check_details_train(self, device, md) -> None: self.extract_details_train() if device == 'cuda': assert md["train_benchmark"] == self._details.metadata["train_benchmark"], \ "torch.backends.cudnn.benchmark does not match expect metadata during training." assert md["train_deterministic"] == self._details.metadata["train_deterministic"], \ "torch.backends.cudnn.deterministic does not match expect metadata during training." def extract_details_eval(self) -> None: self._details.metadata["eval_benchmark"] = self.worker.load_stmt("torch.backends.cudnn.benchmark") self._details.metadata["eval_deterministic"] = self.worker.load_stmt("torch.backends.cudnn.deterministic") # FIXME: Models will use context "with torch.no_grad():", so the lifetime of no_grad will end after the eval(). # FIXME: Must incorporate this "torch.is_grad_enabled()" inside of actual eval() func. # self._details.metadata["eval_nograd"] = not self.worker.load_stmt("torch.is_grad_enabled()") self._details.metadata["eval_nograd"] = True def check_details_eval(self, device, md) -> None: self.extract_details_eval() if device == 'cuda': assert md["eval_benchmark"] == self._details.metadata["eval_benchmark"], \ "torch.backends.cudnn.benchmark does not match expect metadata during eval." assert md["eval_deterministic"] == self._details.metadata["eval_deterministic"], \ "torch.backends.cudnn.deterministic does not match expect metadata during eval." assert md["eval_nograd"] == self._details.metadata["eval_nograd"], \ "torch.is_grad_enabled does not match expect metadata during eval." @base_task.run_in_worker(scoped=True) @staticmethod def check_eval_output() -> None: instance = globals()["model"] assert instance.test == "eval", "We only support checking output of an eval test. Please submit a bug report." instance.invoke() @base_task.run_in_worker(scoped=True) @staticmethod def check_device() -> None: instance = globals()["model"] # Check this BenchmarkModel has a device attribute. current_device = getattr(instance, 'device', None) if current_device is None: raise RuntimeError('Missing device in BenchmarkModel.') model, inputs = instance.get_module() model_name = getattr(model, 'name', None) # Check the model tensors are assigned to the expected device. 
for t in model.parameters(): model_device = t.device.type if model_device != current_device: raise RuntimeError(f'Model {model_name} was not set to the' f' expected device {current_device},' f' found device {model_device}.') # Check the inputs are assigned to the expected device. def check_inputs(inputs): if isinstance(inputs, torch.Tensor): if inputs.dim() and current_device == "cuda": # Zero dim Tensors (Scalars) can be captured by CUDA # kernels and need not match device. return inputs_device = inputs.device.type if inputs_device != current_device: raise RuntimeError(f'Model {model_name} inputs were' f' not set to the expected device' f' {current_device}, found device' f' {inputs_device}.') elif isinstance(inputs, tuple): # Some inputs are nested inside tuples, such as tacotron2 for i in inputs: check_inputs(i) elif isinstance(inputs, dict): # Huggingface models take inputs as kwargs for i in inputs.values(): check_inputs(i) check_inputs(inputs) # ========================================================================= # == Control `torch` state (in the subprocess) ============================ # ========================================================================= @contextlib.contextmanager def no_grad(self, disable_nograd: bool) -> None: # TODO: deduplicate with `torchbenchmark.util.model.no_grad` initial_value = self.worker.load_stmt("torch.is_grad_enabled()") eval_in_nograd = ( not disable_nograd and self.worker.load_stmt("model.eval_in_nograd()")) try: self.worker.run(f"torch.set_grad_enabled({not eval_in_nograd})") yield finally: self.worker.run(f"torch.set_grad_enabled({initial_value})") @contextlib.contextmanager def watch_cuda_memory( self, skip: bool, assert_equal: Callable[[int, int], NoReturn], ): # This context manager is used in testing to ensure we're not leaking # memory; these tests are generally parameterized by device, so in some # cases we want this (and the outer check) to simply be a no-op. if skip or os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1': yield return if hasattr(torch._C, '_cuda_clearCublasWorkspaces'): self.worker.load_stmt("torch._C._cuda_clearCublasWorkspaces()") self.gc_collect() memory_before = self.worker.load_stmt("torch.cuda.memory_allocated()") yield if hasattr(torch._C, '_cuda_clearCublasWorkspaces'): self.worker.load_stmt("torch._C._cuda_clearCublasWorkspaces()") self.gc_collect() assert_equal( memory_before, self.worker.load_stmt("torch.cuda.memory_allocated()"), ) self.worker.run("torch.cuda.empty_cache()") def list_models_details(workers: int = 1) -> List[ModelDetails]: return [ ModelTask(model_path).model_details for model_path in _list_model_paths() ] def list_models(model_match=None): models = [] for model_path in _list_model_paths(): model_name = os.path.basename(model_path) model_pkg = model_name if not _is_internal_model(model_name) else f"{internal_model_dir}.{model_name}" try: module = importlib.import_module(f'.models.{model_pkg}', package=__name__) except ModuleNotFoundError as e: print(f"Warning: Could not find dependent module {e.name} for Model {model_name}, skip it") continue Model = getattr(module, 'Model', None) if Model is None: print(f"Warning: {module} does not define attribute Model, skip it") continue if not hasattr(Model, 'name'): Model.name = model_name # If given model_match, only return full or partial name matches in models. 
if model_match is None: models.append(Model) else: if model_match.lower() in Model.name.lower(): models.append(Model) return models def load_model_by_name(model): models = filter(lambda x: model.lower() == x.lower(), map(lambda y: os.path.basename(y), _list_model_paths())) models = list(models) if not models: raise ModelNotFoundError(f"{model} is not found in the core model list.") assert len(models) == 1, f"Found more than one models {models} with the exact name: {model}" model_name = models[0] model_pkg = model_name if not _is_internal_model(model_name) else f"{internal_model_dir}.{model_name}" module = importlib.import_module(f'.models.{model_pkg}', package=__name__) Model = getattr(module, 'Model', None) if Model is None: print(f"Warning: {module} does not define attribute Model, skip it") return None if not hasattr(Model, 'name'): Model.name = model_name return Model def load_canary_model_by_name(model: str): if not _is_canary_model(model): raise ModelNotFoundError(f"{model} is not found in the canary model list.") module = importlib.import_module(f'.canary_models.{model}', package=__name__) Model = getattr(module, 'Model', None) if Model is None: print(f"Warning: {module} does not define attribute Model, skip it") return None if not hasattr(Model, 'name'): Model.name = model return Model def get_metadata_from_yaml(path): import yaml metadata_path = path + "/metadata.yaml" md = None if os.path.exists(metadata_path): with open(metadata_path, 'r') as f: md = yaml.load(f, Loader=yaml.FullLoader) return md def str_to_bool(input: Any) -> bool: if not input: return False return str(input).lower() in ("1", "yes", "y", "true", "t", "on")
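# --------------------------------------------------------------------------
# Illustrative sketch: typical use of the discovery helpers defined above.
# The model name "BERT_pytorch" and the cpu/eval configuration are assumptions
# made for this example; any model discovered by _list_model_paths() would do,
# and instantiating a Model may download weights or datasets on first use.
if __name__ == "__main__":
    for Model in list_models(model_match="resnet"):
        print(f"Discovered model: {Model.name}")

    Model = load_model_by_name("BERT_pytorch")
    benchmark = Model(test="eval", device="cpu")
    benchmark.eval()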
import os
import pathlib
import importlib
from dataclasses import dataclass
from typing import List, Dict, Any

E2E_MODEL_DIR = 'e2e_models'


def _list_model_paths() -> List[str]:
    p = pathlib.Path(__file__).parent.joinpath(E2E_MODEL_DIR)
    return sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir())


@dataclass
class E2EBenchmarkResult:
    device: str
    device_num: int
    test: str
    num_examples: int
    num_epochs: int
    batch_size: int
    result: Dict[str, Any]


def load_e2e_model_by_name(model):
    models = filter(lambda x: model.lower() == x.lower(), map(lambda y: os.path.basename(y), _list_model_paths()))
    models = list(models)
    if not models:
        return None
    assert len(models) == 1, f"Found more than one model {models} with the exact name: {model}"
    model_name = models[0]
    try:
        module = importlib.import_module(f'torchbenchmark.e2e_models.{model_name}', package=__name__)
    except ModuleNotFoundError as e:
        print(f"Warning: Could not find dependent module {e.name} for Model {model_name}, skip it: {e}")
        return None
    Model = getattr(module, 'Model', None)
    if Model is None:
        print(f"Warning: {module} does not define attribute Model, skip it")
        return None
    if not hasattr(Model, 'name'):
        Model.name = model_name
    return Model
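# --------------------------------------------------------------------------
# Illustrative sketch: E2EBenchmarkResult is a plain dataclass, so a run can
# be serialized with dataclasses.asdict() and fed to the same JSON/YAML
# tooling the regression detector consumes. The field values below are made up
# for the example.
import json
from dataclasses import asdict

if __name__ == "__main__":
    result = E2EBenchmarkResult(device="cuda", device_num=1, test="train",
                                num_examples=1024, num_epochs=1, batch_size=32,
                                result={"throughput": 123.4})
    print(json.dumps(asdict(result), indent=2))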
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN


class Model(GNNModel):
    task = GNN.CLASSIFICATION
    DEFAULT_TRAIN_BSIZE = 64
    DEFAULT_EVAL_BSIZE = 64

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(model_name="gat", test=test, device=device,
                         batch_size=batch_size, extra_args=extra_args)
        if device == 'cuda':
            # TODO - Add CUDA support
            raise NotImplementedError("GAT doesn't support CUDA")
import subprocess
import sys


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt',
                           '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html'])


if __name__ == '__main__':
    pip_install_requirements()
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.diffusers.model_factory import DiffuserModel


class Model(DiffuserModel):
    task = COMPUTER_VISION.GENERATION
    DEFAULT_TRAIN_BSIZE = 4
    DEFAULT_EVAL_BSIZE = 1
    # Default eval precision on CUDA device is fp16
    DEFAULT_EVAL_CUDA_PRECISION = "fp16"

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(name="timbrooks/instruct-pix2pix",
                         test=test, device=device, batch_size=batch_size, extra_args=extra_args)
from torchbenchmark.util.framework.diffusers import install_diffusers
from diffusers import StableDiffusionInstructPix2PixPipeline
import torch

MODEL_NAME = "timbrooks/instruct-pix2pix"


def load_model_checkpoint():
    StableDiffusionInstructPix2PixPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16,
                                                           safety_checker=None)


if __name__ == '__main__':
    install_diffusers()
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin


class Model(HuggingFaceModel, HuggingFaceAuthMixin):
    task = NLP.LANGUAGE_MODELING
    DEFAULT_TRAIN_BSIZE = 1
    DEFAULT_EVAL_BSIZE = 1
    DEEPCOPY = False

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        HuggingFaceAuthMixin.__init__(self)
        super().__init__(name="llama_v2_13b", test=test, device=device,
                         batch_size=batch_size, extra_args=extra_args)

    def train(self):
        raise NotImplementedError("FSDP should implement a training loop")
import subprocess
import sys
import os

from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model

if __name__ == '__main__':
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
from .. import lit_llama as lit_llama
from ..lit_llama import LIT_LLAMA_PATH
import importlib.util
import os.path
import torch  # needed for torch.no_grad() in eval()
import torch.nn as nn
import sys
from lit_llama import Tokenizer


def import_from_file_path(module_name, file_path):
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    sys.modules[module_name] = module
    return module


lit_llama_generate = import_from_file_path("lit_llama_generate", os.path.join(LIT_LLAMA_PATH, 'generate.py'))


class GenerationWrapper(nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, idx, max_new_tokens):
        return lit_llama_generate.generate(self.model, idx, max_new_tokens)


class Model(lit_llama.Model):
    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.model = GenerationWrapper(self.model)
        tokenizer = Tokenizer(os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/tokenizer.model"))
        # max_new_tokens matches lit-llama/generate.py
        self.example_inputs = (tokenizer.encode("The meaning of life is", bos=True, eos=False, device=device), 50)

    def train(self):
        raise NotImplementedError("cannot train on autoregressive generation")

    def eval(self):
        self.model.eval()
        with torch.no_grad():
            y = self.model(*self.example_inputs)
        return (y,)
from torchbenchmark.util.framework.lit_llama import install_lit_llama

if __name__ == '__main__':
    install_lit_llama()
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the GNU General Public License version 3.
from PIL import Image
import numpy as np
import cv2
import torch
import os
from ...util.model import BenchmarkModel
from torchmultimodal.transforms.clip_transform import CLIPTextTransform, CLIPImageTransform
from torchmultimodal.models.clip.model import clip_vit_b32
from torchmultimodal.modules.losses.contrastive_loss_with_temperature import (
    ContrastiveLossWithTemperature,
)
import math


class Model(BenchmarkModel):
    DEFAULT_EVAL_BSIZE = 32
    DEFAULT_TRAIN_BSIZE = 32

    def __init__(self, test, device, batch_size=1, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
        self.image_name = "pizza.jpg"
        self.image = Image.open(os.path.join(self.data_folder, self.image_name))
        self.text = ["pizza", "dog"] * 16
        self.img_transform = CLIPImageTransform(is_train=False)
        self.text_transform = CLIPTextTransform()
        self.images = [self.image for _ in range(self.batch_size)]
        self.texts = [self.text for _ in range(self.batch_size)]
        self.image_tensor = self.img_transform(self.images).to(self.device)
        self.text_tensor = self.text_transform(self.text).to(self.device)
        self.model = clip_vit_b32()
        self.model.to(self.device)

        # Create optimizer
        self.loss_fn = ContrastiveLossWithTemperature()
        self.optimizer = torch.optim.AdamW(
            list(self.model.parameters()) + list(self.loss_fn.parameters()),
            lr=5.0e-4,
            weight_decay=1.0e-4,
            eps=1.0e-6,
        )

    def get_module(self):
        return self.model, (self.image_tensor, self.text_tensor)

    def train(self):
        self.model.train()
        total_loss = 0
        self.optimizer.zero_grad()

        # Forward pass
        image_embedding, text_embedding = self.model(self.image_tensor, self.text_tensor)

        # Backward pass
        loss = self.loss_fn(image_embedding, text_embedding)
        loss.backward()
        self.optimizer.step()
        total_loss += loss.item()

        # Return the average loss
        return total_loss / len(self.text)

    def eval(self):
        self.model.eval()
        with torch.no_grad():
            image_embedding, text_embedding = self.model(self.image_tensor, self.text_tensor)
            score = image_embedding @ text_embedding.t()
            return self.text[torch.argmax(score)]
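# --------------------------------------------------------------------------
# Illustrative sketch: eval() above scores the image embedding against every
# text embedding with a matrix product and picks the best-matching caption.
# The random tensors below are stand-ins for real CLIP embeddings; only the
# scoring step is shown.
import torch
import torch.nn.functional as F

if __name__ == "__main__":
    labels = ["pizza", "dog"]
    image_embedding = torch.randn(1, 512)           # one image, 512-dim embedding
    text_embedding = torch.randn(len(labels), 512)  # one embedding per candidate label
    # Cosine similarity via L2-normalized dot products.
    score = F.normalize(image_embedding, dim=-1) @ F.normalize(text_embedding, dim=-1).t()
    print("best label:", labels[int(torch.argmax(score))])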
import os
import subprocess
import sys


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])


def download_data(data_folder):
    # CC-0 image from the Wikipedia page on pizza, so legal to use
    subprocess.check_call(['wget', '-O', os.path.join(data_folder, 'pizza.jpg'),
                           'https://upload.wikimedia.org/wikipedia/commons/thumb/9/91/Pizza-3007395.jpg/2880px-Pizza-3007395.jpg'])


if __name__ == '__main__':
    pip_install_requirements()
    # Create the .data folder in the script's directory
    data_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), '.data')
    os.makedirs(data_folder, exist_ok=True)
    download_data(data_folder)
from ...util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import torch
import os
from torchbenchmark import add_path, REPO_PATH
import sys
import lightning as L

LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")

with add_path(LIT_LLAMA_PATH):
    from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup
    from lit_llama import LLaMA, Tokenizer


class Model(BenchmarkModel):
    task = NLP.LANGUAGE_MODELING
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        checkpoint_path = os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/7B/lit-llama.pth")
        if not os.path.exists(checkpoint_path):
            raise NotImplementedError("checkpoint doesn't exist")
        with lazy_load(checkpoint_path) as checkpoint:
            name = llama_model_lookup(checkpoint)

            with EmptyInitOnDevice(device=device):
                model = LLaMA.from_name(name)

            model.load_state_dict(checkpoint)
        self.model = model
        self.seq_len = 32
        self.max_seq_len = 64
        self.example_inputs = (
            torch.ones([self.batch_size, self.seq_len], dtype=torch.int32, device=self.device),
            self.max_seq_len,
            torch.arange(self.seq_len, dtype=torch.int64, device=self.device)  # positions
        )

    def get_module(self):
        return self.model, self.example_inputs

    def train(self):
        raise NotImplementedError("you will OOM trying to train directly")

    def eval(self):
        self.model.eval()
        with torch.no_grad():
            logits = self.model(*self.example_inputs)
        return (logits,)
from torchbenchmark.util.framework.lit_llama import install_lit_llama

if __name__ == '__main__':
    install_lit_llama()
import dataclasses from typing import List def cfg_to_str(cfg: dataclasses.dataclass) -> List[str]: def rewrite_option(opt: str) -> str: new_opt = opt.replace("_", "-") return f"--{new_opt}" out = [] for fld in dataclasses.fields(cfg): new_option = rewrite_option(fld.name) val = getattr(cfg, fld.name) if isinstance(val, bool): if val: out.append(new_option) else: out.append(new_option) out.append(str(getattr(cfg, fld.name))) return out # dummy config location: # https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/run_dlrm_ootb_train.sh#L54 # config: A.1dev-embed32-fp32 @dataclasses.dataclass class FAMBenchTrainConfig: mini_batch_size: int = 1024 test_mini_batch_size: int = 1024 test_num_workers: int = 0 data_generation: str = "random" arch_mlp_bot:str = "2000-1500-1500-1500-192" arch_mlp_top:str = "4000-4000-4000-4000-4000-4000-4000-4000-4000-1" arch_sparse_feature_size:int = 192 arch_embedding_size:str = "965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965" num_indices_per_lookup:int = 55 num_indices_per_lookup_fixed:int = 1 numpy_rand_seed:int = 727 weighted_pooling: str = "learned" # torchbench: run 2 batches only (original 15) num_batches:int = 2 # torchbench: these items in the original config are disabled # because they are handled by the framework # num_batches:int = 15 # warmup_step = 5 # use_gpu: bool = True # precache_ml_data: bool = True # dummy config location: # https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/run_dlrm_ootb_infer.sh#L54 # config: A.1dev-embed4-fp16 @dataclasses.dataclass class FAMBenchEvalConfig: mini_batch_size:int = 1024 test_mini_batch_size:int = 1024 test_num_workers:int = 0 data_generation:str = "random" arch_mlp_bot:str = "1414-1750-1750-1750-1750-1750-1750-1750-1750-96" arch_mlp_top:str = "1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1" arch_sparse_feature_size:int = 96 arch_embedding_size:str = "555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693" num_indices_per_lookup:int = 8 num_indices_per_lookup_fixed:int = 1 
numpy_rand_seed:int = 727 weighted_pooling: str = "fixed" # original number of batches: 15 num_batches:int = 15 # torchbench: these items in the original config are disabled # because they either handled by the framework # or requires extra dependencies that we don't support yet (such as fbgemm and torch2trt_for_mlp) # disable warmup # warmup_step: int = 5 # do not support quantize, torch2trt_for_mlp or fbgemm # quantize_emb_with_bit: int = 4 # use_fbgemm_gpu: bool = True # use_gpu: bool = True # inference_only: bool = True # precache_ml_data: bool = True # use_torch2trt_for_mlp: bool = True # quantize_mlp_with_bit: int = 16
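# --------------------------------------------------------------------------
# Illustrative sketch: cfg_to_str() above turns a config dataclass into the
# argv-style flag list that the FAMBench argument parser expects (underscores
# become dashes, True booleans become bare flags). The tiny config used here
# is a made-up example, not one of the real benchmark configs.
import dataclasses

if __name__ == "__main__":
    @dataclasses.dataclass
    class _TinyConfig:
        mini_batch_size: int = 8
        data_generation: str = "random"
        inference_only: bool = True

    print(cfg_to_str(_TinyConfig()))
    # -> ['--mini-batch-size', '8', '--data-generation', 'random', '--inference-only']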
import sys

from torch.optim.lr_scheduler import _LRScheduler


class LRPolicyScheduler(_LRScheduler):
    def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
        self.num_warmup_steps = num_warmup_steps
        self.decay_start_step = decay_start_step
        self.decay_end_step = decay_start_step + num_decay_steps
        self.num_decay_steps = num_decay_steps
        if self.decay_start_step < self.num_warmup_steps:
            sys.exit("Learning rate warmup must finish before the decay starts")

        super(LRPolicyScheduler, self).__init__(optimizer)

    def get_lr(self):
        step_count = self._step_count
        if step_count < self.num_warmup_steps:
            # warmup
            scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps
            lr = [base_lr * scale for base_lr in self.base_lrs]
            self.last_lr = lr
        elif self.decay_start_step <= step_count and step_count < self.decay_end_step:
            # decay
            decayed_steps = step_count - self.decay_start_step
            scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2
            min_lr = 0.0000001
            lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs]
            self.last_lr = lr
        else:
            if self.num_decay_steps > 0:
                # freeze at last, either because we're after decay
                # or because we're between warmup and decay
                lr = self.last_lr
            else:
                # do not adjust
                lr = self.base_lrs
        return lr
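# --------------------------------------------------------------------------
# Illustrative sketch: driving LRPolicyScheduler with a throwaway optimizer to
# show the warmup -> constant -> polynomial-decay shape. The parameter values
# are arbitrary and only meant for the printout.
import torch

if __name__ == "__main__":
    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.SGD(params, lr=0.1)
    scheduler = LRPolicyScheduler(optimizer, num_warmup_steps=5,
                                  decay_start_step=10, num_decay_steps=5)
    for step in range(15):
        optimizer.step()
        scheduler.step()
        print(step, scheduler.get_last_lr())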
""" Simplifed dlrm model from FAMBench It doesn't support multiGPU or fbgemm_gpu. """ import torch import sys import os import numpy as np import torch.nn as nn from torchbenchmark import REPO_PATH from typing import Tuple, List from torchbenchmark.util.model import BenchmarkModel from torchbenchmark.tasks import RECOMMENDATION # Import FAMBench model path class add_path(): def __init__(self, path): self.path = path def __enter__(self): sys.path.insert(0, self.path) def __exit__(self, exc_type, exc_value, traceback): try: sys.path.remove(self.path) except ValueError: pass DLRM_PATH = os.path.join(REPO_PATH, "submodules", "FAMBench", "benchmarks", "dlrm", "ootb") with add_path(DLRM_PATH): import optim.rwsadagrad as RowWiseSparseAdagrad from .dlrmnet import DLRM_Net from .data import prep_data from .config import FAMBenchTrainConfig, FAMBenchEvalConfig, cfg_to_str from .args import parse_fambench_args, validate_fambench_args from .lrscheduler import LRPolicyScheduler from .utils import unpack_batch, loss_fn_wrap, dlrm_wrap, prefetch class Model(BenchmarkModel): task = RECOMMENDATION.RECOMMENDATION FAMBENCH_MODEL = True # config DEFAULT_EVAL_ARGS = FAMBenchEvalConfig() DEFAULT_TRAIN_ARGS = FAMBenchTrainConfig() DEFAULT_EVAL_BSIZE = DEFAULT_EVAL_ARGS.mini_batch_size DEFAULT_TRAIN_BSIZE = DEFAULT_TRAIN_ARGS.mini_batch_size DEEPCOPY: bool = False def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test, device, batch_size, extra_args) if test == "train": self.fambench_args = parse_fambench_args(cfg_to_str(self.DEFAULT_TRAIN_ARGS)) self.fambench_args.inference_only = False elif test == "eval": self.fambench_args = parse_fambench_args(cfg_to_str(self.DEFAULT_EVAL_ARGS)) self.fambench_args.inference_only = True if device == "cuda": self.fambench_args.use_gpu = True self.fambench_args.ndevices = 1 args = self.fambench_args validate_fambench_args(args) self.prep(args) ln_bot, ln_emb, ln_top, m_spa, train_ld, test_ld = prep_data(args) dlrm = DLRM_Net( args, m_spa, ln_emb, ln_bot, ln_top, args.arch_project_size, arch_interaction_op=args.arch_interaction_op, arch_interaction_itself=args.arch_interaction_itself, sigmoid_bot=-1, sigmoid_top=ln_top.size - 2, sync_dense_params=args.sync_dense_params, loss_threshold=args.loss_threshold, ndevices=args.ndevices, qr_flag=args.qr_flag, qr_operation=args.qr_operation, qr_collisions=args.qr_collisions, qr_threshold=args.qr_threshold, md_flag=args.md_flag, md_threshold=args.md_threshold, weighted_pooling=args.weighted_pooling, loss_function=args.loss_function, learning_rate=args.learning_rate, use_gpu=args.use_gpu, use_fbgemm_gpu=args.use_fbgemm_gpu, fbgemm_gpu_codegen_pref=args.fbgemm_gpu_codegen_pref, inference_only=args.inference_only, quantize_mlp_with_bit=args.quantize_mlp_with_bit, quantize_emb_with_bit=args.quantize_emb_with_bit, use_torch2trt_for_mlp=args.use_torch2trt_for_mlp,) # In dlrm.quantize_embedding called below, the torch quantize calls run # on cpu tensors only. They cannot quantize tensors stored on the gpu. # So quantization occurs on cpu tensors before transferring them to gpu if # use_gpu is enabled. if args.quantize_emb_with_bit != 32: dlrm.quantize_embedding(args.quantize_emb_with_bit) if not args.inference_only: assert args.quantize_mlp_with_bit == 32, ( "Dynamic quantization for mlp requires " + "--inference-only because training is not supported" ) else: # Currently only INT8 and FP16 quantized types are supported for quantized MLP inference. 
# By default we don't do the quantization: quantize_{mlp,emb}_with_bit == 32 (FP32) assert args.quantize_mlp_with_bit in [ 8, 16, 32, ], "only support 8/16/32-bit but got {}".format(args.quantize_mlp_with_bit) if not args.use_torch2trt_for_mlp: if args.quantize_mlp_with_bit == 16 and args.use_gpu: dlrm.top_l = dlrm.top_l.half() dlrm.bot_l = dlrm.bot_l.half() elif args.quantize_mlp_with_bit in [8, 16]: assert not args.use_gpu, ( "Cannot run PyTorch's built-in dynamic quantization for mlp " + "with --use-gpu enabled, because DynamicQuantizedLinear's " + "forward function calls 'quantized::linear_dynamic', which does not " + "support the 'CUDA' backend. To convert to and run quantized mlp layers " + "on the gpu, install torch2trt and enable --use-torch2trt-for-mlp. " + "Alternatively, disable --use-gpu to use PyTorch's built-in " + "cpu quantization ops for the mlp layers. " ) if args.quantize_mlp_with_bit == 8: quantize_dtype = torch.qint8 else: quantize_dtype = torch.float16 dlrm.top_l = torch.quantization.quantize_dynamic( dlrm.top_l, {torch.nn.Linear}, quantize_dtype ) dlrm.bot_l = torch.quantization.quantize_dynamic( dlrm.bot_l, {torch.nn.Linear}, quantize_dtype ) # Prep work for embedding tables and model transfer: # Handling single-cpu and single-gpu modes # NOTE: This also handles dist-backend modes (CLI args --dist-backend=nccl, # --dist-backend=ccl, and --dist-backend=mpi) because in these modes each # process runs in single-gpu mode. For example, if 8 processes are launched # running dlrm_s_pytorch.py with --dist-backend=nccl --use-gpu, each process # will run in single-gpu mode, resulting in 8 gpus total running distributed # training or distributed inference if --inference-only is enabled. if dlrm.ndevices_available <= 1: if args.use_fbgemm_gpu: from .fbgemm_embedding import fbgemm_gpu_emb_bag_wrapper dlrm.fbgemm_emb_l = nn.ModuleList( [ fbgemm_gpu_emb_bag_wrapper( device, dlrm.emb_l if dlrm.emb_l else dlrm.emb_l_q, dlrm.m_spa, dlrm.quantize_bits, dlrm.learning_rate, dlrm.fbgemm_gpu_codegen_pref, dlrm.requires_grad, ) ] ) if args.use_gpu: dlrm = dlrm.to(device) if dlrm.weighted_pooling == "fixed": for k, w in enumerate(dlrm.v_W_l): dlrm.v_W_l[k] = w.to(device) else: # Handing Multi-gpu mode dlrm.bot_l = dlrm.bot_l.to(device) dlrm.top_l = dlrm.top_l.to(device) dlrm.prepare_parallel_model(args.ndevices) assert not args.use_torch2trt_for_mlp, "torch2trt is not supported." if not args.inference_only: # specify the optimizer algorithm opts = { "sgd": torch.optim.SGD, "rwsadagrad": RowWiseSparseAdagrad.RWSAdagrad, "adagrad": torch.optim.Adagrad, } # removed distributed code here parameters = ( dlrm.parameters() ) self.optimizer = opts[args.optimizer](parameters, lr=args.learning_rate) self.lr_scheduler = LRPolicyScheduler( self.optimizer, args.lr_num_warmup_steps, args.lr_decay_start_step, args.lr_num_decay_steps, ) self.model = dlrm.to(self.device) # torchbench: prefetch the input to device if test == "train": self.ld = prefetch(train_ld, self.device) elif test == "eval": self.ld = prefetch(test_ld, self.device) # Guarantee GPU setup has completed before training or inference starts. 
if args.use_gpu: torch.cuda.synchronize() def prep(self, args): np.random.seed(args.numpy_rand_seed) np.set_printoptions(precision=args.print_precision) torch.set_printoptions(args.print_precision) torch.manual_seed(args.numpy_rand_seed) if args.test_mini_batch_size < 0: # if the parameter is not set, use the training batch size args.test_mini_batch_size = args.mini_batch_size if args.test_num_workers < 0: # if the parameter is not set, use the same parameter for training args.test_num_workers = args.num_workers if args.use_gpu: torch.cuda.manual_seed_all(args.numpy_rand_seed) torch.backends.cudnn.deterministic = True # we only support 1 device args.ndevices = 1 def get_module(self) -> Tuple[torch.nn.Module, List[torch.Tensor]]: for inputBatch in self.ld: X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch, self.device) if self.model.quantize_mlp_input_with_half_call: X = X.half() return (self.model, (X, lS_o, lS_i)) def train(self): args = self.fambench_args for j, inputBatch in enumerate(self.ld): X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch, self.device) mbs = T.shape[0] # = args.mini_batch_size except maybe for last # forward pass Z = dlrm_wrap( self.model, X, lS_o, lS_i, args.use_gpu, self.device, ndevices=args.ndevices, ) # loss E = loss_fn_wrap(self.model, self.fambench_args, Z, T, args.use_gpu, self.device) # compute loss and accuracy L = E.detach().cpu().numpy() # numpy array self.optimizer.zero_grad() E.backward() self.optimizer.step() self.lr_scheduler.step() def eval(self) -> Tuple[torch.Tensor]: result = [] args = self.fambench_args for i, testBatch in enumerate(self.ld): X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch( testBatch, self.device ) # forward pass Z_test = dlrm_wrap( self.model, X_test, lS_o_test, lS_i_test, args.use_gpu, self.device, ndevices=args.ndevices, ) result = (Z_test, T_test) return result
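# Editor's sketch (not part of TorchBench): how a TorchBench harness typically drives the
# Model defined above. The package name `fambench_dlrm` and the availability of the
# FAMBench submodule are assumptions; note that the eval config builds very large random
# embedding tables, so expect a long setup and a large memory footprint.
if __name__ == "__main__":
    from torchbenchmark.models.fambench_dlrm import Model

    m = Model(test="eval", device="cpu")      # or device="cuda" to exercise the GPU path
    module, example_inputs = m.get_module()   # returns (dlrm, (X, lS_o, lS_i))
    module(*example_inputs)                   # a single forward pass
    m.eval()                                  # full eval loop over the prefetched batches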
import torch.nn as nn import torch import sys import numpy as np import itertools from torch._ops import ops from torch.nn.parameter import Parameter from torch.nn.parallel.replicate import replicate from torch.nn.parallel.parallel_apply import parallel_apply from torch.nn.parallel.scatter_gather import gather, scatter # fambench imports # projection import project # quotient-remainder trick from tricks.qr_embedding_bag import QREmbeddingBag # mixed-dimension trick from tricks.md_embedding_bag import PrEmbeddingBag class DLRM_Net(nn.Module): def create_mlp(self, ln, sigmoid_layer): # build MLP layer by layer layers = nn.ModuleList() layers.training = self.requires_grad for i in range(0, ln.size - 1): n = ln[i] m = ln[i + 1] # construct fully connected operator LL = nn.Linear(int(n), int(m), bias=True) # initialize the weights # with torch.no_grad(): # custom Xavier input, output or two-sided fill mean = 0.0 # std_dev = np.sqrt(variance) std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n) W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32) std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1)) bt = np.random.normal(mean, std_dev, size=m).astype(np.float32) # approach 1 LL.weight.data = torch.tensor(W) LL.weight.requires_grad = self.requires_grad LL.bias.data = torch.tensor(bt) LL.bias.requires_grad = self.requires_grad # approach 2 # LL.weight.data.copy_(torch.tensor(W)) # LL.bias.data.copy_(torch.tensor(bt)) # approach 3 # LL.weight = Parameter(torch.tensor(W),requires_grad=True) # LL.bias = Parameter(torch.tensor(bt),requires_grad=True) layers.append(LL) # construct sigmoid or relu operator if i == sigmoid_layer: layers.append(nn.Sigmoid()) else: layers.append(nn.ReLU()) # approach 1: use ModuleList # return layers # approach 2: use Sequential container to wrap all layers return torch.nn.Sequential(*layers) def create_emb(self, m, ln, weighted_pooling=None): # create_emb parameter description # # ln parameter: # ln is a list of all the tables' row counts. E.g. [10,5,16] would mean # table 0 has 10 rows, table 1 has 5 rows, and table 2 has 16 rows. # # m parameter (when m is a single value): # m is the length of all embedding vectors. All embedding vectors in all # embedding tables are created to be the same length. E.g. if ln were [3,2,5] # and m were 4, table 0 would be dimension 3 x 4, table 1 would be 2 x 4, # and table 2 would be 5 x 4. # # m parameter (when m is a list): # m is a list of all the tables' column counts. E.g. if m were [4,5,6] and # ln were [3,2,5], table 0 would be dimension 3 x 4, table 1 would be 2 x 5, # and table 2 would be 5 x 6. # # Key to remember: # embedding table i has shape: ln[i] rows, m columns, when m is a single value. # embedding table i has shape: ln[i] rows, m[i] columns, when m is a list. emb_l = nn.ModuleList() v_W_l = [] for i in range(0, ln.size): # torchbench: commment distributed # if ext_dist.my_size > 1: # if i not in self.local_emb_indices: # continue n = ln[i] # construct embedding operator if self.qr_flag and n > self.qr_threshold: EE = QREmbeddingBag( n, m, self.qr_collisions, operation=self.qr_operation, mode="sum", sparse=True, ) elif self.md_flag and n > self.md_threshold: base = max(m) _m = m[i] if n > self.md_threshold else base EE = PrEmbeddingBag(n, _m, base) # use np initialization as below for consistency... 
W = np.random.uniform( low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m) ).astype(np.float32) EE.embs.weight.data = torch.tensor(W, requires_grad=self.requires_grad) else: EE = nn.EmbeddingBag(n, m, mode="sum", sparse=True) # initialize embeddings # nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n)) W = np.random.uniform( low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m) ).astype(np.float32) # approach 1 EE.weight.data = torch.tensor(W, requires_grad=self.requires_grad) # approach 2 # EE.weight.data.copy_(torch.tensor(W)) # approach 3 # EE.weight = Parameter(torch.tensor(W),requires_grad=True) if weighted_pooling is None: v_W_l.append(None) else: v_W_l.append(torch.ones(n, dtype=torch.float32)) emb_l.append(EE) return emb_l, v_W_l def __init__( self, args, m_spa=None, ln_emb=None, ln_bot=None, ln_top=None, proj_size=0, arch_interaction_op=None, arch_interaction_itself=False, sigmoid_bot=-1, sigmoid_top=-1, sync_dense_params=True, loss_threshold=0.0, ndevices=-1, qr_flag=False, qr_operation="mult", qr_collisions=0, qr_threshold=200, md_flag=False, md_threshold=200, weighted_pooling=None, loss_function="bce", learning_rate=0.1, use_gpu=False, use_fbgemm_gpu=False, fbgemm_gpu_codegen_pref="Split", inference_only=False, quantize_mlp_with_bit=False, quantize_emb_with_bit=False, use_torch2trt_for_mlp=False, ): super(DLRM_Net, self).__init__() if ( (m_spa is not None) and (ln_emb is not None) and (ln_bot is not None) and (ln_top is not None) and (arch_interaction_op is not None) ): # save arguments self.ntables = len(ln_emb) self.m_spa = m_spa self.proj_size = proj_size self.use_gpu = use_gpu self.use_fbgemm_gpu = use_fbgemm_gpu self.fbgemm_gpu_codegen_pref = fbgemm_gpu_codegen_pref self.requires_grad = not inference_only self.ndevices_available = ndevices self.ndevices_in_use = ndevices self.output_d = 0 self.add_new_weights_to_params = False self.arch_interaction_op = arch_interaction_op self.arch_interaction_itself = arch_interaction_itself self.sync_dense_params = sync_dense_params and not inference_only self.loss_threshold = loss_threshold self.loss_function = loss_function self.learning_rate = learning_rate if weighted_pooling is not None and weighted_pooling != "fixed": self.weighted_pooling = "learned" else: self.weighted_pooling = weighted_pooling # create variables for QR embedding if applicable self.qr_flag = qr_flag if self.qr_flag: self.qr_collisions = qr_collisions self.qr_operation = qr_operation self.qr_threshold = qr_threshold # create variables for MD embedding if applicable self.md_flag = md_flag if self.md_flag: self.md_threshold = md_threshold # torchbench: comment distributed # If running distributed, get local slice of embedding tables # if ext_dist.my_size > 1: # n_emb = len(ln_emb) # if n_emb < ext_dist.my_size: # sys.exit( # "only (%d) sparse features for (%d) devices, table partitions will fail" # % (n_emb, ext_dist.my_size) # ) # self.n_global_emb = n_emb # self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths( # n_emb # ) # self.local_emb_slice = ext_dist.get_my_slice(n_emb) # self.local_emb_indices = list(range(n_emb))[self.local_emb_slice] # create operators self.emb_l, self.v_W_l = self.create_emb(m_spa, ln_emb, weighted_pooling) if self.weighted_pooling == "learned": self.v_W_l = nn.ParameterList(list(map(Parameter, self.v_W_l))) self.bot_l = self.create_mlp(ln_bot, sigmoid_bot) self.top_l = self.create_mlp(ln_top, sigmoid_top) if proj_size > 0: self.proj_l = project.create_proj(len(ln_emb) + 1, proj_size) # mlp quantization 
self.quantize_mlp_with_bit = quantize_mlp_with_bit self.use_torch2trt_for_mlp = use_torch2trt_for_mlp self.quantize_mlp_input_with_half_call = use_gpu and not args.use_torch2trt_for_mlp and args.quantize_mlp_with_bit == 16 # embedding quantization self.quantize_emb = False self.emb_l_q = [] self.quantize_bits = 32 # fbgemm_gpu self.fbgemm_emb_l = [] self.v_W_l_l = [self.v_W_l] if self.weighted_pooling else [None] self.interact_features_l = [] # specify the loss function if self.loss_function == "mse": self.loss_fn = torch.nn.MSELoss(reduction="mean") elif self.loss_function == "bce": self.loss_fn = torch.nn.BCELoss(reduction="mean") elif self.loss_function == "wbce": self.loss_ws = torch.tensor( np.fromstring(args.loss_weights, dtype=float, sep="-") ) self.loss_fn = torch.nn.BCELoss(reduction="none") else: sys.exit( "ERROR: --loss-function=" + self.loss_function + " is not supported" ) def prepare_parallel_model(self, ndevices): device_ids = range(ndevices) # replicate mlp (data parallelism) self.bot_l_replicas = replicate(self.bot_l, device_ids) self.top_l_replicas = replicate(self.top_l, device_ids) # distribute embeddings (model parallelism) if self.weighted_pooling is not None: for k, w in enumerate(self.v_W_l): self.v_W_l[k] = Parameter( w.to(torch.device("cuda:" + str(k % ndevices))) ) if not self.use_fbgemm_gpu: for k, w in enumerate(self.emb_l): self.emb_l[k] = w.to(torch.device("cuda:" + str(k % ndevices))) else: from .fbgemm_embedding import fbgemm_gpu_emb_bag_wrapper self.fbgemm_emb_l, self.v_W_l_l = zip( *[ ( fbgemm_gpu_emb_bag_wrapper( torch.device("cuda:" + str(k)), self.emb_l[k::ndevices] if self.emb_l else self.emb_l_q[k::ndevices], self.m_spa[k::ndevices] if isinstance(self.m_spa, list) else self.m_spa, self.quantize_bits, self.learning_rate, self.fbgemm_gpu_codegen_pref, self.requires_grad, ), self.v_W_l[k::ndevices] if self.weighted_pooling else None, ) for k in range(ndevices) ] ) self.add_new_weights_to_params = True self.interact_features_l = [self.nn_module_wrapper() for _ in range(ndevices)] # nn_module_wrapper is used to call functions concurrently across multi-gpus, using parallel_apply, # which requires an nn.Module subclass. class nn_module_wrapper(nn.Module): def __init__(self): super(DLRM_Net.nn_module_wrapper, self).__init__() def forward(self, E, x, ly): return E(x, ly) def apply_mlp(self, x, layers): # approach 1: use ModuleList # for layer in layers: # x = layer(x) # return x # approach 2: use Sequential container to wrap all layers return layers(x) def apply_emb(self, lS_o, lS_i): # WARNING: notice that we are processing the batch at once. We implicitly # assume that the data is laid out such that: # 1. each embedding is indexed with a group of sparse indices, # corresponding to a single lookup # 2. for each embedding the lookups are further organized into a batch # 3. for a list of embedding tables there is a list of batched lookups if self.use_fbgemm_gpu: # Deinterleave and reshape to 2d, so items are grouped by device # per row. Then parallel apply. ndevices = len(self.fbgemm_emb_l) lS_o_l = [lS_o[k::ndevices] for k in range(ndevices)] lS_i_l = [lS_i[k::ndevices] for k in range(ndevices)] ly = parallel_apply( self.fbgemm_emb_l, list(zip(lS_o_l, lS_i_l, self.v_W_l_l)) ) # Interleave and flatten to match non-fbgemm_gpu ly format. 
ly = [ly[i % ndevices][i // ndevices] for i in range(self.ntables)] else: ly = [] for k, sparse_index_group_batch in enumerate(lS_i): sparse_offset_group_batch = lS_o[k] # embedding lookup # We are using EmbeddingBag, which implicitly uses sum operator. # The embeddings are represented as tall matrices, with sum # happening vertically across 0 axis, resulting in a row vector # E = emb_l[k] if self.v_W_l[k] is not None: per_sample_weights = self.v_W_l[k].gather( 0, sparse_index_group_batch ) else: per_sample_weights = None if self.quantize_emb: if self.quantize_bits == 4: E = ops.quantized.embedding_bag_4bit_rowwise_offsets elif self.quantize_bits == 8: E = ops.quantized.embedding_bag_byte_rowwise_offsets QV = E( self.emb_l_q[k], sparse_index_group_batch, sparse_offset_group_batch, per_sample_weights=per_sample_weights, ) ly.append(QV) else: E = self.emb_l[k] V = E( sparse_index_group_batch, sparse_offset_group_batch, per_sample_weights=per_sample_weights, ) ly.append(V) # print(ly) return ly # using quantizing functions from caffe2/aten/src/ATen/native/quantized/cpu def quantize_embedding(self, bits): n = len(self.emb_l) self.emb_l_q = [None] * n for k in range(n): if bits == 4: self.emb_l_q[k] = ops.quantized.embedding_bag_4bit_prepack( self.emb_l[k].weight ) elif bits == 8: self.emb_l_q[k] = ops.quantized.embedding_bag_byte_prepack( self.emb_l[k].weight ) elif bits == 16: self.emb_l_q[k] = self.emb_l[k].half().weight else: return self.emb_l = None self.quantize_emb = True self.quantize_bits = bits def interact_features(self, x, ly): if self.arch_interaction_op == "dot": # concatenate dense and sparse features (batch_size, d) = x.shape T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d)) # perform a dot product if self.proj_size > 0: R = project.project(T, x, self.proj_l) else: Z = torch.bmm(T, torch.transpose(T, 1, 2)) # append dense feature with the interactions (into a row vector) # approach 1: all # Zflat = Z.view((batch_size, -1)) # approach 2: unique _, ni, nj = Z.shape # approach 1: tril_indices # offset = 0 if self.arch_interaction_itself else -1 # li, lj = torch.tril_indices(ni, nj, offset=offset) # approach 2: custom offset = 1 if self.arch_interaction_itself else 0 li = torch.tensor([i for i in range(ni) for j in range(i + offset)]) lj = torch.tensor([j for i in range(nj) for j in range(i + offset)]) Zflat = Z[:, li, lj] # concatenate dense features and interactions R = torch.cat([x] + [Zflat], dim=1) elif self.arch_interaction_op == "cat": # concatenation features (into a row vector) R = torch.cat([x] + ly, dim=1) else: sys.exit( "ERROR: --arch-interaction-op=" + self.arch_interaction_op + " is not supported" ) return R def forward(self, dense_x, lS_o, lS_i): # torchbench: only enable sequential forward return self.sequential_forward(dense_x, lS_o, lS_i) # if ext_dist.my_size > 1: # # multi-node multi-device run # return self.distributed_forward(dense_x, lS_o, lS_i) # elif self.ndevices_available <= 1: # # single device run # return self.sequential_forward(dense_x, lS_o, lS_i) # else: # # single-node multi-device run # return self.parallel_forward(dense_x, lS_o, lS_i) # torchbench: disable distributed forward # def distributed_forward(self, dense_x, lS_o, lS_i): # batch_size = dense_x.size()[0] # # WARNING: # of ranks must be <= batch size in distributed_forward call # if batch_size < ext_dist.my_size: # sys.exit( # "ERROR: batch_size (%d) must be larger than number of ranks (%d)" # % (batch_size, ext_dist.my_size) # ) # if batch_size % ext_dist.my_size != 0: # sys.exit( # 
"ERROR: batch_size %d can not split across %d ranks evenly" # % (batch_size, ext_dist.my_size) # ) # dense_x = dense_x[ext_dist.get_my_slice(batch_size)] # lS_o = lS_o[self.local_emb_slice] # lS_i = lS_i[self.local_emb_slice] # if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)): # sys.exit( # "ERROR: corrupted model input detected in distributed_forward call" # ) # # embeddings # with record_function("DLRM embedding forward"): # ly = self.apply_emb(lS_o, lS_i) # # WARNING: Note that at this point we have the result of the embedding lookup # # for the entire batch on each rank. We would like to obtain partial results # # corresponding to all embedding lookups, but part of the batch on each rank. # # Therefore, matching the distribution of output of bottom mlp, so that both # # could be used for subsequent interactions on each device. # if self.ntables != len(ly): # sys.exit("ERROR: corrupted intermediate result in distributed_forward call") # a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank) # with record_function("DLRM bottom mlp forward"): # x = self.apply_mlp(dense_x, self.bot_l) # ly = a2a_req.wait() # ly = list(ly) # # interactions # with record_function("DLRM interaction forward"): # z = self.interact_features(x, ly) # # top mlp # with record_function("DLRM top mlp forward"): # # quantize top mlp's input to fp16 if PyTorch's built-in fp16 quantization is used. # if self.quantize_mlp_input_with_half_call: # z = z.half() # p = self.apply_mlp(z, self.top_l) # # clamp output if needed # if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: # z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold)) # else: # z = p # return z def sequential_forward(self, dense_x, lS_o, lS_i): # process dense features (using bottom mlp), resulting in a row vector x = self.apply_mlp(dense_x, self.bot_l) # debug prints # print("intermediate") # print(x.detach().cpu().numpy()) # process sparse features(using embeddings), resulting in a list of row vectors ly = self.apply_emb(lS_o, lS_i) # for y in ly: # print(y.detach().cpu().numpy()) # interact features (dense and sparse) z = self.interact_features(x, ly) # print(z.detach().cpu().numpy()) # quantize top mlp's input to fp16 if PyTorch's built-in fp16 quantization is used. if self.quantize_mlp_input_with_half_call: z = z.half() # obtain probability of a click (using top mlp) p = self.apply_mlp(z, self.top_l) # clamp output if needed if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold)) else: z = p return z def parallel_forward(self, dense_x, lS_o, lS_i): ### prepare model (overwrite) ### # WARNING: # of devices must be >= batch size in parallel_forward call batch_size = dense_x.size()[0] ndevices = min(self.ndevices_available, batch_size, self.ntables) device_ids = range(ndevices) # WARNING: must redistribute the model if mini-batch size changes(this is common # for last mini-batch, when # of elements in the dataset/batch size is not even if self.ndevices_in_use != ndevices: self.ndevices_in_use = ndevices self.prepare_parallel_model(ndevices) elif self.sync_dense_params: # When training, replicate the new/updated mlp weights each iteration. # For inference-only, this code should never run. 
self.bot_l_replicas = replicate(self.bot_l, device_ids) self.top_l_replicas = replicate(self.top_l, device_ids) ### prepare input (overwrite) ### # scatter dense features (data parallelism) # print(dense_x.device) dense_x = scatter(dense_x, device_ids, dim=0) # distribute sparse features (model parallelism) if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)): sys.exit("ERROR: corrupted model input detected in parallel_forward call") lS_o = [ lS_o[k].to(torch.device("cuda:" + str(k % ndevices))) for k in range(self.ntables) ] lS_i = [ lS_i[k].to(torch.device("cuda:" + str(k % ndevices))) for k in range(self.ntables) ] ### compute results in parallel ### # bottom mlp # WARNING: Note that the self.bot_l is a list of bottom mlp modules # that have been replicated across devices, while dense_x is a tuple of dense # inputs that has been scattered across devices on the first (batch) dimension. # The output is a list of tensors scattered across devices according to the # distribution of dense_x. x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids) # debug prints # print(x) # embeddings ly = self.apply_emb(lS_o, lS_i) # debug prints # print(ly) # butterfly shuffle (implemented inefficiently for now) # WARNING: Note that at this point we have the result of the embedding lookup # for the entire batch on each device. We would like to obtain partial results # corresponding to all embedding lookups, but part of the batch on each device. # Therefore, matching the distribution of output of bottom mlp, so that both # could be used for subsequent interactions on each device. if self.ntables != len(ly): sys.exit("ERROR: corrupted intermediate result in parallel_forward call") t_list = [scatter(ly[k], device_ids, dim=0) for k in range(self.ntables)] # adjust the list to be ordered per device ly = list(map(lambda y: list(y), zip(*t_list))) # debug prints # print(ly) # interactions z = parallel_apply(self.interact_features_l, list(zip(itertools.repeat(self.interact_features),x,ly))) # debug prints # print(z) if self.quantize_mlp_input_with_half_call: z = [tens.half() for tens in z] # top mlp # WARNING: Note that the self.top_l is a list of top mlp modules that # have been replicated across devices, while z is a list of interaction results # that by construction are scattered across devices on the first (batch) dim. # The output is a list of tensors scattered across devices according to the # distribution of z. 
p = parallel_apply(self.top_l_replicas, z, None, device_ids) ### gather the distributed results ### p0 = gather(p, self.output_d, dim=0) # clamp output if needed if 0.0 < self.loss_threshold and self.loss_threshold < 1.0: z0 = torch.clamp( p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold) ) else: z0 = p0 return z0 def print_weights(self): if self.use_fbgemm_gpu and len(self.fbgemm_emb_l): ntables_l = [ len(e.fbgemm_gpu_emb_bag.embedding_specs) for e in self.fbgemm_emb_l ] for j in range(ntables_l[0] + 1): for k, e in enumerate(self.fbgemm_emb_l): if j < ntables_l[k]: print( e.fbgemm_gpu_emb_bag.split_embedding_weights()[j] .detach() .cpu() .numpy() ) elif self.quantize_bits != 32: for e in self.emb_l_q: print(e.data.detach().cpu().numpy()) else: # if self.emb_l: for param in self.emb_l.parameters(): print(param.detach().cpu().numpy()) if isinstance(self.v_W_l, nn.ParameterList): for param in self.v_W_l.parameters(): print(param.detach().cpu().numpy()) for param in self.bot_l.parameters(): print(param.detach().cpu().numpy()) for param in self.top_l.parameters(): print(param.detach().cpu().numpy())
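# Editor's sketch (not part of FAMBench/TorchBench): the "dot" feature interaction used by
# DLRM_Net.interact_features above, reproduced standalone in plain PyTorch so the tensor
# shapes are easy to follow. No FAMBench imports are required.
import torch


def dot_interaction(x: torch.Tensor, ly: list, interact_itself: bool = False) -> torch.Tensor:
    # x: bottom-MLP output, shape (batch, d); ly: list of embedding outputs, each (batch, d)
    batch_size, d = x.shape
    T = torch.cat([x] + ly, dim=1).view(batch_size, -1, d)  # (batch, num_fea, d)
    Z = torch.bmm(T, T.transpose(1, 2))                     # pairwise dot products
    _, ni, nj = Z.shape
    offset = 1 if interact_itself else 0
    li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
    lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
    return torch.cat([x, Z[:, li, lj]], dim=1)              # (batch, d + num_unique_pairs)


if __name__ == "__main__":
    x = torch.randn(4, 8)                        # dense features
    ly = [torch.randn(4, 8) for _ in range(3)]   # three embedding lookups
    print(dot_interaction(x, ly).shape)          # torch.Size([4, 14]) = 8 + C(4, 2) pairs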
import torch


# The following function is a wrapper to avoid checking this multiple times in the
# loop below.
def unpack_batch(b, device):
    # Experiment with unweighted samples
    return b[0], b[1], b[2], b[3], torch.ones(b[3].size()).to(device), None


def dlrm_wrap(dlrm, X, lS_o, lS_i, use_gpu, device, ndevices=1):
    if dlrm.quantize_mlp_input_with_half_call:
        X = X.half()
    if use_gpu:
        # lS_i can be either a list of tensors or a stacked tensor.
        # Handle each case below:
        if ndevices == 1:
            lS_i = (
                [S_i.to(device) for S_i in lS_i]
                if isinstance(lS_i, list)
                else lS_i.to(device)
            )
            lS_o = (
                [S_o.to(device) for S_o in lS_o]
                if isinstance(lS_o, list)
                else lS_o.to(device)
            )
    return dlrm(X.to(device), lS_o, lS_i)


def loss_fn_wrap(dlrm, args, Z, T, use_gpu, device):
    if args.loss_function == "mse" or args.loss_function == "bce":
        return dlrm.loss_fn(Z, T.to(device))
    elif args.loss_function == "wbce":
        loss_ws_ = dlrm.loss_ws[T.data.view(-1).long()].view_as(T).to(device)
        loss_fn_ = dlrm.loss_fn(Z, T.to(device))
        loss_sc_ = loss_ws_ * loss_fn_
        return loss_sc_.mean()


def prefetch(dl, device):
    out = []
    for inputBatch in dl:
        X, lS_o, lS_i, T = inputBatch
        lS_i = (
            [S_i.to(device) for S_i in lS_i]
            if isinstance(lS_i, list)
            else lS_i.to(device)
        )
        lS_o = (
            [S_o.to(device) for S_o in lS_o]
            if isinstance(lS_o, list)
            else lS_o.to(device)
        )
        out.append(tuple([X.to(device), lS_o, lS_i, T]))
    return out
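# Editor's sketch (not part of FAMBench/TorchBench): the batch layout that prefetch() and
# unpack_batch() above expect -- dense features X, per-table offsets lS_o, per-table
# indices lS_i, and targets T. It assumes the helpers above are importable as a module
# named `utils`.
import torch
from utils import prefetch, unpack_batch

if __name__ == "__main__":
    device = torch.device("cpu")
    X = torch.randn(2, 4)                                   # 2 samples, 4 dense features
    lS_o = [torch.tensor([0, 1]), torch.tensor([0, 2])]     # offsets for 2 embedding tables
    lS_i = [torch.tensor([3, 7]), torch.tensor([1, 1, 5])]  # indices for 2 embedding tables
    T = torch.rand(2, 1)                                    # click targets
    batch = prefetch([(X, lS_o, lS_i, T)], device)[0]
    Xd, offs, idx, Td, W, _ = unpack_batch(batch, device)
    print(Xd.shape, W.shape)  # per-sample weights default to all ones, one per target row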
# Currently, this file is not used, because torchbench doesn't support fbgemm embeddding yet; # Note that FAMBench does support it. import torch.nn as nn import torch import os import sys import numpy as np from torchbenchmark import REPO_PATH # This file assumes fbgemm_gpu is installed import fbgemm_gpu from fbgemm_gpu import split_table_batched_embeddings_ops from fbgemm_gpu.split_table_batched_embeddings_ops import ( CacheAlgorithm, PoolingMode, OptimType, SparseType, SplitTableBatchedEmbeddingBagsCodegen, IntNBitTableBatchedEmbeddingBagsCodegen, ) # mixed-dimension trick from tricks.md_embedding_bag import PrEmbeddingBag # quantize_fbgemm_gpu_embedding_bag is partially lifted from # fbgemm_gpu/test/split_embedding_inference_converter.py, def _quantize_split_embs. # Converts SplitTableBatchedEmbeddingBagsCodegen to IntNBitTableBatchedEmbeddingBagsCodegen def quantize_fbgemm_gpu_embedding_bag(model, quantize_type, device): embedding_specs = [] if device.type == "cpu": emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST else: emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE for (E, D, _, _) in model.embedding_specs: weights_ty = quantize_type if D % weights_ty.align_size() != 0: assert D % 4 == 0 weights_ty = ( SparseType.FP16 ) # fall back to FP16 if dimension couldn't be aligned with the required size embedding_specs.append(("", E, D, weights_ty, emb_location)) q_model = ( split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen( embedding_specs=embedding_specs, pooling_mode=model.pooling_mode, device=device, ) ) q_model.initialize_weights() for t, (_, _, _, weight_ty, _) in enumerate(embedding_specs): if weight_ty == SparseType.FP16: original_weight = model.split_embedding_weights()[t] q_weight = original_weight.half() weights = torch.tensor(q_weight.cpu().numpy().view(np.uint8)) q_model.split_embedding_weights()[t][0].data.copy_(weights) elif weight_ty == SparseType.INT8: original_weight = model.split_embedding_weights()[t] q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized( original_weight ) weights = q_weight[:, :-8] scale_shift = torch.tensor( q_weight[:, -8:] .contiguous() .cpu() .numpy() .view(np.float32) .astype(np.float16) .view(np.uint8) ) q_model.split_embedding_weights()[t][0].data.copy_(weights) q_model.split_embedding_weights()[t][1].data.copy_(scale_shift) elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2: original_weight = model.split_embedding_weights()[t] q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf( original_weight, bit_rate=quantize_type.bit_rate(), ) weights = q_weight[:, :-4] scale_shift = torch.tensor( q_weight[:, -4:].contiguous().cpu().numpy().view(np.uint8) ) q_model.split_embedding_weights()[t][0].data.copy_(weights) q_model.split_embedding_weights()[t][1].data.copy_(scale_shift) return q_model def create_fbgemm_gpu_emb_bag( device, emb_l, m_spa, quantize_bits, learning_rate, codegen_preference=None, requires_grad=True, ): if isinstance(emb_l[0], PrEmbeddingBag): emb_l = [e.embs for e in emb_l] if isinstance(emb_l[0], nn.EmbeddingBag): emb_l = [e.weight for e in emb_l] Es = [e.shape[0] for e in emb_l] if isinstance(m_spa, list): Ds = m_spa else: Ds = [m_spa for _ in emb_l] if device.type == "cpu": emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU else: emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE compute_device = 
split_table_batched_embeddings_ops.ComputeDevice.CUDA pooling_mode = PoolingMode.SUM cache_algorithm = CacheAlgorithm.LRU sparse_type_dict = { 4: SparseType.INT4, 8: SparseType.INT8, 16: SparseType.FP16, 32: SparseType.FP32, } codegen_type_dict = { 4: "IntN", 8: "Split" if codegen_preference != "IntN" else "IntN", 16: "Split" if codegen_preference != "IntN" else "IntN", 32: "Split", } codegen_type = codegen_type_dict[quantize_bits] quantize_type = sparse_type_dict[quantize_bits] if codegen_type == "IntN": # Create non-quantized model and then call quantize_fbgemm_gpu_embedding_bag fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen( embedding_specs=[ ( E, # num of rows in the table D, # num of columns in the table split_table_batched_embeddings_ops.EmbeddingLocation.HOST, split_table_batched_embeddings_ops.ComputeDevice.CPU, ) for (E, D) in zip(Es, Ds) ], weights_precision=SparseType.FP32, optimizer=OptimType.EXACT_SGD, learning_rate=learning_rate, cache_algorithm=cache_algorithm, pooling_mode=pooling_mode, ).to(device) if quantize_type == quantize_type.FP16: weights = fbgemm_gpu_emb_bag.split_embedding_weights() for i, emb in enumerate(weights): emb.data.copy_(emb_l[i]) elif quantize_type == quantize_type.INT8: # copy quantized values upsampled/recasted to FP32 for i in range(len(Es)): fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_( torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(emb_l[i]) ) elif quantize_type == quantize_type.INT4: # copy quantized values upsampled/recasted to FP32 for i in range(len(Es)): fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_( torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat( emb_l[i], bit_rate=quantize_type.bit_rate(), ) ) fbgemm_gpu_emb_bag = quantize_fbgemm_gpu_embedding_bag( fbgemm_gpu_emb_bag, quantize_type, device ) else: fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen( embedding_specs=[ ( E, # num of rows in the table D, # num of columns in the table emb_location, compute_device, ) for (E, D) in zip(Es, Ds) ], weights_precision=quantize_type, optimizer=OptimType.EXACT_SGD, learning_rate=learning_rate, cache_algorithm=cache_algorithm, pooling_mode=pooling_mode, ).to(device) weights = fbgemm_gpu_emb_bag.split_embedding_weights() for i, emb in enumerate(weights): emb.data.copy_(emb_l[i]) if not requires_grad: torch.no_grad() torch.set_grad_enabled(False) return fbgemm_gpu_emb_bag # The purpose of this wrapper is to encapsulate the format conversions to/from fbgemm_gpu # so parallel_apply() executes the format-in -> fbgemm_gpu op -> format-out instructions # for each respective GPU in parallel. 
class fbgemm_gpu_emb_bag_wrapper(nn.Module): def __init__( self, device, emb_l, m_spa, quantize_bits, learning_rate, codegen_preference, requires_grad, ): super(fbgemm_gpu_emb_bag_wrapper, self).__init__() self.fbgemm_gpu_emb_bag = create_fbgemm_gpu_emb_bag( device, emb_l, m_spa, quantize_bits, learning_rate, codegen_preference, requires_grad, ) self.device = device self.m_spa = m_spa # create cumsum array for mixed dimension support if isinstance(m_spa, list): self.m_spa_cumsum = np.cumsum([0] + m_spa) if not requires_grad: torch.no_grad() torch.set_grad_enabled(False) def forward(self, lS_o, lS_i, v_W_l=None): # convert offsets to fbgemm format lengths_list = list(map(len, lS_i)) indices_lengths_cumsum = np.cumsum([0] + lengths_list) if isinstance(lS_o, list): lS_o = torch.stack(lS_o) lS_o = lS_o.to(self.device) lS_o += torch.from_numpy(indices_lengths_cumsum[:-1, np.newaxis]).to( self.device ) numel = torch.tensor([indices_lengths_cumsum[-1]], dtype=torch.long).to( self.device ) lS_o = torch.cat((lS_o.flatten(), numel)) # create per_sample_weights if v_W_l: per_sample_weights = torch.cat( [a.gather(0, b) for a, b in zip(v_W_l, lS_i)] ) else: per_sample_weights = None # convert indices to fbgemm_gpu format if isinstance(lS_i, torch.Tensor): lS_i = [lS_i] lS_i = torch.cat(lS_i, dim=0).to(self.device) if isinstance(self.fbgemm_gpu_emb_bag, IntNBitTableBatchedEmbeddingBagsCodegen): lS_o = lS_o.int() lS_i = lS_i.int() # gpu embedding bag op ly = self.fbgemm_gpu_emb_bag(lS_i, lS_o, per_sample_weights) # convert the results to the next layer's input format. if isinstance(self.m_spa, list): # handle mixed dimensions case. ly = [ ly[:, s:e] for (s, e) in zip(self.m_spa_cumsum[:-1], self.m_spa_cumsum[1:]) ] else: # handle case in which all tables share the same column dimension. cols = self.m_spa ntables = len(self.fbgemm_gpu_emb_bag.embedding_specs) ly = ly.reshape(-1, ntables, cols).swapaxes(0, 1) ly = list(ly) return ly
# Original source: # https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/dlrm/ootb/dlrm_s_pytorch.py import sys import torch import argparse def dash_separated_ints(value): vals = value.split("-") for val in vals: try: int(val) except ValueError: raise argparse.ArgumentTypeError( "%s is not a valid dash separated list of ints" % value ) return value def dash_separated_floats(value): vals = value.split("-") for val in vals: try: float(val) except ValueError: raise argparse.ArgumentTypeError( "%s is not a valid dash separated list of floats" % value ) return value def validate_fambench_args(args): if args.weighted_pooling is not None: if args.qr_flag: sys.exit("ERROR: quotient remainder with weighted pooling is not supported") if args.md_flag: sys.exit("ERROR: mixed dimensions with weighted pooling is not supported") if args.quantize_emb_with_bit in [4, 8]: if args.qr_flag: sys.exit( "ERROR: 4 and 8-bit quantization with quotient remainder is not supported" ) if args.md_flag: sys.exit( "ERROR: 4 and 8-bit quantization with mixed dimensions is not supported" ) if args.quantize_emb_with_bit in [4, 8, 16] and ( not args.use_fbgemm_gpu ): try: import fbgemm_gpu except ImportError: sys.exit("Failed to import fbgemm_gpu module.\n") extra_info = "" if not args.use_fbgemm_gpu: extra_info += "--use-fbgemm-gpu not set. " if not args.inference_only: sys.exit( "ERROR: Training quantized embeddings requires fbgemm_gpu. " + extra_info ) elif args.use_gpu: sys.exit( "ERROR: Quantized embeddings on GPU requires fbgemm_gpu. " + extra_info ) elif args.quantize_emb_with_bit == 16: sys.exit( "ERROR: 16-bit quantized embeddings requires fbgemm_gpu. " + extra_info ) assert args.quantize_emb_with_bit in [ 4, 8, 16, 32, ], "only support 4/8/16/32-bit but got {}".format(args.quantize_emb_with_bit) if args.use_gpu: assert torch.cuda.is_available(), "No cuda device is available." # validations by torchbench (distributed is not supported) # we don't support fbgemm_gpu assert not args.use_fbgemm_gpu, "fbgemm_gpu is not supported." # we don't support torch2trt for mlp assert not args.use_torch2trt_for_mlp, "torch2trt for mlp is not supported." # we only support random dataset for now assert args.data_generation == "random", f"only random data generator is supported right now, but get {args.data_generation}." 
def parse_fambench_args(args): ### parse arguments ### parser = argparse.ArgumentParser( description="Train Deep Learning Recommendation Model (DLRM)" ) # model related parameters parser.add_argument("--arch-sparse-feature-size", type=int, default=2) parser.add_argument( "--arch-embedding-size", type=dash_separated_ints, default="4-3-2" ) parser.add_argument("--arch-project-size", type=int, default=0) # j will be replaced with the table number parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2") parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1") parser.add_argument( "--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot" ) parser.add_argument("--arch-interaction-itself", action="store_true", default=False) parser.add_argument( "--weighted-pooling", type=str, choices=["fixed", "learned", None], default=None ) # embedding table options parser.add_argument("--md-flag", action="store_true", default=False) parser.add_argument("--md-threshold", type=int, default=200) parser.add_argument("--md-temperature", type=float, default=0.3) parser.add_argument("--md-round-dims", action="store_true", default=False) parser.add_argument("--qr-flag", action="store_true", default=False) parser.add_argument("--qr-threshold", type=int, default=200) parser.add_argument("--qr-operation", type=str, default="mult") parser.add_argument("--qr-collisions", type=int, default=4) # activations and loss parser.add_argument("--activation-function", type=str, default="relu") parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce parser.add_argument( "--loss-weights", type=dash_separated_floats, default="1.0-1.0" ) # for wbce parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7 parser.add_argument("--round-targets", type=bool, default=False) # data parser.add_argument("--data-size", type=int, default=1) parser.add_argument("--num-batches", type=int, default=0) parser.add_argument( "--data-generation", type=str, default="random" ) # synthetic or dataset parser.add_argument( "--rand-data-dist", type=str, default="uniform" ) # uniform or gaussian parser.add_argument("--rand-data-min", type=float, default=0) parser.add_argument("--rand-data-max", type=float, default=1) parser.add_argument("--rand-data-mu", type=float, default=-1) parser.add_argument("--rand-data-sigma", type=float, default=1) parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log") parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte parser.add_argument("--raw-data-file", type=str, default="") parser.add_argument("--processed-data-file", type=str, default="") parser.add_argument("--data-randomize", type=str, default="total") # or day or none parser.add_argument("--data-trace-enable-padding", type=bool, default=False) parser.add_argument("--max-ind-range", type=int, default=-1) parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1] parser.add_argument("--num-indices-per-lookup", type=int, default=10) parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False) parser.add_argument("--num-workers", type=int, default=0) parser.add_argument("--memory-map", action="store_true", default=False) # training parser.add_argument("--mini-batch-size", type=int, default=1) parser.add_argument("--nepochs", type=int, default=1) parser.add_argument("--learning-rate", type=float, default=0.01) parser.add_argument("--print-precision", type=int, default=5) 
parser.add_argument("--numpy-rand-seed", type=int, default=123) parser.add_argument("--sync-dense-params", type=bool, default=True) parser.add_argument("--optimizer", type=str, default="sgd") parser.add_argument( "--dataset-multiprocessing", action="store_true", default=False, help="The Kaggle dataset can be multiprocessed in an environment \ with more than 7 CPU cores and more than 20 GB of memory. \n \ The Terabyte dataset can be multiprocessed in an environment \ with more than 24 CPU cores and at least 1 TB of memory.", ) # inference parser.add_argument("--inference-only", action="store_true", default=False) # quantize parser.add_argument("--quantize-mlp-with-bit", type=int, default=32) parser.add_argument("--quantize-emb-with-bit", type=int, default=32) # onnx parser.add_argument("--save-onnx", action="store_true", default=False) # gpu parser.add_argument("--use-gpu", action="store_true", default=False) parser.add_argument("--use-fbgemm-gpu", action="store_true", default=False) parser.add_argument( "--fbgemm-gpu-codegen-pref", type=str, choices=["Split", "IntN"], default="Split", ) # torch2trt parser.add_argument("--use-torch2trt-for-mlp", action="store_true", default=False) # distributed parser.add_argument("--local_rank", type=int, default=-1) parser.add_argument("--dist-backend", type=str, default="") # debugging and profiling parser.add_argument("--print-freq", type=int, default=1) parser.add_argument("--test-freq", type=int, default=-1) parser.add_argument("--test-mini-batch-size", type=int, default=-1) parser.add_argument("--test-num-workers", type=int, default=-1) parser.add_argument("--print-time", action="store_true", default=False) parser.add_argument("--print-wall-time", action="store_true", default=False) parser.add_argument("--print-accumulated-time", action="store_true", default=False) parser.add_argument("--debug-mode", action="store_true", default=False) parser.add_argument("--enable-profiling", action="store_true", default=False) parser.add_argument("--plot-compute-graph", action="store_true", default=False) parser.add_argument("--tensor-board-filename", type=str, default="run_kaggle_pt") # store/load model parser.add_argument("--save-model", type=str, default="") parser.add_argument("--load-model", type=str, default="") # mlperf logging (disables other output and stops early) parser.add_argument("--mlperf-logging", action="store_true", default=False) # stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107 parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0) # stop at target AUC Terabyte (no subsampling) 0.8025 parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0) parser.add_argument("--mlperf-bin-loader", action="store_true", default=False) parser.add_argument("--mlperf-bin-shuffle", action="store_true", default=False) # mlperf gradient accumulation iterations parser.add_argument("--mlperf-grad-accum-iter", type=int, default=1) # LR policy parser.add_argument("--lr-num-warmup-steps", type=int, default=0) parser.add_argument("--lr-decay-start-step", type=int, default=0) parser.add_argument("--lr-num-decay-steps", type=int, default=0) parser.add_argument("--precache-ml-data", type=int, nargs='?', default=None, const=sys.maxsize) parser.add_argument("--warmup-steps", type=int, default=0) # FB5 Logging parser.add_argument("--fb5logger", type=str, default=None) parser.add_argument("--fb5config", type=str, default="tiny") args = parser.parse_args(args) return args
import os
import sys
import torch
import subprocess
from torchbenchmark import REPO_PATH


def update_fambench_submodule():
    "Update FAMBench submodule of the benchmark repo"
    update_command = ["git", "submodule", "update", "--init", "--recursive",
                      os.path.join("submodules", "FAMBench")]
    subprocess.check_call(update_command, cwd=REPO_PATH)


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])


if __name__ == "__main__":
    update_fambench_submodule()
    pip_install_requirements()
import torch import sys import numpy as np # data generation import dlrm_data_pytorch as dp def prep_data(args): ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-") if args.data_generation == "dataset": train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args) table_feature_map = {idx: idx for idx in range(len(train_data.counts))} nbatches = args.num_batches if args.num_batches > 0 else len(train_ld) nbatches_test = len(test_ld) ln_emb = train_data.counts # enforce maximum limit on number of vectors per embedding if args.max_ind_range > 0: ln_emb = np.array( list( map( lambda x: x if x < args.max_ind_range else args.max_ind_range, ln_emb, ) ) ) else: ln_emb = np.array(ln_emb) m_den = train_data.m_den ln_bot[0] = m_den else: # input and target at random ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-") m_den = ln_bot[0] train_data, train_ld, test_data, test_ld = dp.make_random_data_and_loader( args, ln_emb, m_den, cache_size=args.precache_ml_data ) nbatches = args.num_batches if args.num_batches > 0 else len(train_ld) nbatches_test = len(test_ld) nbatches_in_use = nbatches_test if args.inference_only else nbatches assert nbatches_in_use > args.warmup_steps, (f"Change --warmup-steps={args.warmup_steps} to be lower than {nbatches_in_use}.") args.ln_emb = ln_emb.tolist() ### parse command line arguments ### m_spa = args.arch_sparse_feature_size ln_emb = np.asarray(ln_emb) num_fea = ln_emb.size + 1 # num sparse + num dense features if args.use_fbgemm_gpu: assert m_spa % 4 == 0, ( f"{m_spa} % 4 is not 0, but fbgemm_gpu requires the embedding dim " + "(--arch-sparse-feature-size number) to be evenly divisible by 4." ) m_den_out = ln_bot[ln_bot.size - 1] if args.arch_interaction_op == "dot": # approach 1: all # num_int = num_fea * num_fea + m_den_out # approach 2: unique if args.arch_project_size > 0: num_int = num_fea * args.arch_project_size + m_den_out else: if args.arch_interaction_itself: num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out else: num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out elif args.arch_interaction_op == "cat": num_int = num_fea * m_den_out else: sys.exit( "ERROR: --arch-interaction-op=" + args.arch_interaction_op + " is not supported" ) arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-") # sanity check: feature sizes and mlp dimensions must match if m_den != ln_bot[0]: sys.exit( "ERROR: arch-dense-feature-size " + str(m_den) + " does not match first dim of bottom mlp " + str(ln_bot[0]) ) if args.qr_flag: if args.qr_operation == "concat" and 2 * m_spa != m_den_out: sys.exit( "ERROR: 2 arch-sparse-feature-size " + str(2 * m_spa) + " does not match last dim of bottom mlp " + str(m_den_out) + " (note that the last dim of bottom mlp must be 2x the embedding dim)" ) if args.qr_operation != "concat" and m_spa != m_den_out: sys.exit( "ERROR: arch-sparse-feature-size " + str(m_spa) + " does not match last dim of bottom mlp " + str(m_den_out) ) else: if m_spa != m_den_out: sys.exit( "ERROR: arch-sparse-feature-size " + str(m_spa) + " does not match last dim of bottom mlp " + str(m_den_out) ) if num_int != ln_top[0]: sys.exit( "ERROR: # of feature interactions " + str(num_int) + " does not match first dimension of top mlp " + str(ln_top[0]) ) # assign mixed dimensions if applicable if args.md_flag: m_spa = md_solver( torch.tensor(ln_emb), args.md_temperature, # alpha d0=m_spa, round_dim=args.md_round_dims, ).tolist() if args.use_fbgemm_gpu: for m in 
m_spa: assert m % 4 == 0, ( "Found an incompatible embedding dim in m_spa. " + f"{m} % 4 is not 0, but fbgemm_gpu requires the " + "embedding dim to be evenly divisible by 4." ) return ln_bot, ln_emb, ln_top, m_spa, train_ld, test_ld
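# Editor's sketch (not part of FAMBench/TorchBench): building the random data loaders via
# prep_data() above. This assumes the surrounding package layout (`args`, `config`, `data`
# modules) and that FAMBench's dlrm_data_pytorch is already importable, e.g. via the
# add_path() helper used in __init__.py above.
from args import parse_fambench_args
from config import FAMBenchTrainConfig, cfg_to_str
from data import prep_data

if __name__ == "__main__":
    ns = parse_fambench_args(cfg_to_str(FAMBenchTrainConfig()))
    ns.inference_only = False
    ln_bot, ln_emb, ln_top, m_spa, train_ld, test_ld = prep_data(ns)
    # ln_* are numpy arrays of layer/table sizes; m_spa is the embedding dimension.
    print(ln_bot, ln_top[0], m_spa, len(ln_emb), "tables")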
import torch # OSS import try: # pyre-ignore[21] # @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:dlrm_dataloader from .data.dlrm_dataloader import get_dataloader except ImportError: pass import itertools import os from pyre_extensions import none_throws from torch import distributed as dist from torchbenchmark.tasks import RECOMMENDATION from torchrec import EmbeddingBagCollection from torchrec.datasets.criteo import DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES from torchrec.distributed import TrainPipelineSparseDist from torchrec.distributed.shard import shard_modules from torchrec.models.dlrm import DLRM, DLRM_DCN, DLRM_Projection, DLRMTrain from torchrec.modules.embedding_configs import EmbeddingBagConfig from torchrec.optim.apply_optimizer_in_backward import apply_optimizer_in_backward from torchrec.optim.keyed import CombinedOptimizer, KeyedOptimizerWrapper from torchrec.optim.optimizers import in_backward_optimizer_filter from ...util.model import BenchmarkModel from .args import InteractionType, parse_args class Model(BenchmarkModel): task = RECOMMENDATION.RECOMMENDATION DEFAULT_TRAIN_BSIZE = 1024 DEFAULT_EVAL_BSIZE = 1024 CANNOT_SET_CUSTOM_OPTIMIZER = True # Deepcopy will OOM in correctness testing DEEPCOPY = False def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) args = parse_args(self.extra_args) backend = "nccl" if self.device == "cuda" else "gloo" device = torch.device(self.device) os.environ["RANK"] = "0" os.environ["WORLD_SIZE"] = "1" os.environ["MASTER_ADDR"] = "localhost" os.environ["MASTER_PORT"] = "29500" if not dist.is_initialized(): dist.init_process_group(backend=backend) # initialize example data if self.test == "train": args.batch_size = self.batch_size loader = get_dataloader(args, backend, "train") if self.test == "eval": args.test_batch_size = self.batch_size loader = get_dataloader(args, backend, "test") self.iterator = itertools.cycle(iter(loader)) self.example_inputs = next(self.iterator).to(device) # parse the args args.dense_arch_layer_sizes = [int(x) for x in args.dense_arch_layer_sizes.split(',') if x.strip().isdigit()] args.over_arch_layer_sizes = [int(x) for x in args.over_arch_layer_sizes.split(',') if x.strip().isdigit()] args.interaction_branch1_layer_sizes = [int(x) for x in args.interaction_branch1_layer_sizes.split(',') if x.strip().isdigit()] args.interaction_branch2_layer_sizes = [int(x) for x in args.interaction_branch2_layer_sizes.split(',') if x.strip().isdigit()] assert args.in_memory_binary_criteo_path == None and args.synthetic_multi_hot_criteo_path == None, \ f"Torchbench only supports random data inputs." eb_configs = [ EmbeddingBagConfig( name=f"t_{feature_name}", embedding_dim=args.embedding_dim, num_embeddings=none_throws(args.num_embeddings_per_feature)[feature_idx] if args.num_embeddings is None else args.num_embeddings, feature_names=[feature_name], ) for feature_idx, feature_name in enumerate(DEFAULT_CAT_NAMES) ] dlrm_model = DLRM_DCN( embedding_bag_collection=EmbeddingBagCollection( tables=eb_configs, device=device ), dense_in_features=len(DEFAULT_INT_NAMES), dense_arch_layer_sizes=args.dense_arch_layer_sizes, over_arch_layer_sizes=args.over_arch_layer_sizes, dcn_num_layers=args.dcn_num_layers, dcn_low_rank_dim=args.dcn_low_rank_dim, dense_device=device, ) train_model = DLRMTrain(dlrm_model) # This will apply the Adagrad optimizer in the backward pass for the embeddings (sparse_arch). 
This means that # the optimizer update will be applied in the backward pass, in this case through a fused op. # TorchRec will use the FBGEMM implementation of EXACT_ADAGRAD. For GPU devices, a fused CUDA kernel is invoked. For CPU, FBGEMM_GPU invokes CPU kernels # https://github.com/pytorch/FBGEMM/blob/2cb8b0dff3e67f9a009c4299defbd6b99cc12b8f/fbgemm_gpu/fbgemm_gpu/split_table_batched_embeddings_ops.py#L676-L678 apply_optimizer_in_backward( torch.optim.Adagrad, train_model.model.sparse_arch.parameters(), {"lr": args.learning_rate}, ) if args.shard_model: self.model = shard_modules( module=train_model, device=device ).to(device) else: self.model = train_model.to(device) dense_optimizer = KeyedOptimizerWrapper( dict(in_backward_optimizer_filter(self.model.named_parameters())), lambda params: torch.optim.Adagrad(params, lr=args.learning_rate), ) # fused optimizer will already be called opt = CombinedOptimizer([dense_optimizer]) if args.multi_hot_sizes is not None: raise RuntimeError("Multi-hot is not supported in TorchBench.") if self.test == "train": self.opt = opt self.train_pipeline = TrainPipelineSparseDist( self.model, opt, device, ) self.model.train() elif self.test == "eval": self.model.eval() def get_module(self): return self.model, (self.example_inputs, ) def train(self): self.train_pipeline.progress(self.iterator) def eval(self): with torch.no_grad(): _loss, logits = self.model(self.example_inputs) return logits
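# Editor's sketch (not part of TorchBench): driving the torchrec-based Model above in a
# single process. It assumes torchrec (and its random Criteo dataloader) is installed and
# that this file is registered as the `torchrec_dlrm` model package in the TorchBench
# tree; the exact package name is an assumption.
if __name__ == "__main__":
    from torchbenchmark.models.torchrec_dlrm import Model

    m = Model(test="eval", device="cpu")   # gloo process group, world size 1
    module, (batch,) = m.get_module()
    out = m.eval()                         # one forward pass on a cycled random batch
    print(type(module).__name__, type(out))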
import argparse from enum import Enum from typing import List class InteractionType(Enum): ORIGINAL = "original" DCN = "dcn" PROJECTION = "projection" def __str__(self): return self.value def parse_args(argv: List[str]) -> argparse.Namespace: parser = argparse.ArgumentParser(description="torchrec dlrm example trainer") parser.add_argument( "--epochs", type=int, default=1, help="number of epochs to train", ) parser.add_argument( "--batch_size", type=int, default=1024, help="batch size to use for training", ) parser.add_argument( "--drop_last_training_batch", dest="drop_last_training_batch", action="store_true", help="Drop the last non-full training batch", ) parser.add_argument( "--test_batch_size", type=int, default=None, help="batch size to use for validation and testing", ) parser.add_argument( "--limit_train_batches", type=int, default=None, help="number of train batches", ) parser.add_argument( "--limit_val_batches", type=int, default=None, help="number of validation batches", ) parser.add_argument( "--limit_test_batches", type=int, default=None, help="number of test batches", ) parser.add_argument( "--dataset_name", type=str, default="criteo_1t", help="dataset for experiment, current support criteo_1tb, criteo_kaggle", ) parser.add_argument( "--num_embeddings", type=int, default=100_000, help="max_ind_size. The number of embeddings in each embedding table. Defaults" " to 100_000 if num_embeddings_per_feature is not supplied.", ) parser.add_argument( "--num_embeddings_per_feature", type=str, default=None, help="Comma separated max_ind_size per sparse feature. The number of embeddings" " in each embedding table. 26 values are expected for the Criteo dataset.", ) parser.add_argument( "--dense_arch_layer_sizes", type=str, default="512,256,64", help="Comma separated layer sizes for dense arch.", ) parser.add_argument( "--over_arch_layer_sizes", type=str, default="512,512,256,1", help="Comma separated layer sizes for over arch.", ) parser.add_argument( "--embedding_dim", type=int, default=64, help="Size of each embedding.", ) parser.add_argument( "--interaction_branch1_layer_sizes", type=str, default="2048,2048", help="Comma separated layer sizes for interaction branch1 (only on dlrm with projection).", ) parser.add_argument( "--interaction_branch2_layer_sizes", type=str, default="2048,2048", help="Comma separated layer sizes for interaction branch2 (only on dlrm with projection).", ) parser.add_argument( "--dcn_num_layers", type=int, default=3, help="Number of DCN layers in interaction layer (only on dlrm with DCN).", ) parser.add_argument( "--dcn_low_rank_dim", type=int, default=512, help="Low rank dimension for DCN in interaction layer (only on dlrm with DCN).", ) parser.add_argument( "--undersampling_rate", type=float, help="Desired proportion of zero-labeled samples to retain (i.e. undersampling zero-labeled rows)." " Ex. 0.3 indicates only 30pct of the rows with label 0 will be kept." " All rows with label 1 will be kept. Value should be between 0 and 1." " When not supplied, no undersampling occurs.", ) parser.add_argument( "--seed", type=int, help="Random seed for reproducibility.", ) parser.add_argument( "--pin_memory", dest="pin_memory", action="store_true", help="Use pinned memory when loading data.", ) parser.add_argument( "--mmap_mode", dest="mmap_mode", action="store_true", help="--mmap_mode mmaps the dataset." " That is, the dataset is kept on disk but is accessed as if it were in memory." " --mmap_mode is intended mostly for faster debugging. 
Use --mmap_mode to bypass" " preloading the dataset when preloading takes too long or when there is " " insufficient memory available to load the full dataset.", ) parser.add_argument( "--in_memory_binary_criteo_path", type=str, default=None, help="Directory path containing the Criteo dataset npy files.", ) parser.add_argument( "--synthetic_multi_hot_criteo_path", type=str, default=None, help="Directory path containing the MLPerf v2 synthetic multi-hot dataset npz files.", ) parser.add_argument( "--learning_rate", type=float, default=15.0, help="Learning rate.", ) parser.add_argument( "--shuffle_batches", dest="shuffle_batches", action="store_true", help="Shuffle each batch during training.", ) parser.add_argument( "--shuffle_training_set", dest="shuffle_training_set", action="store_true", help="Shuffle the training set in memory. This will override mmap_mode", ) parser.add_argument( "--validation_freq_within_epoch", type=int, default=None, help="Frequency at which validation will be run within an epoch.", ) parser.set_defaults( pin_memory=None, mmap_mode=None, drop_last=None, shuffle_batches=None, shuffle_training_set=None, ) parser.add_argument( "--collect_multi_hot_freqs_stats", dest="collect_multi_hot_freqs_stats", action="store_true", help="Flag to determine whether to collect stats on freq of embedding access.", ) parser.add_argument( "--multi_hot_sizes", type=str, default=None, help="Comma separated multihot size per sparse feature. 26 values are expected for the Criteo dataset.", ) parser.add_argument( "--multi_hot_distribution_type", type=str, choices=["uniform", "pareto"], default=None, help="Multi-hot distribution options.", ) parser.add_argument("--lr_warmup_steps", type=int, default=0) parser.add_argument("--lr_decay_start", type=int, default=0) parser.add_argument("--lr_decay_steps", type=int, default=0) parser.add_argument( "--print_lr", action="store_true", help="Print learning rate every iteration.", ) parser.add_argument( "--allow_tf32", action="store_true", help="Enable TensorFloat-32 mode for matrix multiplications on A100 (or newer) GPUs.", ) parser.add_argument( "--print_sharding_plan", action="store_true", help="Print the sharding plan used for each embedding table.", ) parser.add_argument( "--shard_model", action="store_true", help="Shard the model and run it distributed.", ) return parser.parse_args(argv)
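A short sketch of exercising parse_args() above outside the benchmark harness, and of how the benchmark later splits the comma-separated layer-size strings into int lists. The import line is an assumption; adjust it to wherever args.py actually lives.

from args import parse_args  # hypothetical import path

argv = [
    "--batch_size", "256",
    "--embedding_dim", "16",
    "--dense_arch_layer_sizes", "64,32,16",
    "--over_arch_layer_sizes", "32,16,1",
]
args = parse_args(argv)

# The benchmark converts the comma-separated strings into int lists before
# constructing DLRM_DCN, e.g.:
dense_layers = [int(x) for x in args.dense_arch_layer_sizes.split(",") if x.strip().isdigit()]
print(args.batch_size, dense_layers)   # 256 [64, 32, 16]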
import subprocess import sys import os from pathlib import Path def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt']) if __name__ == '__main__': pip_install_requirements()
#!/usr/bin/env python3 # Copyright (c) Meta Platforms, Inc. and affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import argparse import os from typing import List from torch import distributed as dist from torch.utils.data import DataLoader from torchrec.datasets.criteo import ( CAT_FEATURE_COUNT, DAYS, DEFAULT_CAT_NAMES, DEFAULT_INT_NAMES, InMemoryBinaryCriteoIterDataPipe, ) from torchrec.datasets.random import RandomRecDataset # OSS import try: # pyre-ignore[21] # @manual=//ai_codesign/benchmarks/dlrm/torchrec_dlrm/data:multi_hot_criteo from data.multi_hot_criteo import MultiHotCriteoIterDataPipe except ImportError: pass # internal import try: from .multi_hot_criteo import MultiHotCriteoIterDataPipe # noqa F811 except ImportError: pass STAGES = ["train", "val", "test"] def _get_random_dataloader( args: argparse.Namespace, stage: str, ) -> DataLoader: attr = f"limit_{stage}_batches" num_batches = getattr(args, attr) if stage in ["val", "test"] and args.test_batch_size is not None: batch_size = args.test_batch_size else: batch_size = args.batch_size return DataLoader( RandomRecDataset( keys=DEFAULT_CAT_NAMES, batch_size=batch_size, hash_size=args.num_embeddings, hash_sizes=args.num_embeddings_per_feature if hasattr(args, "num_embeddings_per_feature") else None, manual_seed=getattr(args, "seed", None), ids_per_feature=1, num_dense=len(DEFAULT_INT_NAMES), num_batches=num_batches, ), batch_size=None, batch_sampler=None, pin_memory=args.pin_memory, num_workers=0, ) def _get_in_memory_dataloader( args: argparse.Namespace, stage: str, ) -> DataLoader: if args.in_memory_binary_criteo_path is not None: dir_path = args.in_memory_binary_criteo_path sparse_part = "sparse.npy" datapipe = InMemoryBinaryCriteoIterDataPipe else: dir_path = args.synthetic_multi_hot_criteo_path sparse_part = "sparse_multi_hot.npz" datapipe = MultiHotCriteoIterDataPipe if stage == "train": stage_files: List[List[str]] = [ [os.path.join(dir_path, f"day_{i}_dense.npy") for i in range(DAYS - 1)], [os.path.join(dir_path, f"day_{i}_{sparse_part}") for i in range(DAYS - 1)], [os.path.join(dir_path, f"day_{i}_labels.npy") for i in range(DAYS - 1)], ] elif stage in ["val", "test"]: stage_files: List[List[str]] = [ [os.path.join(dir_path, f"day_{DAYS-1}_dense.npy")], [os.path.join(dir_path, f"day_{DAYS-1}_{sparse_part}")], [os.path.join(dir_path, f"day_{DAYS-1}_labels.npy")], ] if stage in ["val", "test"] and args.test_batch_size is not None: batch_size = args.test_batch_size else: batch_size = args.batch_size dataloader = DataLoader( datapipe( stage, *stage_files, # pyre-ignore[6] batch_size=batch_size, rank=dist.get_rank(), world_size=dist.get_world_size(), drop_last=args.drop_last_training_batch if stage == "train" else False, shuffle_batches=args.shuffle_batches, shuffle_training_set=args.shuffle_training_set, shuffle_training_set_random_seed=args.seed, mmap_mode=args.mmap_mode, hashes=args.num_embeddings_per_feature if args.num_embeddings is None else ([args.num_embeddings] * CAT_FEATURE_COUNT), ), batch_size=None, pin_memory=args.pin_memory, collate_fn=lambda x: x, ) return dataloader def get_dataloader(args: argparse.Namespace, backend: str, stage: str) -> DataLoader: """ Gets desired dataloader from dlrm_main command line options. Currently, this function is able to return either a DataLoader wrapped around a RandomRecDataset or a Dataloader wrapped around an InMemoryBinaryCriteoIterDataPipe. 
Args: args (argparse.Namespace): Command line options supplied to dlrm_main.py's main function. backend (str): "nccl" or "gloo". stage (str): "train", "val", or "test". Returns: dataloader (DataLoader): PyTorch dataloader for the specified options. """ stage = stage.lower() if stage not in STAGES: raise ValueError(f"Supplied stage was {stage}. Must be one of {STAGES}.") args.pin_memory = ( (backend == "nccl") if not hasattr(args, "pin_memory") else args.pin_memory ) if ( args.in_memory_binary_criteo_path is None and args.synthetic_multi_hot_criteo_path is None ): return _get_random_dataloader(args, stage) else: return _get_in_memory_dataloader(args, stage)
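A sketch of driving get_dataloader() above down its RandomRecDataset path with a hand-built Namespace, so no Criteo files are needed. The attribute names mirror args.py, but this Namespace and the import path are assumptions for illustration only.

import argparse
from dlrm_dataloader import get_dataloader  # hypothetical import path

args = argparse.Namespace(
    batch_size=128,
    test_batch_size=None,
    limit_train_batches=10,
    limit_val_batches=None,
    limit_test_batches=None,
    num_embeddings=1000,
    num_embeddings_per_feature=None,
    seed=0,
    in_memory_binary_criteo_path=None,       # None -> random data path
    synthetic_multi_hot_criteo_path=None,
)
loader = get_dataloader(args, backend="gloo", stage="train")
batch = next(iter(loader))   # a torchrec Batch of random dense/sparse features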
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceGenerationModel class Model(HuggingFaceGenerationModel): def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(name="hf_GPT2_generate", test=test, device=device, batch_size=batch_size, extra_args=extra_args)
from ...util.model import BenchmarkModel from torchbenchmark.tasks import NLP import torch from ..lit_llama import LIT_LLAMA_PATH import importlib.util import os.path import torch.nn as nn import sys from lit_llama.lora import mark_only_lora_as_trainable, lora, lora_state_dict from torchbenchmark import REPO_PATH LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama") sys.path.insert(0, LIT_LLAMA_PATH) from lit_llama.utils import EmptyInitOnDevice, lazy_load, llama_model_lookup from lit_llama import LLaMA, Tokenizer class Model(BenchmarkModel): task = NLP.LANGUAGE_MODELING DEFAULT_EVAL_BSIZE = 1 DEFAULT_TRAIN_BSIZE = 4 # micro_batch_size in lora.py def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) # From finetune/lora.py hyperparameters lora_r = 8 lora_alpha = 16 lora_dropout = 0.05 checkpoint_path = os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/7B/lit-llama.pth") if not os.path.exists(checkpoint_path): raise NotImplementedError("checkpoint doesn't exist") with lazy_load(checkpoint_path) as checkpoint, lora(r=lora_r, alpha=lora_alpha, dropout=lora_dropout, enabled=True): name = llama_model_lookup(checkpoint) with EmptyInitOnDevice(device=device): model = LLaMA.from_name(name) # LoRA weights won't be in base checkpoint model.load_state_dict(checkpoint, strict=False) mark_only_lora_as_trainable(model) self.model = model self.seq_len = 32 self.max_seq_len = 64 self.example_inputs = ( torch.ones([self.batch_size, self.seq_len], dtype=torch.int32, device=self.device), self.max_seq_len, ) def get_module(self): return self.model, self.example_inputs def train(self): logits = self.model(*self.example_inputs) logits.sum().backward() # meh this sucks def eval(self): self.model.eval() with torch.no_grad(): logits = self.model(*self.example_inputs) return (logits,)
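A sketch of running one eval step of the LoRA benchmark above. It assumes the lit-llama 7B checkpoint has already been placed at checkpoints/lit-llama/7B/lit-llama.pth, and the import path is only a guess at where this module sits in the benchmark tree.

from torchbenchmark.models.llama_7b_lora import Model  # hypothetical import path

m = Model(test="eval", device="cuda")
(logits,) = m.eval()
print(logits.shape)   # logits for one batch of 32 tokens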
from torchbenchmark.util.framework.lit_llama import install_lit_llama if __name__ == '__main__': install_lit_llama()
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin

class Model(HuggingFaceModel, HuggingFaceAuthMixin):
    task = NLP.LANGUAGE_MODELING
    DEFAULT_TRAIN_BSIZE = 1
    DEFAULT_EVAL_BSIZE = 1
    DEEPCOPY = False

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        HuggingFaceAuthMixin.__init__(self)
        super().__init__(name="llama_v2_70b", test=test, device=device,
                         batch_size=batch_size, extra_args=extra_args)

    def train(self):
        # Raise (rather than return) the exception so callers get a clear error.
        raise NotImplementedError("FSDP should implement a training loop")
import subprocess import sys import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model if __name__ == '__main__': model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) cache_model(model_name)
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel

class Model(HuggingFaceModel):
    task = NLP.LANGUAGE_MODELING
    # https://huggingface.co/mosaicml/mpt-7b
    DEFAULT_TRAIN_BSIZE = 4
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(name="hf_MPT_7b_instruct", test=test, device=device,
                         batch_size=batch_size, extra_args=extra_args)

    def eval(self):
        # Return the parent's outputs so correctness checks can consume them.
        return super().eval()
import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model if __name__ == '__main__': patch_transformers() model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) cache_model(model_name, trust_remote_code=True)
from torchbenchmark.util.framework.gnn.model_factory import GNNModel from torchbenchmark.tasks import GNN class Model(GNNModel): task = GNN.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 64 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(model_name="gcn", test=test, device=device, batch_size=batch_size, extra_args=extra_args) if device == 'cuda': # TODO - Add CUDA support raise NotImplementedError("GCN doesn't support CUDA")
import subprocess import sys def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html']) if __name__ == '__main__': pip_install_requirements()
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel, HuggingFaceAuthMixin

class Model(HuggingFaceModel, HuggingFaceAuthMixin):
    task = NLP.LANGUAGE_MODELING
    DEFAULT_TRAIN_BSIZE = 1
    DEFAULT_EVAL_BSIZE = 1
    DEEPCOPY = False

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        HuggingFaceAuthMixin.__init__(self)
        super().__init__(name="llama_v2_7b", test=test, device=device,
                         batch_size=batch_size, extra_args=extra_args)

    def train(self):
        # Raise (rather than return) the exception so callers get a clear error.
        raise NotImplementedError("FSDP should implement a training loop")
import subprocess import sys import os from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model if __name__ == '__main__': model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__))) cache_model(model_name)
from torchbenchmark.util.framework.gnn.model_factory import GNNModel from torchbenchmark.tasks import GNN class Model(GNNModel): task = GNN.CLASSIFICATION DEFAULT_TRAIN_BSIZE = 64 DEFAULT_EVAL_BSIZE = 64 def __init__(self, test, device, batch_size=None, extra_args=[]): super().__init__(model_name="sage", test=test, device=device, batch_size=batch_size, extra_args=extra_args) if device == 'cuda': # TODO - Add CUDA support raise NotImplementedError("Sage doesn't support CUDA")
import subprocess import sys def pip_install_requirements(): subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html']) if __name__ == '__main__': pip_install_requirements()
import torch from typing import Optional, List from contextlib import contextmanager, ExitStack from typing import ContextManager class PostInitProcessor(type): def __call__(cls, *args, **kwargs): obj = type.__call__(cls, *args, **kwargs) obj.__post__init__() return obj @contextmanager def nested(*contexts): """ Chain and apply a list of contexts """ with ExitStack() as stack: for ctx in contexts: stack.enter_context(ctx()) yield contexts class E2EBenchmarkModel(metaclass=PostInitProcessor): """ A base class for adding models for all e2e models. """ def __init__(self, test: str, batch_size: Optional[int]=None, extra_args: List[str]=[]): self.test = test assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but get {self.test}. Please submit a bug report." self.batch_size = batch_size if not self.batch_size: self.batch_size = self.DEFAULT_TRAIN_BSIZE if test == "train" else self.DEFAULT_EVAL_BSIZE # If the model doesn't implement test or eval test # its DEFAULT_TRAIN_BSIZE or DEFAULT_EVAL_BSIZE will still be None if not self.batch_size: raise NotImplementedError(f"Test {test} is not implemented.") self.extra_args = extra_args if "--torchdynamo" in self.extra_args: self.dynamo = True from torchbenchmark.util.backends.torchdynamo import parse_torchdynamo_args self.opt_args, self.extra_args = parse_torchdynamo_args(self.extra_args) else: self.dynamo = False # Run the post processing for model acceleration def __post__init__(self): # sanity checks of the options assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but provided {self.test}." # initialize run contexts self.run_contexts = [] if self.dynamo: from torchbenchmark.util.backends.torchdynamo import apply_torchdynamo_args apply_torchdynamo_args(self, self.opt_args, precision=self.tb_args.fp16) def add_context(self, context_fn): ctx = context_fn() assert isinstance(ctx, ContextManager), f"Expected adding a ContextManager, get {type(ctx)}. Please report a bug." self.run_contexts.append(context_fn) def get_optimizer(self): raise NotImplementedError("Every E2EModel should implement a way to access the optimizer used.") def set_optimizer(self, optimizer) -> None: raise NotImplementedError("Every E2EModel should implement a way to swap out the optimizer(s).") def next_batch(self): raise NotImplementedError("Every E2EModel should implement a way to retrieve the next batch.") def run_forward(self, input): raise NotImplementedError("Every E2EModel should implement a modular forward step.") def run_backward(self, loss): raise NotImplementedError("Every E2EModel should implement a modular backward step.") def run_optimizer_step(self): raise NotImplementedError("Every E2EModel should implement a modular optimizer step.")
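A toy sketch of a subclass wiring up the modular hooks E2EBenchmarkModel expects (next_batch, run_forward, run_backward, run_optimizer_step). The linear model, the data, and the import path are illustrative assumptions, not a real benchmark.

import torch
from torchbenchmark.util.e2emodel import E2EBenchmarkModel  # hypothetical import path

class ToyE2EModel(E2EBenchmarkModel):
    DEFAULT_TRAIN_BSIZE = 8
    DEFAULT_EVAL_BSIZE = 8

    def __init__(self, test, batch_size=None, extra_args=[]):
        super().__init__(test=test, batch_size=batch_size, extra_args=extra_args)
        self.model = torch.nn.Linear(16, 2)
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=0.01)

    def get_optimizer(self):
        return self.optimizer

    def set_optimizer(self, optimizer) -> None:
        self.optimizer = optimizer

    def next_batch(self):
        return torch.randn(self.batch_size, 16)

    def run_forward(self, input):
        return self.model(input).sum()

    def run_backward(self, loss):
        loss.backward()

    def run_optimizer_step(self):
        self.optimizer.step()
        self.optimizer.zero_grad()

if __name__ == "__main__":
    # One modular train step: forward -> backward -> optimizer.
    m = ToyE2EModel(test="train")
    loss = m.run_forward(m.next_batch())
    m.run_backward(loss)
    m.run_optimizer_step()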
import argparse import enum from typing import List, Optional, Tuple from torchbenchmark.util.backends import list_backends, BACKENDS from torchbenchmark.util.env_check import is_staged_train_test TEST_STAGE = enum.Enum('TEST_STAGE', ['FORWARD', 'BACKWARD', 'OPTIMIZER', 'ALL']) AVAILABLE_PRECISIONS = ["fp32", "tf32", "fp16", "amp", "fx_int8", "bf16","amp_fp16", "amp_bf16"] QUANT_ENGINES = ["x86", "fbgemm", "qnnpack", "onednn"] def add_bool_arg(parser: argparse.ArgumentParser, name: str, default_value: bool=True): group = parser.add_mutually_exclusive_group(required=False) group.add_argument('--' + name, dest=name, action='store_true') group.add_argument('--no-' + name, dest=name, action='store_false') parser.set_defaults(**{name: default_value}) def check_precision(model: 'torchbenchmark.util.model.BenchmarkModel', precision: str) -> bool: if precision == "fp16": return model.device == 'cuda' and hasattr(model, "enable_fp16_half") if precision == "tf32": return model.device == "cuda" if precision == "amp": return True if precision == "fx_int8": return model.device == 'cpu' and hasattr(model, "enable_fx_int8") if precision == "bf16": return model.device == 'cpu' and hasattr(model, "enable_bf16") if precision == "amp_fp16": if model.test == 'eval' and model.device == 'cuda': return True if model.test == 'train' and model.device == 'cuda': return hasattr(model, 'enable_amp') or is_staged_train_test(model) if precision == "amp_bf16": return model.device == 'cpu' assert precision == "fp32", f"Expected precision to be one of {AVAILABLE_PRECISIONS}, but get {precision}" return True def check_memory_layout(model: 'torchbenchmark.util.model.BenchmakModel', channels_last: bool) -> bool: if channels_last: return hasattr(model, 'enable_channels_last') return True def check_distributed_trainer(model: 'torchbenchmark.util.model.BenchmakModel', distributed_trainer: Optional[str]) -> bool: if not model.test == "train" and distributed_trainer: return False return True def get_precision_default(model: 'torchbenchmark.util.model.BenchmarkModel') -> str: if hasattr(model, "DEFAULT_EVAL_CUDA_PRECISION") and model.test == 'eval' and model.device == 'cuda': return model.DEFAULT_EVAL_CUDA_PRECISION if hasattr(model, "DEFAULT_TRAIN_CUDA_PRECISION") and model.test == 'train' and model.device == 'cuda': return model.DEFAULT_TRAIN_CUDA_PRECISION return "fp32" def parse_decoration_args(model: 'torchbenchmark.util.model.BenchmarkModel', extra_args: List[str]) -> Tuple[argparse.Namespace, List[str]]: parser = argparse.ArgumentParser() parser.add_argument( "--distributed", choices=["ddp", "ddp_no_static_graph", "fsdp"], default=None, help="Enable distributed trainer", ) parser.add_argument( "--distributed_wrap_fn", type=str, default=None, help="Path to function that will apply distributed wrapping fn(model, dargs.distributed)", ) parser.add_argument("--precision", choices=AVAILABLE_PRECISIONS, default=get_precision_default(model), help=f"choose precisions from {AVAILABLE_PRECISIONS}") parser.add_argument("--channels-last", action='store_true', help="enable channels-last memory layout") parser.add_argument("--accuracy", action="store_true", help="Check accuracy of the model only instead of running the performance test.") parser.add_argument("--use_cosine_similarity", action='store_true', help="use cosine similarity for correctness check") parser.add_argument("--quant-engine", choices=QUANT_ENGINES, default='x86', help=f"choose quantization engine for fx_int8 precision from {QUANT_ENGINES}") dargs, opt_args = 
parser.parse_known_args(extra_args) if not check_precision(model, dargs.precision): raise NotImplementedError(f"precision value: {dargs.precision}, " "fp16 is only supported if the model implements the `enable_fp16_half()` callback function." "amp is only supported if cuda+eval, or if `enable_amp` implemented," "or if model uses staged train interfaces (forward, backward, optimizer).") if not check_memory_layout(model, dargs.channels_last): raise NotImplementedError(f"Specified channels_last: {dargs.channels_last} ," f" but the model doesn't implement the enable_channels_last() interface.") if not check_distributed_trainer(model, dargs.distributed): raise NotImplementedError(f"We only support distributed trainer {dargs.distributed} for train tests, " f"but get test: {model.test}") return (dargs, opt_args) def apply_decoration_args(model: 'torchbenchmark.util.model.BenchmarkModel', dargs: argparse.Namespace): if dargs.channels_last: model.enable_channels_last() if dargs.precision == "fp16": model.enable_fp16_half() elif dargs.precision == "tf32": import torch torch.backends.cuda.matmul.allow_tf32 = True torch.backends.cudnn.allow_tf32 = True elif dargs.precision == "amp": # model handles amp itself if it has 'enable_amp' callback function (e.g. pytorch_unet) if hasattr(model, "enable_amp"): model.enable_amp() elif dargs.precision == "fx_int8": assert model.device == "cpu" and model.test == "eval", f"fx_int8 only work for eval mode on cpu device." model.enable_fx_int8(dargs.quant_engine) elif dargs.precision == "bf16": assert model.device == "cpu", f"bf16 only work on cpu device." model.enable_bf16() elif dargs.precision == "amp_fp16": assert model.device == "cuda", f"{model.device} has no fp16 autocast." if model.test == "eval": import torch model.add_context(lambda: torch.cuda.amp.autocast(dtype=torch.float16)) elif model.test == "train": # the model must implement staged train test assert is_staged_train_test(model), f"Expected model implements staged train test (forward, backward, optimizer)." import torch model.add_context(lambda: torch.cuda.amp.autocast(dtype=torch.float16), stage=TEST_STAGE.FORWARD) elif dargs.precision == "amp_bf16": import torch model.add_context(lambda: torch.cpu.amp.autocast(dtype=torch.bfloat16)) elif not dargs.precision == "fp32": assert False, f"Get an invalid precision option: {dargs.precision}. Please report a bug." # Dispatch arguments based on model type def parse_opt_args(model: 'torchbenchmark.util.model.BenchmarkModel', opt_args: List[str]) -> argparse.Namespace: parser = argparse.ArgumentParser() parser.add_argument("--backend", choices=list_backends(), help="enable backends") parser.add_argument("--rank", help="rank of current process") parser.add_argument("--world_size", help="world size of multiprocess") args, extra_args = parser.parse_known_args(opt_args) if args.backend: backend = BACKENDS[args.backend] model._enable_backend, extra_args = backend(model, backend_args=extra_args) if args.rank: model._rank = int(args.rank) if args.world_size: model._world_size = int(args.world_size) return args, extra_args def apply_opt_args(model: 'torchbenchmark.util.model.BenchmarkModel', args: argparse.Namespace): if args.backend: model._enable_backend()
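A small, self-contained example of add_bool_arg() above: it registers a mutually exclusive --name / --no-name pair and sets the default. The module path matches the import used elsewhere in this codebase; only the standard library is otherwise required.

import argparse
from torchbenchmark.util.extra_args import add_bool_arg

parser = argparse.ArgumentParser()
add_bool_arg(parser, "pretrained", default_value=True)

print(parser.parse_args([]).pretrained)                   # True (the default)
print(parser.parse_args(["--no-pretrained"]).pretrained)  # False
print(parser.parse_args(["--pretrained"]).pretrained)     # True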
import argparse import re import torch from enum import Enum class OpType(Enum): POINTWISE = 1 NORMS = 2 REDUCTIONS = 3 VIEWS_EXPANDS = 4 REMOVE = 5 IGNORE = 6 op_types = { "aten::rsqrt": OpType.POINTWISE, "aten::abs": OpType.POINTWISE, "aten::eq": OpType.POINTWISE, "aten::gelu": OpType.POINTWISE, "aten::remainder": OpType.POINTWISE, "aten::_softmax": OpType.POINTWISE, "aten::clamp": OpType.POINTWISE, "aten::gt": OpType.POINTWISE, "aten::mul": OpType.POINTWISE, "aten::add": OpType.POINTWISE, "aten::sum": OpType.REDUCTIONS, "aten::ne": OpType.POINTWISE, "aten::silu": OpType.POINTWISE, "aten::pow": OpType.POINTWISE, "aten::ge": OpType.POINTWISE, "aten::native_batch_norm": OpType.NORMS, "aten::sub": OpType.POINTWISE, "aten::mean": OpType.REDUCTIONS, "aten::sqrt": OpType.POINTWISE, "aten::reciprocal": OpType.POINTWISE, "aten::reshape": OpType.VIEWS_EXPANDS, "aten::relu": OpType.POINTWISE, "prim::Constant": OpType.REMOVE, "prim::TupleConstruct": OpType.IGNORE, "aten::div": OpType.POINTWISE, "aten::tanh": OpType.POINTWISE, "aten::neg": OpType.POINTWISE, "aten::log": OpType.POINTWISE, "aten::unsqueeze": OpType.VIEWS_EXPANDS, "aten::native_layer_norm": OpType.NORMS, "aten::exp": OpType.POINTWISE, "aten::sigmoid": OpType.POINTWISE, } def type_to_placeholder(op_type: OpType) -> str: mapping = { OpType.POINTWISE: "aten::pointwise_placeholder", OpType.NORMS: "aten::norm_placeholder", OpType.REDUCTIONS: "aten::reduction_placeholder", OpType.VIEWS_EXPANDS: "aten::view_expand_placeholder", OpType.IGNORE: "aten::ignore_placeholder", OpType.REMOVE: "aten::remove_placeholder", } return mapping[op_type] # get the op type. op_name is expected to be the qualified name. def get_type(op_name: str) -> OpType: if op_name in op_types: return op_types[op_name] for optype in OpType: if type_to_placeholder(optype) == op_name: return optype raise NotImplementedError(f"No OpType known for op '{op_name}'") def simplify_tensor_type(jit_type): if isinstance(jit_type, torch._C.TensorType): return torch._C.TensorType.get() return jit_type def remove_inputs(graph): inputs_size = 0 for n in graph.inputs(): inputs_size += 1 for use in n.uses(): use.user.removeInput(use.offset) for i in reversed(range(inputs_size)): graph.eraseInput(i) return graph # Remove vertices like x or y below, where x or y are pointwise. # (pointwise) --> (x) --> (...) # (...) --> (y) --> (pointwise) # if remove_all is true, then it doesn't care if pointwise ops preceed/succeed x or y. 
def remove_duplicate_pointwise(graph, remove_all=False): to_remove = [] old_str = str(graph) def bypass_node(n): to_remove.append(n) n.output().replaceAllUsesWith(n.input()) for n in graph.nodes(): if get_type(n.kind()) != OpType.POINTWISE: continue if n.inputsSize() != 1 or n.outputsSize() != 1: continue if get_type(n.input().node().kind()) == OpType.POINTWISE or remove_all: bypass_node(n) continue uses = [r.user for r in n.output().uses() if r.user.kind() != "prim::Return"] if len(uses) >= 1 and (all(get_type(r.kind()) == OpType.POINTWISE for r in uses) or remove_all): bypass_node(n) continue for n in reversed(to_remove): n.destroy() return graph def compress_graph(graph): old_nodes = [] erased_nodes = set() for n in graph.nodes(): simple_type = get_type(n.kind()) if simple_type == OpType.IGNORE: continue old_nodes.append(n) if simple_type == OpType.REMOVE: erased_nodes.add(n) continue new_node = graph.create(type_to_placeholder(simple_type), n.outputsSize()) new_node.insertBefore(n) for inp in n.inputs(): if inp.node() not in erased_nodes: new_node.addInput(inp) for old_out, new_out in zip(n.outputs(), new_node.outputs()): new_out.setType(simplify_tensor_type(old_out.type())) old_out.replaceAllUsesWith(new_out) for n in reversed(old_nodes): n.destroy() graph = remove_inputs(graph) graph = remove_duplicate_pointwise(graph) return torch._C._jit_pass_canonicalize(graph, False) if __name__ == '__main__': parser = argparse.ArgumentParser(description=""" Collection of helper functions for eliminating duplicate subgraphs Usage: ~~~ import classify_graphs # some ir string called "ir" graph = torch._C.parse_ir(ir) # "hashes" the graph based on categories of ops (pointwise, reductions, views/expands, norms) compressed_graph = classify_graphs.compress_graph(graph) # do something with the compressed graph ~~~ Alternatively, call it and it will return one graph per hashed category Usage: python3 log_extract.py log.txt --output > log_result.py python3 classify_graphs.py log_result.py > filtered_logs.py """, formatter_class = argparse.RawDescriptionHelpFormatter) parser.add_argument("filename", type=str, help="output from log_extract.py --help") args = parser.parse_args() with open(args.filename) as f: arr = eval(f.read()) # see 73984 for i in range(len(arr)): if len(re.findall(r'value=annotate\(List\[int', arr[i])) >= 1: arr[i] = arr[0] classified = {} for ir in arr: graph = torch._C.parse_ir(ir) graph = compress_graph(graph) graph_class = str(graph) if graph_class not in classified: classified[graph_class] = [] classified[graph_class].append(ir) final_selection = [] for cl, graphs in classified.items(): # choose the longest graph of this type s = sorted(graphs, key=lambda x: -len(str(x))) final_selection.append(str(graphs[0])) print('[' + ', '.join(f'"""{x}"""' for x in final_selection) + ']')
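A sketch of compressing a tiny hand-written TorchScript IR graph with the helpers above, following the library-style usage shown in the module's own --help text (the module name classify_graphs is taken from that text and is otherwise an assumption).

import torch
from classify_graphs import compress_graph  # module name taken from the help text above

ir = """
graph(%x : Tensor):
  %a : Tensor = aten::relu(%x)
  %b : Tensor = aten::sigmoid(%a)
  return (%b)
"""
graph = torch._C.parse_ir(ir)
compressed = compress_graph(graph)
# The adjacent pointwise ops should be folded into a single
# aten::pointwise_placeholder node, which is what lets structurally
# similar graphs "hash" to the same compressed string.
print(compressed)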
"""Utilities for tuning the machine for better benchmark stability. Written for Amazon linux and Intel CPU, Nvidia GPU althogh many utilities will overlap. """ import argparse import cpuinfo import distro import enum import os import platform import psutil import subprocess import re import sys import typing from pathlib import Path def read_sys_file(sysfile: Path): with open(sysfile, 'r') as f: return f.read() def write_sys_file(sysfile: Path, content: str): print(f"Write {content} to {sysfile}") with open(sysfile, 'w') as f: f.write(content) def check_intel_no_turbo_state(turbo_file='/sys/devices/system/cpu/intel_pstate/no_turbo'): return int(read_sys_file(turbo_file)) def set_intel_no_turbo_state(state: int, turbo_file='/sys/devices/system/cpu/intel_pstate/no_turbo'): assert state in [0, 1] write_sys_file(turbo_file, str(state)) def parse_lscpu_cpu_core_list(): coreinfo = subprocess.check_output("lscpu --all --parse=CPU,CORE,ONLINE", shell=True).strip().decode().split('\n') matched_cpus = 0 cpu_core = [] for line in coreinfo[2:]: if line[0] == '#': continue cpu, core, online = line.split(',') cpu = int(cpu) online = online == "Y" core = int(core) if online else None if cpu == core: matched_cpus += 1 cpu_core.append((cpu, core, online)) assert matched_cpus > 0, "Failed to parse lscpu output" return cpu_core def hyper_threading_enabled(): for cpu, core, online in parse_lscpu_cpu_core_list(): if cpu != core and online: return True return False def set_hyper_threading(enabled=False): for cpu, core, online in parse_lscpu_cpu_core_list(): if cpu != core: if not online and not enabled: continue if online and enabled: continue virtual_cpu_online_file = f"/sys/devices/system/cpu/cpu{cpu}/online" value = "1" if enabled else "0" write_sys_file(virtual_cpu_online_file, value) def get_intel_max_cstate(): kernel_args = read_sys_file('/proc/cmdline').split() for arg in kernel_args: if arg.find('intel_idle.max_cstate') == 0: return int(arg.split('=')[1]) return None def get_isolated_cpus(): """ Returns a list of cpus marked as isolated from the kernel scheduler for regular tasks. Only tasks scheduled via taskset command can use these cpus, e.g. benchmarking workload. 
""" kernel_args = read_sys_file('/proc/cmdline').split() isolcpus = set() for arg in kernel_args: if arg.find('isolcpus') == 0: arg = arg.split('=')[1] chunks = arg.split(',') for chunk in chunks: if '-' in chunk: start, end = chunk.split('-') for cpu in range(int(start), int(end) + 1): isolcpus.add(cpu) else: isolcpus.add(int(chunk)) return list(isolcpus) def get_process_cpu_affinity(): p = psutil.Process() return p.cpu_affinity() def nvidia_smi_query(query: str, device_ids: typing.List[int] = None): if device_ids: device_ids = [str(id) for id in device_ids] device_ids = ",".join(device_ids) id_selector = f"-i {device_ids}" if device_ids else "" values = subprocess.check_output(f'nvidia-smi --query-gpu="{query}" {id_selector} --format=csv,noheader,nounits', shell=True).strip().decode().split("\n") return values def has_nvidia_smi(): try: subprocess.check_output('nvidia-smi', shell=True) return True except: return False def get_nvidia_gpu_clocks(device_ids: typing.List[int] = None): clocks = nvidia_smi_query("clocks.applications.graphics", device_ids) for clock in range(len(clocks)): clocks[clock] = 0 if clocks[clock] == '[N/A]' else clocks[clock] return [int(clock) for clock in clocks] def get_nvidia_gpu_temps(device_ids: typing.List[int] = None): temps = {} raw_temps = nvidia_smi_query("temperature.gpu,temperature.memory", device_ids) temps['gpu'] = [temp.split(',')[0] for temp in raw_temps] temps['memory'] = [temp.split(',')[1] for temp in raw_temps] return temps def set_nvidia_graphics_clock(device_id=0, clock=900): if has_nvidia_smi(): return subprocess.check_call(['nvidia-smi', '-ac', '5001,900']) return False def get_nvidia_throttle_reasons(device_ids: typing.List[int] = None): """ See 'nvidia-smi --help-query-gpu for explanation of throttle reasons """ queries = ['gpu_idle', 'applications_clocks_setting', 'sw_power_cap', 'hw_slowdown', 'hw_thermal_slowdown', 'hw_power_brake_slowdown', 'sw_thermal_slowdown', 'sync_boost'] query_str = ','.join(["clocks_throttle_reasons." + q for q in queries]) raw = nvidia_smi_query(query_str, device_ids) throttle_reasons = [] for line in raw: gpu_reasons = [q for q, v in zip(queries, line.split(',')) if 'Active' == v] throttle_reasons.append(gpu_reasons) return throttle_reasons MACHINE = enum.Enum('MACHINE', ['AMAZON_LINUX', 'UBUNTU', 'UNKNOWN']) def get_machine_type(): # It's tricky to write platform setup code that works on different OS/configs. # initially, just intend to identify a known environment and for any other # environment revert to no-op. Expand functionality over time as needed. if platform.system() == 'Linux': if distro.name() == "Amazon Linux": return MACHINE.AMAZON_LINUX if platform.system() == 'Linux': if distro.name() == 'Ubuntu': return MACHINE.UBUNTU return MACHINE.UNKNOWN def get_cpu_temp(): temps = {} if not MACHINE.UNKNOWN == get_machine_type(): thermal_path = Path('/sys/class/thermal/') for zone in filter(lambda x: "thermal_zone" in x, os.listdir(thermal_path)): temps[zone] = int(read_sys_file(thermal_path / zone / "temp")) / 1000. 
return temps def is_using_isolated_cpus(): isolated_cpus = get_isolated_cpus() using_cpus = get_process_cpu_affinity() omp_using_cpus = get_omp_affinity() lscpu = parse_lscpu_cpu_core_list() assert len(lscpu) > 0, "unable to parse current CPUs" for cpu, core, active in lscpu: # check that all used cpus are isolated ones (more critical) if (cpu in using_cpus or cpu in omp_using_cpus) and cpu not in isolated_cpus: return False # check all isolated cpus are used (less critical) elif active and cpu in isolated_cpus: if cpu not in using_cpus: # currently after importing torch, process cpu affinity mask changes from e.g. 4-47 to 4. # since we can't assert that all intended cores are being used, we can at least assert that # the first core in the range of isolated cores is used. # see https://github.com/pytorch/pytorch/issues/49971 # return False pass if cpu not in omp_using_cpus: return False return True def get_omp_affinity(): if 'GOMP_CPU_AFFINITY' not in os.environ: return [] raw = os.environ['GOMP_CPU_AFFINITY'] affinity = [] def parse_block(block): if '-' in block: start, end = block.split('-') return list(range(int(start), int(end) + 1)) return [int(block)] if ' ' in raw: for block in raw.split(' '): affinity.extend(parse_block(block)) else: affinity.extend(parse_block(raw)) return affinity def get_pstate_frequency(): CPU_FREQ_BASE_DIR = '/sys/devices/system/cpu' CPU_FREQ_FILES = ["scaling_min_freq", "scaling_max_freq", "scaling_cur_freq"] cpu_dirs = ["cpu" + str(cpu[0]) for cpu in parse_lscpu_cpu_core_list() if cpu[2]] output = dict() for cpu_dir in cpu_dirs: full_path = os.path.join(CPU_FREQ_BASE_DIR, cpu_dir, "cpufreq") freq_paths = [os.path.join(full_path, x) for x in CPU_FREQ_FILES] all_exist = True for path in freq_paths: all_exist = all_exist and os.path.exists(path) if all_exist: output[cpu_dir] = dict() for i, path in enumerate(freq_paths): output[cpu_dir][CPU_FREQ_FILES[i]] = int(read_sys_file(path)) / 1000 return output def set_pstate_frequency(min_freq = 2500, max_freq = 2500): CPU_FREQ_BASE_DIR = '/sys/devices/system/cpu' CPU_FREQ_FILES = ["scaling_min_freq", "scaling_max_freq", "scaling_cur_freq"] cpu_dirs = ["cpu" + str(cpu[0]) for cpu in parse_lscpu_cpu_core_list() if cpu[2]] for cpu_dir in cpu_dirs: full_path = os.path.join(CPU_FREQ_BASE_DIR, cpu_dir, "cpufreq") freq_paths = [os.path.join(full_path, x) for x in CPU_FREQ_FILES] all_exist = True for path in freq_paths: all_exist = all_exist and os.path.exists(path) if all_exist: write_sys_file(freq_paths[0], str(min_freq * 1000)) write_sys_file(freq_paths[1], str(max_freq * 1000)) def check_pstate_frequency_pin(pin_freq = 2500): FREQ_THRESHOLD = 15 # Allow 15 MHz difference maximum all_freq = get_pstate_frequency() for cpuid in all_freq: for attr in all_freq[cpuid]: freq = all_freq[cpuid][attr] difference = abs(freq - pin_freq) if difference > FREQ_THRESHOLD: print(f"Specify frequency {pin_freq} Mhz, find setting {cpuid} {attr}: {freq}.") return False return True def get_machine_config(): config = {} machine_type = get_machine_type() config['machine_type'] = machine_type config['cpu_brand'] = cpuinfo.get_cpu_info()['brand_raw'] if not MACHINE.UNKNOWN == machine_type: config['linux_distribution'] = distro.linux_distribution() config['intel_turbo_disabled'] = check_intel_no_turbo_state() config['intel_hyper_threading_enabled'] = hyper_threading_enabled() config['intel_max_cstate'] = get_intel_max_cstate() config['isolated_cpus'] = get_isolated_cpus() config['process_cpu_affinity'] = get_process_cpu_affinity() 
config['is_using_isolated_cpus'] = is_using_isolated_cpus() config['cpu_pstate_frequency'] = get_pstate_frequency() return config def check_machine_configured(check_process_affinity=True): check_environment() if not MACHINE.UNKNOWN == get_machine_type(): assert 1 == check_intel_no_turbo_state(), "Turbo Boost is not disabled" assert False == hyper_threading_enabled(), "HyperThreading is not disabled" assert 1 == get_intel_max_cstate(), "Intel max C-State isn't set to 1, which avoids power-saving modes." assert len(get_isolated_cpus()) > 0, "No cpus are isolated for benchmarking with isolcpus" assert 900 == get_nvidia_gpu_clocks()[0], "Nvidia gpu clock isn't limited, to increase consistency by reducing throttling" assert is_using_isolated_cpus(), "taskset or GOMP_CPU_AFFINITY not specified or not matching kernel isolated cpus" assert check_pstate_frequency_pin(), "Must pin CPU frequency to a fixed number in MHz" else: raise RuntimeError(f"Unsupported machine type {get_machine_type()}") def get_machine_state(): state = {} machine_type = get_machine_type() state['machine_type'] = machine_type if not MACHINE.UNKNOWN == machine_type: state['cpu_temps'] = get_cpu_temp() if has_nvidia_smi(): state['nvidia_gpu_temps'] = get_nvidia_gpu_temps() state['nvidia_gpu_clocks'] = get_nvidia_gpu_clocks() state['nvidia_gpu_throttle_reasons'] = get_nvidia_throttle_reasons() state['process_cpu_affinity'] = get_process_cpu_affinity() return state if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--enable_ht", action="store_true", help="Enable HyperThreading") parser.add_argument("--configure", action="store_true", help="Apply benchmark tuning to this machine") parser.add_argument("--no_verify", action="store_true", help="Skip verifying machine is configured for benchmarking") args = parser.parse_args() machine_type = get_machine_type() if MACHINE.UNKNOWN == machine_type: raise RuntimeError(f"Unsupported machine type {machine_type}") if args.enable_ht: set_hyper_threading(True) if args.configure: set_intel_no_turbo_state(1) set_hyper_threading(False) set_nvidia_graphics_clock() set_pstate_frequency() if not args.no_verify: assert 1 == check_intel_no_turbo_state(), "Turbo Boost is not disabled" assert False == hyper_threading_enabled(), "HyperThreading is not disabled" assert 1 == get_intel_max_cstate(), "Intel max C-State isn't set to 1, which avoids power-saving modes." assert len(get_isolated_cpus()) > 0, "No cpus are isolated for benchmarking with isolcpus" assert 900 == get_nvidia_gpu_clocks()[0], "Nvidia gpu clock isn't limited, to increase consistency by reducing throttling" assert check_pstate_frequency_pin(), "CPU frequency is not correctly pinned, which is required to minimize noise." # doesn't make too much sense to ask the user to run this configure script with the isolated cpu cores # that check is more important to be done at runtime of benchmark, and is checked by conftest.py #assert is_using_isolated_cpus(), "Not using isolated CPUs for this process" def check_environment(): checks = [ # VAR_NAME, blacklist ("DEBUG", None), ("MKLDNN_VERBOSE", None), ("PYTORCH_JIT_LOG_LEVEL", None) ] for check in checks: if check[0] in os.environ and (check[1] == None or os.environ[check[0]] in check[1]): raise RuntimeError(f"{check[0]} is set")
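A sketch of querying the helpers above on a Linux host. Reading some of the sysfs files (intel_pstate, thermal zones) and calling nvidia-smi requires the matching hardware and permissions, so treat this as a best-effort probe rather than a check that is guaranteed to pass; the import path is an assumption.

from torchbenchmark.util.machine_config import get_machine_config, get_machine_state  # hypothetical path

# May raise on hosts without intel_pstate or outside Amazon Linux / Ubuntu.
config = get_machine_config()
print("turbo disabled:", config.get("intel_turbo_disabled"))
print("isolated cpus:", config.get("isolated_cpus"))

state = get_machine_state()
print("cpu temps:", state.get("cpu_temps"))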
import importlib import os import torch from contextlib import contextmanager, ExitStack import warnings import inspect import yaml from pathlib import Path from typing import ContextManager, Optional, List, Tuple, Generator from torch.utils._pytree import tree_map from torchbenchmark import REPO_PATH from torchbenchmark.util.extra_args import parse_opt_args, apply_opt_args, \ parse_decoration_args, apply_decoration_args, is_staged_train_test, \ TEST_STAGE from torchbenchmark.util.env_check import set_random_seed, is_hf_model, \ save_deterministic_dict, load_deterministic_dict, check_accuracy from torchbenchmark.util.fx_int8 import get_sub_module, prepare_sub_module, convert_sub_module SPECIAL_DEVICE_MAPPING = { "AMD Instinct MI210": "NVIDIA A100-SXM4-40GB" } class PostInitProcessor(type): def __call__(cls, *args, **kwargs): obj = type.__call__(cls, *args, **kwargs) obj.__post__init__() return obj @contextmanager def no_grad(val): """Some meta-learning models (e.g. maml) may need to train a target(another) model in inference runs """ old_state = torch.is_grad_enabled() try: torch.set_grad_enabled(not val) yield finally: torch.set_grad_enabled(old_state) @contextmanager def nested(*contexts): """ Chain and apply a list of contexts """ with ExitStack() as stack: for ctx in contexts: stack.enter_context(ctx()) yield contexts # enable JIT profiling executor @contextmanager def enable_profiling_executor(): try: graph_executor = torch._C._get_graph_executor_optimize(True) profiling_executor = torch._C._jit_set_profiling_executor(True) profiling_mode = torch._C._jit_set_profiling_mode(True) yield finally: torch._C._jit_set_profiling_mode(profiling_mode) torch._C._jit_set_profiling_executor(profiling_executor) torch._C._get_graph_executor_optimize(graph_executor) class BenchmarkModel(metaclass=PostInitProcessor): DEFAULT_TRAIN_BSIZE: Optional[int] = None DEFAULT_EVAL_BSIZE: Optional[int] = None # by default, deepcopy the model when checking accuracy # because some models are stateful (such as moco) DEEPCOPY: bool = True # by default, turn on deterministic mode when checking accuracy DISABLE_DETERMINISM: bool = False test: str device: str batch_size: int extra_args: List[str] run_contexts: List[ContextManager] """ A base class for adding models to torch benchmark. See [Adding Models](#../models/ADDING_MODELS.md) """ def __init__(self, test: str, device: str, batch_size: Optional[int]=None, extra_args: List[str]=[]): self.metadata = self.load_metadata() self.test = test assert self.test == "train" or self.test == "eval", \ f"Test must be 'train' or 'eval', but get {self.test}. Please submit a bug report." self.device = device self.extra_args = extra_args self.opt = None # contexts to run in the test function if self.test == "train": # In train test, there are run contexts that should only be applied for forward/backward/optimizer stage # For example, amp only applies for the forward stage self.forward_contexts = [] self.backward_contexts = [] self.optimizer_contexts = [] self.run_contexts = [ enable_profiling_executor # force JIT profiling executor to be enabled by default ] set_random_seed() # sanity checks of the options assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but provided {self.test}." 
# parse the args self.dargs, opt_args = parse_decoration_args(self, self.extra_args) if self.dargs.accuracy and not self.DISABLE_DETERMINISM: self.deterministic_dict = save_deterministic_dict(self.name) # if the args contain "--torchdynamo", parse torchdynamo args if "--torchdynamo" in opt_args: self.dynamo = True from torchbenchmark.util.backends.torchdynamo import parse_torchdynamo_args self.opt_args, self.extra_args = parse_torchdynamo_args(opt_args) else: self.dynamo = False self.opt_args, self.extra_args = parse_opt_args(self, opt_args) self.determine_batch_size(batch_size) # Run the post processing for model acceleration def __post__init__(self): # All arguments should be parsed at this point. assert not self.extra_args, f"Expected no unknown args at this point, found {self.extra_args}" if self.dargs.accuracy: self.accuracy = check_accuracy(self) if not self.DISABLE_DETERMINISM: load_deterministic_dict(self.deterministic_dict) return # apply decoration args apply_decoration_args(self, self.dargs) # apply optimization args if self.dynamo: from torchbenchmark.util.backends.torchdynamo import apply_torchdynamo_args apply_torchdynamo_args(self, self.opt_args, self.dargs.precision) else: apply_opt_args(self, self.opt_args) # setup distributed trainer if self.dargs.distributed: if self.dargs.distributed_wrap_fn: pos = self.dargs.distributed_wrap_fn.rfind(".") module = importlib.import_module(self.dargs.distributed_wrap_fn[:pos]) apply_trainer = getattr(module, self.dargs.distributed_wrap_fn[(pos+1):]) else: from torchbenchmark.util.distributed.core_model.apply_trainer import apply_trainer if is_hf_model(self): # DDP requires to use unwrapped model for huggingface module, _inputs = self.get_module(wrap_model=False) else: module, _inputs = self.get_module() self.set_module(apply_trainer(module, self.dargs.distributed)) # Need to clean up the cache because we run deep copy within correceness check if self.device == "cuda": torch.cuda.empty_cache() def determine_batch_size(self, batch_size=None): # batch size priority for eval tests: not ALLOW_CUSTOMIZE_BSIZE > user specified > device specified > default # batch size priority for train tests: not ALLOW_CUSTOMIZE_BSIZE > user specified > default self.batch_size = batch_size if not batch_size: self.batch_size = self.DEFAULT_TRAIN_BSIZE if self.test == "train" else self.DEFAULT_EVAL_BSIZE if self.device == "cuda": current_device_name = torch.cuda.get_device_name() assert current_device_name, f"torch.cuda.get_device_name() returns None when device is set to cuda, please double check." 
if current_device_name in SPECIAL_DEVICE_MAPPING: current_device_name = SPECIAL_DEVICE_MAPPING[current_device_name] else: current_device_name = str(self.device) # use the device suggestion on CUDA inference tests, key should be either eval_batch_size or train_batch_size device_batch_size_key = f"{self.test}_batch_size" if self.metadata and "devices" in self.metadata and current_device_name in self.metadata["devices"] \ and device_batch_size_key in self.metadata["devices"][current_device_name]: self.batch_size = self.metadata["devices"][current_device_name][device_batch_size_key] # If the model doesn't implement test or eval test # its DEFAULT_TRAIN_BSIZE or DEFAULT_EVAL_BSIZE will still be None if not self.batch_size: raise NotImplementedError(f"Test {self.test} is not implemented.") else: self.batch_size = batch_size # Check if specified batch size is supported by the model if hasattr(self, "ALLOW_CUSTOMIZE_BSIZE") and (not getattr(self, "ALLOW_CUSTOMIZE_BSIZE")): if self.test == "train" and (not self.batch_size == self.DEFAULT_TRAIN_BSIZE): raise NotImplementedError("Model doesn't support customizing batch size.") elif self.test == "eval" and (not self.batch_size == self.DEFAULT_EVAL_BSIZE): raise NotImplementedError("Model doesn't support customizing batch size.") elif self.dargs.accuracy: self.batch_size = 4 if self.batch_size > 4 else self.batch_size def load_metadata(self): relative_path = self.__class__.__module__.split(".") self.name = relative_path[-1] metadata_loc = Path(REPO_PATH).joinpath(*relative_path).joinpath("metadata.yaml") if not metadata_loc.exists(): return None with open(metadata_loc, "r") as mf: metadata = yaml.safe_load(mf) return metadata def add_context(self, context_fn, stage=TEST_STAGE.ALL): ctx = context_fn() assert isinstance(ctx, ContextManager), f"Expected adding a ContextManager, get {type(ctx)}. Please report a bug." if stage == TEST_STAGE.ALL: self.run_contexts.append(context_fn) elif stage == TEST_STAGE.FORWARD: self.forward_contexts.append(context_fn) elif stage == TEST_STAGE.BACKWARD: self.backward_contexts.append(context_fn) elif stage == TEST_STAGE.OPTIMIZER: self.optimizer_contexts.append(context_fn) # Common interface for all models extending BenchmarkModel to access the optimizer. # Some models have an opt attribute, others have an optimizer attribute; this # implementation handles both. This function should not error! Simply return None # if there's no optimizer in sight. def get_optimizer(self): if hasattr(self, "optimizer"): return self.optimizer if hasattr(self, "opt"): return self.opt warnings.warn("The optimizer for this model is not stored in self.opt nor self.optimizer. " "Currently returning None! Please override this implementation with your own " "if there is an optimizer this should be returning instead.") return None # Takes in an optimizer and sets that to be the optimizer used from now on. # There are special models like dcgan that would update multiple optimizers at once, # so optimizer here is not always strictly a, say, torch.optim.Optimizer. def set_optimizer(self, optimizer) -> None: if hasattr(self, "optimizer"): self.optimizer = optimizer return if hasattr(self, "opt"): self.opt = optimizer return raise NotImplementedError("The optimizer for this model is not stored in self.opt nor self.optimizer. 
" "Please override this implementation with your own.") # Default implementation for replacing the model def set_module(self, new_model): if hasattr(self, 'model') and isinstance(self.model, torch.nn.Module): self.model = new_model else: raise NotImplementedError("The instance variable 'model' does not exist or is not type 'torch.nn.Module', implement your own `set_module()` function.") def gen_inputs(self, num_batches: int=1) -> Tuple[Generator, Optional[int]]: """Generate a tuple of (iterator of model input, the size of the iterator). If size is None, the input is randomly generated and has infinite size.""" raise NotImplementedError("Default input generation function is not implemented. " "Please submit an issue if you need input iterator implementation for the model.") def invoke_staged_train_test(self) -> None: optimizer = self.get_optimizer() if optimizer is not None: optimizer.zero_grad() with nested(*self.forward_contexts): losses = self.forward() with nested(*self.backward_contexts): self.backward(losses) if optimizer is not None: with nested(*self.optimizer_contexts): self.optimizer_step() return None def invoke(self) -> Optional[Tuple[torch.Tensor]]: out = None if self.test == "train" and is_staged_train_test(self): self.invoke_staged_train_test() return out with nested(*self.run_contexts): if self.test == "train": self.train() elif self.test == "eval": out = self.eval() return out def eval_in_nograd(self): return True def enable_channels_last(self): model_name = self.name try: model, _ = self.get_module() model = model.to(memory_format=torch.channels_last) except RuntimeError: warnings.warn(UserWarning(f"{model_name} doesn't support `channels_last` yet!")) return self.set_module(model) def inputs_convert(example_inputs): if isinstance(example_inputs, torch.Tensor) and example_inputs.dim()==4: return example_inputs.to(memory_format=torch.channels_last) elif isinstance(example_inputs, (tuple, list, dict)): return tree_map(lambda x: inputs_convert(x), example_inputs) else: warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `channels_last`!")) return example_inputs if hasattr(self, 'example_inputs'): self.example_inputs = inputs_convert(self.example_inputs) else: warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `channels_last`!")) def enable_fx_int8(self, quant_engine:str='x86'): torch.backends.quantized.engine = quant_engine try: model, _ = self.get_module() # Get sub modules model, sub_module_list = get_sub_module(model, dict(model.named_modules()), '') if not len(sub_module_list): warnings.warn(UserWarning(f"{self.name} doesn't have submodule can ben quantized!")) model = prepare_sub_module(sub_module_list, model, '', quant_engine) self.set_module(model) # Calibration self.eval() model, _ = self.get_module() model = convert_sub_module(sub_module_list, model, '') self.set_module(model) except Exception as e: print(e) raise RuntimeError(f"{self.name} doesn't support `fx_int8` yet!") def enable_bf16(self): model_name = self.name try: model, _ = self.get_module() model = model.to(torch.bfloat16) except RuntimeError: warnings.warn(UserWarning(f"{model_name} doesn't support `to(torch.bfloat16)` yet!")) return self.set_module(model) def inputs_convert(example_inputs): if isinstance(example_inputs, torch.Tensor) and example_inputs.dtype == torch.float32: return example_inputs.to(torch.bfloat16) elif isinstance(example_inputs, (tuple, list, dict)): return tree_map(lambda x: inputs_convert(x), example_inputs) else: 
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `torch.bfloat16`!")) return example_inputs if hasattr(self, 'example_inputs'): self.example_inputs = inputs_convert(self.example_inputs) else: warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `torch.bfloat16`!")) def enable_amp(self): if not self.dynamo and self.opt_args.backend == 'cudagraph': return NotImplementedError("AMP not implemented for cudagraphs") if not hasattr(self, "amp_context"): raise RuntimeError(f"{self.name} doesn't have amp_context support!") if self.device == "cpu": self.amp_context = lambda: torch.cpu.amp.autocast() elif self.device == "cuda": self.amp_context = lambda: torch.cuda.amp.autocast() @property def pt2_compilation_time(self): from torch._dynamo.utils import compile_times compile_time = dict(zip(*compile_times(repr="csv", aggregate=True)))["_compile.<locals>.compile_inner"] return float(compile_time) @property def pt2_graph_breaks(self): from torch._dynamo.utils import counters num_graph_breaks = len(counters["graph_break"].keys()) return num_graph_breaks
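A minimal sketch of the smallest BenchmarkModel subclass one would expect to work with the base class above. The toy nn.Linear workload is illustrative only; real models also set a `task` from torchbenchmark.tasks and ship a metadata.yaml next to the module.

import torch
from torchbenchmark.util.model import BenchmarkModel

class ToyModel(BenchmarkModel):
    DEFAULT_TRAIN_BSIZE = 8
    DEFAULT_EVAL_BSIZE = 8

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.model = torch.nn.Linear(16, 2).to(device)
        self.example_inputs = (torch.randn(self.batch_size, 16, device=device),)
        self.opt = torch.optim.SGD(self.model.parameters(), lr=0.01)

    def get_module(self):
        return self.model, self.example_inputs

    def train(self):
        self.opt.zero_grad()
        self.model(*self.example_inputs).sum().backward()
        self.opt.step()

    def eval(self):
        with torch.no_grad():
            return (self.model(*self.example_inputs),)

if __name__ == "__main__":
    m = ToyModel(test="eval", device="cpu")
    (out,) = m.invoke()   # runs eval() inside the registered run contexts
    print(out.shape)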
""" Return a list of recent PyTorch wheels published on download.pytorch.org. Users can specify package name, python version, platform, and the number of days to return. If one of the packages specified is missing on one day, the script will skip outputing the results on that day. """ import os import re import requests import argparse import urllib.parse from datetime import date, timedelta from bs4 import BeautifulSoup from collections import defaultdict import sys from pathlib import Path import subprocess from typing import List REPO_ROOT = Path(__file__).parent.parent.parent.resolve() class add_path(): def __init__(self, path): self.path = path def __enter__(self): sys.path.insert(0, self.path) def __exit__(self, exc_type, exc_value, traceback): try: sys.path.remove(self.path) except ValueError: pass with add_path(str(REPO_ROOT)): from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUDA_VERSION_MAP from utils.python_utils import DEFAULT_PYTHON_VERSION, PYTHON_VERSION_MAP PYTORCH_CUDA_VERISON = CUDA_VERSION_MAP[DEFAULT_CUDA_VERSION]["pytorch_url"] PYTORCH_PYTHON_VERSION = PYTHON_VERSION_MAP[DEFAULT_PYTHON_VERSION]["pytorch_url"] torch_wheel_nightly_base = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERISON}/" torch_nightly_wheel_index = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERISON}/torch_nightly.html" torch_nightly_wheel_index_override = "torch_nightly.html" def memoize(function): """ """ call_cache = {} def memoized_function(*f_args): if f_args in call_cache: return call_cache[f_args] call_cache[f_args] = result = function(*f_args) return result return memoized_function @memoize def get_wheel_index_data(py_version, platform_version, url=torch_nightly_wheel_index, override_file=torch_nightly_wheel_index_override): """ """ if os.path.isfile(override_file) and os.stat(override_file).st_size: with open(override_file) as f: data = f.read() else: r = requests.get(url) r.raise_for_status() data = r.text soup = BeautifulSoup(data, 'html.parser') data = defaultdict(dict) for link in soup.find_all('a'): group_match = re.search("([a-z]*)-(.*)-(.*)-(.*)-(.*)\.whl", link.text) # some packages (e.g., torch-rec) doesn't follow this naming convention if not group_match: continue pkg, version, py, py_m, platform = group_match.groups() version = urllib.parse.unquote(version) if py == py_version and platform == platform_version: full_url = os.path.join(torch_wheel_nightly_base, link.text) data[pkg][version] = full_url return data def get_nightly_wheel_urls(packages:list, date:date, py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64'): """Gets urls to wheels for specified packages matching the date, py_version, platform_version """ date_str = f"{date.year}{date.month:02}{date.day:02}" data = get_wheel_index_data(py_version, platform_version) rc = {} for pkg in packages: pkg_versions = data[pkg] # multiple versions could happen when bumping the pytorch version number # e.g., both torch-1.11.0.dev20220211%2Bcu113-cp38-cp38-linux_x86_64.whl and # torch-1.12.0.dev20220212%2Bcu113-cp38-cp38-linux_x86_64.whl exist in the download link keys = sorted([key for key in pkg_versions if date_str in key], reverse=True) if len(keys) > 1: print(f"Warning: multiple versions matching a single date: {keys}, using {keys[0]}") if len(keys) == 0: return None full_url = pkg_versions[keys[0]] rc[pkg] = { "version": keys[0], "wheel": full_url, } return rc def get_nightly_wheels_in_range(packages:list, start_date:date, end_date:date, py_version=PYTORCH_PYTHON_VERSION, 
platform_version='linux_x86_64', reverse=False): rc = [] curr_date = start_date while curr_date <= end_date: curr_wheels = get_nightly_wheel_urls(packages, curr_date, py_version=py_version, platform_version=platform_version) if curr_wheels is not None: rc.append(curr_wheels) curr_date += timedelta(days=1) if reverse: rc.reverse() return rc def get_n_prior_nightly_wheels(packages:list, n:int, py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64', reverse=False): end_date = date.today() start_date = end_date - timedelta(days=n) return get_nightly_wheels_in_range(packages, start_date, end_date, py_version=py_version, platform_version=platform_version, reverse=reverse) def get_most_recent_successful_wheels(packages: list, pyver: str, platform: str) -> List[str]: """Get the most recent successful nightly wheels. Return List[str] """ curr_date = date.today() date_limit = curr_date - timedelta(days=365) while curr_date >= date_limit: wheels = get_nightly_wheel_urls(packages, curr_date, py_version=pyver, platform_version=platform) if wheels: return wheels curr_date = curr_date - timedelta(days=1) # Can't find any valid pytorch package return None def install_wheels(wheels): """Install the wheels specified in the wheels.""" wheel_urls = list(map(lambda x: wheels[x]["wheel"], wheels.keys())) work_dir = Path(__file__).parent.joinpath(".data") work_dir.mkdir(parents=True, exist_ok=True) requirements_file = work_dir.joinpath("requirements.txt").resolve() with open(requirements_file, "w") as rf: rf.write("\n".join(wheel_urls)) command = ["pip", "install", "-r", str(requirements_file)] print(f"Installing pytorch nightly packages command: {command}") subprocess.check_call(command) if __name__ == "__main__": parser = argparse.ArgumentParser(description=__doc__) parser.add_argument("--pyver", type=str, default=PYTORCH_PYTHON_VERSION, help="PyTorch Python version") parser.add_argument("--platform", type=str, default="linux_x86_64", help="PyTorch platform") parser.add_argument("--priordays", type=int, default=1, help="Number of days") parser.add_argument("--reverse", action="store_true", help="Return reversed result") parser.add_argument("--packages", required=True, type=str, nargs="+", help="List of package names") parser.add_argument("--install-nightlies", action="store_true", help="Install the most recent successfully built nightly packages") args = parser.parse_args() if args.install_nightlies: wheels = get_most_recent_successful_wheels(args.packages, args.pyver, args.platform) assert wheels, f"We do not find any successful pytorch nightly build of packages: {args.packages}." print(f"Found pytorch nightly wheels: {wheels} ") install_wheels(wheels) exit(0) wheels = get_n_prior_nightly_wheels(packages=args.packages, n=args.priordays, py_version=args.pyver, platform_version=args.platform, reverse=args.reverse) for wheelset in wheels: for pkg in wheelset: print(f"{pkg}-{wheelset[pkg]['version']}: {wheelset[pkg]['wheel']}")
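# A hedged usage sketch for the wheel-listing helpers above. It assumes the snippet runs in
# the same module as those helpers and that download.pytorch.org is reachable; the package
# list and three-day window are arbitrary choices for illustration.
from datetime import date, timedelta

wheels = get_nightly_wheels_in_range(
    packages=["torch", "torchvision"],
    start_date=date.today() - timedelta(days=3),
    end_date=date.today(),
)
for wheelset in wheels:
    for pkg, info in wheelset.items():
        print(f"{pkg}-{info['version']}: {info['wheel']}")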
""" Utils for model metadata """ from typing import Any, List, Dict def match_item(item_name: str, item_val: str, skip_item: Dict[str, Any]) -> bool: if item_name not in skip_item: return True return skip_item[item_name] == item_val def skip_by_metadata(test: str, device:str, extra_args: List[str], metadata: Dict[str, Any]) -> bool: "Check if the test should be skipped based on model metadata." if not "not_implemented" in metadata: return False for skip_item in metadata["not_implemented"]: match = match_item("test", test, skip_item) and \ match_item("device", device, skip_item) and \ match_item("extra_args", extra_args, skip_item) if match: return True return False
def prefetch_loader(loader, device):
    """Materialize every batch of `loader` on `device` up front and return them as a list of tuples."""
    result = []
    for data in loader:
        items = []
        for item in data:
            items.append(item.to(device))
        result.append(tuple(items))
    return result
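# A hedged usage sketch for prefetch_loader above: every batch is copied to the target
# device once, so a later timing loop measures compute rather than host-to-device copies.
# The toy dataset and batch size are assumptions for illustration.
import torch
from torch.utils.data import DataLoader, TensorDataset

dataset = TensorDataset(torch.randn(8, 3, 32, 32), torch.randint(0, 10, (8,)))
loader = DataLoader(dataset, batch_size=4)

device = "cuda" if torch.cuda.is_available() else "cpu"
batches = prefetch_loader(loader, device)
print(len(batches), batches[0][0].device)  # 2 batches, each tensor already on `device`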
""" PyTorch benchmark env check utils. This file may be loaded without torch packages installed, e.g., in OnDemand CI. """ import copy import importlib import os import argparse import logging from contextlib import contextmanager, ExitStack from typing import Any, Dict, List, Optional MAIN_RANDOM_SEED = 1337 # rounds for stableness tests STABLENESS_CHECK_ROUNDS: int = 3 # rounds for correctness tests CORRECTNESS_CHECK_ROUNDS: int = 2 # Use the list from # https://github.com/pytorch/pytorch/blob/6c7410ddc350fea625e47744da9d6be7ec74b628/benchmarks/dynamo/common.py#L2247 UNSUPPORTED_USE_DETERMINISTIC_ALGORITHMS = [ "alexnet", "Background_Matting", "pytorch_CycleGAN_and_pix2pix", "pytorch_unet", "sam", "Super_SloMo", "vgg16", ] CI_SKIP_OPTIMIZER = { # TIMM "convmixer_768_32", # accuracy "hrnet_w18", # Stack issue in fx # TorchBench "dlrm", # symbolic shapes error # HF "pnasnet5large", # Stack issue in fx "MobileBertForMaskedLM", # Stack issue in fx "MobileBertForQuestionAnswering", # Stack issue in fx "PegasusForConditionalGeneration", # OOM } # Need lower tolerance on GPU. GPU kernels have non deterministic kernels for these models. REQUIRE_HIGHER_TOLERANCE = { "alexnet", "densenet121", "hf_Albert", "vgg16", "mobilenet_v3_large", "nvidia_deeprecommender", "timm_efficientdet", } # These models need >1e-3 tolerance REQUIRE_EVEN_HIGHER_TOLERANCE = { "soft_actor_critic", "tacotron2", } REQUIRE_HIGHER_FP16_TOLERANCE = { "drq", } REQUIRE_COSINE_TOLERACE = { # Just keeping it here even though its empty, if we need this in future. } SKIP_ACCURACY_CHECK_AS_EAGER_NON_DETERMINISTIC_MODELS = { # Models that deterministic algorithms can not be turned on for eager mode. "Background_Matting", "detectron2_fasterrcnn_r_101_c4", "detectron2_fasterrcnn_r_101_dc5", "detectron2_fasterrcnn_r_101_fpn", "detectron2_fasterrcnn_r_50_c4", "detectron2_fasterrcnn_r_50_dc5", "detectron2_fasterrcnn_r_50_fpn", "detectron2_maskrcnn", "stable_diffusion_unet", } # Use the list from # https://github.com/pytorch/pytorch/blob/6c7410ddc350fea625e47744da9d6be7ec74b628/benchmarks/dynamo/torchbench.py#L382 USE_GRAD_IN_INFERENCE = [ "maml" ] HAS_NUMPY = True log = logging.getLogger(__name__) @contextmanager def nested(*contexts): """ Chain and apply a list of contexts """ with ExitStack() as stack: for ctx in contexts: stack.enter_context(ctx()) yield contexts def pick_grad(name: str, is_training: bool): import torch if is_training or name in USE_GRAD_IN_INFERENCE: return torch.enable_grad() else: return torch.no_grad() def set_random_seed(): """Make torch manual seed deterministic. 
Helps with accuracy testing.""" import torch import random import numpy def deterministic_torch_manual_seed(*args, **kwargs): from torch._C import default_generator seed = MAIN_RANDOM_SEED import torch.cuda if not torch.cuda._is_in_bad_fork(): torch.cuda.manual_seed_all(seed) return default_generator.manual_seed(seed) torch.manual_seed(MAIN_RANDOM_SEED) random.seed(MAIN_RANDOM_SEED) numpy.random.seed(MAIN_RANDOM_SEED) torch.manual_seed = deterministic_torch_manual_seed def get_pkg_versions(packages: List[str]) -> Dict[str, str]: versions = {} for module in packages: module = importlib.import_module(module) versions[module] = module.__version__ return versions def has_native_amp() -> bool: import torch try: if getattr(torch.cuda.amp, 'autocast') is not None: return True except AttributeError: pass return False def is_timm_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool: return hasattr(model, 'TIMM_MODEL') and model.TIMM_MODEL def is_torchvision_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool: return hasattr(model, 'TORCHVISION_MODEL') and model.TORCHVISION_MODEL def is_hf_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool: return hasattr(model, 'HF_MODEL') and model.HF_MODEL def is_fambench_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool: return hasattr(model, 'FAMBENCH_MODEL') and model.FAMBENCH_MODEL def is_staged_train_test(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool: return hasattr(model, 'forward') and hasattr(model, 'backward') and hasattr(model, 'optimizer_step') def save_deterministic_dict(name: str): determinism_dict = {} if "CUBLAS_WORKSPACE_CONFIG" in os.environ: determinism_dict["CUBLAS_WORKSPACE_CONFIG"] = os.environ["CUBLAS_WORKSPACE_CONFIG"] os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" import torch determinism_dict["torch.use_deterministic_algorithms"] = torch.are_deterministic_algorithms_enabled() determinism_dict["torch.backends.cudnn.allow_tf32"] = torch.backends.cudnn.allow_tf32 determinism_dict["torch.backends.cudnn.benchmark"] = torch.backends.cudnn.benchmark determinism_dict["torch.backends.cuda.matmul.allow_tf32"] = torch.backends.cuda.matmul.allow_tf32 if not name in UNSUPPORTED_USE_DETERMINISTIC_ALGORITHMS: torch.use_deterministic_algorithms(True) torch.backends.cudnn.deterministic = True torch.backends.cudnn.allow_tf32 = False torch.backends.cudnn.benchmark = False torch.backends.cuda.matmul.allow_tf32 = False return determinism_dict def load_deterministic_dict(determinism_dict: Dict[str, bool]): if "CUBLAS_WORKSPACE_CONFIG" in determinism_dict: os.environ["CUBLAS_WORKSPACE_CONFIG"] = determinism_dict["CUBLAS_WORKSPACE_CONFIG"] elif "CUBLAS_WORKSPACE_CONFIG" in os.environ: del os.environ["CUBLAS_WORKSPACE_CONFIG"] import torch torch.use_deterministic_algorithms(determinism_dict["torch.use_deterministic_algorithms"]) torch.backends.cudnn.allow_tf32 = determinism_dict["torch.backends.cudnn.allow_tf32"] torch.backends.cudnn.benchmark = determinism_dict["torch.backends.cudnn.benchmark"] torch.backends.cuda.matmul.allow_tf32 = determinism_dict["torch.backends.cuda.matmul.allow_tf32"] def cast_to(dtype, model, inputs): import torch from torch.utils._pytree import tree_map # cast model and inputs to fp16 if dtype == torch.float16: model = model.half() else: model = model.to(dtype) inputs = tree_map( lambda x: x.to(dtype) if isinstance(x, torch.Tensor) and x.is_floating_point() else x, inputs, ) return model, inputs def collect_results(model, prediction, loss, example_inputs): import torch 
results = [] results.append(prediction) results.append(loss) # if isinstance(loss, torch.Tensor) and loss.item() > 1: # log.warning( # f"High loss value alert - {loss:.2f}. Can result in unstable gradients." # ) grads = dict() params = dict() for name, param in model.named_parameters(): # if isinstance(model, eval_frame.OptimizedModule): # name = remove_optimized_module_prefix(name) param_copy = param grad = param.grad # Treat None and zero grad as same if param.grad is None: grad = torch.zeros_like(param) grads[name + ".grad"] = grad params[name] = param_copy results.append(grads) results.append(params) buffers = dict() for name, buffer in model.named_buffers(): # if isinstance(model, eval_frame.OptimizedModule): # name = remove_optimized_module_prefix(name) buffers[name] = buffer results.append(buffers) for example in example_inputs: if isinstance(example, (tuple, list)): for inp in example: if isinstance(inp, torch.Tensor): results.append(inp.grad) else: if isinstance(example, torch.Tensor): results.append(example.grad) return results def clone_input(x, *, dtype=None): """copy while preserving strides""" import torch # TODO: this is questionable if isinstance(x, torch._subclasses.FakeTensor): # this func fails on fake tensors in __torch_dispatch__ return x def torch_clone(x): y = torch.clone(x) if x.is_leaf: y.requires_grad_(x.requires_grad) if x.is_leaf and x.grad is not None: y.grad = clone_input(x.grad, dtype=dtype) if hasattr(x, "_dynamo_dynamic_indices"): y._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() return y with torch.no_grad(): if x.device.type == "xla": # Access data_ptr() for a xla tensor will cause crash return torch_clone(x) needed_size = sum( (shape - 1) * stride for shape, stride in zip(x.size(), x.stride()) ) if x.is_quantized: result = torch.empty_quantized((needed_size + 32,), x) else: result = torch.empty( needed_size + 32, dtype=dtype or x.dtype, device=x.device ) cache_line_offset = ( (x.data_ptr() - result.data_ptr()) % 32 ) // x.element_size() result.as_strided_(x.size(), x.stride(), cache_line_offset) try: result.copy_(x.clone()) if x.is_leaf: result.requires_grad_(x.requires_grad) if x.is_leaf and x.grad is not None: result.grad = clone_input(x.grad, dtype=dtype) except RuntimeError: # RuntimeError: unsupported operation: more than one element of the written-to # tensor refers to a single memory location. Please clone() the tensor before # performing the operation. 
return torch_clone(x) if hasattr(x, "_dynamo_dynamic_indices"): result._dynamo_dynamic_indices = x._dynamo_dynamic_indices.copy() return result def clone_inputs(example_inputs): import torch if type(example_inputs) is dict: res = dict(example_inputs) for key, value in res.items(): assert isinstance(value, torch.Tensor) res[key] = clone_input(value) return res res = list(example_inputs) for i in range(len(res)): if isinstance(res[i], torch.Tensor): res[i] = clone_input(res[i]) return res def init_optimizer(name, device, params, is_training): import torch if device == "cuda" and is_training and name not in CI_SKIP_OPTIMIZER: optimizer = torch.optim.SGD(params, lr=0.01) else: optimizer = None return optimizer def reduce_to_scalar_loss(out): """Reduce the output of a model to get scalar loss""" import torch if isinstance(out, torch.Tensor): # Mean does not work on integer tensors return out.sum() / out.numel() elif isinstance(out, (list, tuple)): return sum([reduce_to_scalar_loss(x) for x in out]) / len(out) elif type(out).__name__ in ( "MaskedLMOutput", "Seq2SeqLMOutput", "CausalLMOutputWithCrossAttentions", ): return reduce_to_scalar_loss(out.logits) elif type(out).__name__ == "SquashedNormal": return out.mean.sum() elif isinstance(out, dict): return sum([reduce_to_scalar_loss(value) for value in out.values()]) / len( out.keys() ) elif out == None: return 0.0 raise NotImplementedError("Don't know how to reduce", type(out)) def compute_loss(pred): return reduce_to_scalar_loss(pred) def optimizer_zero_grad(optimizer, mod): if optimizer is not None: optimizer.zero_grad(True) else: mod.zero_grad(True) def optimizer_step(optimizer): if optimizer is not None: optimizer.step() def forward_pass(mod, inputs, contexts, _collect_outputs=True): with nested(*contexts): return mod(*inputs) def forward_and_backward_pass(mod, inputs, contexts, optimizer, collect_outputs=True): cloned_inputs = clone_inputs(inputs) optimizer_zero_grad(optimizer, mod) with nested(*contexts): pred = mod(*cloned_inputs) loss = compute_loss(pred) loss.backward(retain_graph=True) optimizer_step(optimizer) if collect_outputs: return collect_results(mod, pred, loss, cloned_inputs) return None def run_n_iterations(mod, inputs, contexts, optimizer=None, is_training=False, iterations=STABLENESS_CHECK_ROUNDS): def _model_iter_fn(mod, inputs, contexts, optimizer, collect_outputs): if is_training: return forward_and_backward_pass(mod, inputs, contexts, optimizer, collect_outputs) else: return forward_pass(mod, inputs, contexts, collect_outputs) for _ in range(iterations - 1): _model_iter_fn(mod, inputs, contexts, optimizer, collect_outputs=False) return _model_iter_fn(mod, inputs, contexts, optimizer, collect_outputs=True) def get_tolerance_and_cosine_flag(model, is_training, current_device, name): tolerance = 1e-4 cosine = model.dargs.use_cosine_similarity # Increase the tolerance for torch allclose if model.dargs.precision == "fp16" or model.dargs.precision == "amp": if name in REQUIRE_HIGHER_FP16_TOLERANCE: return 1e-2, cosine return 1e-3, cosine if is_training and current_device == "cuda": tolerance = 1e-3 if name in REQUIRE_COSINE_TOLERACE: cosine = True elif name in REQUIRE_HIGHER_TOLERANCE: tolerance = 1e-3 elif name in REQUIRE_EVEN_HIGHER_TOLERANCE: tolerance = 8 * 1e-2 return tolerance, cosine def skip_accuracy_check_as_eager_non_deterministic(is_training): if is_training: return SKIP_ACCURACY_CHECK_AS_EAGER_NON_DETERMINISTIC_MODELS return set() def check_accuracy(tbmodel: 'torchbenchmark.util.model.BenchmarkModel') -> str: 
import torch import functools def _equal_nan_p(precision): equal_nan = True if precision == "fp32": equal_nan = False return equal_nan def reset_rng_state(): set_random_seed() def deepcopy_model(model, is_deepcopy): if not is_deepcopy: return model try: return copy.deepcopy(model) except TypeError: return model def maybe_cast(tbmodel, model, example_inputs): model = deepcopy_model(model, tbmodel.DEEPCOPY) example_inputs = clone_inputs(example_inputs) if tbmodel.dargs.precision == "fp32": model, example_inputs = cast_to(torch.float32, model, example_inputs) elif tbmodel.dargs.precision == "fp16": model, example_inputs = cast_to(torch.float16, model, example_inputs) elif tbmodel.dargs.precision == "bf16": model, example_inputs = cast_to(torch.bfloat16, model, example_inputs) return model, example_inputs model, example_inputs = tbmodel.get_module() name = tbmodel.name current_device = tbmodel.device optimizer = None is_training = tbmodel.test == "train" is_deepcopy = tbmodel.DEEPCOPY accuracy_status = "pass" contexts = [] equal_nan = _equal_nan_p(tbmodel.dargs.precision) if tbmodel.device == "cuda" and tbmodel.dargs.precision == "amp" and is_training: contexts.append(torch.cuda.amp.autocast) elif tbmodel.dargs.precision == "amp" and tbmodel.dargs.precision == "bf16" and tbmodel.device == "cpu": contexts.append(torch.cpu.amp.autocast) # Collect the fp64 reference outputs to be used later for accuracy checking. fp64_outputs = None try: model_fp64, inputs_fp64 = cast_to( torch.float64, deepcopy_model(model, is_deepcopy=True), clone_inputs(example_inputs), ) optimizer = init_optimizer(name, current_device, model_fp64.parameters(), is_training) fp64_outputs = run_n_iterations(model_fp64, inputs_fp64, contexts, optimizer, is_training) except Exception: log.warning( "fp64 golden ref were not generated for %s. 
Setting accuracy check to cosine", tbmodel.name, ) tbmodel.dargs.use_cosine_similarity = True fp64_outputs = None tolerance, cos_similarity = get_tolerance_and_cosine_flag( tbmodel, is_training, current_device, name ) # Cast the model to float16/float32 as necessary model, example_inputs = maybe_cast(tbmodel, model, example_inputs) with pick_grad(name, is_training): # Get results of native pytorch reset_rng_state() try: model_copy = deepcopy_model(model, is_deepcopy) optimizer = init_optimizer(name, current_device, model_copy.parameters(), is_training) correct_result = run_n_iterations( model_copy, clone_inputs(example_inputs), contexts, optimizer, is_training ) except Exception as e: accuracy_status = ( "eager_1st_run_OOM" if isinstance(e, torch.cuda.OutOfMemoryError) else "eager_1st_run_fail" ) print(e) log.exception(e) return accuracy_status # Rerun native pytorch reset_rng_state() try: model_copy = deepcopy_model(model, is_deepcopy) optimizer = init_optimizer(name, current_device, model_copy.parameters(), is_training) correct_rerun_result = run_n_iterations( model_copy, clone_inputs(example_inputs), contexts, optimizer, is_training ) except Exception as e: accuracy_status = ( "eager_2nd_run_OOM" if isinstance(e, torch.cuda.OutOfMemoryError) else "eager_2nd_run_fail" ) return accuracy_status # Two eager runs should have exactly same result is_same = True try: if ( name not in skip_accuracy_check_as_eager_non_deterministic(is_training) and not same( correct_result, correct_rerun_result, fp64_ref=None, cos_similarity=False, tol=0, equal_nan=equal_nan, ) ): is_same = False except Exception as e: # Sometimes torch.allclose may throw RuntimeError is_same = False if not is_same: accuracy_status = "eager_two_runs_differ" return accuracy_status if not hasattr(tbmodel.opt_args, 'torchdynamo') or not tbmodel.opt_args.torchdynamo: return accuracy_status correct_rerun_result = None # Run with Dynamo # Sometime CI fails with random triton compilation failure which will be skipped for now # TODO: revisit this after switching to new Triton runtime reset_rng_state() torch._dynamo.reset() optimize_ctx = functools.partial( torch.compile, backend=tbmodel.opt_args.torchdynamo, ) try: model_copy = deepcopy_model(model, is_deepcopy) optimizer = init_optimizer(name, current_device, model_copy.parameters(), is_training) optimized_model_iter_fn = optimize_ctx(run_n_iterations) new_result = optimized_model_iter_fn(model_copy, example_inputs, contexts, optimizer, is_training) except Exception as e: log.exception(e) accuracy_status = ( "OOM" if isinstance(e, torch.cuda.OutOfMemoryError) else "fail_to_run" ) return accuracy_status try: if not same( correct_result, new_result, fp64_outputs, equal_nan=equal_nan, cos_similarity=cos_similarity, tol=tolerance, ): is_same = False except Exception as e: # Sometimes torch.allclose may throw RuntimeError is_same = False if not is_same: accuracy_status = "fail_accuracy" return accuracy_status return accuracy_status def istype(obj, allowed_types): """isinstance() without subclasses""" if isinstance(allowed_types, (tuple, list, set)): return type(obj) in allowed_types return type(obj) is allowed_types def is_numpy_int_type(value): if HAS_NUMPY: import numpy as np return istype( value, ( np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64, ), ) else: return False def is_numpy_float_type(value): if HAS_NUMPY: import numpy as np return istype( value, ( np.float16, np.float32, np.float64, ), ) else: return False def is_numpy_ndarray(value): if 
HAS_NUMPY: import numpy as np return istype(value, np.ndarray) else: return False def rmse(ref, res): """ Calculate root mean squared error """ import torch return torch.sqrt(torch.mean(torch.square(ref - res))) def same( ref, res, fp64_ref=None, cos_similarity=False, tol=1e-4, equal_nan=False, exact_dtype=True, relax_numpy_equality=False, ignore_non_fp=False, log_error=log.error, ): """Check correctness to see if ref and res match""" import math import torch if fp64_ref is None: fp64_ref = ref if isinstance(ref, (list, tuple, torch.nn.ParameterList, torch.Size)): assert isinstance(res, (list, tuple)), f"type mismatch {type(ref)} {type(res)}" return len(ref) == len(res) and all( same( ai, bi, fp64_refi, cos_similarity, tol, equal_nan, exact_dtype, relax_numpy_equality, ignore_non_fp, log_error=log_error, ) for ai, bi, fp64_refi in zip(ref, res, fp64_ref) ) elif isinstance(ref, dict): assert isinstance(res, dict) assert set(ref.keys()) == set( res.keys() ), f"keys mismatch {set(ref.keys())} == {set(res.keys())}" for k in sorted(ref.keys()): if not ( same( ref[k], res[k], fp64_ref[k], cos_similarity=cos_similarity, tol=tol, equal_nan=equal_nan, exact_dtype=exact_dtype, relax_numpy_equality=relax_numpy_equality, ignore_non_fp=ignore_non_fp, log_error=log_error, ) ): log_error("Accuracy failed for key name %s", k) return False return True elif isinstance(ref, torch.Tensor): assert not isinstance(ref, torch._subclasses.FakeTensor) assert not isinstance(res, torch._subclasses.FakeTensor) if ref.is_sparse: assert res.is_sparse ref = ref.to_dense() res = res.to_dense() assert isinstance(res, torch.Tensor), f"type mismatch {type(ref)} {type(res)}" if exact_dtype: if ref.dtype != res.dtype: log_error("dtype mismatch %s, %s", ref.dtype, res.dtype) return False if ref.dtype == torch.bool: if ignore_non_fp: return True # triton stores bool as int8, so add this for more accurate checking r = torch.allclose( ref.to(dtype=torch.uint8), res.to(dtype=torch.uint8), atol=tol, rtol=tol, equal_nan=equal_nan, ) if not r: log_error("Accuracy failed: uint8 tensor did not match") return r if cos_similarity: ref = ref.flatten().to(torch.float32) res = res.flatten().to(torch.float32) if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=True): # early exit that handles zero/nan better # cosine_similarity(zeros(10), zeros(10), dim=0) is 0 return True score = torch.nn.functional.cosine_similarity(ref, res, dim=0, eps=1e-6) if score < 0.99: log.warning("Similarity score=%s", score.cpu().detach().item()) return score >= 0.99 else: if not exact_dtype: ref = ref.to(res.dtype) # First try usual allclose if torch.allclose(ref, res, atol=tol, rtol=tol, equal_nan=equal_nan): return True # Check error from fp64 version if fp64_ref.dtype == torch.float64: ref_error = rmse(fp64_ref, ref).item() res_error = rmse(fp64_ref, res).item() multiplier = 2.0 if ( fp64_ref.numel() < 1000 or (ref.ndim == 4 and ref.shape[-1] == ref.shape[-2] == 1) # large tol means a benchmark has been specified as REQUIRE_HIGHER_TOLERANCE or tol >= 2 * 1e-2 ): # In the presence of noise, noise might dominate our error # metric for smaller tensors. # Similary, for 1x1 kernels, there seems to be high noise with amp. 
multiplier = 3.0 passes_test = res_error <= (multiplier * ref_error + tol / 10.0) if not passes_test: log_error( "RMSE (res-fp64): %.5f, (ref-fp64): %.5f and shape=%s", res_error, ref_error, res.size(), ) # import pdb; pdb.set_trace() return passes_test if ignore_non_fp: return True log_error("Accuracy failed: allclose not within tol=%s", tol) return False elif isinstance(ref, (str, int, type(None), bool, torch.device)): if ignore_non_fp: return True r = ref == res if not r: log_error("Accuracy failed (%s): %s != %s", type(ref), ref, res) return r elif isinstance(ref, float): r = math.isclose(ref, res, rel_tol=tol, abs_tol=tol) if not r: log_error( "Accuracy failed (float): %s != %s (within tol=%s)", ref, res, tol ) return r elif is_numpy_int_type(ref) or is_numpy_float_type(ref): if relax_numpy_equality and not ( is_numpy_int_type(res) or is_numpy_float_type(res) ): ref = ref.item() r = (type(ref) is type(res)) and (ref == res) if not r: log_error("Accuracy failed (numpy): %s != %s", ref, res) return r elif is_numpy_ndarray(ref): return (type(ref) is type(res)) and (ref == res).all() elif type(ref).__name__ in ( "MaskedLMOutput", "Seq2SeqLMOutput", "CausalLMOutputWithCrossAttentions", "LongformerMaskedLMOutput", "Instances", "SquashedNormal", "Boxes", "Normal", "TanhTransform", "Foo", "Variable", ): assert type(ref) is type(res) return all( same( getattr(ref, key), getattr(res, key), getattr(fp64_ref, key), cos_similarity=cos_similarity, tol=tol, equal_nan=equal_nan, exact_dtype=exact_dtype, relax_numpy_equality=relax_numpy_equality, ignore_non_fp=ignore_non_fp, log_error=log_error, ) for key in ref.__dict__.keys() ) else: raise RuntimeError(f"unsupported type: {type(ref).__name__}")
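# A hedged usage sketch for same() above, assumed to run in the same module. Two float32
# tensors that differ by ~1e-5 pass at the default tol=1e-4 but fail once the tolerance is
# tightened; the values are made up for illustration.
import torch

ref = torch.ones(4)
res = ref + 1e-5

print(same(ref, res))            # True: allclose succeeds within the default tol=1e-4
print(same(ref, res, tol=1e-6))  # False: allclose fails and no fp64 reference is provided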
import re import torch from torch.ao.quantization import QuantWrapper, get_default_qconfig_mapping, get_default_qconfig_propagation_list from torch.ao.quantization.quantize_fx import _fuse_fx, prepare_fx, convert_fx from torchbenchmark.util.env_check import is_hf_model def _append_attr(fx_module, module, fx_white_list=[]): fx_attr = dir(fx_module) org_attr = dir(module) ignore_match_patterns = [r"_", r"quant", r"dequant", r"weight", r"bias", r'activation_post_process'] ignore_search_patterns = [r"_scale_", r"_zero_point_", r'_activation_post_process_'] add_special_patterns = [r"_forward_hooks", r"_forward_pre_hooks", r"_backward_hooks"] attr_names = [] for i in org_attr: if type(module) in fx_white_list and type(module) != torch.nn.Sequential \ and any([re.search(p, i) for p in add_special_patterns]): continue if any([re.search(p, i) for p in add_special_patterns]) \ or (i not in fx_attr \ and not any([re.match(p, i) for p in ignore_match_patterns]) \ and not any([re.search(p, i) for p in ignore_search_patterns])) : attr_names.append(i) for name in attr_names: attr = getattr(module, name, None) if isinstance(attr, torch.nn.Module) or \ isinstance(attr, torch.quantization.qconfig.QConfig): continue setattr(fx_module, name, attr) return fx_module def get_sub_module(model, module_dict, prefix): fx_white_list = get_default_qconfig_propagation_list() ignore_list = [] if is_hf_model: import transformers ignore_list.extend([transformers.models.gpt2.modeling_gpt2.GPT2Attention, transformers.models.t5.modeling_t5.T5DenseActDense]) def _get_sub_module(model, module_dict, prefix, sub_module_list): for name, module in model.named_children(): quant_wrap_flag = False if type(module) in ignore_list: continue op_name = prefix + "." + name if prefix != "" else name if op_name not in module_dict: continue if type(module) in fx_white_list and type(module) != torch.nn.Sequential: module = QuantWrapper(module) quant_wrap_flag = True try: graph_module = torch.fx.symbolic_trace(module) if not quant_wrap_flag and str(module.get_submodule).count("\n") != str(graph_module.get_submodule).count("\n"): continue _fuse_fx(graph_module, False) setattr(model, name, module) sub_module_list.append(op_name) except: module = _get_sub_module(module, module_dict, op_name, sub_module_list) setattr(model, name, module) return model sub_module_list = [] model = _get_sub_module(model, module_dict, prefix, sub_module_list) return model, sub_module_list def prepare_sub_module(sub_module_list, model, prefix, quant_engine:str='x86'): qconfig_mapping = get_default_qconfig_mapping(quant_engine) for name, module in model.named_children(): op_name = prefix + '.' + name if prefix != '' else name if op_name in sub_module_list: prepared_module = prepare_fx(module, qconfig_mapping, None) _append_attr(prepared_module, module) setattr(model, name, prepared_module) else: prepared_module = prepare_sub_module(sub_module_list, module, op_name, quant_engine) _append_attr(prepared_module, module) setattr(model, name, prepared_module) return model def convert_sub_module(sub_module_list, model, prefix): for name, module in model.named_children(): op_name = prefix + '.' + name if prefix != '' else name if op_name in sub_module_list: convert_module = convert_fx(module) setattr(model, name, convert_module) else: convert_module = convert_sub_module(sub_module_list, module, op_name) setattr(model, name, convert_module) return model
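# A hedged, standalone sketch of the plain FX static-quantization flow that the helpers
# above apply per-submodule (prepare_fx to insert observers, a calibration pass, then
# convert_fx). The toy model, calibration input, and 'x86' engine choice are assumptions.
import torch
from torch.ao.quantization import get_default_qconfig_mapping
from torch.ao.quantization.quantize_fx import prepare_fx, convert_fx


class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(16, 4)

    def forward(self, x):
        return torch.relu(self.linear(x))


torch.backends.quantized.engine = "x86"
model = ToyModel().eval()
example_inputs = (torch.randn(2, 16),)

qconfig_mapping = get_default_qconfig_mapping("x86")
prepared = prepare_fx(model, qconfig_mapping, example_inputs)  # insert observers
prepared(*example_inputs)                                      # calibrate
quantized = convert_fx(prepared)                               # lower to int8 kernels
print(quantized(*example_inputs).shape)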
import json import os import pandas as pd import typing class BenchmarkData: def __init__(self): self._benchmark_data = {} self._machine_info = {} self._commit_info = {} self._names_all = set() self._names_common = set() self._tags = [] self._json_raw = [] def add_json_data(self, tag, json_data): names = set([b['name'] for b in json_data['benchmarks']]) self._names_all.update(names) if len(self._benchmark_data) == 0: self._names_common.update(names) else: self._names_common.intersection_update(names) self._benchmark_data[tag] = {b['name']: b for b in json_data['benchmarks']} self._machine_info[tag] = json_data['machine_info'] self._commit_info[tag] = json_data['commit_info'] self._tags.append(tag) self._json_raw.append(json_data) def tags(self): return list(self._benchmark_data.keys()) def benchmark_names(self, mode='common', keyword_filter=None): """ Return the names of benchmarks across the dataset. mode: 'common': intersection across dataset files - useful for comparison plot 'all': union across dataset files 'outliers': union - intersection across dataset files """ if mode == 'common': names = self._names_common elif mode == 'all': names = self._names_all elif mode == 'outliers': names = self._names_all - self._names_common if keyword_filter is not None: if isinstance(keyword_filter, str): keyword_filter = [keyword_filter] for kw in keyword_filter: names = [n for n in names if kw in n] return names def as_dataframe(self, name, max_data=100): df = pd.DataFrame() for i, tag in enumerate(self._benchmark_data): benchmark = self._benchmark_data[tag][name] df = df.append(pd.DataFrame() .assign(time=benchmark['stats']['data'][:max_data]) .assign(tag=tag) .assign(file_idx=i) .assign(git_repo=self._commit_info[tag]['project']) .assign(git_commit=self._commit_info[tag]['id']) .assign(torch=self._machine_info[tag]['pytorch_version']) .assign(torchvision=self._machine_info[tag]['torchvision_version']) .assign(date=self._commit_info[tag]['time']), ignore_index=True) return df def load_data_dir(data_dir, most_recent_files:int =None, use_history_file=True): """ load all the files in the given data dir, up to N most recent. if use_history_file=True, find most recent files using order in history file. """ history_file = os.path.join(data_dir, 'history') if os.path.isfile(history_file): with open(history_file) as hf: history = hf.read().splitlines() files = [os.path.join(data_dir, f) for f in history] else: files = sorted([os.path.join(data_dir, f) for f in os.listdir(data_dir) if os.path.splitext(f)[1] == '.json']) if most_recent_files is not None: files = files[:most_recent_files] return load_data_files(files) def load_data_files(files: typing.List[str]): data = BenchmarkData() for fname in files: try: with open(fname) as f: data.add_json_data(fname, json.load(f)) except: print(f"Error loading {fname}") raise return data
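# A hedged usage sketch for BenchmarkData/load_data_dir above. The "results" directory and
# its pytest-benchmark style JSON files are hypothetical; note that as_dataframe() relies on
# DataFrame.append, which requires pandas < 2.0.
data = load_data_dir("results", most_recent_files=2)
print(data.tags())

common = data.benchmark_names(mode="common")
if common:
    df = data.as_dataframe(sorted(common)[0])
    print(df.groupby("tag")["time"].median())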
import os
import sys
import subprocess
import traceback
from pathlib import Path

from torchbenchmark import REPO_PATH

LIT_LLAMA_PATH = os.path.join(REPO_PATH, "submodules", "lit-llama")


def update_lit_llama_submodule():
    update_command = ["git", "submodule", "update", "--init", "--recursive", os.path.join("submodules", "lit-llama")]
    subprocess.check_call(update_command, cwd=REPO_PATH)


def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', os.path.join(LIT_LLAMA_PATH, "requirements.txt")])


def openllama_download():
    if os.path.exists(os.path.join(LIT_LLAMA_PATH, "checkpoints/lit-llama/7B/lit-llama.pth")):
        return
    subprocess.check_call([
        sys.executable,
        os.path.join(LIT_LLAMA_PATH, 'scripts/download.py'),
        '--repo_id', 'openlm-research/open_llama_7b_700bt_preview',
        '--local_dir', os.path.join(LIT_LLAMA_PATH, 'checkpoints/open-llama/7B')
    ])
    subprocess.check_call([
        sys.executable,
        os.path.join(LIT_LLAMA_PATH, 'scripts/convert_hf_checkpoint.py'),
        '--checkpoint_dir', os.path.join(LIT_LLAMA_PATH, 'checkpoints/open-llama/7B'),
        '--model_size', '7B',
    ], cwd=LIT_LLAMA_PATH)


def install_lit_llama():
    import torch
    update_lit_llama_submodule()
    pip_install_requirements()
    try:
        from pynvml import nvmlDeviceGetMemoryInfo
        info = nvmlDeviceGetMemoryInfo(torch.cuda._get_pynvml_handler())
        if info.total < 40 * 1024 ** 3:
            print(f"not enough GPU memory for 7B parameters, skipping llama (avail: {info.total / 1024 ** 3}GB)")
            return
    except Exception as e:
        print("failed to test GPU memory, skipping llama weights")
        traceback.print_exc()
        return
    openllama_download()
import subprocess
import os
import sys
from pathlib import Path

CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))


def pip_install_requirements():
    requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_file])


def install_diffusers():
    pip_install_requirements()
import torch from torchbenchmark.util.model import BenchmarkModel from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler from typing import Optional, List class DiffuserModel(BenchmarkModel): DIFFUSER_MODEL = True def __init__(self, name: str, test: str, device: str, batch_size: Optional[int] = None, extra_args: List[str] = ...): super().__init__(test, device, batch_size, extra_args) if self.device == "cpu": raise NotImplementedError(f"Model {self.name} does not support CPU device.") if not self.dargs.precision == "fp16": raise NotImplementedError(f"Model {self.name} only supports fp16 precision.") pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(name, torch_dtype=torch.float16, safety_checker=None) pipe.to(self.device) pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) self.pipe = pipe prompt = "turn him into cyborg" # use the same size as the example image # https://raw.githubusercontent.com/timothybrooks/instruct-pix2pix/main/imgs/example.jpg self.example_inputs = (prompt, torch.randn(self.batch_size, 3, 32, 32).to(self.device)) def enable_fp16_half(self): pass def get_module(self): return self.pipe, self.example_inputs def train(self): raise NotImplementedError(f"Train is not implemented for model {self.name}") def eval(self): with torch.no_grad(): images = self.pipe(*self.example_inputs).images return images
import torch
from typing import Tuple


def enable_cudagraph(model: 'torchbenchmark.util.model.BenchmarkModel', example_inputs: Tuple[torch.Tensor]):
    optimizer = model.optimizer
    loss_fn = model.loss_fn
    # warmup
    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        for _ in range(3):
            optimizer.zero_grad(set_to_none=True)
            y_pred = model.model(*example_inputs)
            loss = loss_fn(y_pred, model.example_outputs)
            loss.backward()
            optimizer.step()
    torch.cuda.current_stream().wait_stream(s)
    # capture
    g = torch.cuda.CUDAGraph()
    optimizer.zero_grad(set_to_none=True)
    with torch.cuda.graph(g):
        static_y_pred = model.model(*example_inputs)
        static_loss = loss_fn(static_y_pred, model.example_outputs)
        static_loss.backward()
        optimizer.step()
    model.g = g
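# A hedged, standalone sketch (requires a CUDA device) of the same capture/replay pattern
# that enable_cudagraph above applies to a full training step: warm up on a side stream,
# capture into a CUDAGraph, then copy fresh data into the captured buffers and replay.
# The trivial computation here is an assumption for illustration.
import torch

if torch.cuda.is_available():
    static_x = torch.randn(4, device="cuda")
    static_y = torch.zeros(4, device="cuda")

    s = torch.cuda.Stream()
    s.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s):
        static_y.copy_(static_x * 2)  # warmup
    torch.cuda.current_stream().wait_stream(s)

    g = torch.cuda.CUDAGraph()
    with torch.cuda.graph(g):
        static_y.copy_(static_x * 2)  # capture

    static_x.copy_(torch.arange(4.0, device="cuda"))
    g.replay()
    print(static_y)  # tensor([0., 2., 4., 6.], device='cuda:0')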
import os import torch import typing import torch.optim as optim import torchvision.models as models from contextlib import nullcontext from torchbenchmark.util.model import BenchmarkModel from typing import Tuple, Generator, Optional class TorchVisionModel(BenchmarkModel): # To recognize this is a torchvision model TORCHVISION_MODEL = True # These two variables should be defined by subclasses DEFAULT_TRAIN_BSIZE = None DEFAULT_EVAL_BSIZE = None # Default eval precision on CUDA device is fp16 DEFAULT_EVAL_CUDA_PRECISION = "fp16" # Whether to skip the opt zero grad SKIP_ZERO_GRAD = False def __init__(self, model_name, test, device, batch_size=None, weights=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) torch.backends.cudnn.deterministic = False torch.backends.cudnn.benchmark = True if weights is None: self.model = getattr(models, model_name)(pretrained=True).to(self.device) else: self.model = getattr(models, model_name)(weights=weights).to(self.device) self.example_inputs = (torch.randn((self.batch_size, 3, 224, 224)).to(self.device), ) if test == "train": # compute loss with torch.no_grad(): self.example_outputs = (torch.rand_like(self.model(*self.example_inputs)), ) self.model.train() # setup optimizer and loss_fn # if backend is cudagraph, must set optimizer to be capturable capturable = bool(int(os.getenv("ADAM_CAPTURABLE", 0))) \ if not (hasattr(self.opt_args, 'backend') and self.opt_args.backend == "cudagraph") else True self.opt = optim.Adam(self.model.parameters(), capturable=capturable) self.loss_fn = torch.nn.CrossEntropyLoss() elif test == "eval": self.model.eval() self.amp_context = nullcontext if hasattr(self.opt_args, 'backend') and self.opt_args.backend == "cudagraph": self.real_input = ( torch.rand_like(self.example_inputs[0]), ) self.real_output = ( torch.rand_like(self.example_outputs), ) def get_flops(self): # By default, FlopCountAnalysis count one fused-mult-add (FMA) as one flop. # However, in our context, we count 1 FMA as 2 flops instead of 1. # https://github.com/facebookresearch/fvcore/blob/7a0ef0c0839fa0f5e24d2ef7f5d48712f36e7cd7/fvcore/nn/flop_count.py assert self.test == "eval", "fvcore flops is only available on inference tests, as it doesn't measure backward pass." 
from fvcore.nn import FlopCountAnalysis FLOPS_FMA = 2.0 self.flops = FlopCountAnalysis(self.model, tuple(self.example_inputs)).total() self.flops = self.flops * FLOPS_FMA return self.flops def gen_inputs(self, num_batches:int=1) -> Tuple[Generator, Optional[int]]: def _gen_inputs(): while True: result = [] for _i in range(num_batches): result.append((torch.randn((self.batch_size, 3, 224, 224)).to(self.device),)) if self.dargs.precision == "fp16": result = list(map(lambda x: (x[0].half(), ), result)) yield result return (_gen_inputs(), None) def enable_fp16_half(self): self.model = self.model.half() self.example_inputs = (self.example_inputs[0].half(), ) def get_module(self): return self.model, self.example_inputs def train(self): if self.opt and not self.SKIP_ZERO_GRAD: self.opt.zero_grad() for data, target in zip(self.example_inputs, self.example_outputs): with self.amp_context(): pred = self.model(data) self.loss_fn(pred, target).backward() if self.opt: self.opt.step() def cudagraph_train(self): for data, target in zip(self.real_input, self.real_output): self.example_inputs[0].copy_(data) self.example_outputs.copy_(target) self.g.replay() def eval(self) -> typing.Tuple[torch.Tensor]: with torch.no_grad(): with self.amp_context(): return self.model(*self.example_inputs) def cudagraph_eval(self): for data, target in zip(self.real_input, self.real_output): self.example_inputs[0].copy_(data) self.example_outputs.copy_(target) self.g.replay() break return (self.example_outputs, )
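# A hedged, standalone sketch of the FLOP-counting convention used by get_flops above:
# fvcore counts one fused multiply-add as one flop, so the total is doubled. The resnet18
# model and single 224x224 input are assumptions for illustration (torchvision >= 0.13).
import torch
import torchvision.models as models
from fvcore.nn import FlopCountAnalysis

model = models.resnet18(weights=None).eval()
example_inputs = (torch.randn(1, 3, 224, 224),)

FLOPS_FMA = 2.0
flops = FlopCountAnalysis(model, example_inputs).total() * FLOPS_FMA
print(f"resnet18 forward: {flops / 1e9:.2f} GFLOPs")  # roughly 3.6 GFLOPs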
import argparse


def parse_tb_args(args):
    parser = argparse.ArgumentParser()
    parser.add_argument("--graph_type", choices=["dense", "sparse"], default="dense",
                        help="Use a dense or a sparse input graph")
    args, unknown_args = parser.parse_known_args(args)
    return args, unknown_args
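# A hedged usage sketch for parse_tb_args above: only --graph_type is consumed, everything
# else is passed through, which is how the GNN models below separate their own flag from
# the generic extra_args list.
tb_args, rest = parse_tb_args(["--graph_type", "sparse", "--precision", "fp16"])
print(tb_args.graph_type)  # 'sparse'
print(rest)                # ['--precision', 'fp16']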
import subprocess
import os.path
import sys

CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))


def install_pytorch_geometric():
    pip_install_requirements()


def pip_install_requirements():
    requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_file])
import torch import sys import typing from contextlib import nullcontext from torchbenchmark.util.model import BenchmarkModel import torch_geometric from torch_geometric.nn import GAT, GCN, GraphSAGE, GIN, EdgeCNN from torchbenchmark.tasks import GNN import torch.nn.functional as F from tqdm import tqdm from pathlib import Path from torch_geometric.loader import NeighborLoader from torchbenchmark.util.framework.gnn.config import parse_tb_args from typing import List from torch import Tensor models_dict = { 'gat': GAT, 'gcn': GCN, 'edgecnn': EdgeCNN, 'gin': GIN, 'sage': GraphSAGE, } class GNNModel(BenchmarkModel): # To recognize this is a GNN model GNN_MODEL = True # These two variables should be defined by subclasses DEFAULT_TRAIN_BSIZE = None DEFAULT_EVAL_BSIZE = None # Default eval precision on CUDA device is fp16 DEFAULT_EVAL_CUDA_PRECISION = "fp16" def __init__(self, model_name, test, device, batch_size = None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) self.tb_args, self.extra_args = parse_tb_args(self.extra_args) root = str(Path(__file__).parent.parent.parent.parent) sparse = True if self.tb_args.graph_type == "sparse" else False if sparse: data = torch.load(f'{root}/data/.data/Reddit_minimal/sub_reddit_sparse.pt') else: data = torch.load(f'{root}/data/.data/Reddit_minimal/sub_reddit.pt') print(data) mask = None sampler = None kwargs = { 'batch_size': self.batch_size, 'shuffle': False, 'num_workers': 0, } self.subgraph_loader = NeighborLoader( data, num_neighbors=[-1], # layer-wise inference input_nodes=mask, sampler=sampler, **kwargs, ) Model = models_dict.get(model_name, None) num_layers = 1 hidden_channels = 64 input_channels = data.num_features out_channels = 41 # num_classes if model_name == "gat": num_heads = 2 self.model = Model(input_channels, hidden_channels, num_layers, out_channels, heads=num_heads) else: self.model = Model(input_channels, hidden_channels, num_layers, out_channels) self.model = self.model.to(device) tmp_example_inputs = [] tmp_example_outputs = [] self.num_batch = 0 for batch in self.subgraph_loader: self.num_batch += 1 if hasattr(batch, 'adj_t'): edge_index = batch.adj_t.to(device) else: edge_index = batch.edge_index.to(device) tmp_example_inputs.append({"x": batch.x.to(device), "edge_index": edge_index}) tmp_example_outputs.append(batch.y.to(device)) self.example_inputs = tmp_example_inputs self.example_outputs = tmp_example_outputs if test == "train": self.optimizer = torch.optim.Adam(self.model.parameters(), lr=0.001) self.model.train() elif test == "eval": self.model.eval() self.amp_context = nullcontext def get_module(self): return self.model, self.example_inputs[0] def train(self): for batch_id in range(self.num_batch): self.optimizer.zero_grad() out = self.model(**self.example_inputs[batch_id]) loss = F.cross_entropy(out, self.example_outputs[batch_id]) loss.backward() self.optimizer.step() def eval(self) -> typing.Tuple[torch.Tensor]: with self.amp_context(): xs: List[Tensor] = [] result = self.subgraph_loader.data.x.cpu() for batch_id in range(self.num_batch): x = self.model(**self.example_inputs[batch_id]) xs.append(x.cpu()) result = torch.cat(xs, dim=0) return (result, ) # Variation of GNNModel based off of test/nn/models/test_basic_gnn.py; the # difference is we don't bother with data loading or optimizer step class BasicGNNModel(BenchmarkModel): # This benchmark doesn't seem to have any batch size ALLOW_CUSTOMIZE_BSIZE = False DEFAULT_TRAIN_BSIZE = 1 DEFAULT_EVAL_BSIZE = 1 task = 
GNN.CLASSIFICATION def __init__(self, model_name, test, device, batch_size = None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) Model = models_dict[model_name] self.model = Model(64, 64, num_layers=3).to(device) # Apply some global side effects to library (throw out the compiled # model though, we don't need it yet) torch_geometric.compile(self.model) # Make the model jittable # (TODO: This probably makes us overstate the speedup, as making the # model jittable also probably reduces its performance; but this is # matching the benchmark) self.model = sys.modules["torch_geometric.compile"].to_jittable(self.model) num_nodes, num_edges = 10_000, 200_000 x = torch.randn(num_nodes, 64, device=device) edge_index = torch.randint(num_nodes, (2, num_edges), device=device) self.example_inputs = (x, edge_index) def eval(self): self.model.eval() with torch.no_grad(): return (self.model(*self.example_inputs),) def train(self): # NB: This is a little different than test_basic_gnn.py, as we # are including the cost of randn_like in the overall computation here out = self.model(*self.example_inputs) out_grad = torch.randn_like(out) out.backward(out_grad) def get_module(self): return self.model, self.example_inputs
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py ImageNet Training Script This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet training results with some of the latest networks and training techniques. It favours canonical PyTorch and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. This script was started from an early version of the PyTorch ImageNet example (https://github.com/pytorch/examples/tree/master/imagenet) NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples (https://github.com/NVIDIA/apex/tree/master/examples/imagenet) Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) """ from contextlib import suppress import torch from torch import nn from torch.nn.parallel import DistributedDataParallel as NativeDDP from timm.models import create_model from timm.layers import convert_splitbn_model from timm.optim import create_optimizer_v2, optimizer_kwargs from timm.scheduler import create_scheduler from timm.utils import NativeScaler from timm.loss import JsdCrossEntropy, SoftTargetCrossEntropy, LabelSmoothingCrossEntropy from timm.data import create_loader, resolve_data_config, Mixup, FastCollateMixup, AugMixDataset from .loader import create_fake_imagenet_dataset def timm_instantiate_eval(args): # create eval model eval_model = create_model( args.model_name, pretrained=args.pretrained, num_classes=args.num_classes, in_chans=3, global_pool=args.gp, scriptable=args.torchscript) data_config = resolve_data_config(vars(args), model=eval_model, use_test_size=True, verbose=True) eval_model = eval_model.to(args.device) # enable channels last layout if set if args.channels_last: eval_model = eval_model.to(memory_format=torch.channels_last) if args.num_gpu > 1: eval_model = torch.nn.DataParallel(eval_model, device_ids=list(range(args.num_gpu))) crop_pct = data_config['crop_pct'] # create dataset dataset_eval = create_fake_imagenet_dataset(size=args.eval_num_batch*args.eval_batch_size) loader_eval = create_loader( dataset_eval, input_size=data_config['input_size'], batch_size=args.eval_batch_size, use_prefetcher=args.prefetcher, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, crop_pct=crop_pct, pin_memory=args.pin_mem, tf_preprocessing=args.tf_preprocessing, persistent_workers=False, ) return eval_model, loader_eval def timm_instantiate_train(args): # create train model model = create_model( args.model_name, pretrained=args.pretrained, num_classes=args.num_classes, drop_rate=args.drop, drop_connect_rate=args.drop_connect, # DEPRECATED, use drop_path drop_path_rate=args.drop_path, drop_block_rate=args.drop_block, global_pool=args.gp, bn_tf=args.bn_tf, bn_momentum=args.bn_momentum, bn_eps=args.bn_eps, scriptable=args.torchscript, checkpoint_path=args.initial_checkpoint) data_config = resolve_data_config(vars(args), model=model, verbose=args.local_rank == 0) # setup augmentation batch splits for contrastive loss or split bn num_aug_splits = 0 if args.aug_splits > 0: assert args.aug_splits > 1, 'A split of 1 makes no sense' num_aug_splits = args.aug_splits # enable split bn (separate bn stats per batch-portion) if args.split_bn: assert num_aug_splits > 1 or args.resplit model = convert_splitbn_model(model, 
max(num_aug_splits, 2)) model = model.to(args.device) # enable channels last layout if set if args.channels_last: model = model.to(memory_format=torch.channels_last) # setup synchronized BatchNorm for distributed training if args.distributed and args.sync_bn: assert not args.split_bn model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) if args.local_rank == 0: print( 'Converted model to use Synchronized BatchNorm. WARNING: You may have issues if using ' 'zero initialized BN layers (enabled by default for ResNets) while sync-bn enabled.') # setup optimizer optimizer = create_optimizer_v2(model, **optimizer_kwargs(cfg=args)) # setup automatic mixed-precision (AMP) loss scaling and op casting amp_autocast = suppress # do nothing loss_scaler = None if args.use_amp == 'native': amp_autocast = torch.cuda.amp.autocast loss_scaler = NativeScaler() # setup distributed training if args.distributed: model = NativeDDP(model, device_ids=[args.local_rank], broadcast_buffers=not args.no_ddp_bb) # NOTE: EMA model does not need to be wrapped by DDP # setup learning rate schedule and starting epoch lr_scheduler, _ = create_scheduler(args, optimizer) # create fake imagenet dataset fake_dataset = create_fake_imagenet_dataset(size=args.batch_size * args.train_num_batch) dataset_train = fake_dataset dataset_eval = fake_dataset # setup mixup / cutmix collate_fn = None mixup_fn = None mixup_active = args.mixup > 0 or args.cutmix > 0. or args.cutmix_minmax is not None if mixup_active: mixup_args = dict( mixup_alpha=args.mixup, cutmix_alpha=args.cutmix, cutmix_minmax=args.cutmix_minmax, prob=args.mixup_prob, switch_prob=args.mixup_switch_prob, mode=args.mixup_mode, label_smoothing=args.smoothing, num_classes=args.num_classes) if args.prefetcher: assert not num_aug_splits # collate conflict (need to support deinterleaving in collate mixup) collate_fn = FastCollateMixup(**mixup_args) else: mixup_fn = Mixup(**mixup_args) # wrap dataset in AugMix helper if num_aug_splits > 1: dataset_train = AugMixDataset(dataset_train, num_splits=num_aug_splits) # create data loaders w/ augmentation pipeline train_interpolation = args.train_interpolation if args.no_aug or not train_interpolation: train_interpolation = data_config['interpolation'] loader_train = create_loader( dataset_train, input_size=data_config['input_size'], batch_size=args.batch_size, is_training=True, use_prefetcher=args.prefetcher, no_aug=args.no_aug, re_prob=args.reprob, re_mode=args.remode, re_count=args.recount, re_split=args.resplit, scale=args.scale, ratio=args.ratio, hflip=args.hflip, vflip=args.vflip, color_jitter=args.color_jitter, auto_augment=args.aa, # Not supported by timm 0.4.12 # num_aug_repeats=args.aug_repeats, num_aug_splits=num_aug_splits, interpolation=train_interpolation, mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed, collate_fn=collate_fn, pin_memory=args.pin_mem, use_multi_epochs_loader=args.use_multi_epochs_loader, # Not supported by timm 0.4.12 # worker_seeding=args.worker_seeding, persistent_workers=False, ) loader_validate = create_loader( dataset_eval, input_size=data_config['input_size'], batch_size=args.validation_batch_size or args.batch_size, is_training=False, use_prefetcher=args.prefetcher, interpolation=data_config['interpolation'], mean=data_config['mean'], std=data_config['std'], num_workers=args.workers, distributed=args.distributed, crop_pct=data_config['crop_pct'], pin_memory=args.pin_mem, persistent_workers=False, ) # setup loss function if args.jsd_loss: 
assert num_aug_splits > 1 # JSD only valid with aug splits set train_loss_fn = JsdCrossEntropy(num_splits=num_aug_splits, smoothing=args.smoothing) elif mixup_active: # NOTE: the latest timm package (0.4.12) doesn't support BinaryCrossEntropy # smoothing is handled with mixup target transform which outputs sparse, soft targets # if args.bce_loss: # train_loss_fn = BinaryCrossEntropy(target_threshold=args.bce_target_thresh) # else: train_loss_fn = SoftTargetCrossEntropy() elif args.smoothing: # if args.bce_loss: # train_loss_fn = BinaryCrossEntropy(smoothing=args.smoothing, target_threshold=args.bce_target_thresh) # else: train_loss_fn = LabelSmoothingCrossEntropy(smoothing=args.smoothing) else: train_loss_fn = nn.CrossEntropyLoss() train_loss_fn = train_loss_fn.to(args.device) validate_loss_fn = nn.CrossEntropyLoss().to(args.device) # return all the inputs needed by train and eval loop return model, loader_train, loader_validate, optimizer, \ train_loss_fn, lr_scheduler, amp_autocast, \ loss_scaler, mixup_fn, validate_loss_fn
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py ImageNet Training Script This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet training results with some of the latest networks and training techniques. It favours canonical PyTorch and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. This script was started from an early version of the PyTorch ImageNet example (https://github.com/pytorch/examples/tree/master/imagenet) NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples (https://github.com/NVIDIA/apex/tree/master/examples/imagenet) Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) """ from torchvision.datasets.fakedata import FakeData def create_fake_imagenet_dataset(size): fakedata = FakeData(size=size) return fakedata
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py ImageNet Training Script This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet training results with some of the latest networks and training techniques. It favours canonical PyTorch and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. This script was started from an early version of the PyTorch ImageNet example (https://github.com/pytorch/examples/tree/master/imagenet) NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples (https://github.com/NVIDIA/apex/tree/master/examples/imagenet) Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) """ import torch from collections import OrderedDict from contextlib import suppress from timm.utils import reduce_tensor, dispatch_clip_grad, accuracy from timm.utils import AverageMeter from timm.models.helpers import model_parameters def train_one_epoch( epoch, model, loader, optimizer, loss_fn, args, lr_scheduler=None, saver=None, output_dir=None, amp_autocast=suppress, loss_scaler=None, model_ema=None, mixup_fn=None): if args.mixup_off_epoch and epoch >= args.mixup_off_epoch: if args.prefetcher and loader.mixup_enabled: loader.mixup_enabled = False elif mixup_fn is not None: mixup_fn.mixup_enabled = False second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order # batch_time_m = AverageMeter() # data_time_m = AverageMeter() losses_m = AverageMeter() model.train() # end = time.time() last_idx = len(loader) - 1 num_updates = epoch * len(loader) for batch_idx, (input, target) in zip(range(args.train_num_batch), loader): last_batch = batch_idx == last_idx # data_time_m.update(time.time() - end) if not args.prefetcher and args.device == "cuda": input, target = input.cuda(), target.cuda() if mixup_fn is not None: input, target = mixup_fn(input, target) if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) with amp_autocast(): output = model(input) loss = loss_fn(output, target) if not args.distributed: losses_m.update(loss.item(), input.size(0)) optimizer.zero_grad() if loss_scaler is not None: loss_scaler( loss, optimizer, clip_grad=args.clip_grad, clip_mode=args.clip_mode, parameters=model_parameters(model, exclude_head='agc' in args.clip_mode), create_graph=second_order) else: loss.backward(create_graph=second_order) if args.clip_grad is not None: dispatch_clip_grad( model_parameters(model, exclude_head='agc' in args.clip_mode), value=args.clip_grad, mode=args.clip_mode) optimizer.step() # if model_ema is not None: # model_ema.update(model) if args.device == "cuda": torch.cuda.synchronize() num_updates += 1 # batch_time_m.update(time.time() - end) if last_batch or batch_idx % args.log_interval == 0: lrl = [param_group['lr'] for param_group in optimizer.param_groups] lr = sum(lrl) / len(lrl) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) losses_m.update(reduced_loss.item(), input.size(0)) # if args.local_rank == 0: # _logger.info( # 'Train: {} [{:>4d}/{} ({:>3.0f}%)] ' # 'Loss: {loss.val:#.4g} ({loss.avg:#.3g}) ' # 'Time: {batch_time.val:.3f}s, {rate:>7.2f}/s ' # '({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s) ' # 'LR: {lr:.3e} ' # 'Data: {data_time.val:.3f} ({data_time.avg:.3f})'.format( # epoch, # batch_idx, len(loader), # 
100. * batch_idx / last_idx, # loss=losses_m, # batch_time=batch_time_m, # rate=input.size(0) * args.world_size / batch_time_m.val, # rate_avg=input.size(0) * args.world_size / batch_time_m.avg, # lr=lr, # data_time=data_time_m)) # if args.save_images and output_dir: # torchvision.utils.save_image( # input, # os.path.join(output_dir, 'train-batch-%d.jpg' % batch_idx), # padding=0, # normalize=True) # if saver is not None and args.recovery_interval and ( # last_batch or (batch_idx + 1) % args.recovery_interval == 0): # saver.save_recovery(epoch, batch_idx=batch_idx) if lr_scheduler is not None: lr_scheduler.step_update(num_updates=num_updates, metric=losses_m.avg) # end = time.time() # end for if hasattr(optimizer, 'sync_lookahead'): optimizer.sync_lookahead() # return OrderedDict([('loss', losses_m.avg)]) def validate(model, loader, loss_fn, args, amp_autocast=suppress, log_suffix=''): batch_time_m = AverageMeter() losses_m = AverageMeter() top1_m = AverageMeter() top5_m = AverageMeter() model.eval() # end = time.time() last_idx = len(loader) - 1 with torch.no_grad(): for batch_idx, (input, target) in enumerate(loader): last_batch = batch_idx == last_idx if not args.prefetcher and args.device == "cuda": input = input.cuda() target = target.cuda() if args.channels_last: input = input.contiguous(memory_format=torch.channels_last) with amp_autocast(): output = model(input) if isinstance(output, (tuple, list)): output = output[0] # augmentation reduction reduce_factor = args.tta if reduce_factor > 1: output = output.unfold(0, reduce_factor, reduce_factor).mean(dim=2) target = target[0:target.size(0):reduce_factor] loss = loss_fn(output, target) acc1, acc5 = accuracy(output, target, topk=(1, 5)) if args.distributed: reduced_loss = reduce_tensor(loss.data, args.world_size) acc1 = reduce_tensor(acc1, args.world_size) acc5 = reduce_tensor(acc5, args.world_size) else: reduced_loss = loss.data if args.device == "cuda": torch.cuda.synchronize() losses_m.update(reduced_loss.item(), input.size(0)) top1_m.update(acc1.item(), output.size(0)) top5_m.update(acc5.item(), output.size(0)) # batch_time_m.update(time.time() - end) # end = time.time() # if args.local_rank == 0 and (last_batch or batch_idx % args.log_interval == 0): # log_name = 'Test' + log_suffix # _logger.info( # '{0}: [{1:>4d}/{2}] ' # 'Time: {batch_time.val:.3f} ({batch_time.avg:.3f}) ' # 'Loss: {loss.val:>7.4f} ({loss.avg:>6.4f}) ' # 'Acc@1: {top1.val:>7.4f} ({top1.avg:>7.4f}) ' # 'Acc@5: {top5.val:>7.4f} ({top5.avg:>7.4f})'.format( # log_name, batch_idx, last_idx, batch_time=batch_time_m, # loss=losses_m, top1=top1_m, top5=top5_m)) metrics = OrderedDict([('loss', losses_m.avg), ('top1', top1_m.avg), ('top5', top5_m.avg)]) return metrics
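# A hedged sketch of how the helpers above fit together for a full run.
# build_training_state() is a hypothetical name for the setup function whose
# return statement appears earlier; args is assumed to be the namespace from
# the argument parser defined alongside these helpers, with the extra fields
# the loops read (args.device, args.train_num_batch, args.prefetcher,
# args.use_amp) filled in by the caller.
model, loader_train, loader_validate, optimizer, \
    train_loss_fn, lr_scheduler, amp_autocast, \
    loss_scaler, mixup_fn, validate_loss_fn = build_training_state(args)

for epoch in range(args.epochs):
    train_one_epoch(
        epoch, model, loader_train, optimizer, train_loss_fn, args,
        lr_scheduler=lr_scheduler, amp_autocast=amp_autocast,
        loss_scaler=loss_scaler, mixup_fn=mixup_fn)
    eval_metrics = validate(model, loader_validate, validate_loss_fn, args,
                            amp_autocast=amp_autocast)
    if lr_scheduler is not None:
        # timm schedulers also step once per epoch with the chosen eval metric
        lr_scheduler.step(epoch + 1, eval_metrics[args.eval_metric])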
""" Hacked from https://github.com/rwightman/pytorch-image-models/blob/f7d210d759beb00a3d0834a3ce2d93f6e17f3d38/train.py ImageNet Training Script This is intended to be a lean and easily modifiable ImageNet training script that reproduces ImageNet training results with some of the latest networks and training techniques. It favours canonical PyTorch and standard Python style over trying to be able to 'do it all.' That said, it offers quite a few speed and training result improvements over the usual PyTorch example scripts. Repurpose as you see fit. This script was started from an early version of the PyTorch ImageNet example (https://github.com/pytorch/examples/tree/master/imagenet) NVIDIA CUDA specific speedups adopted from NVIDIA Apex examples (https://github.com/NVIDIA/apex/tree/master/examples/imagenet) Hacked together by / Copyright 2020 Ross Wightman (https://github.com/rwightman) """ import os import yaml import torch import argparse def setup_args_distributed(args): args.distributed = False if 'WORLD_SIZE' in os.environ: args.distributed = int(os.environ['WORLD_SIZE']) > 1 args.world_size = 1 args.rank = 0 # global rank if args.distributed: args.device = 'cuda:%d' % args.local_rank torch.cuda.set_device(args.local_rank) torch.distributed.init_process_group(backend='nccl', init_method='env://') args.world_size = torch.distributed.get_world_size() args.rank = torch.distributed.get_rank() assert args.rank >= 0 return args def get_args(config_string="", config_file=None): def _parse_args(): # Do we have a config file to parse? if config_file: with open(config_file, 'r') as f: cfg = yaml.safe_load(f) parser.set_defaults(**cfg) # The main arg parser parses the rest of the args, the usual # defaults will have been overridden if config file specified. 
args = parser.parse_args(config_string) # Cache the args as a text string to save them in the output dir later args_text = yaml.safe_dump(args.__dict__, default_flow_style=False) return args, args_text # The first arg parser parses out only the --config argument, this argument is used to # load a yaml file containing key-values that override the defaults for the main parser below parser = argparse.ArgumentParser(description='Training Config', add_help=False) parser.add_argument('-c', '--config', default='', type=str, metavar='FILE', help='YAML config file specifying default arguments') parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') # Dataset parameters # parser.add_argument('data_dir', metavar='DIR', # help='path to dataset') parser.add_argument('--dataset', '-d', metavar='NAME', default='', help='dataset type (default: ImageFolder/ImageTar if empty)') parser.add_argument('--train-split', metavar='NAME', default='train', help='dataset train split (default: train)') parser.add_argument('--val-split', metavar='NAME', default='validation', help='dataset validation split (default: validation)') parser.add_argument('--dataset-download', action='store_true', default=False, help='Allow download of dataset for torch/ and tfds/ datasets that support it.') parser.add_argument('--class-map', default='', type=str, metavar='FILENAME', help='path to class to idx mapping file (default: "")') # Model parameters parser.add_argument('--model', default='resnet50', type=str, metavar='MODEL', help='Name of model to train (default: "resnet50"') parser.add_argument('--pretrained', action='store_true', default=False, help='Start with pretrained version of specified network (if avail)') parser.add_argument('--initial-checkpoint', default='', type=str, metavar='PATH', help='Initialize model from this checkpoint (default: none)') parser.add_argument('--resume', default='', type=str, metavar='PATH', help='Resume full model and optimizer state from checkpoint (default: none)') parser.add_argument('--no-resume-opt', action='store_true', default=False, help='prevent resume of optimizer state when resuming model') parser.add_argument('--num-classes', type=int, default=None, metavar='N', help='number of label classes (Model default if None)') parser.add_argument('--gp', default=None, type=str, metavar='POOL', help='Global pool type, one of (fast, avg, max, avgmax, avgmaxc). Model default if None.') parser.add_argument('--img-size', type=int, default=None, metavar='N', help='Image patch size (default: None => model default)') parser.add_argument('--input-size', default=None, nargs=3, type=int, metavar='N N N', help='Input all image dimensions (d h w, e.g. 
--input-size 3 224 224), uses model default if empty') parser.add_argument('--crop-pct', default=None, type=float, metavar='N', help='Input image center crop percent (for validation only)') parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN', help='Override mean pixel value of dataset') parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD', help='Override std deviation of of dataset') parser.add_argument('--interpolation', default='', type=str, metavar='NAME', help='Image resize interpolation type (overrides model)') parser.add_argument('--batch-size', type=int, default=128, metavar='N', help='input batch size for training (default: 128)') parser.add_argument('-vb', '--validation-batch-size', type=int, default=None, metavar='N', help='validation batch size override (default: None)') # Optimizer parameters parser.add_argument('--opt', default='sgd', type=str, metavar='OPTIMIZER', help='Optimizer (default: "sgd"') parser.add_argument('--opt-eps', default=None, type=float, metavar='EPSILON', help='Optimizer Epsilon (default: None, use opt default)') parser.add_argument('--opt-betas', default=None, type=float, nargs='+', metavar='BETA', help='Optimizer Betas (default: None, use opt default)') parser.add_argument('--momentum', type=float, default=0.9, metavar='M', help='Optimizer momentum (default: 0.9)') parser.add_argument('--weight-decay', type=float, default=2e-5, help='weight decay (default: 2e-5)') parser.add_argument('--clip-grad', type=float, default=None, metavar='NORM', help='Clip gradient norm (default: None, no clipping)') parser.add_argument('--clip-mode', type=str, default='norm', help='Gradient clipping mode. One of ("norm", "value", "agc")') # Learning rate schedule parameters parser.add_argument('--sched', default='cosine', type=str, metavar='SCHEDULER', help='LR scheduler (default: "step"') parser.add_argument('--lr', type=float, default=0.05, metavar='LR', help='learning rate (default: 0.05)') parser.add_argument('--lr-noise', type=float, nargs='+', default=None, metavar='pct, pct', help='learning rate noise on/off epoch percentages') parser.add_argument('--lr-noise-pct', type=float, default=0.67, metavar='PERCENT', help='learning rate noise limit percent (default: 0.67)') parser.add_argument('--lr-noise-std', type=float, default=1.0, metavar='STDDEV', help='learning rate noise std-dev (default: 1.0)') parser.add_argument('--lr-cycle-mul', type=float, default=1.0, metavar='MULT', help='learning rate cycle len multiplier (default: 1.0)') parser.add_argument('--lr-cycle-decay', type=float, default=0.5, metavar='MULT', help='amount to decay each learning rate cycle (default: 0.5)') parser.add_argument('--lr-cycle-limit', type=int, default=1, metavar='N', help='learning rate cycle limit, cycles enabled if > 1') parser.add_argument('--lr-k-decay', type=float, default=1.0, help='learning rate k-decay for cosine/poly (default: 1.0)') parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='LR', help='warmup learning rate (default: 0.0001)') parser.add_argument('--min-lr', type=float, default=1e-6, metavar='LR', help='lower lr bound for cyclic schedulers that hit 0 (1e-5)') parser.add_argument('--epochs', type=int, default=300, metavar='N', help='number of epochs to train (default: 300)') parser.add_argument('--epoch-repeats', type=float, default=0., metavar='N', help='epoch repeat multiplier (number of times to repeat dataset epoch per train epoch).') parser.add_argument('--start-epoch', default=None, type=int, 
metavar='N', help='manual epoch number (useful on restarts)') parser.add_argument('--decay-epochs', type=float, default=100, metavar='N', help='epoch interval to decay LR') parser.add_argument('--warmup-epochs', type=int, default=3, metavar='N', help='epochs to warmup LR, if scheduler supports') parser.add_argument('--cooldown-epochs', type=int, default=10, metavar='N', help='epochs to cooldown LR at min_lr, after cyclic schedule ends') parser.add_argument('--patience-epochs', type=int, default=10, metavar='N', help='patience epochs for Plateau LR scheduler (default: 10') parser.add_argument('--decay-rate', '--dr', type=float, default=0.1, metavar='RATE', help='LR decay rate (default: 0.1)') # Augmentation & regularization parameters parser.add_argument('--no-aug', action='store_true', default=False, help='Disable all training augmentation, override other train aug args') parser.add_argument('--scale', type=float, nargs='+', default=[0.08, 1.0], metavar='PCT', help='Random resize scale (default: 0.08 1.0)') parser.add_argument('--ratio', type=float, nargs='+', default=[3./4., 4./3.], metavar='RATIO', help='Random resize aspect ratio (default: 0.75 1.33)') parser.add_argument('--hflip', type=float, default=0.5, help='Horizontal flip training aug probability') parser.add_argument('--vflip', type=float, default=0., help='Vertical flip training aug probability') parser.add_argument('--color-jitter', type=float, default=0.4, metavar='PCT', help='Color jitter factor (default: 0.4)') parser.add_argument('--aa', type=str, default=None, metavar='NAME', help='Use AutoAugment policy. "v0" or "original". (default: None)'), parser.add_argument('--aug-repeats', type=int, default=0, help='Number of augmentation repetitions (distributed training only) (default: 0)') parser.add_argument('--aug-splits', type=int, default=0, help='Number of augmentation splits (default: 0, valid: 0 or >=2)') parser.add_argument('--jsd-loss', action='store_true', default=False, help='Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.') parser.add_argument('--bce-loss', action='store_true', default=False, help='Enable BCE loss w/ Mixup/CutMix use.') parser.add_argument('--bce-target-thresh', type=float, default=None, help='Threshold for binarizing softened BCE targets (default: None, disabled)') parser.add_argument('--reprob', type=float, default=0., metavar='PCT', help='Random erase prob (default: 0.)') parser.add_argument('--remode', type=str, default='pixel', help='Random erase mode (default: "pixel")') parser.add_argument('--recount', type=int, default=1, help='Random erase count (default: 1)') parser.add_argument('--resplit', action='store_true', default=False, help='Do not random erase first (clean) augmentation split') parser.add_argument('--mixup', type=float, default=0.0, help='mixup alpha, mixup enabled if > 0. (default: 0.)') parser.add_argument('--cutmix', type=float, default=0.0, help='cutmix alpha, cutmix enabled if > 0. (default: 0.)') parser.add_argument('--cutmix-minmax', type=float, nargs='+', default=None, help='cutmix min/max ratio, overrides alpha and enables cutmix if set (default: None)') parser.add_argument('--mixup-prob', type=float, default=1.0, help='Probability of performing mixup or cutmix when either/both is enabled') parser.add_argument('--mixup-switch-prob', type=float, default=0.5, help='Probability of switching to cutmix when both mixup and cutmix enabled') parser.add_argument('--mixup-mode', type=str, default='batch', help='How to apply mixup/cutmix params. 
Per "batch", "pair", or "elem"') parser.add_argument('--mixup-off-epoch', default=0, type=int, metavar='N', help='Turn off mixup after this epoch, disabled if 0 (default: 0)') parser.add_argument('--smoothing', type=float, default=0.1, help='Label smoothing (default: 0.1)') parser.add_argument('--train-interpolation', type=str, default='random', help='Training interpolation (random, bilinear, bicubic default: "random")') parser.add_argument('--drop', type=float, default=0.0, metavar='PCT', help='Dropout rate (default: 0.)') parser.add_argument('--drop-connect', type=float, default=None, metavar='PCT', help='Drop connect rate, DEPRECATED, use drop-path (default: None)') parser.add_argument('--drop-path', type=float, default=None, metavar='PCT', help='Drop path rate (default: None)') parser.add_argument('--drop-block', type=float, default=None, metavar='PCT', help='Drop block rate (default: None)') # Batch norm parameters (only works with gen_efficientnet based models currently) parser.add_argument('--bn-tf', action='store_true', default=False, help='Use Tensorflow BatchNorm defaults for models that support it (default: False)') parser.add_argument('--bn-momentum', type=float, default=None, help='BatchNorm momentum override (if not None)') parser.add_argument('--bn-eps', type=float, default=None, help='BatchNorm epsilon override (if not None)') parser.add_argument('--sync-bn', action='store_true', help='Enable NVIDIA Apex or Torch synchronized BatchNorm.') parser.add_argument('--dist-bn', type=str, default='reduce', help='Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")') parser.add_argument('--split-bn', action='store_true', help='Enable separate BN layers per augmentation split.') # Model Exponential Moving Average parser.add_argument('--model-ema', action='store_true', default=False, help='Enable tracking moving average of model weights') parser.add_argument('--model-ema-force-cpu', action='store_true', default=False, help='Force ema to be tracked on CPU, rank=0 node only. 
Disables EMA validation.') parser.add_argument('--model-ema-decay', type=float, default=0.9998, help='decay factor for model weights moving average (default: 0.9998)') # Misc parser.add_argument('--seed', type=int, default=42, metavar='S', help='random seed (default: 42)') parser.add_argument('--worker-seeding', type=str, default='all', help='worker seed mode (default: all)') parser.add_argument('--log-interval', type=int, default=50, metavar='N', help='how many batches to wait before logging training status') parser.add_argument('--recovery-interval', type=int, default=0, metavar='N', help='how many batches to wait before writing recovery checkpoint') parser.add_argument('--checkpoint-hist', type=int, default=10, metavar='N', help='number of checkpoints to keep (default: 10)') parser.add_argument('-j', '--workers', type=int, default=0, metavar='N', help='how many training processes to use (default: 0)') parser.add_argument('--save-images', action='store_true', default=False, help='save images of input bathes every log interval for debugging') parser.add_argument('--amp', action='store_true', default=False, help='use NVIDIA Apex AMP or Native AMP for mixed precision training') parser.add_argument('--apex-amp', action='store_true', default=False, help='Use NVIDIA Apex AMP mixed precision') parser.add_argument('--native-amp', action='store_true', default=False, help='Use Native Torch AMP mixed precision') parser.add_argument('--no-ddp-bb', action='store_true', default=False, help='Force broadcast buffers for native DDP to off.') parser.add_argument('--channels-last', action='store_true', default=False, help='Use channels_last memory layout') parser.add_argument('--pin-mem', action='store_true', default=False, help='Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.') parser.add_argument('--no-prefetcher', action='store_true', default=False, help='disable fast prefetcher') parser.add_argument('--output', default='', type=str, metavar='PATH', help='path to output folder (default: none, current dir)') parser.add_argument('--experiment', default='', type=str, metavar='NAME', help='name of train experiment, name of sub-folder for output') parser.add_argument('--eval-metric', default='top1', type=str, metavar='EVAL_METRIC', help='Best metric (default: "top1"') parser.add_argument('--tta', type=int, default=0, metavar='N', help='Test/inference time augmentation (oversampling) factor. 0=None (default: 0)') parser.add_argument("--local_rank", default=0, type=int) parser.add_argument('--use-multi-epochs-loader', action='store_true', default=False, help='use the multi-epochs-loader to save time at the beginning of every epoch') parser.add_argument('--torchscript', dest='torchscript', action='store_true', help='convert model torchscript for inference') parser.add_argument('--log-wandb', action='store_true', default=False, help='log training and validation metrics to wandb') # Inference args parser.add_argument('--eval-batch-size', type=int, default=256, metavar='N', help='input batch size for inference (default: 256)') parser.add_argument('--num-gpu', type=int, default=1, help='Number of GPUS to use') parser.add_argument('--tf-preprocessing', action='store_true', default=False, help='Use Tensorflow preprocessing pipeline (require CPU TF installed') args, _args_text = _parse_args() return args
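# A hedged example of driving get_args() programmatically. config_string is
# forwarded to argparse's parse_args(), so it should be a list of CLI tokens
# (the default "" only works because an empty string yields no tokens).
# args.device is not defined by this parser and is an assumption here;
# likewise args.prefetcher mirrors what the upstream timm script derives from
# --no-prefetcher.
args = get_args(config_string=["--model", "resnet50", "--batch-size", "8", "--epochs", "1"])
args.device = "cpu"                       # assumption: single-process CPU run
args.prefetcher = not args.no_prefetcher
args = setup_args_distributed(args)       # sets distributed / world_size / rank
print(args.model, args.batch_size, args.distributed)   # resnet50 8 False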
import torch.nn as nn
import dataclasses

from timm.optim import create_optimizer


@dataclasses.dataclass
class OptimizerOption:
    lr: float
    opt: str
    weight_decay: float
    momentum: float


class TimmConfig:
    def __init__(self, model, device):
        self.model = model
        self.device = device
        # Configurations
        self.num_classes = self.model.num_classes
        self.loss = nn.CrossEntropyLoss().to(self.device)
        self.target_shape = tuple()
        self.input_size = self.model.default_cfg["input_size"]
        # Default optimizer configurations borrowed from:
        # https://github.com/rwightman/pytorch-image-models/blob/779107b693010934ac87c8cecbeb65796e218488/timm/optim/optim_factory.py#L78
        opt_args = OptimizerOption(lr=1e-4, opt="sgd", weight_decay=0.0001, momentum=0.9)
        self.optimizer = create_optimizer(opt_args, self.model)
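# A minimal usage sketch for TimmConfig on its own (assumes the timm package
# is installed; "resnet18" is an arbitrary illustrative model choice).
import timm
import torch

model = timm.create_model("resnet18", pretrained=False)
cfg = TimmConfig(model=model, device="cpu")
inputs = torch.randn((2,) + cfg.input_size)
targets = torch.randint(0, cfg.num_classes, (2,))
loss = cfg.loss(model(inputs), targets)
loss.backward()
cfg.optimizer.step()
print(cfg.input_size, float(loss))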
from contextlib import suppress import torch import typing import timm from torchbenchmark.util.model import BenchmarkModel from .timm_config import TimmConfig from typing import Generator, Tuple, Optional class TimmModel(BenchmarkModel): # To recognize this is a timm model TIMM_MODEL = True # These two variables should be defined by subclasses DEFAULT_TRAIN_BSIZE = None DEFAULT_EVAL_BSIZE = None # Default eval precision on CUDA device is fp16 DEFAULT_EVAL_CUDA_PRECISION = "fp16" def __init__(self, model_name, test, device, batch_size=None, extra_args=[]): super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args) torch.backends.cudnn.deterministic = False torch.backends.cudnn.benchmark = True self.model = timm.create_model(model_name, pretrained=False, scriptable=True) self.cfg = TimmConfig(model = self.model, device = device) self.example_inputs = self._gen_input(self.batch_size) self.model.to( device=self.device ) if test == "train": self.model.train() elif test == "eval": self.model.eval() self.amp_context = suppress def gen_inputs(self, num_batches:int=1) -> Tuple[Generator, Optional[int]]: def _gen_inputs(): while True: result = [] for _i in range(num_batches): result.append((self._gen_input(self.batch_size), )) if self.dargs.precision == "fp16": result = list(map(lambda x: (x[0].half(), ), result)) yield result return (_gen_inputs(), None) def _gen_input(self, batch_size): return torch.randn((batch_size,) + self.cfg.input_size, device=self.device) def _gen_target(self, batch_size): return torch.empty( (batch_size,) + self.cfg.target_shape, device=self.device, dtype=torch.long).random_(self.cfg.num_classes) def _step_train(self): self.cfg.optimizer.zero_grad() with self.amp_context(): output = self.model(self.example_inputs) if isinstance(output, tuple): output = output[0] target = self._gen_target(output.shape[0]) self.cfg.loss(output, target).backward() self.cfg.optimizer.step() def _step_eval(self): output = self.model(self.example_inputs) return output def get_optimizer(self): return self.cfg.optimizer def set_optimizer(self, optimizer) -> None: self.cfg.optimizer = optimizer def enable_fp16_half(self): self.model = self.model.half() self.example_inputs = self.example_inputs.half() def enable_channels_last(self): self.model = self.model.to(memory_format=torch.channels_last) self.example_inputs = self.example_inputs.contiguous(memory_format=torch.channels_last) def get_module(self): return self.model, (self.example_inputs,) def train(self): self._step_train() def eval(self) -> typing.Tuple[torch.Tensor]: with torch.no_grad(): with self.amp_context(): out = self._step_eval() return (out, )
from datasets import load_dataset def prep_dataset(hf_args): # Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below) # or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/ # (the dataset will be downloaded automatically from the datasets Hub). # # For CSV/JSON files, this script will use the column called 'text' or the first column if no column called # 'text' is found. You can easily tweak this behavior (see below). # # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if hf_args.dataset_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset(hf_args.dataset_name, hf_args.dataset_config_name) else: data_files = {} if hf_args.train_file is not None: data_files["train"] = hf_args.train_file if hf_args.validation_file is not None: data_files["validation"] = hf_args.validation_file extension = hf_args.train_file.split(".")[-1] raw_datasets = load_dataset(extension, data_files=data_files) # See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at # https://huggingface.co/docs/datasets/loading_datasets.html. return raw_datasets def preprocess_dataset(hf_args, raw_datasets, tokenizer, prefix, accelerator): # Preprocessing the datasets. # First we tokenize all the texts. column_names = raw_datasets["train"].column_names # Get the language codes for input/target. source_lang = hf_args.source_lang.split("_")[0] target_lang = hf_args.target_lang.split("_")[0] padding = "max_length" if hf_args.pad_to_max_length else False # Temporarily set max_target_length for training. max_target_length = hf_args.max_target_length padding = "max_length" if hf_args.pad_to_max_length else False def preprocess_function(examples): inputs = [ex[source_lang] for ex in examples["translation"]] targets = [ex[target_lang] for ex in examples["translation"]] inputs = [prefix + inp for inp in inputs] model_inputs = tokenizer(inputs, max_length=hf_args.max_source_length, padding=padding, truncation=True) # Setup the tokenizer for targets with tokenizer.as_target_tokenizer(): labels = tokenizer(targets, max_length=max_target_length, padding=padding, truncation=True) # If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore # padding in the loss. if padding == "max_length" and hf_args.ignore_pad_token_for_loss: labels["input_ids"] = [ [(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"] ] model_inputs["labels"] = labels["input_ids"] return model_inputs with accelerator.main_process_first(): processed_datasets = raw_datasets.map( preprocess_function, batched=True, num_proc=hf_args.preprocessing_num_workers, remove_columns=column_names, load_from_cache_file=not hf_args.overwrite_cache, desc="Running tokenizer on dataset", ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation"] return train_dataset, eval_dataset
""" Hacked from https://github.com/huggingface/transformers/blob/main/examples/pytorch/translation/run_translation_no_trainer.py It runs HuggingFace transformer models translation on WMT16 """ import argparse from transformers import SchedulerType task_to_keys = { # hf args to include for different tasks # english to romanian "wmt-en-ro": [ "--dataset_name", "wmt16", "--dataset_config_name", "ro-en", "--source_lang", "en", "--target_lang", "ro", ], # english to german "wmt-en-de": [ "--dataset_name", "stas/wmt14-en-de-pre-processed", "--source_lang", "en", "--target_lang", "de", ], } def parse_torchbench_args(extra_args): parser = argparse.ArgumentParser() parser.add_argument("--task_name", default="wmt-en-ro", choices=task_to_keys.keys(), help="Name of task to run") # validate in train by default parser.add_argument("--validate_in_train", action="store_false", help="Validate result in train") # use fp16 mixed precision by default parser.add_argument("--fp16", default="amp", choices=["amp", "no"], help="Enable mixed precision") parser.add_argument( "--distributed", default="none", choices=["ddp", "fsdp", "deepspeed", "none"], help="distributed training paradigm, by default using DDP" ) tb_args = parser.parse_args(extra_args) return tb_args def parse_args(in_args): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--dataset_name", type=str, default=None, help="The name of the dataset to use (via the datasets library).", ) parser.add_argument( "--predict_with_generate", type=bool, default=True, help="", ) parser.add_argument( "--dataset_config_name", type=str, default=None, help="The configuration name of the dataset to use (via the datasets library).", ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--num_beams", type=int, default=None, help=( "Number of beams to use for evaluation. This argument will be " "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--max_source_length", type=int, default=1024, help=( "The maximum total input sequence length after " "tokenization.Sequences longer than this will be truncated, sequences shorter will be padded." ), ) parser.add_argument( "--max_target_length", type=int, default=128, help=( "The maximum total sequence length for target text after " "tokenization. Sequences longer than this will be truncated, sequences shorter will be padded." "during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--val_max_target_length", type=int, default=None, help=( "The maximum total sequence length for validation " "target text after tokenization.Sequences longer than this will be truncated, sequences shorter will be " "padded. Will default to `max_target_length`.This argument is also used to override the ``max_length`` " "param of ``model.generate``, which is used during ``evaluate`` and ``predict``." ), ) parser.add_argument( "--pad_to_max_length", type=bool, default=False, help=( "Whether to pad all samples to model maximum sentence " "length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More" "efficient on GPU but very bad for TPU." ), ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." 
) parser.add_argument( "--ignore_pad_token_for_loss", type=bool, default=True, help="Whether to ignore the tokens corresponding to padded labels in the loss computation or not.", ) parser.add_argument("--source_lang", type=str, default=None, help="Source language id for translation.") parser.add_argument("--target_lang", type=str, default=None, help="Target language id for translation.") parser.add_argument( "--source_prefix", type=str, default=None, help="A prefix to add before every source text (useful for T5 models).", ) parser.add_argument( "--preprocessing_num_workers", type=int, default=None, help="The number of processes to use for the preprocessing.", ) parser.add_argument( "--overwrite_cache", type=bool, default=None, help="Overwrite the cached training and evaluation sets" ) # seems to be unused, commenting out # parser.add_argument( # "--max_length", # type=int, # default=128, # help=( # "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," # " sequences shorter will be padded if `--pad_to_max_lengh` is passed." # ), # ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, ) parser.add_argument( "--config_name", type=str, default=None, help="Pretrained config name or path if not the same as model_name", ) parser.add_argument( "--tokenizer_name", type=str, default=None, help="Pretrained tokenizer name or path if not the same as model_name", ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." 
) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument( "--model_type", type=str, default=None, help="Model type to use if training from scratch.", # choices=MODEL_TYPES, # unused, commented out for simplicity ) parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") parser.add_argument( "--checkpointing_steps", type=str, default=None, help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.", ) parser.add_argument( "--resume_from_checkpoint", type=str, default=None, help="If the training should continue from a checkpoint folder.", ) parser.add_argument( "--with_tracking", action="store_true", help="Whether to enable experiment trackers for logging.", ) parser.add_argument( "--report_to", type=str, default="all", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,' ' `"wandb"` and `"comet_ml"`. Use `"all"` (default) to report to all integrations.' "Only applicable when `--with_tracking` is passed." ), ) args = parser.parse_args(in_args) # Sanity checks if args.dataset_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." if args.push_to_hub: assert args.output_dir is not None, "Need an `output_dir` to create a repo when `--push_to_hub` is passed." return args
from datasets import load_dataset from transformers import PretrainedConfig from .args import task_to_keys def preprocess_dataset(hf_args, config, model, tokenizer, raw_datasets, num_labels, label_list, is_regression, accelerator): # Preprocessing the raw_datasets if hf_args.task_name is not None: sentence1_key, sentence2_key = task_to_keys[hf_args.task_name] else: # Again, we try to have some nice defaults but don't hesitate to tweak to your use case. non_label_column_names = [name for name in raw_datasets["train"].column_names if name != "label"] if "sentence1" in non_label_column_names and "sentence2" in non_label_column_names: sentence1_key, sentence2_key = "sentence1", "sentence2" else: if len(non_label_column_names) >= 2: sentence1_key, sentence2_key = non_label_column_names[:2] else: sentence1_key, sentence2_key = non_label_column_names[0], None # Some models have set the order of the labels to use, so let's make sure we do use it. label_to_id = None if ( model.config.label2id != PretrainedConfig(num_labels=num_labels).label2id and hf_args.task_name is not None and not is_regression ): # Some have all caps in their config, some don't. label_name_to_id = {k.lower(): v for k, v in model.config.label2id.items()} if list(sorted(label_name_to_id.keys())) == list(sorted(label_list)): label_to_id = {i: int(label_name_to_id[label_list[i]]) for i in range(num_labels)} else: # logger.warning( # "Your model seems to have been trained with labels, but they don't match the dataset: ", # f"model labels: {list(sorted(label_name_to_id.keys()))}, dataset labels: {list(sorted(label_list))}." # "\nIgnoring the model labels as a result.", # ) pass elif hf_args.task_name is None and not is_regression: label_to_id = {v: i for i, v in enumerate(label_list)} if label_to_id is not None: model.config.label2id = label_to_id model.config.id2label = {id: label for label, id in config.label2id.items()} elif hf_args.task_name is not None and not is_regression: model.config.label2id = {l: i for i, l in enumerate(label_list)} model.config.id2label = {id: label for label, id in config.label2id.items()} padding = "max_length" if hf_args.pad_to_max_length else False def preprocess_function(examples): # Tokenize the texts texts = ( (examples[sentence1_key],) if sentence2_key is None else (examples[sentence1_key], examples[sentence2_key]) ) result = tokenizer(*texts, padding=padding, max_length=hf_args.max_length, truncation=True) if "label" in examples: if label_to_id is not None: # Map labels to IDs (not necessary for GLUE tasks) result["labels"] = [label_to_id[l] for l in examples["label"]] else: # In all cases, rename the column to labels because the model will expect that. result["labels"] = examples["label"] return result with accelerator.main_process_first(): processed_datasets = raw_datasets.map( preprocess_function, batched=True, remove_columns=raw_datasets["train"].column_names, ) train_dataset = processed_datasets["train"] eval_dataset = processed_datasets["validation_matched" if hf_args.task_name == "mnli" else "validation"] if hf_args.task_name == "mnli": mnli_eval_dataset = raw_datasets["validation_mismatched"] else: mnli_eval_dataset = None return train_dataset, eval_dataset, mnli_eval_dataset def prep_dataset(hf_args): # Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below) # or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub). 
# For CSV/JSON files, this script will use as labels the column called 'label' and as pair of sentences the # sentences in columns called 'sentence1' and 'sentence2' if such column exists or the first two columns not named # label if at least two columns are provided. # If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this # single column. You can easily tweak this behavior (see below) # In distributed training, the load_dataset function guarantee that only one local process can concurrently # download the dataset. if hf_args.task_name is not None: # Downloading and loading a dataset from the hub. raw_datasets = load_dataset("glue", hf_args.task_name) else: # Loading a dataset from your local files. # CSV/JSON training and evaluation files are needed. data_files = {"train": hf_args.train_file, "validation": hf_args.validation_file} # Get the test dataset: you can provide your own CSV/JSON test file (see below) # when you use `do_predict` without specifying a GLUE benchmark task. if hf_args.do_predict: if hf_args.test_file is not None: train_extension = hf_args.train_file.split(".")[-1] test_extension = hf_args.test_file.split(".")[-1] assert ( test_extension == train_extension ), "`test_file` should have the same extension (csv or json) as `train_file`." data_files["test"] = hf_args.test_file else: raise ValueError("Need either a GLUE task or a test file for `do_predict`.") # for key in data_files.keys(): # logger.info(f"load a local file for {key}: {data_files[key]}") if hf_args.train_file.endswith(".csv"): # Loading a dataset from local csv files raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=hf_args.cache_dir) else: # Loading a dataset from local json files raw_datasets = load_dataset("json", data_files=data_files, cache_dir=hf_args.cache_dir) # See more about loading any type of standard or custom dataset at # https://huggingface.co/docs/datasets/loading_datasets.html. return raw_datasets def prep_labels(hf_args, raw_datasets): # Labels if hf_args.task_name is not None: is_regression = hf_args.task_name == "stsb" if not is_regression: label_list = raw_datasets["train"].features["label"].names num_labels = len(label_list) else: num_labels = 1 else: # Trying to have good defaults here, don't hesitate to tweak to your needs. is_regression = raw_datasets["train"].features["label"].dtype in ["float32", "float64"] if is_regression: num_labels = 1 else: # A useful fast method: # https://huggingface.co/docs/datasets/package_reference/main_classes.html#datasets.Dataset.unique label_list = raw_datasets["train"].unique("label") label_list.sort() # Let's sort it for determinism num_labels = len(label_list) return num_labels, label_list, is_regression
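# A hedged usage sketch for the GLUE preparation helpers above (assumes the
# "cola" task and a bert-base-cased checkpoint; parse_args lives in the same
# ".args" module already imported at the top of this file).
from accelerate import Accelerator
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer

from .args import parse_args

hf_args = parse_args(["--task_name", "cola", "--model_name_or_path", "bert-base-cased"])
accelerator = Accelerator()
raw_datasets = prep_dataset(hf_args)
num_labels, label_list, is_regression = prep_labels(hf_args, raw_datasets)
config = AutoConfig.from_pretrained(hf_args.model_name_or_path,
                                    num_labels=num_labels, finetuning_task=hf_args.task_name)
tokenizer = AutoTokenizer.from_pretrained(hf_args.model_name_or_path,
                                          use_fast=not hf_args.use_slow_tokenizer)
model = AutoModelForSequenceClassification.from_pretrained(hf_args.model_name_or_path, config=config)
train_dataset, eval_dataset, _ = preprocess_dataset(
    hf_args, config, model, tokenizer, raw_datasets,
    num_labels, label_list, is_regression, accelerator)
print(len(train_dataset), len(eval_dataset))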
""" Hacked from https://github.com/huggingface/transformers/blob/6fc38adff272ea3148e05888edf67eeb00170453/examples/pytorch/text-classification/run_glue.py It runs HuggingFace transformer models on the GLUE benchmark """ import argparse from transformers import SchedulerType task_to_keys = { "cola": ("sentence", None), "mnli": ("premise", "hypothesis"), "mrpc": ("sentence1", "sentence2"), "qnli": ("question", "sentence"), "qqp": ("question1", "question2"), "rte": ("sentence1", "sentence2"), "sst2": ("sentence", None), "stsb": ("sentence1", "sentence2"), "wnli": ("sentence1", "sentence2"), } def parse_torchbench_args(extra_args): parser = argparse.ArgumentParser() parser.add_argument("--task_name", default="cola", choices=task_to_keys.keys(), help="Name of task to run") # validate in train by default parser.add_argument("--validate_in_train", action="store_false", help="Validate result in train") # use fp16 mixed precision by default parser.add_argument("--fp16", default="amp", choices=["amp", "no"], help="Enable mixed precision") parser.add_argument( "--distributed", default="none", choices=["ddp", "fsdp", "deepspeed", "none"], help="distributed training paradigm, by default using DDP" ) tb_args = parser.parse_args(extra_args) return tb_args def parse_args(in_args): parser = argparse.ArgumentParser(description="Finetune a transformers model on a text classification task") parser.add_argument( "--task_name", type=str, default=None, help="The name of the glue task to train on.", choices=list(task_to_keys.keys()), ) parser.add_argument( "--train_file", type=str, default=None, help="A csv or a json file containing the training data." ) parser.add_argument( "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data." ) parser.add_argument( "--max_length", type=int, default=128, help=( "The maximum total input sequence length after tokenization. Sequences longer than this will be truncated," " sequences shorter will be padded if `--pad_to_max_lengh` is passed." ), ) parser.add_argument( "--pad_to_max_length", action="store_true", help="If passed, pad all samples to `max_length`. Otherwise, dynamic padding is used.", ) parser.add_argument( "--model_name_or_path", type=str, help="Path to pretrained model or model identifier from huggingface.co/models.", required=True, ) parser.add_argument( "--use_slow_tokenizer", action="store_true", help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).", ) parser.add_argument( "--per_device_train_batch_size", type=int, default=8, help="Batch size (per device) for the training dataloader.", ) parser.add_argument( "--per_device_eval_batch_size", type=int, default=8, help="Batch size (per device) for the evaluation dataloader.", ) parser.add_argument( "--learning_rate", type=float, default=5e-5, help="Initial learning rate (after the potential warmup period) to use.", ) parser.add_argument("--weight_decay", type=float, default=0.0, help="Weight decay to use.") parser.add_argument("--num_train_epochs", type=int, default=3, help="Total number of training epochs to perform.") parser.add_argument( "--max_train_steps", type=int, default=None, help="Total number of training steps to perform. 
If provided, overrides num_train_epochs.", ) parser.add_argument( "--gradient_accumulation_steps", type=int, default=1, help="Number of updates steps to accumulate before performing a backward/update pass.", ) parser.add_argument( "--lr_scheduler_type", type=SchedulerType, default="linear", help="The scheduler type to use.", choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"], ) parser.add_argument( "--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler." ) parser.add_argument("--output_dir", type=str, default=None, help="Where to store the final model.") parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.") parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.") parser.add_argument( "--hub_model_id", type=str, help="The name of the repository to keep in sync with the local `output_dir`." ) parser.add_argument("--hub_token", type=str, help="The token to use to push to the Model Hub.") args = parser.parse_args(in_args) # Sanity checks if args.task_name is None and args.train_file is None and args.validation_file is None: raise ValueError("Need either a task name or a training/validation file.") else: if args.train_file is not None: extension = args.train_file.split(".")[-1] assert extension in ["csv", "json"], "`train_file` should be a csv or a json file." if args.validation_file is not None: extension = args.validation_file.split(".")[-1] assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file." return args
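# Example of the torchbench-level flag parsing above. Every flag passed here
# must be one parse_torchbench_args() declares, since it uses parse_args()
# rather than parse_known_args().
tb_args = parse_torchbench_args(["--task_name", "rte", "--fp16", "no"])
print(tb_args.task_name, tb_args.validate_in_train, tb_args.fp16, tb_args.distributed)
# -> rte True no none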
import argparse


def parse_tb_args(args):
    parser = argparse.ArgumentParser()
    # default resolution: 800x1333
    parser.add_argument("--resize", choices=["default", "448x608"], default="default",
                        help="Resize the image to specified size")
    args, unknown_args = parser.parse_known_args(args)
    return args, unknown_args
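# Example: parse_tb_args() consumes only the torchbench-specific --resize flag
# and hands every other token back untouched for downstream parsers.
tb_args, rest = parse_tb_args(["--resize", "448x608", "--opts", "MODEL.DEVICE", "cpu"])
print(tb_args.resize, rest)   # 448x608 ['--opts', 'MODEL.DEVICE', 'cpu']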
import os import shutil import sys import subprocess from pathlib import Path from urllib import request from utils import s3_utils CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__))) # Load pre-trained weights # copied from https://github.com/facebookresearch/detectron2/blob/5934a1452801e669bbf9479ae222ce1a8a51f52e/MODEL_ZOO.md MODEL_WEIGHTS_MAP = { "detectron2_fasterrcnn_r_50_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_C4_1x/137257644/model_final_721ade.pkl", "detectron2_fasterrcnn_r_50_dc5": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_DC5_1x/137847829/model_final_51d356.pkl", "detectron2_fasterrcnn_r_50_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_50_FPN_1x/137257794/model_final_b275ba.pkl", "detectron2_fasterrcnn_r_101_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_C4_3x/138204752/model_final_298dad.pkl", "detectron2_fasterrcnn_r_101_dc5": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_DC5_3x/138204841/model_final_3e0943.pkl", "detectron2_fasterrcnn_r_101_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-Detection/faster_rcnn_R_101_FPN_3x/137851257/model_final_f6e8b1.pkl", "detectron2_maskrcnn_r_50_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_C4_1x/137259246/model_final_9243eb.pkl", "detectron2_maskrcnn_r_50_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl", "detectron2_maskrcnn_r_101_c4": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_C4_3x/138363239/model_final_a2914c.pkl", "detectron2_maskrcnn_r_101_fpn": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl", "detectron2_maskrcnn": "https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x/137260431/model_final_a54504.pkl", "detectron2_fcos_r_50_fpn": None, } def install_model_weights(model_name, model_dir): assert model_name in MODEL_WEIGHTS_MAP, f"Model {model_name} is not in MODEL_WEIGHTS_MAP. Cannot download the model weights file." model_full_path = Path(os.path.join(model_dir, ".data", f"{model_name}.pkl")) if model_name in MODEL_WEIGHTS_MAP and MODEL_WEIGHTS_MAP[model_name]: # download the file if not exists # TODO: verify the model file integrity if os.path.exists(model_full_path): return model_full_path.parent.mkdir(parents=True, exist_ok=True) request.urlretrieve(MODEL_WEIGHTS_MAP[model_name], model_full_path) def pip_install_requirements(): requirements_file = os.path.join(CURRENT_DIR, "requirements.txt") subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_file]) # This is to workaround https://github.com/facebookresearch/detectron2/issues/3934 def remove_tools_directory(): try: import tools import detectron2 d2_dir_path = Path(detectron2.__file__).parent assumed_tools_path = d2_dir_path.parent.joinpath("tools") if tools.__file__ and assumed_tools_path.exists(): shutil.rmtree(str(assumed_tools_path)) except ImportError: # if the "tools" package doesn't exist, do nothing pass def install_detectron2(model_name, model_dir): s3_utils.checkout_s3_data("INPUT_TARBALLS", "coco128.tar.gz", decompress=True) install_model_weights(model_name, model_dir) pip_install_requirements() remove_tools_directory()
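# A hedged sketch of how a model-specific install.py might call the helper
# above; the model name and directory here are illustrative assumptions, not
# the benchmark suite's actual entry point.
if __name__ == "__main__":
    model_name = "detectron2_maskrcnn_r_50_fpn"
    model_dir = os.path.dirname(os.path.realpath(__file__))
    install_detectron2(model_name, model_dir)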
from torchbenchmark.util.framework.detectron2.config import parse_tb_args
from torchbenchmark.util.model import BenchmarkModel

import itertools
import os
from pathlib import Path
import torch

# setup environment variable
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
DATA_DIR = os.path.join(CURRENT_DIR.parent.parent.parent, "data", ".data", "coco2017-minimal")
assert os.path.exists(DATA_DIR), "Couldn't find coco2017 minimal data dir, please run install.py again."
if not 'DETECTRON2_DATASETS' in os.environ:
    os.environ['DETECTRON2_DATASETS'] = DATA_DIR

from detectron2.modeling.meta_arch.build import META_ARCH_REGISTRY
from detectron2.engine import default_argument_parser
from detectron2.solver import build_optimizer
from detectron2.config import LazyConfig, get_cfg, instantiate
from detectron2 import model_zoo
from detectron2.modeling import build_model
from detectron2.utils.events import EventStorage
from torch.utils._pytree import tree_map
from detectron2.checkpoint import DetectionCheckpointer
import detectron2.data.transforms as T
from detectron2.config import LazyCall as L
from detectron2.data import build_detection_test_loader, build_detection_train_loader

from typing import Tuple

def setup(args):
    if args.config_file.endswith(".yaml"):
        cfg = get_cfg()
        cfg.merge_from_file(args.config_file)
        cfg.SOLVER.BASE_LR = 0.001  # Avoid NaNs. Not useful in this script anyway.
        # set images per batch to 1
        cfg.SOLVER.IMS_PER_BATCH = 1
        cfg.MODEL.WEIGHTS = args.model_file
        if args.resize == "448x608":
            cfg.MODEL.RPN.POST_NMS_TOPK_TEST = 300
            cfg.INPUT.MIN_SIZE_TEST = 448
            cfg.INPUT.MAX_SIZE_TEST = 608
        cfg.merge_from_list(args.opts)
        cfg.freeze()
    else:
        cfg = LazyConfig.load(args.config_file)
        cfg = LazyConfig.apply_overrides(cfg, args.opts)
        if args.fcos_use_bn:
            cfg.model.head.norm = "BN"
    return cfg

def prefetch(dataloader, device, precision="fp32"):
    r = []
    dtype = torch.float16 if precision == "fp16" else torch.float32
    for batch in dataloader:
        r.append(tree_map(lambda x: x.to(device, dtype=dtype) if isinstance(x, torch.Tensor) else x, batch))
    return r

def get_abs_path(config):
    import detectron2
    detectron2_root = os.path.abspath(os.path.dirname(detectron2.__file__))
    return os.path.join(detectron2_root, "model_zoo", "configs", config)

class Detectron2Model(BenchmarkModel):
    # To recognize this is a detectron2 model
    DETECTRON2_MODEL = True
    # Default eval precision on CUDA device is fp16
    DEFAULT_EVAL_CUDA_PRECISION = "fp16"
    # Default batch sizes
    DEFAULT_TRAIN_BSIZE = 1
    DEFAULT_EVAL_BSIZE = 1
    # Skip correctness check, because the output tensor can't be verified using
    # cosine similarity or torch.close()
    SKIP_CORRECTNESS_CHECK = True

    def __init__(self, variant, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.tb_args, self.extra_args = parse_tb_args(self.extra_args)
        torch.backends.cudnn.deterministic = False
        torch.backends.cudnn.benchmark = False

        # load model file
        assert hasattr(self, "model_file"), "Detectron2 models must specify their model_file."
        if self.model_file:
            assert os.path.exists(self.model_file), f"Detectron2 model file specified {self.model_file} doesn't exist."

        parser = default_argument_parser()
        args = parser.parse_args(["--config-file", get_abs_path(variant)])
        # setup pre-trained model weights
        args.model_file = self.model_file
        args.resize = self.tb_args.resize
        if hasattr(self, "FCOS_USE_BN") and self.FCOS_USE_BN:
            args.fcos_use_bn = True

        cfg = setup(args)
        if hasattr(cfg, "MODEL") and cfg.MODEL.DEVICE != self.device:
            cfg.defrost()
            cfg.MODEL.DEVICE = self.device
            cfg.freeze()

        if args.config_file.endswith(".yaml"):
            self.model = build_model(cfg).to(self.device)
        else:
            self.model = instantiate(cfg.model).to(self.device)

        # setup model and return the dataloader
        if self.test == "train":
            if hasattr(self, "FCOS_USE_BN") and self.FCOS_USE_BN:
                raise NotImplementedError("FCOS train is not supported by upstream detectron2. "
                                          "See GH Issue: https://github.com/facebookresearch/detectron2/issues/4369.")
            self.optimizer = build_optimizer(cfg, self.model)
            loader = self.setup_train()
        elif self.test == "eval":
            loader = self.setup_eval(cfg, args)

        self.example_inputs = prefetch(itertools.islice(loader, 100), self.device)
        # torchbench: only run 1 batch
        self.NUM_BATCHES = 1

    def setup_train(self):
        if hasattr(self, "FCOS_USE_BN") and self.FCOS_USE_BN:
            raise NotImplementedError("FCOS train is not supported by upstream detectron2. "
                                      "See GH Issue: https://github.com/facebookresearch/detectron2/issues/4369.")
        checkpointer = DetectionCheckpointer(self.model, optimizer=self.optimizer)
        checkpointer.load(self.model_file)
        self.model.train()
        # Always use coco.py to initialize train data
        # setup train dataset
        data_cfg = model_zoo.get_config("common/data/coco.py").dataloader
        data_cfg.train.dataset.names = "coco_2017_val_100"
        data_cfg.train.total_batch_size = self.batch_size
        if self.tb_args.resize == "448x608":
            data_cfg.train.mapper.augmentations = [L(T.ResizeShortestEdge)(short_edge_length=448, max_size=608)]
        loader = instantiate(data_cfg.train)
        return loader

    def setup_eval(self, cfg, args):
        # load model from pretrained checkpoint
        DetectionCheckpointer(self.model).load(self.model_file)
        self.model.eval()
        if args.config_file.endswith(".yaml"):
            cfg.defrost()
            cfg.DATASETS.TEST = ("coco_2017_val_100", )
            loader = build_detection_test_loader(cfg, cfg.DATASETS.TEST[0], batch_size=self.batch_size)
        else:
            data_cfg = model_zoo.get_config("common/data/coco.py").dataloader
            data_cfg.test.dataset.names = "coco_2017_val_100"
            data_cfg.test.batch_size = self.batch_size
            if self.tb_args.resize == "448x608":
                data_cfg.test.mapper.augmentations = [L(T.ResizeShortestEdge)(short_edge_length=448, max_size=608)]
            loader = instantiate(data_cfg.test)
        return loader

    def get_module(self):
        return self.model, (self.example_inputs[0], )

    def get_optimizer(self):
        return self.optimizer

    def set_optimizer(self, optimizer) -> None:
        self.optimizer = optimizer
        self.setup_train()

    def enable_fp16_half(self):
        assert self.dargs.precision == "fp16", f"Expected precision fp16, get {self.dargs.precision}"
        self.model = self.model.half()
        self.example_inputs = prefetch(self.example_inputs, self.device, self.dargs.precision)

    def train(self):
        with EventStorage():
            for batch_id in range(self.NUM_BATCHES):
                loss_dict = self.model(self.example_inputs[batch_id])
                if isinstance(loss_dict, torch.Tensor):
                    losses = loss_dict
                    loss_dict = {"total_loss": loss_dict}
                else:
                    losses = sum(loss_dict.values())
                self.optimizer.zero_grad()
                losses.backward()
                self.optimizer.step()

    def eval(self) -> Tuple[torch.Tensor]:
        with torch.no_grad():
            for batch_id in range(self.NUM_BATCHES):
                out = self.model(self.example_inputs[batch_id])
            # retrieve output tensors
            outputs = []
            for item in out:
                fields = list(map(lambda x: list(x.get_fields().values()), item.values()))
                for boxes in fields:
                    tensor_box = list(filter(lambda x: isinstance(x, torch.Tensor), boxes))
                    outputs.extend(tensor_box)
            return tuple(outputs)
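A sketch of how a concrete benchmark variant might subclass Detectron2Model above. The class name, weights path, and config path are illustrative assumptions, not taken from the suite; the only contract used is that model_file must point at an existing .pkl before __init__ runs.

# Hypothetical subclass for illustration only; paths and variant are assumptions.
import os

class MaskRCNNBenchmark(Detectron2Model):
    # assumed location of a weights file downloaded by install.py
    model_file = os.path.join(os.path.dirname(__file__), ".data", "detectron2_maskrcnn_r_50_fpn.pkl")

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        # variant is resolved against detectron2's bundled model_zoo configs
        super().__init__(variant="COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml",
                         test=test, device=device, batch_size=batch_size, extra_args=extra_args)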
""" Patch the transformer source code to enable optimizations. """ import os import subprocess import sys from .model_factory import class_models from transformers import AutoConfig, ReformerConfig, BigBirdConfig, BertConfig, WhisperConfig, LlamaConfig PATCH_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "patches") def cache_model(name: str, **kwargs): import transformers model_config = eval(class_models[name][2]) model_ctor = getattr(transformers, class_models[name][3]) model_ctor.from_config(model_config, **kwargs) def patch_transformers(): import transformers transformers_dir = os.path.dirname(transformers.__file__) if not os.path.exists(PATCH_DIR): return for patch_file in os.listdir(PATCH_DIR): patch_file_fullpatch = os.path.join(PATCH_DIR, patch_file) if not patch_file_fullpatch.endswith(".patch"): continue try: subprocess.check_output(["patch", "-p1", "--forward", "-i", patch_file_fullpatch, "-r", "/tmp/rej"], cwd=transformers_dir) except subprocess.SubprocessError as e: output_str = str(e.output) if "previously applied" in output_str: return else: print(str(output_str)) sys.exit(1)
import argparse
import torch
from torchbenchmark.util.model import BenchmarkModel
from typing import List, Dict, Tuple

def add_bool_arg(parser: argparse.ArgumentParser, name: str, default_value: bool=True):
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument('--' + name, dest=name, action='store_true')
    group.add_argument('--no-' + name, dest=name, action='store_false')
    parser.set_defaults(**{name: default_value})

def parse_args(model: BenchmarkModel, extra_args: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    # by default, enable half precision for inference
    add_bool_arg(parser, "eval_fp16", default_value=True)
    args = parser.parse_args(extra_args)
    args.device = model.device
    # disable fp16 when device is CPU
    if args.device == "cpu":
        args.eval_fp16 = False
    return args

def apply_args(model: BenchmarkModel, args: argparse.Namespace):
    # apply eval_fp16
    if args.eval_fp16:
        model.model, model.example_inputs = enable_eval_fp16(model.model, model.example_inputs)

def enable_eval_fp16(model: torch.nn.Module, example_input: Dict[str, torch.tensor]) -> Tuple[torch.nn.Module, Dict[str, torch.tensor]]:
    return model.half(), {'input_ids': example_input['input_ids'].half()}
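A short sketch of the intended call sequence for the fp16 eval flag above. ExampleModel is a hypothetical stand-in that only carries the attributes parse_args/apply_args touch (device, model, example_inputs); a CUDA device is assumed so the flag is not force-disabled.

# Hypothetical call sequence; parse_args/apply_args come from the module above.
import torch

class ExampleModel:
    # stand-in with just the attributes used by the helpers above (assumes CUDA)
    device = "cuda"
    model = torch.nn.Linear(8, 8).to("cuda")
    example_inputs = {"input_ids": torch.randn(1, 8, device="cuda")}

m = ExampleModel()
args = parse_args(m, ["--eval_fp16"])   # or ["--no-eval_fp16"] to disable
apply_args(m, args)                     # casts the model and input_ids to half precision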
import math
import random
import os
import torch
from contextlib import nullcontext
from torch import optim
import torch.nn as nn
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import transformers
from transformers import AutoConfig, ReformerConfig, BertConfig, GenerationConfig, WhisperConfig, LlamaConfig
from typing import Tuple

class_models = {
    # 'name': (train_max_length, eval_max_length, config, model)
    'hf_GPT2': (512, 1024, 'AutoConfig.from_pretrained("gpt2")', 'AutoModelForCausalLM'),
    'hf_GPT2_large': (512, 1024, 'AutoConfig.from_pretrained("gpt2-large")', 'AutoModelForCausalLM'),
    'hf_T5': (1024, 2048, 'AutoConfig.from_pretrained("t5-small")', 'AutoModelForSeq2SeqLM'),
    'hf_T5_base': (1024, 2048, 'AutoConfig.from_pretrained("t5-base")', 'AutoModelForSeq2SeqLM'),
    'hf_T5_large': (512, 512, 'AutoConfig.from_pretrained("t5-large")', 'AutoModelForSeq2SeqLM'),
    'hf_Bart': (512, 512, 'AutoConfig.from_pretrained("facebook/bart-base")', 'AutoModelForSeq2SeqLM'),
    'hf_Reformer': (4096, 4096, 'ReformerConfig()', 'AutoModelForMaskedLM'),
    'hf_BigBird': (1024, 4096, 'BigBirdConfig(attention_type="block_sparse",)', 'AutoModelForMaskedLM'),
    'hf_Albert': (512, 512, 'AutoConfig.from_pretrained("albert-base-v2")', 'AutoModelForMaskedLM'),
    'hf_DistilBert': (512, 512, 'AutoConfig.from_pretrained("distilbert-base-uncased")', 'AutoModelForMaskedLM'),
    'hf_Longformer': (1024, 4096, 'AutoConfig.from_pretrained("allenai/longformer-base-4096")', 'AutoModelForMaskedLM'),
    'hf_Bert': (512, 512, 'BertConfig()', 'AutoModelForMaskedLM'),
    # see https://huggingface.co/bert-large-cased
    'hf_Bert_large': (512, 512, 'BertConfig(hidden_size=1024, num_hidden_layers=24, num_attention_heads=16)', 'AutoModelForMaskedLM'),
    'hf_Whisper': (1024, 1024, 'WhisperConfig()', 'AutoModelForAudioClassification'),
    # default num_hidden_layers=32 but that OOMs, feel free to change this config to something more real
    'llama_v2_7b_16h': (512, 512, 'LlamaConfig(num_hidden_layers=16)', 'AutoModelForCausalLM'),
    'hf_MPT_7b_instruct': (512, 512, 'AutoConfig.from_pretrained("mosaicml/mpt-7b-instruct", trust_remote_code=True)', 'AutoModelForCausalLM'),
    'llama_v2_7b': (512, 512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-7b-hf")', 'AutoModelForCausalLM'),
    'llama_v2_13b': (512, 512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-13b-hf")', 'AutoModelForCausalLM'),
    'llama_v2_70b': (512, 512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-70b-hf")', 'AutoModelForMaskedLM'),
}

cpu_input_slice = {
    'hf_BigBird': 5,
    'hf_Longformer': 8,
    'hf_T5': 4,
    'hf_GPT2': 4,
    'hf_Reformer': 2,
}

class ArgsToKwargsWrapper(torch.nn.Module):
    def __init__(self, model):
        super(ArgsToKwargsWrapper, self).__init__()
        self.model = model

    def forward(self, input_ids, decoder_input_ids):
        return self.model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)

class HuggingFaceModel(BenchmarkModel):
    HF_MODEL = True
    # Default eval precision on CUDA device is fp16(half mode)
    DEFAULT_EVAL_CUDA_PRECISION = "fp16"

    # If you suffix a model with '_generate', we will instead wrap the
    # unsuffixed model with GenerationWrapper which will make it do
    # autoregressive text generation instead of a probability prediction
    # NB: name is used as kwarg, cannot rename it here
    def __init__(self, name, test, device, batch_size=None, extra_args=[]):
        super().__init__(test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        self.name = name
        if name.endswith('_generate'):
            self.is_generate = True
            self.unqual_name = name[:-len('_generate')]
        else:
            self.is_generate = False
            self.unqual_name = name
        name = self.unqual_name  # we don't want to refer to the qualified name anymore

        if test == "train":
            self.max_length = class_models[name][0]
        elif test == "eval":
            self.max_length = class_models[name][1]
        # workaround the bigbird config import
        if name == "hf_BigBird":
            from transformers import BigBirdConfig
        config = eval(class_models[name][2])
        if class_models[name][2] == "ReformerConfig()" and not config.num_buckets:
            # silence "config.num_buckets is not set. Setting config.num_buckets to 128"
            config.num_buckets = 128
        class_ctor = getattr(transformers, class_models[name][3])
        kwargs = {}
        if name == "hf_Falcon_7b" or name == "hf_MPT_7b_instruct":
            kwargs["trust_remote_code"] = True
        self.model = class_ctor.from_config(config, **kwargs).to(device)
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=0.001,
            # TODO resolve https://github.com/pytorch/torchdynamo/issues/1083
            capturable=bool(int(os.getenv("ADAM_CAPTURABLE", 0))),
        )

        # populate these on-demand to avoid wasting memory when not used
        self.vocab_size = config.vocab_size
        self.dynamic_example_inputs = None

        if test == "train":
            input_ids = torch.randint(0, config.vocab_size, (self.batch_size, self.max_length)).to(device)
            decoder_ids = torch.randint(0, config.vocab_size, (self.batch_size, self.max_length)).to(device)
            self.example_inputs = {'input_ids': input_ids, 'labels': decoder_ids}
            self.model.train()
        elif test == "eval":
            # Cut the length of sentence when running on CPU, to reduce test time
            if self.device == "cpu" and name in cpu_input_slice:
                self.max_length = int(self.max_length / cpu_input_slice[name])
            eval_context = torch.randint(0, config.vocab_size, (self.batch_size, self.max_length)).to(device)
            self.example_inputs = {'input_ids': eval_context, }
            if class_models[name][3] == 'AutoModelForSeq2SeqLM':
                self.example_inputs['decoder_input_ids'] = eval_context
            self.model.eval()
        self.amp_context = nullcontext

    def get_module(self, wrap_model=True):
        if not self.is_generate and class_models[self.unqual_name][3] == 'AutoModelForSeq2SeqLM':
            k = 'labels' if self.test == 'train' else 'decoder_input_ids'
            if not wrap_model:
                return self.model, (
                    self.example_inputs['input_ids'], self.example_inputs[k])
            return ArgsToKwargsWrapper(self.model), (
                self.example_inputs['input_ids'], self.example_inputs[k])
        return self.model, (self.example_inputs["input_ids"], )

    def get_dynamic_shapes_module(self):
        if self.dynamic_example_inputs is None:
            nbuckets = 8
            nsamples = 32
            n = int(math.log2(self.max_length))
            buckets = [2**n for n in range(n - nbuckets, n)]
            self.dynamic_example_inputs = [
                {
                    'input_ids': torch.randint(0, self.vocab_size, (self.batch_size, bucket_len)).to(self.device),
                    'labels': torch.randint(0, self.vocab_size, (self.batch_size, bucket_len)).to(self.device)}
                for bucket_len in random.choices(buckets, k=nsamples)
            ]

        if class_models[self.unqual_name][3] == 'AutoModelForSeq2SeqLM':
            raise NotImplementedError("Not yet supported")

        # TODO(whc) why is labels not passed through?
        return self.model, [(i['input_ids'],) for i in self.dynamic_example_inputs]

    def enable_fp16_half(self):
        self.model = self.model.half()

    def train(self):
        with self.amp_context():
            outputs = self.model(**self.example_inputs)
            loss = outputs.loss
            loss.backward()
            self.optimizer.step()

    def eval(self) -> Tuple[torch.Tensor]:
        with torch.no_grad():
            with self.amp_context():
                out = self.model(**self.example_inputs)
        # logits: prediction scores of language modeling head
        # https://github.com/huggingface/transformers/blob/v4.16.2/src/transformers/modeling_outputs.py#L455
        # transformations such as fx2trt will cast the original output type to dict
        if isinstance(out, tuple):
            return out
        elif hasattr(out, 'logits'):
            return (out.logits, )
        else:
            return (out["logits"], )

class HuggingFaceAuthMixin:
    def __init__(self):
        if not 'HUGGING_FACE_HUB_TOKEN' in os.environ:
            raise NotImplementedError("Make sure to set `HUGGING_FACE_HUB_TOKEN` so you can download weights")

class HuggingFaceGenerationModel(HuggingFaceModel):
    task = NLP.GENERATION
    DEFAULT_EVAL_BSIZE = 1
    """
    Instead of just running __call__ on the model, use generate to generate text.
    """
    def __init__(self, name, test, device, batch_size=None, extra_args=[]):
        super().__init__(name=name, test=test, device=device, batch_size=batch_size, extra_args=extra_args)
        # Make this configurable with extra_args
        # NB: this is *fixed* generation size as eos_token_id is None
        # These params were cribbed off of
        # https://github.com/younesbelkada/hf-torch-compile-benchmark
        generation_config = GenerationConfig(
            max_new_tokens=256,
            pad_token_id=0,
            eos_token_id=None,
            do_sample=False,
            num_beams=1,
            use_cache=True,
        )
        self.model = GenerationWrapper(self.model, generation_config)

    def train(self):
        raise NotImplementedError("_generate variant doesn't train")

    def eval(self) -> Tuple[torch.Tensor]:
        with torch.no_grad():
            with self.amp_context():
                out = self.model(self.example_inputs['input_ids'])
        return (out,)

class GenerationWrapper(nn.Module):
    def __init__(self, model, generation_config):
        super().__init__()
        self.model = model
        self.generation_config = generation_config

    def forward(self, inputs):
        return self.model.generate(inputs, self.generation_config)
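A sketch of constructing one of the factory's models directly and running a single eval step. The subclass below is a hypothetical stand-in; in torchbench the per-model wrappers are generated elsewhere and also set task and batch-size defaults, which are omitted here.

# Hypothetical direct use of HuggingFaceModel defined above; CPU is chosen so it
# runs without a GPU, and batch_size is passed explicitly to avoid relying on
# per-model defaults.
class HFBertBenchmark(HuggingFaceModel):
    DEFAULT_EVAL_BSIZE = 1

bench = HFBertBenchmark(name="hf_Bert", test="eval", device="cpu", batch_size=1)
logits, = bench.eval()
print(logits.shape)  # roughly (batch_size, max_length, vocab_size) for a masked LM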
import argparse
import importlib
import os
import sys
import torch
import uuid
from pathlib import Path
from typing import List

try:
    import submitit
except ImportError:
    submitit = None

def parse_args(args: List[str]=None):
    parser = argparse.ArgumentParser(description='PyTorch Distributed Benchmark', add_help=False)
    parser.add_argument(
        "--scheduler",
        default="slurm",
        type=str,
        choices=["local", "slurm"],
        help="Where to launch the job on a specific infrastructure"
    )
    parser.add_argument(
        "--ngpus",
        default=2,
        type=int,
        help="Number of gpus to request on each node"
    )
    parser.add_argument(
        "--nodes",
        default=1,
        type=int,
        help="Number of nodes to request"
    )
    parser.add_argument(
        "--timeout",
        default=1440,
        type=int,
        help="Duration of the job"
    )
    parser.add_argument(
        "--profiler",
        default=False,
        type=bool,
        help="Measure with PyTorch Profiler. Disabled by default, as it crashes on AWS"
    )
    parser.add_argument(
        "--partition",
        default="train",
        type=str,
        help="The Slurm partition to submit to"
    )
    parser.add_argument(
        "--cluster",
        default=None,
        help="Which slurm cluster to target. Use 'local' to run jobs locally, 'debug' to run jobs in process"
    )
    parser.add_argument(
        "--job_dir",
        default=os.getcwd(),
        type=str,
        help="A shared folder across all worker processes"
    )
    parser.add_argument(
        "--model",
        type=str,
        default="torchbenchmark.e2e_models.hf_bert.Model",
        help="specify the model to experiment with, by default uses e2e_models.hf_bert"
    )
    parser.add_argument(
        "--trainer",
        type=str,
        default="torchbenchmark.util.distributed.trainer.Trainer",
        help="trainer loop class, can be customized for specific behavior",
    )
    parser.add_argument(
        "--distributed",
        type=str,
        choices=["ddp", "ddp_no_static_graph", "fsdp", "deepspeed", "none"],
        default="ddp",
        help="distributed training paradigm, by default using DDP",
    )
    parser.add_argument(
        "--exclude",
        type=str,
        default="",
        help="comma-separated list of nodes to exclude from the slurm allocation",
    )
    try:
        if args:
            return parser.parse_known_args(args)
        else:
            return parser.parse_known_args()
    except:
        parser.print_help()
        sys.exit(0)

def get_init_file(args):
    # Init file must not exist, but its parent dir must exist.
    os.makedirs(args.job_dir, exist_ok=True)
    init_file = Path(args.job_dir) / f"{uuid.uuid4().hex}_init"
    print(init_file)
    if init_file.exists():
        os.remove(str(init_file))
    return init_file

class TrainerWrapper:
    def __init__(self, args, model_args=None):
        self.args = args
        self.model_args = model_args
        self.args.output_dir = args.job_dir

    def __call__(self):
        self._setup_gpu_args()

        pos = self.args.model.rfind(".")
        module = importlib.import_module(self.args.model[:pos])
        model_class = getattr(module, self.args.model[(pos+1):])

        pos = self.args.trainer.rfind(".")
        module = importlib.import_module(self.args.trainer[:pos])
        trainer_class = getattr(module, self.args.trainer[(pos+1):])

        return trainer_class(self.args, model_class, model_args=self.model_args).measure()

    def checkpoint(self):
        self.args.dist_url = get_init_file(self.args).as_uri()
        checkpoint_file = os.path.join(self.args.output_dir, "checkpoint.pth")
        if os.path.exists(checkpoint_file):
            self.args.resume = checkpoint_file
        print("Requeuing ", self.args)
        empty_trainer = type(self)(self.args)
        return submitit.helpers.DelayedSubmission(empty_trainer)

    def _setup_gpu_args(self):
        job_env = submitit.JobEnvironment()
        self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
        self.args.gpu = job_env.local_rank
        self.args.rank = job_env.global_rank
        self.args.world_size = job_env.num_tasks
        print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")

        os.environ["LOCAL_RANK"] = str(job_env.local_rank)
        os.environ["RANK"] = str(job_env.global_rank)
        os.environ["WORLD_SIZE"] = str(job_env.num_tasks)

def main():
    args, model_args, = parse_args()

    # Note that the folder will depend on the job_id, to easily track experiments
    executor = submitit.AutoExecutor(folder=args.job_dir, cluster=args.cluster, slurm_max_num_timeout=3000)

    executor.update_parameters(
        gpus_per_node=args.ngpus,
        # one task per GPU
        tasks_per_node=args.ngpus,
        cpus_per_task=10,
        nodes=args.nodes,
        timeout_min=args.timeout,
        # Below are cluster dependent parameters
        slurm_partition=args.partition,
        slurm_signal_delay_s=120,
        slurm_exclude=args.exclude,
    )

    executor.update_parameters(name="distbench", slurm_array_parallelism=1, timeout_min=1000)

    args.dist_url = get_init_file(args).as_uri()
    args.output_dir = args.job_dir
    job = executor.submit(TrainerWrapper(args, model_args))

    # print ID of the Slurm job
    print(job.job_id)

    # waits for completion and returns output
    print(job.results())

if __name__=="__main__":
    main()
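A sketch of driving the launcher's argument parsing programmatically instead of via the command line. The flag values are placeholders; actually submitting work additionally requires submitit and a reachable Slurm cluster.

# Hypothetical programmatic invocation of parse_args() from the launcher above;
# values are placeholders only.
args, model_args = parse_args([
    "--scheduler", "slurm",
    "--ngpus", "8",
    "--nodes", "1",
    "--partition", "train",
    "--model", "torchbenchmark.e2e_models.hf_bert.Model",
])
print(args.ngpus, args.nodes, model_args)  # unknown flags are returned as model_args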
from datetime import datetime
import os
from pathlib import Path
from statistics import stdev

import torch
from torch.cuda import Event
from torch.profiler import profile, ProfilerActivity, tensorboard_trace_handler

from torchbenchmark.util.e2emodel import E2EBenchmarkModel, nested

import torch.distributed as dist

class Trainer():
    DEFAULT_MEASURE_ITERATIONS = 10

    def __init__(self, args, model_class, mode="SPMD", model_args=None):
        self.args = args
        self.model_args = model_args
        self.model_class = model_class
        self.mode = mode

        self.local_rank = int(os.getenv("LOCAL_RANK", -1))
        self.setup()

        extra_args = [
            "--distributed",
            self.args.distributed,
        ]
        extra_args.extend(self.model_args)

        # create model instance after Trainer setup, so that
        # visible devices won't be revised in model constructor
        self.e2e_benchmark: E2EBenchmarkModel = model_class("train", batch_size=None, extra_args=extra_args)

        expected_attrs = ["model", "optimizer", "train_dataloader", "accelerator", "run_contexts"]
        assert all(attr in dir(self.e2e_benchmark) for attr in expected_attrs), (
            "Missing attributes in the input E2EBenchmarkModel implementation: "
            f"{[attr for attr in expected_attrs if attr not in dir(self.e2e_benchmark)]}"
        )

        self.rank = dist.get_rank()

    def setup(self):
        if self.mode == "SPMD":
            # set the visible devices so that each SPMD process only sees one
            # CUDA device
            # N.B.: this has to be done before using any CUDA API from torch
            # N.B.: Remove the following block as HF Accelerator by default puts
            # the model to the device corresponding to LOCAL_RANK. It's better
            # to use CUDA_VISIBLE_DEVICES and cuda:0 if HF Accelerator can avoid
            # using local_rank as the device id.
            """
            os.environ["CUDA_VISIBLE_DEVICES"] = f"{local_rank}"
            assert torch.cuda.device_count() == 1, (
                "SPMD Trainer expects 1 visible device per process, but saw "
                f"{torch.cuda.device_count()} devices."
            )
            """
            torch.cuda.set_device(self.local_rank)

            world_size = int(os.getenv("WORLD_SIZE", -1))
            rank = int(os.getenv("RANK", -1))
            assert self.local_rank != -1 and world_size != -1 and rank != -1, (
                "Failed to retrieve SPMD configurations from environment "
                f"variables. local_rank={self.local_rank}, world_size={world_size}, "
                f"rank={rank}."
            )

            # TODO: hardcode NCCL for now, make this configurable if necessary
            dist.init_process_group("nccl", init_method=self.args.dist_url, rank=rank, world_size=world_size)
        else:
            raise ValueError(f"Unrecognized distributed training mode {self.mode}")

    def measure(self):
        niters = self.DEFAULT_MEASURE_ITERATIONS
        # TODO: using dummy data for now to rule out dataloader delays
        batch = self.e2e_benchmark.next_batch()

        ######################################
        # 1. warming up CUDACachingAllocator #
        ######################################
        for _ in range(self.DEFAULT_MEASURE_ITERATIONS):
            with nested(*self.e2e_benchmark.run_contexts):
                loss = self.e2e_benchmark.run_forward(batch)
                self.e2e_benchmark.run_backward(loss)
                self.e2e_benchmark.run_optimizer_step()

        # wait for all pending CUDA ops to finish
        torch.cuda.synchronize(device=self.local_rank)

        now = datetime.now()
        name = f"{type(self).__name__}_{now.strftime('%Y_%m_%d_%H_%M_%S')}"

        ##################################################################
        # 2. measure raw delays and memory to rule out profiler overhead #
        ##################################################################
        events_pre_fwd = [Event(enable_timing=True) for _ in range(niters)]
        events_pre_bwd = [Event(enable_timing=True) for _ in range(niters)]
        events_pre_opt = [Event(enable_timing=True) for _ in range(niters)]
        events_post_opt = [Event(enable_timing=True) for _ in range(niters)]
        with nested(*self.e2e_benchmark.run_contexts):
            for i in range(niters):
                events_pre_fwd[i].record()
                loss = self.e2e_benchmark.run_forward(batch)

                events_pre_bwd[i].record()
                self.e2e_benchmark.run_backward(loss)

                events_pre_opt[i].record()
                self.e2e_benchmark.run_optimizer_step()

                events_post_opt[i].record()

        # wait for all pending CUDA ops to finish
        torch.cuda.synchronize(device=self.local_rank)

        delays_fwd = [pre.elapsed_time(post) for pre, post in zip(events_pre_fwd, events_pre_bwd)]
        delays_bwd = [pre.elapsed_time(post) for pre, post in zip(events_pre_bwd, events_pre_opt)]
        delays_opt = [pre.elapsed_time(post) for pre, post in zip(events_pre_opt, events_post_opt)]

        mean_fwd = float(sum(delays_fwd)) / len(delays_fwd)
        stdev_fwd = stdev(delays_fwd)
        mean_bwd = float(sum(delays_bwd)) / len(delays_bwd)
        stdev_bwd = stdev(delays_bwd)
        mean_opt = float(sum(delays_opt)) / len(delays_opt)
        stdev_opt = stdev(delays_opt)

        iter_time = events_pre_fwd[0].elapsed_time(events_post_opt[-1]) / niters

        # write results
        delay_dir = f"{self.args.job_dir}/delay"
        Path(delay_dir).mkdir(parents=True, exist_ok=True)
        fout = open(f"{delay_dir}/{name}.log", "w")
        fout.write(
            f"{mean_fwd:.2f}, {stdev_fwd:.2f}, "
            f"{mean_bwd:.2f}, {stdev_bwd:.2f}, "
            f"{mean_opt:.2f}, {stdev_opt:.2f}\n"
        )
        fout.close()

        if self.args.profiler:
            # N.B.: disable PyTorch Profiler by default due to
            # https://github.com/pytorch/pytorch/issues/75369
            ################################################
            # 3. measure complete metrics through profiler #
            ################################################
            with profile(
                activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
                record_shapes=True,  # Causes seg fault in export_chrome_trace
                with_stack=True,  # Causes seg fault with EFA
                with_flops=True,  # Causes seg fault in export_chrome_trace
                on_trace_ready=tensorboard_trace_handler(
                    f"{self.args.job_dir}/tb/{name}",
                    self.rank,
                    use_gzip=True,
                )
            ):
                for i in range(niters):
                    loss = self.e2e_benchmark.run_forward(batch)
                    self.e2e_benchmark.run_backward(loss)
                    self.e2e_benchmark.run_optimizer_step()

        # wait for all pending CUDA ops to finish
        torch.cuda.synchronize(device=self.local_rank)
        # wait for all peers to finish
        dist.barrier(device_ids=[self.local_rank])

        return {
            "iter": iter_time,
            "fwd_mean": mean_fwd,
            "fwd_stdev": stdev_fwd,
            "bwd_mean": mean_bwd,
            "bwd_stdev": stdev_bwd,
            "opt_mean": mean_opt,
            "opt_stdev": stdev_opt,
        }

    def teardown(self):
        if self.mode == "SPMD":
            dist.destroy_process_group()
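A standalone sketch of the CUDA-event timing pattern used in measure() above, applied to a toy workload so it can be read in isolation. It assumes a CUDA device is available; elapsed_time returns milliseconds and is only valid after synchronization.

# Minimal illustration of per-phase timing with torch.cuda.Event; toy model only.
import torch
from torch.cuda import Event

niters = 10
model = torch.nn.Linear(1024, 1024).cuda()
x = torch.randn(64, 1024, device="cuda")

starts = [Event(enable_timing=True) for _ in range(niters)]
stops = [Event(enable_timing=True) for _ in range(niters)]
for i in range(niters):
    starts[i].record()
    model(x).sum().backward()
    stops[i].record()
torch.cuda.synchronize()  # events must be completed before reading elapsed_time

delays_ms = [s.elapsed_time(e) for s, e in zip(starts, stops)]
print(sum(delays_ms) / len(delays_ms))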
from datetime import datetime
import os
from pathlib import Path
from statistics import stdev
from typing import Optional

import numpy as np
import torch
from torch.cuda import Event
from torch.profiler import profile, ProfilerActivity, schedule, tensorboard_trace_handler

from torchbenchmark.util.env_check import same
from torchbenchmark.util.model import BenchmarkModel

import torch.distributed as dist

class Trainer():
    DEFAULT_MEASURE_ITERATIONS = 10
    PROFILE_ITERATIONS = 2

    def __init__(self, args, model_class, mode="SPMD", model_args=None):
        self.args = args
        self.model_args = model_args
        self.model_class = model_class
        self.mode = mode

        self.local_rank = int(os.getenv("LOCAL_RANK", -1))
        self.global_rank = int(os.getenv("RANK", -1))
        self.setup()

        # specify the name of the distributed trainer
        extra_args = [
            "--distributed",
            self.args.distributed,
        ]
        extra_args.extend(model_args)

        batch_size = getattr(args, "batch_size", None)

        # create model instance after Trainer setup, so that
        # visible devices won't be revised in model constructor
        self.benchmark: BenchmarkModel = model_class(test="train", device="cuda", batch_size=batch_size, extra_args=extra_args)

        # options: "reference" or "test"
        self.check_correctness_distributed: Optional[str] = getattr(args, "check_correctness_distributed", None)
        self.reference_data_path: Optional[str] = getattr(args, "reference_data_path", None)

        # reduce iterations to speed up the tests
        if self.check_correctness_distributed:
            self.DEFAULT_MEASURE_ITERATIONS = 2

        self.rank = dist.get_rank()

    def setup(self):
        if self.mode == "SPMD":
            # set the visible devices so that each SPMD process only sees one
            # CUDA device
            # N.B.: this has to be done before using any CUDA API from torch
            # N.B.: Remove the following block as HF Accelerator by default puts
            # the model to the device corresponding to LOCAL_RANK. It's better
            # to use CUDA_VISIBLE_DEVICES and cuda:0 if HF Accelerator can avoid
            # using local_rank as the device id.
            """
            os.environ["CUDA_VISIBLE_DEVICES"] = f"{local_rank}"
            assert torch.cuda.device_count() == 1, (
                "SPMD Trainer expects 1 visible device per process, but saw "
                f"{torch.cuda.device_count()} devices."
            )
            """
            torch.cuda.set_device(self.local_rank)

            world_size = int(os.getenv("WORLD_SIZE", -1))
            rank = int(os.getenv("RANK", -1))
            assert self.local_rank != -1 and world_size != -1 and rank != -1, (
                "Failed to retrieve SPMD configurations from environment "
                f"variables. local_rank={self.local_rank}, world_size={world_size}, "
                f"rank={rank}."
            )

            # TODO: hardcode NCCL for now, make this configurable if necessary
            dist.init_process_group("nccl", init_method=self.args.dist_url, rank=rank, world_size=world_size)
        else:
            raise ValueError(f"Unrecognized distributed training mode {self.mode}")

    def measure(self):
        niters = self.DEFAULT_MEASURE_ITERATIONS
        correctness = None
        if self.check_correctness_distributed is not None:
            self.benchmark.invoke()
            if self.global_rank == 0:
                grad_params = {}
                for name, param in self.benchmark.model.named_parameters():
                    if param.requires_grad:
                        if param.grad is not None:
                            grad_params[name + ".grad"] = param.grad.cpu()
                        else:
                            grad_params[name + ".grad"] = None

                if self.check_correctness_distributed == "reference":
                    with open(self.reference_data_path, "wb") as f:
                        torch.save(grad_params, f)
                elif self.check_correctness_distributed == "test":
                    with open(self.reference_data_path, "rb") as f:
                        ref_params = torch.load(f)

                    def do_correctness_check():
                        correctness = True
                        for ref_name, ref_param in ref_params.items():
                            if ref_name not in grad_params:
                                correctness = False
                                print(f"correctness failure: {ref_name} in reference params but not in test params")
                            test_param = grad_params[ref_name]
                            atol = rtol = 1e-4
                            if not same(test_param, ref_param, cos_similarity=False, atol=atol*40, rtol=rtol*40):
                                correctness = False
                                print(f"correctness failure: Test model differs from reference model in parameter: {ref_name}")
                        for test_name, test_param in grad_params.items():
                            if test_name not in ref_params:
                                correctness = False
                                print(f"correctness failure: {test_name} in test params but not in reference params")
                        return correctness

                    correctness = do_correctness_check()

        ######################################
        # 1. warming up CUDACachingAllocator #
        ######################################
        for _ in range(self.DEFAULT_MEASURE_ITERATIONS):
            self.benchmark.invoke()

        torch.cuda.reset_peak_memory_stats()
        self.benchmark.invoke()
        # wait for all pending CUDA ops to finish
        torch.cuda.synchronize(device=self.local_rank)
        max_memory = torch.cuda.max_memory_allocated(device=self.local_rank)

        now = datetime.now()
        name = f"{type(self).__name__}_{now.strftime('%Y_%m_%d_%H_%M_%S')}"

        ##################################################################
        # 2. measure raw delays and memory to rule out profiler overhead #
        ##################################################################
        events_pre_train = [Event(enable_timing=True) for _ in range(niters)]
        events_post_train = [Event(enable_timing=True) for _ in range(niters)]
        for i in range(niters):
            events_pre_train[i].record()
            self.benchmark.invoke()
            events_post_train[i].record()

        # wait for all pending CUDA ops to finish
        torch.cuda.synchronize(device=self.local_rank)

        latency_train = [pre.elapsed_time(post) for pre, post in zip(events_pre_train, events_post_train)]
        median_latency = np.median(latency_train)
        stdev_latency = stdev(latency_train)

        if self.args.profiler:
            # N.B.: disable PyTorch Profiler by default due to
            # https://github.com/pytorch/pytorch/issues/75369
            ################################################
            # 3. measure complete metrics through profiler #
            ################################################
            wait_runs = 2
            warmup_runs = 2
            with profile(
                activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA],
                record_shapes=True,  # Causes seg fault in export_chrome_trace
                with_stack=True,  # Causes seg fault with EFA
                with_flops=True,  # Causes seg fault in export_chrome_trace
                on_trace_ready=tensorboard_trace_handler(
                    f"{self.args.job_dir}/tb/{name}",
                    self.rank,
                    use_gzip=True,
                ),
                schedule=schedule(wait=wait_runs, warmup=warmup_runs, active=self.PROFILE_ITERATIONS),
            ) as profiler:
                for i in range(self.PROFILE_ITERATIONS + warmup_runs + wait_runs):
                    self.benchmark.invoke()
                    profiler.step()

        # wait for all pending CUDA ops to finish
        torch.cuda.synchronize(device=self.local_rank)
        # wait for all peers to finish
        dist.barrier(device_ids=[self.local_rank])

        return {
            "latency_median": median_latency,
            "latency_stdev": stdev_latency,
            "max_memory": max_memory,
            **({"correctness": correctness} if correctness is not None else {}),
        }

    def teardown(self):
        if self.mode == "SPMD":
            dist.destroy_process_group()
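A standalone sketch of the wait/warmup/active profiler schedule used in the trainer above, reduced to a toy CPU workload so it runs without GPUs or a process group. The loop must call prof.step() once per iteration so the schedule advances.

# Minimal illustration of torch.profiler with a wait/warmup/active schedule.
import torch
from torch.profiler import profile, ProfilerActivity, schedule

model = torch.nn.Linear(128, 128)
x = torch.randn(32, 128)
wait_runs, warmup_runs, active_runs = 2, 2, 2

with profile(
    activities=[ProfilerActivity.CPU],
    schedule=schedule(wait=wait_runs, warmup=warmup_runs, active=active_runs),
) as prof:
    for _ in range(wait_runs + warmup_runs + active_runs):
        model(x)
        prof.step()  # advance the profiler schedule each iteration

print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=5))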
from io import UnsupportedOperation
import os

import torch
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP

def apply_trainer(model, trainer):
    local_rank = int(os.getenv("LOCAL_RANK", -1))
    if trainer == "ddp" or trainer == "ddp_no_static_graph":
        static_graph = (trainer == "ddp")
        ddp_model = DDP(
            model,
            device_ids=[local_rank],
            # If buffer broadcast is necessary, specific optimizations might be
            # necessary to optimize performance. Disable it by default.
            broadcast_buffers=False,
            # Set gradient as bucket view to avoid unnecessary copies
            gradient_as_bucket_view=True,
            # TODO: tune bucket_cap_mb
            static_graph=static_graph,
        )
        return ddp_model
    elif trainer == "fsdp":
        fsdp_model = FSDP(
            model,
            device_id=torch.cuda.current_device()
        )
        return fsdp_model
    raise UnsupportedOperation(f"Only DDP, FSDP are currently supported, but tried to use {trainer}")
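A hypothetical single-process smoke test for apply_trainer() above. It assumes one CUDA device and NCCL, sets up a world-size-1 process group locally, and sets LOCAL_RANK because the helper reads it from the environment.

# Hypothetical smoke test; requires a CUDA device and the NCCL backend.
import os
import torch
import torch.distributed as dist

if __name__ == "__main__":
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    os.environ.setdefault("LOCAL_RANK", "0")
    dist.init_process_group("nccl", rank=0, world_size=1)
    torch.cuda.set_device(0)

    model = torch.nn.Linear(16, 16).cuda()
    ddp_model = apply_trainer(model, "ddp")
    print(type(ddp_model).__name__)  # DistributedDataParallel
    dist.destroy_process_group()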
import torch
import argparse
from torchbenchmark.util.backends import create_backend

from typing import List

def parse_torchscript_args(args: List[str]) -> argparse.Namespace:
    parser = argparse.ArgumentParser()
    # enable ofi by default
    parser.add_argument("--no-ofi", action='store_true', help="disable optimize_for_inference")
    parser.add_argument("--fuser", type=str, default="", choices=["fuser0", "fuser1", "fuser2", "fuser3"], help="enable fuser")
    args, unknown_args = parser.parse_known_args(args)
    return args, unknown_args

@create_backend
def torchscript(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
    model.jit = True
    backend_args, extra_args = parse_torchscript_args(backend_args)
    if model.device == "cpu" and backend_args.fuser == "fuser2":
        raise NotImplementedError(f"{backend_args.fuser} only works with GPU.")
    if model.test != "eval" and backend_args.fuser == "fuser3":
        raise NotImplementedError(f"{backend_args.fuser} only works with eval mode.")
    if backend_args.fuser:
        model.add_context(lambda: torch.jit.fuser(backend_args.fuser))

    def _torchscript():
        # customized jit callback function
        if hasattr(model, 'jit_callback'):
            if backend_args.no_ofi:
                raise NotImplementedError("Customized jit callback doesn't support options.")
            model.jit_callback()
            return
        module, example_inputs = model.get_module()
        if hasattr(torch.jit, '_script_pdt'):
            module = torch.jit._script_pdt(module, example_inputs=[example_inputs, ])
        else:
            module = torch.jit.script(module, example_inputs=[example_inputs, ])
        if model.test == "eval" and not backend_args.no_ofi:
            if backend_args.fuser != "fuser3":
                module = torch.jit.optimize_for_inference(module)
            else:
                module = torch.jit.freeze(module)
        model.set_module(module)

    return _torchscript, extra_args
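A quick illustration of the flag parsing used by the torchscript backend above: known flags are consumed, and anything unrecognized is passed through as extra_args for other components. The extra flag below is a made-up placeholder.

# Illustration of parse_torchscript_args() defined above; "--other-flag" is hypothetical.
ts_args, extra = parse_torchscript_args(["--fuser", "fuser1", "--other-flag", "1"])
print(ts_args.no_ofi, ts_args.fuser, extra)  # False fuser1 ['--other-flag', '1']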
import os
import argparse
import torch
from torchbenchmark.util.backends import create_backend

from typing import List, Tuple

try:
    from fx2ait.acc_tracer import acc_tracer
    from fx2ait.ait_module import AITModule
    from fx2ait.fx2ait import AITInterpreter
except:
    # if fx2ait is not available, skip it.
    pass

def parse_ait_args(args: List[str]) -> Tuple[argparse.Namespace, List[str]]:
    parser = argparse.ArgumentParser()
    parser.add_argument("--use_cuda_graph", action='store_true', help="enable CUDA Graph")
    args, unknown_args = parser.parse_known_args(args)
    return args, unknown_args

@create_backend
def fx2ait(model: 'torchbenchmark.util.model.BenchmarkModel', backend_args: List[str]):
    AIT_WORK_PATH = os.path.join("/tmp", ".torchbench", "ait")
    assert model.dargs.precision == "fp16", f"AITemplate only supports float16 precision, but got {model.dargs.precision}"
    OSS_AITModel = False
    try:
        # Load Non-OSS
        torch.ops.load_library("//deeplearning/ait:AITModel")
    except Exception:
        torch.ops.load_library("build/libait_model.so")
        OSS_AITModel = True
    ait_options, extra_args = parse_ait_args(backend_args)

    def _ait():
        mod, inputs = model.get_module()
        traced = acc_tracer.trace(mod, inputs)
        interp = AITInterpreter(traced, inputs, AIT_WORK_PATH, "logs")
        interp_result = interp.run()
        ctor = torch.classes.ait.AITModel if OSS_AITModel else torch.classes.fb.AITModel
        ait_mod = AITModule(
            ctor(
                interp_result.engine.lib_path,
                interp_result.input_names,
                interp_result.output_names,
                torch.float16,
                torch.float16,
                1,  # num_runtimes
            ),
            interp_result,
        )
        ait_mod.engine.use_cuda_graph = ait_options.use_cuda_graph

    return _ait, extra_args
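A quick illustration of the AIT backend's option parsing above; the actual _ait() compile path additionally requires fx2ait and an AITModel runtime library. The "--precision" flag below is just an example of a pass-through argument.

# Illustration of parse_ait_args() defined above; pass-through flag is hypothetical.
ait_args, passthrough = parse_ait_args(["--use_cuda_graph", "--precision", "fp16"])
print(ait_args.use_cuda_graph, passthrough)  # True ['--precision', 'fp16']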