import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import math
def is_iterable(maybe_iterable):
return isinstance(maybe_iterable, list) or isinstance(maybe_iterable, tuple)
def flatten_list(tens_list):
"""
flatten_list
"""
if not is_iterable(tens_list):
return tens_list
return torch.cat(tens_list, dim=0).view(len(tens_list), *tens_list[0].size() )
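# Illustrative usage sketch (added, not part of the original backend): flatten_list
# packs a Python list of equally-shaped per-step tensors into one stacked tensor,
# which is how stackedRNN turns its per-timestep outputs into (seq_len, bsz, features).
def _flatten_list_example():
    # hypothetical demo helper; shapes are arbitrary
    steps = [torch.zeros(4, 8), torch.ones(4, 8), 2 * torch.ones(4, 8)]
    stacked = flatten_list(steps)
    assert stacked.size() == (3, 4, 8)
    return stacked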
#These modules assume sequence-first input, i.e. (seq_len, batch, features) and batch_first=False
class bidirectionalRNN(nn.Module):
"""
bidirectionalRNN
"""
def __init__(self, inputRNN, num_layers=1, dropout = 0):
super(bidirectionalRNN, self).__init__()
self.dropout = dropout
self.fwd = stackedRNN(inputRNN, num_layers=num_layers, dropout = dropout)
self.bckwrd = stackedRNN(inputRNN.new_like(), num_layers=num_layers, dropout = dropout)
self.rnns = nn.ModuleList([self.fwd, self.bckwrd])
#collect hidden option will return all hidden/cell states from entire RNN
def forward(self, input, collect_hidden=False):
"""
forward()
"""
seq_len = input.size(0)
bsz = input.size(1)
fwd_out, fwd_hiddens = list(self.fwd(input, collect_hidden = collect_hidden))
bckwrd_out, bckwrd_hiddens = list(self.bckwrd(input, reverse=True, collect_hidden = collect_hidden))
output = torch.cat( [fwd_out, bckwrd_out], -1 )
hiddens = tuple( torch.cat(hidden, -1) for hidden in zip( fwd_hiddens, bckwrd_hiddens) )
return output, hiddens
def reset_parameters(self):
"""
reset_parameters()
"""
for rnn in self.rnns:
rnn.reset_parameters()
def init_hidden(self, bsz):
"""
init_hidden()
"""
for rnn in self.rnns:
rnn.init_hidden(bsz)
def detach_hidden(self):
"""
detach_hidden()
"""
for rnn in self.rnns:
rnn.detach_hidden()
def reset_hidden(self, bsz):
"""
reset_hidden()
"""
for rnn in self.rnns:
rnn.reset_hidden(bsz)
def init_inference(self, bsz):
"""
init_inference()
"""
for rnn in self.rnns:
rnn.init_inference(bsz)
#assumes hidden_state[0] of inputRNN is output hidden state
#constructor either takes an RNNCell or list of RNN layers
class stackedRNN(nn.Module):
"""
stackedRNN
"""
def __init__(self, inputRNN, num_layers=1, dropout=0):
super(stackedRNN, self).__init__()
self.dropout = dropout
if isinstance(inputRNN, RNNCell):
self.rnns = [inputRNN]
for i in range(num_layers-1):
self.rnns.append(inputRNN.new_like(inputRNN.output_size))
elif isinstance(inputRNN, list):
assert len(inputRNN) == num_layers, "RNN list length must be equal to num_layers"
self.rnns=inputRNN
else:
raise RuntimeError("inputRNN must be an RNNCell or a list of RNN layers")
self.nLayers = len(self.rnns)
self.rnns = nn.ModuleList(self.rnns)
'''
forward() returns output as hidden_state[0]: Tensor([sequence steps][batch size][features]).
If collect_hidden is True, it also returns Tuple(
[n_hidden_states][sequence steps] Tensor([layer][batch size][features])
).
If collect_hidden is False, it also returns Tuple(
[n_hidden_states] Tensor([layer][batch size][features])
).
'''
def forward(self, input, collect_hidden=False, reverse=False):
"""
forward()
"""
seq_len = input.size(0)
bsz = input.size(1)
inp_iter = reversed(range(seq_len)) if reverse else range(seq_len)
hidden_states = [[] for i in range(self.nLayers)]
outputs = []
for seq in inp_iter:
for layer in range(self.nLayers):
if layer == 0:
prev_out = input[seq]
outs = self.rnns[layer](prev_out)
if collect_hidden:
hidden_states[layer].append(outs)
elif seq == seq_len-1:
hidden_states[layer].append(outs)
prev_out = outs[0]
outputs.append(prev_out)
if reverse:
outputs = list(reversed(outputs))
'''
At this point outputs is in format:
list( [seq_length] x Tensor([bsz][features]) )
need to convert it to:
list( Tensor([seq_length][bsz][features]) )
'''
output = flatten_list(outputs)
'''
hidden_states at this point is in format:
list( [layer][seq_length][hidden_states] x Tensor([bsz][features]) )
need to convert it to:
For not collect hidden:
list( [hidden_states] x Tensor([layer][bsz][features]) )
For collect hidden:
list( [hidden_states][seq_length] x Tensor([layer][bsz][features]) )
'''
if not collect_hidden:
seq_len = 1
n_hid = self.rnns[0].n_hidden_states
new_hidden = [ [ [ None for k in range(self.nLayers)] for j in range(seq_len) ] for i in range(n_hid) ]
for i in range(n_hid):
for j in range(seq_len):
for k in range(self.nLayers):
new_hidden[i][j][k] = hidden_states[k][j][i]
hidden_states = new_hidden
#Now in format list( [hidden_states][seq_length][layer] x Tensor([bsz][features]) )
#Reverse seq_length if reverse
if reverse:
hidden_states = list( list(reversed(list(entry))) for entry in hidden_states)
#flatten layer dimension into tensor
hiddens = list( list(
flatten_list(seq) for seq in hidden )
for hidden in hidden_states )
#Now in format list( [hidden_states][seq_length] x Tensor([layer][bsz][features]) )
#Remove seq_length dimension if not collect_hidden
if not collect_hidden:
hiddens = list( entry[0] for entry in hiddens)
return output, hiddens
def reset_parameters(self):
"""
reset_parameters()
"""
for rnn in self.rnns:
rnn.reset_parameters()
def init_hidden(self, bsz):
"""
init_hidden()
"""
for rnn in self.rnns:
rnn.init_hidden(bsz)
def detach_hidden(self):
"""
detach_hidden()
"""
for rnn in self.rnns:
rnn.detach_hidden()
def reset_hidden(self, bsz):
"""
reset_hidden()
"""
for rnn in self.rnns:
rnn.reset_hidden(bsz)
def init_inference(self, bsz):
"""
init_inference()
"""
for rnn in self.rnns:
rnn.init_inference(bsz)
class RNNCell(nn.Module):
"""
RNNCell
gate_multiplier is related to the architecture you're working with
For LSTM-like it will be 4 and GRU-like will be 3.
Always assumes input is NOT batch_first.
Output size that's not hidden size will use output projection
Hidden_states is number of hidden states that are needed for cell
if one will go directly to cell as tensor, if more will go as list
"""
def __init__(self, gate_multiplier, input_size, hidden_size, cell, n_hidden_states = 2, bias = False, output_size = None):
super(RNNCell, self).__init__()
self.gate_multiplier = gate_multiplier
self.input_size = input_size
self.hidden_size = hidden_size
self.cell = cell
self.bias = bias
self.output_size = output_size
if output_size is None:
self.output_size = hidden_size
self.gate_size = gate_multiplier * self.hidden_size
self.n_hidden_states = n_hidden_states
self.w_ih = nn.Parameter(torch.Tensor(self.gate_size, self.input_size))
self.w_hh = nn.Parameter(torch.Tensor(self.gate_size, self.output_size))
#Check if there's recurrent projection
if(self.output_size != self.hidden_size):
self.w_ho = nn.Parameter(torch.Tensor(self.output_size, self.hidden_size))
self.b_ih = self.b_hh = None
if self.bias:
self.b_ih = nn.Parameter(torch.Tensor(self.gate_size))
self.b_hh = nn.Parameter(torch.Tensor(self.gate_size))
#hidden states for forward
self.hidden = [ None for states in range(self.n_hidden_states)]
self.reset_parameters()
def new_like(self, new_input_size=None):
"""
new_like()
"""
if new_input_size is None:
new_input_size = self.input_size
return type(self)(self.gate_multiplier,
new_input_size,
self.hidden_size,
self.cell,
self.n_hidden_states,
self.bias,
self.output_size)
#Default init: uniform in [-1/sqrt(hidden_size), 1/sqrt(hidden_size)] for all parameters (a Xavier-based variant is kept commented out below)
def reset_parameters(self, gain=1):
"""
reset_parameters()
"""
stdev = 1.0 / math.sqrt(self.hidden_size)
for param in self.parameters():
param.data.uniform_(-stdev, stdev)
'''
Xavier reset:
def reset_parameters(self, gain=1):
stdv = 1.0 / math.sqrt(self.gate_size)
for param in self.parameters():
if (param.dim() > 1):
torch.nn.init.xavier_normal(param, gain)
else:
param.data.uniform_(-stdv, stdv)
'''
def init_hidden(self, bsz):
"""
init_hidden()
"""
for param in self.parameters():
if param is not None:
a_param = param
break
for i, _ in enumerate(self.hidden):
if(self.hidden[i] is None or self.hidden[i].data.size()[0] != bsz):
if i==0:
hidden_size = self.output_size
else:
hidden_size = self.hidden_size
tens = a_param.data.new(bsz, hidden_size).zero_()
self.hidden[i] = Variable(tens, requires_grad=False)
def reset_hidden(self, bsz):
"""
reset_hidden()
"""
for i, _ in enumerate(self.hidden):
self.hidden[i] = None
self.init_hidden(bsz)
def detach_hidden(self):
"""
detach_hidden()
"""
for i, _ in enumerate(self.hidden):
if self.hidden[i] is None:
raise RuntimeError("Must initialize hidden state before you can detach it")
for i, _ in enumerate(self.hidden):
self.hidden[i] = self.hidden[i].detach()
def forward(self, input):
"""
forward()
if not inited or bsz has changed this will create hidden states
"""
self.init_hidden(input.size()[0])
hidden_state = self.hidden[0] if self.n_hidden_states == 1 else self.hidden
self.hidden = self.cell(input, hidden_state, self.w_ih, self.w_hh, b_ih=self.b_ih, b_hh=self.b_hh)
if(self.n_hidden_states > 1):
self.hidden = list(self.hidden)
else:
self.hidden=[self.hidden]
if self.output_size != self.hidden_size:
self.hidden[0] = F.linear(self.hidden[0], self.w_ho)
return tuple(self.hidden)
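# Added note: RNNCell keeps its recurrent state on the module itself (self.hidden)
# rather than threading it through forward(), so a training loop is expected to call
# detach_hidden() between truncated-BPTT segments and reset_hidden()/init_hidden()
# when the batch size changes or a new sequence begins.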
|
import torch
from torch.nn._functions.rnn import LSTMCell, RNNReLUCell, RNNTanhCell, GRUCell
from .RNNBackend import bidirectionalRNN, stackedRNN, RNNCell
from .cells import mLSTMRNNCell, mLSTMCell
def toRNNBackend(inputRNN, num_layers, bidirectional=False, dropout = 0):
"""
:class:`toRNNBackend`
"""
if bidirectional:
return bidirectionalRNN(inputRNN, num_layers, dropout = dropout)
else:
return stackedRNN(inputRNN, num_layers, dropout = dropout)
def LSTM(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
:class:`LSTM`
"""
inputRNN = RNNCell(4, input_size, hidden_size, LSTMCell, 2, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def GRU(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
:class:`GRU`
"""
inputRNN = RNNCell(3, input_size, hidden_size, GRUCell, 1, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def ReLU(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
:class:`ReLU`
"""
inputRNN = RNNCell(1, input_size, hidden_size, RNNReLUCell, 1, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def Tanh(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
:class:`Tanh`
"""
inputRNN = RNNCell(1, input_size, hidden_size, RNNTanhCell, 1, bias, output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
def mLSTM(input_size, hidden_size, num_layers, bias=True, batch_first=False, dropout=0, bidirectional=False, output_size = None):
"""
:class:`mLSTM`
"""
inputRNN = mLSTMRNNCell(input_size, hidden_size, bias=bias, output_size=output_size)
return toRNNBackend(inputRNN, num_layers, bidirectional, dropout=dropout)
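# Illustrative usage sketch (an assumption about the intended call pattern, not part
# of the original module): the factories above return seq-first modules, so input is
# shaped (seq_len, batch, input_size). This relies on the old private
# torch.nn._functions.rnn import at the top of this file, so it assumes a PyTorch
# version that still ships those cells.
def _lstm_usage_example():
    rnn = LSTM(input_size=32, hidden_size=64, num_layers=2, bias=True)
    x = torch.randn(10, 4, 32)  # (seq_len, batch, input_size)
    output, hidden = rnn(x)     # output: (seq_len, batch, hidden_size)
    return output, hidden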
|
from .models import LSTM, GRU, ReLU, Tanh, mLSTM
__all__ = ['LSTM', 'GRU', 'ReLU', 'Tanh', 'mLSTM']
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from .RNNBackend import RNNCell
from torch.nn._functions.thnn import rnnFusedPointwise as fusedBackend
import math
class mLSTMRNNCell(RNNCell):
"""
mLSTMRNNCell
"""
def __init__(self, input_size, hidden_size, bias = False, output_size = None):
gate_multiplier = 4
super(mLSTMRNNCell, self).__init__(gate_multiplier, input_size, hidden_size, mLSTMCell, n_hidden_states = 2, bias = bias, output_size = output_size)
self.w_mih = nn.Parameter(torch.Tensor(self.output_size, self.input_size))
self.w_mhh = nn.Parameter(torch.Tensor(self.output_size, self.output_size))
self.reset_parameters()
def forward(self, input):
"""
mLSTMRNNCell.forward()
"""
#if not inited or bsz has changed this will create hidden states
self.init_hidden(input.size()[0])
hidden_state = self.hidden[0] if self.n_hidden_states == 1 else self.hidden
self.hidden = list(
self.cell(input, hidden_state, self.w_ih, self.w_hh, self.w_mih, self.w_mhh,
b_ih=self.b_ih, b_hh=self.b_hh)
)
if self.output_size != self.hidden_size:
self.hidden[0] = F.linear(self.hidden[0], self.w_ho)
return tuple(self.hidden)
def new_like(self, new_input_size=None):
if new_input_size is None:
new_input_size = self.input_size
return type(self)(
new_input_size,
self.hidden_size,
self.bias,
self.output_size)
def mLSTMCell(input, hidden, w_ih, w_hh, w_mih, w_mhh, b_ih=None, b_hh=None):
"""
mLSTMCell
"""
if input.is_cuda:
igates = F.linear(input, w_ih)
m = F.linear(input, w_mih) * F.linear(hidden[0], w_mhh)
hgates = F.linear(m, w_hh)
state = fusedBackend.LSTMFused.apply
return state(igates, hgates, hidden[1], b_ih, b_hh)
hx, cx = hidden
m = F.linear(input, w_mih) * F.linear(hidden[0], w_mhh)
gates = F.linear(input, w_ih, b_ih) + F.linear(m, w_hh, b_hh)
ingate, forgetgate, cellgate, outgate = gates.chunk(4, 1)
ingate = torch.sigmoid(ingate)
forgetgate = torch.sigmoid(forgetgate)
cellgate = torch.tanh(cellgate)
outgate = torch.sigmoid(outgate)
cy = (forgetgate * cx) + (ingate * cellgate)
hy = outgate * torch.tanh(cy)
return hy, cy
|
import torch
import torch.distributed as dist
from torch.nn import Parameter
from torch.nn import Module
from apex.parallel import DistributedDataParallel as DDP
import argparse
import os
parser = argparse.ArgumentParser(description='allreduce hook example')
parser.add_argument("--local_rank", default=0, type=int)
args = parser.parse_args()
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
torch.set_printoptions(precision=10)
torch.manual_seed(args.local_rank)
class Model(Module):
def __init__(self):
super(Model, self).__init__()
self.a = Parameter(torch.cuda.FloatTensor(4096*4096).fill_(1.0))
self.b = Parameter(torch.cuda.FloatTensor(4096*4096).fill_(2.0))
def forward(self, input):
return (input*self.a)*self.b
model = Model()
# model = DDP(model, message_size=1, gradient_predivide_factor=8.0)
model = DDP(model, delay_allreduce=True)
# model = DDP(model, message_size=1, allreduce_trigger_params=[model.b])
x = torch.cuda.FloatTensor(4096*4096)
passed = True
torch.cuda.cudart().cudaProfilerStart()
for i in range(10):
x.fill_(i + args.local_rank) # fill x with new values every iteration for sanity
model.zero_grad()
out = model(x)
loss = out.sum()
# torch.cuda.nvtx.range_push("backward")
loss.backward()
# torch.cuda.nvtx.range_pop()
# torch.cuda.nvtx.range_push("synchronize() + info")
# torch.cuda.synchronize()
print("i = {}".format(i))
def info(name, param, val):
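# Added derivation (the expected value below only holds for the 2-process launch this
# test is written for): d(loss)/d(a) = b * sum(x) and d(loss)/d(b) = a * sum(x), x holds
# (i + rank) in all 4096*4096 elements, and apex DDP averages gradients across ranks,
# so the per-element factor is mean over ranks of (i + rank) = (2*i + 1)/2.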
expected = val*4096*4096*(2.*i+1)/2.
actual = param.grad.data.sum().item()
print(name+": grad.data_ptr() = {}, expected sum {}, got {}".format(
param.grad.data_ptr(), expected, actual))
return (expected == actual)
if not info("model.a", model.module.a, 2.): passed = False
if not info("model.b", model.module.b, 1.): passed = False
# torch.cuda.nvtx.range_pop()
torch.cuda.cudart().cudaProfilerStop()
print("passed = ", passed)
|
import torch
import argparse
import os
from apex import amp
# FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead)
from apex.parallel import DistributedDataParallel
parser = argparse.ArgumentParser()
# FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied
# automatically by torch.distributed.launch.
parser.add_argument("--local_rank", default=0, type=int)
args = parser.parse_args()
# FOR DISTRIBUTED: If we are running under torch.distributed.launch,
# the 'WORLD_SIZE' environment variable will also be set automatically.
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
# FOR DISTRIBUTED: Set the device according to local_rank.
torch.cuda.set_device(args.local_rank)
# FOR DISTRIBUTED: Initialize the backend. torch.distributed.launch will provide
# environment variables, and requires that you use init_method=`env://`.
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
torch.manual_seed(torch.distributed.get_rank())
torch.backends.cudnn.benchmark = True
N, D_in, D_out = 64, 1024, 16
# Each process receives its own batch of "fake input data" and "fake target data."
# The "training loop" in each process just uses this fake batch over and over.
# https://github.com/NVIDIA/apex/tree/master/examples/imagenet provides a more realistic
# example of distributed data sampling for both training and validation.
x = torch.randn(N, D_in, device='cuda')
y = torch.randn(N, D_out, device='cuda')
model = torch.nn.Linear(D_in, D_out).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = amp.initialize(model, optimizer, opt_level="O2")
if args.distributed:
# FOR DISTRIBUTED: After amp.initialize, wrap the model with
# apex.parallel.DistributedDataParallel.
model = DistributedDataParallel(model)
# torch.nn.parallel.DistributedDataParallel is also fine, with some added args:
# model = torch.nn.parallel.DistributedDataParallel(model,
# device_ids=[args.local_rank],
# output_device=args.local_rank)
loss_fn = torch.nn.MSELoss()
for t in range(500):
optimizer.zero_grad()
y_pred = model(x)
loss = loss_fn(y_pred, y)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
if args.local_rank == 0:
print("final loss = ", loss)
torch.save(list(model.parameters()), "rank{}model.pth".format(torch.distributed.get_rank()))
torch.save(list(amp.master_params(optimizer)), "rank{}master.pth".format(torch.distributed.get_rank()))
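# How this example is typically launched (filename assumed for illustration;
# torch.distributed.launch sets WORLD_SIZE and passes --local_rank, which is exactly
# what the argument parsing at the top of this script expects):
#   python -m torch.distributed.launch --nproc_per_node=2 distributed_data_parallel.py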
|
import torch
model_params_rank0 = torch.load("rank0model.pth",
map_location = lambda storage, loc: storage.cuda(0))
model_params_rank1 = torch.load("rank1model.pth",
map_location = lambda storage, loc: storage.cuda(0))
master_params_rank0 = torch.load("rank0master.pth",
map_location = lambda storage, loc: storage.cuda(0))
master_params_rank1 = torch.load("rank1master.pth",
map_location = lambda storage, loc: storage.cuda(0))
for model_rank0, model_rank1, master_rank0, master_rank1 in zip(
model_params_rank0,
model_params_rank1,
master_params_rank0,
master_params_rank1):
assert torch.allclose(model_rank0, model_rank1), "Model param mismatch"
assert torch.allclose(master_rank0, master_rank1), "Master param mismatch"
# Some debugging/investigation assistance code:
# maxval, maxind = torch.max(((torch.abs(model_rank0).float())/torch.abs(master_rank0)).view(-1), 0)
# offending_val_half = model_rank0.view(-1)[maxind.item()]
# offending_val_float = master_rank0.view(-1)[maxind.item()]
# print(maxval.item(), maxind.item(), offending_val_half.item(), offending_val_float.item(),
# offending_val_float.half().item())
# rtol needs to be > 2^-11 because of denormals...
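# (2^-11 is roughly 4.9e-4: fp16 has 10 explicit mantissa bits, so a correctly rounded
# float->half conversion can differ by up to ~2^-11 in relative terms for normal values,
# and denormals at the bottom of the fp16 range lose even more precision; rtol=.005
# leaves headroom for both.)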
assert torch.allclose(model_rank0, master_rank0.half(), rtol=.005), "Model-master mismatch"
print("OK: Model and master params match across ranks.")
|
import torch
import numpy as np
import apex
import syncbn
import os
import argparse
import torch.optim as optim
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, close)
z = a - b
index = (np.abs(z) >= error + error * np.abs(b)).nonzero()
print("dif : ", z[index])
print("inp1 : ", a[index])
print("inp2 : ", b[index])
return close
feature_size = 10
space_size = 40
batch_size = 32
from apex.parallel import DistributedDataParallel as DDP
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--fp16", action='store_true', default=False)
parser.add_argument("--fp64", action='store_true', default=False)
args = parser.parse_args()
args.world_size = int(os.environ['WORLD_SIZE'])
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
start = args.local_rank * batch_size//args.world_size
finish = (args.local_rank + 1) * batch_size//args.world_size
error = 1e-5
dtype = np.float32
if args.fp16:
error = 1e-3
dtype = np.float16
elif args.fp64:
error = 1e-8
dtype = np.float64
np.random.seed(18)
inp = np.random.randn(batch_size, feature_size, space_size, space_size).astype(dtype)
grad = np.random.randn(batch_size, feature_size, space_size, space_size).astype(dtype)
weight = np.random.randn(feature_size).astype(dtype)
bias = np.random.randn(feature_size).astype(dtype)
type_tensor = torch.cuda.FloatTensor
if args.fp16:
type_tensor = torch.cuda.HalfTensor
if args.fp64:
type_tensor = torch.cuda.DoubleTensor
ref_tensor = torch.cuda.DoubleTensor
inp_t = type_tensor(inp)
weight_t = type_tensor(weight)
bias_t = type_tensor(bias)
inp_r = ref_tensor(inp.transpose(1, 0, 2, 3).reshape(feature_size, -1))
inp2_r = ref_tensor(inp)
weight_r = ref_tensor(weight).view(-1, 1, 1)
bias_r = ref_tensor(bias).view(-1, 1, 1)
grad_output_t = type_tensor(grad)
m = inp_r.mean(1)
b_v = inp_r.var(1, unbiased=False)
unb_v = inp_r.var(1, unbiased=True)
eps = 1e-5
mean, var_biased = syncbn.welford_mean_var(inp_t)
inv_std = 1.0 / torch.sqrt(var_biased + eps)
bn = torch.nn.BatchNorm2d(feature_size).cuda()
bn.momentum = 1.0
bn.weight.data = weight_t.clone()
bn.bias.data = bias_t.clone()
if args.fp16:
bn.half()
if args.fp64:
bn.double()
inp_bn = inp_t.clone().requires_grad_()
grad_bn = grad_output_t.clone().detach()
out_bn = bn(inp_bn)
out_bn.backward(grad_bn)
# compensating the averaging over processes done by DDP
# in order to produce mathematically equivalent result
# https://github.com/NVIDIA/apex/issues/134#issuecomment-458307368
for param in bn.parameters():
param.grad = param.grad / args.world_size
bn_opt = optim.SGD(bn.parameters(), lr=1.0)
sbn = apex.parallel.SyncBatchNorm(feature_size).cuda()
sbn.momentum = 1.0
sbn.weight.data = weight_t.clone()
sbn.bias.data = bias_t.clone()
if args.fp16:
sbn.half()
if args.fp64:
sbn.double()
sbn = DDP(sbn)
sbn_opt = optim.SGD(sbn.parameters(), lr=1.0)
inp_sbn = inp_t.clone().requires_grad_()
grad_sbn = grad_output_t.clone().detach()
out_sbn = sbn(inp_sbn[start:finish])
out_sbn.backward(grad_sbn[start:finish])
sbn_result = True
bn_result = True
if args.local_rank == 0:
sbn_result = compare("comparing mean: ", mean, m, error) and sbn_result
sbn_result = compare("comparing biased variance: ", var_biased, b_v, error) and sbn_result
out = syncbn.batchnorm_forward(inp_t, mean, inv_std, weight_t, bias_t)
out_r = weight_r * (inp2_r - m.view(-1, 1, 1)) * torch.rsqrt(b_v.view(-1,1,1) + eps) + bias_r
if args.local_rank == 0:
sbn_result = compare("comparing output: ", out, out_r, error) and sbn_result
compare("comparing bn output: ", out_bn, out_r, error)
grad_output_t = type_tensor(grad)
grad_output_r = ref_tensor(grad.transpose(1, 0, 2, 3).reshape(feature_size, -1))
grad_output2_r = ref_tensor(grad)
grad_bias_r = grad_output_r.sum(1)
grad_weight_r = ((inp2_r - m.view(-1, 1, 1)) * torch.rsqrt(b_v.view(-1,1,1) + eps) * grad_output2_r).transpose(1,0).contiguous().view(feature_size, -1).sum(1)
mean_dy_r = grad_output_r.mean(1)
mean_dy_xmu_r = ((inp2_r - m.view(-1, 1, 1)) * grad_output2_r).transpose(1,0).contiguous().view(feature_size, -1).mean(1)
grad_input_r = (grad_output2_r - mean_dy_r.view(-1, 1, 1) - (inp2_r - m.view(-1, 1, 1)) / (b_v.view(-1,1,1) + eps) * mean_dy_xmu_r.view(-1, 1, 1) ) * torch.rsqrt(b_v.view(-1,1,1) + eps) * weight_r.view(-1,1,1)
mean_dy, mean_dy_xmu, grad_weight, grad_bias = syncbn.reduce_bn(grad_output_t, inp_t, mean, inv_std, weight_t)
grad_input = syncbn.batchnorm_backward(grad_output_t, inp_t, mean, inv_std, weight_t, mean_dy, mean_dy_xmu)
if args.local_rank == 0:
sbn_result = compare("comparing bias grad: ", grad_bias, grad_bias_r, error) and sbn_result
sbn_result = compare("comparing weight grad: ", grad_weight, grad_weight_r, error) and sbn_result
sbn_result = compare("comparing mean_dy grad: ", mean_dy, mean_dy_r, error) and sbn_result
sbn_result = compare("comparing mean_dy_xmu grad: ", mean_dy_xmu, mean_dy_xmu_r, error) and sbn_result
sbn_result = compare("comparing input grad: ", grad_input, grad_input_r, error) and sbn_result
compare("comparing bn input grad: ", inp_bn.grad, grad_input_r, error)
if args.local_rank == 0:
sbn_result = compare("comparing running_mean: ", bn.running_mean.data, sbn.module.running_mean.data, error) and sbn_result
sbn_result = compare("comparing running_variance: ", bn.running_var.data, sbn.module.running_var.data, error) and sbn_result
# execute by both
compare("comparing layers output: ", out_bn[start:finish], out_sbn, error) and sbn_result
compare("comparing layers grad_input: ", inp_bn.grad[start:finish], inp_sbn.grad[start:finish], error) and sbn_result
bn_opt.step()
sbn_opt.step()
if args.local_rank == 0:
compare("comparing bn vs sbn bias: ", bn.bias, sbn.module.bias, error)
compare("comparing bn vs sbn weight: ", bn.weight, sbn.module.weight, error)
if sbn_result:
print("====SBN two gpu passed tests")
else:
print("*SBN two gpu failed*")
|
import torch
import numpy as np
import apex
if True:
print("using setup tools")
import syncbn
else:
print("using jit")
from torch.utils.cpp_extension import load
syncbn = load(name='syncbn', sources=['../../csrc/syncbn.cpp', '../../csrc/welford.cu'])
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, close)
z = a - b
index = (np.abs(z) >= error + error * np.abs(b)).nonzero()
print("dif : ", z[index])
print("inp1 : ", a[index])
print("inp2 : ", b[index])
return close
feature_size = 10
space_size = 16
batch_size = 5
error = 1e-5
np.random.seed(1)
dtype = np.float32
inp = (np.random.randn(batch_size, feature_size, space_size, space_size)).astype(dtype)
grad = (np.random.randn(batch_size, feature_size, space_size, space_size)).astype(dtype)
weight = (np.random.randn(feature_size)).astype(dtype)
bias = (np.random.randn(feature_size)).astype(dtype)
type_tensor = torch.cuda.FloatTensor
ref_tensor = torch.cuda.DoubleTensor
inp_t = type_tensor(inp)
weight_t = type_tensor(weight)
bias_t = type_tensor(bias)
inp_r = ref_tensor(inp.transpose(1, 0, 2, 3).reshape(feature_size, -1))
inp2_r = ref_tensor(inp)
weight_r = ref_tensor(weight).view(-1, 1, 1)
bias_r = ref_tensor(bias).view(-1, 1, 1)
grad_output_t = type_tensor(grad)
m = inp_r.mean(1)
b_v = inp_r.var(1, unbiased=False)
unb_v = inp_r.var(1, unbiased=True)
eps = 1e-5
#mean, var, var_biased = syncbn.welford_mean_var(inp_t)
mean, var_biased = syncbn.welford_mean_var(inp_t)
inv_std = 1.0 / torch.sqrt(var_biased + eps)
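# Reference sketch (added; not used by the assertions below): syncbn.welford_mean_var
# is expected to return the per-channel biased mean/variance of an NCHW tensor, i.e.
# a reduction over the N, H, W axes -- the same quantities computed from inp_r above.
def _reference_mean_var(x):
    # x: (N, C, H, W) tensor; returns per-channel mean and biased variance
    flat = x.transpose(0, 1).contiguous().view(x.size(1), -1)
    return flat.mean(1), flat.var(1, unbiased=False)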
bn = torch.nn.BatchNorm2d(feature_size).cuda()
bn.momentum = 1.0
bn.weight.data = weight_t.clone()
bn.bias.data = bias_t.clone()
inp_bn = inp_t.clone().requires_grad_()
grad_bn = grad_output_t.clone().detach()
out_bn = bn(inp_bn)
out_bn.backward(grad_bn)
sbn = apex.parallel.SyncBatchNorm(feature_size).cuda()
sbn.momentum = 1.0
sbn.weight.data = weight_t.clone()
sbn.bias.data = bias_t.clone()
inp_sbn = inp_t.clone().requires_grad_()
grad_sbn = grad_output_t.clone().detach()
out_sbn = sbn(inp_sbn)
out_sbn.backward(grad_sbn)
sbn_c_last = apex.parallel.SyncBatchNorm(feature_size, channel_last=True).cuda()
sbn_c_last.momentum = 1.0
sbn_c_last.weight.data = weight_t.clone()
sbn_c_last.bias.data = bias_t.clone()
inp_sbn_c_last = inp_t.clone().transpose(-1, 1).contiguous().requires_grad_()
grad_sbn_c_last = grad_output_t.clone().transpose(-1, 1).contiguous().detach()
out_sbn_c_last = sbn_c_last(inp_sbn_c_last)
out_sbn_c_last.backward(grad_sbn_c_last)
sbn_result = True
sbn_result_c_last = True
bn_result = True
sbn_result = compare("comparing mean: ", mean, m, error) and sbn_result
#sbn_result = compare("comparing variance: ", var, unb_v, error) and sbn_result
sbn_result = compare("comparing biased variance: ", var_biased, b_v, error) and sbn_result
out = syncbn.batchnorm_forward(inp_t, mean, inv_std, weight_t, bias_t)
out_r = weight_r * (inp2_r - m.view(-1, 1, 1)) * torch.rsqrt(b_v.view(-1,1,1) + eps) + bias_r
sbn_result = compare("comparing output: ", out, out_r, error) and sbn_result
compare("comparing bn output: ", out_bn, out_r, error)
grad_output_t = type_tensor(grad)
grad_output_r = ref_tensor(grad.transpose(1, 0, 2, 3).reshape(feature_size, -1))
grad_output2_r = ref_tensor(grad)
grad_bias_r = grad_output_r.sum(1)
grad_weight_r = ((inp2_r - m.view(-1, 1, 1)) * torch.rsqrt(b_v.view(-1,1,1) + eps) * grad_output2_r).transpose(1,0).contiguous().view(feature_size, -1).sum(1)
mean_dy_r = grad_output_r.mean(1)
mean_dy_xmu_r = ((inp2_r - m.view(-1, 1, 1)) * grad_output2_r).transpose(1,0).contiguous().view(feature_size, -1).mean(1)
grad_input_r = (grad_output2_r - mean_dy_r.view(-1, 1, 1) - (inp2_r - m.view(-1, 1, 1)) / (b_v.view(-1,1,1) + eps) * mean_dy_xmu_r.view(-1, 1, 1) ) * torch.rsqrt(b_v.view(-1,1,1) + eps) * weight_r.view(-1,1,1)
mean_dy, mean_dy_xmu, grad_weight, grad_bias = syncbn.reduce_bn(grad_output_t, inp_t, mean, inv_std, weight_t)
grad_input = syncbn.batchnorm_backward(grad_output_t, inp_t, mean, inv_std, weight_t, mean_dy, mean_dy_xmu)
sbn_result = compare("comparing bias grad: ", grad_bias, grad_bias_r, error) and sbn_result
sbn_result = compare("comparing weight grad: ", grad_weight, grad_weight_r, error) and sbn_result
sbn_result = compare("comparing mean_dy grad: ", mean_dy, mean_dy_r, error) and sbn_result
sbn_result = compare("comparing mean_dy_xmu grad: ", mean_dy_xmu, mean_dy_xmu_r, error) and sbn_result
sbn_result = compare("comparing input grad: ", grad_input, grad_input_r, error) and sbn_result
compare("comparing bn input grad: ", inp_bn.grad, grad_input_r, error)
sbn_result = compare("comparing sbn input grad: ", inp_sbn.grad, grad_input_r, error) and sbn_result
compare("comparing bn/sbn output: ", out_bn, out_sbn, error)
sbn_result = compare("comparing running_mean: ", bn.running_mean.data, sbn.running_mean.data, error) and sbn_result
sbn_result = compare("comparing running_variance: ", bn.running_var.data, sbn.running_var.data, error) and sbn_result
compare("comparing grad_input: ", inp_bn.grad, inp_sbn.grad, error)
compare("comparing grad_bias: ", bn.bias.grad, sbn.bias.grad, error)
compare("comparing grad_bias bn to ref: ", bn.bias.grad, grad_bias_r, error)
sbn_result = compare("comparing grad_bias sbn to ref: ", sbn.bias.grad, grad_bias_r, error) and sbn_result
compare("comparing grad_weight: ", bn.weight.grad, sbn.weight.grad, error)
compare("comparing grad_weight bn to ref: ", bn.weight.grad, grad_weight_r, error)
sbn_result = compare("comparing grad_weight sbn to ref: ", sbn.weight.grad, grad_weight_r, error) and sbn_result
compare("comparing channel last bn/sbn output: ", out_bn, out_sbn_c_last.transpose(-1, 1).contiguous(), error)
sbn_result_c_last = compare("comparing channel last running_mean: ", bn.running_mean.data, sbn_c_last.running_mean.data, error) and sbn_result_c_last
sbn_result_c_last = compare("comparing channel last running_variance: ", bn.running_var.data, sbn_c_last.running_var.data, error) and sbn_result_c_last
compare("comparing channel last grad_input: ", inp_bn.grad, inp_sbn_c_last.grad.transpose(-1, 1).contiguous(), error)
compare("comparing channel last grad_bias: ", bn.bias.grad, sbn_c_last.bias.grad, error)
sbn_result_c_last = compare("comparing channel last grad_bias sbn to ref: ", sbn_c_last.bias.grad, grad_bias_r, error) and sbn_result_c_last
compare("comparing channel last grad_weight: ", bn.weight.grad, sbn_c_last.weight.grad, error)
sbn_result_c_last = compare("comparing channel last grad_weight sbn to ref: ", sbn_c_last.weight.grad, grad_weight_r, error) and sbn_result_c_last
if sbn_result:
print("====SBN single gpu passed tests")
else:
print("*SBN single gpu failed*")
if sbn_result_c_last:
print("====SBN channel last single gpu passed tests")
else:
print("*SBN channel last single gpu failed*")
|
import torch
import numpy as np
import apex
import syncbn
import os
import argparse
import torch.optim as optim
def compare(desc, inp1, inp2, error):
a = inp1.clone().detach().cpu().numpy()
b = inp2.clone().detach().cpu().numpy()
close = np.allclose(a,b, error, error)
if not close:
print(desc, close)
z = a - b
index = (np.abs(z) >= error + error * np.abs(b)).nonzero()
print("dif : ", z[index])
print("inp1 : ", a[index])
print("inp2 : ", b[index])
return close
feature_size = 10
space_size = 40
batch_size = 32
from apex.parallel import DistributedDataParallel as DDP
parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument("--fp16", action='store_true', default=False)
parser.add_argument("--fp64", action='store_true', default=False)
parser.add_argument("--group_size", default=0, type=int)
args = parser.parse_args()
try:
args.world_size = int(os.environ['WORLD_SIZE'])
except KeyError:
print("This is a multi-gpu test. To run it please use 'python -m torch.distributed.launch --nproc_per_node=<num gpus> test_groups.py <more options>'")
exit(1)
torch.cuda.set_device(args.local_rank)
torch.distributed.init_process_group(backend='nccl', init_method='env://')
start = (args.local_rank%args.group_size) * batch_size//args.group_size
finish = (args.local_rank%args.group_size + 1) * batch_size//args.group_size
error = 1e-5
dtype = np.float32
if args.fp16:
error = 1e-3
dtype = np.float16
elif args.fp64:
error = 1e-8
dtype = np.float64
np.random.seed(18 + args.local_rank//args.group_size)
inp = np.random.randn(batch_size, feature_size, space_size, space_size).astype(dtype)
grad = np.random.randn(batch_size, feature_size, space_size, space_size).astype(dtype)
weight = np.random.randn(feature_size).astype(dtype)
bias = np.random.randn(feature_size).astype(dtype)
type_tensor = torch.cuda.FloatTensor
if args.fp16:
type_tensor = torch.cuda.HalfTensor
if args.fp64:
type_tensor = torch.cuda.DoubleTensor
ref_tensor = torch.cuda.DoubleTensor
inp_t = type_tensor(inp)
weight_t = type_tensor(weight)
bias_t = type_tensor(bias)
inp_r = ref_tensor(inp.transpose(1, 0, 2, 3).reshape(feature_size, -1))
inp2_r = ref_tensor(inp)
weight_r = ref_tensor(weight).view(-1, 1, 1)
bias_r = ref_tensor(bias).view(-1, 1, 1)
grad_output_t = type_tensor(grad)
m = inp_r.mean(1)
b_v = inp_r.var(1, unbiased=False)
unb_v = inp_r.var(1, unbiased=True)
eps = 1e-5
mean, var_biased = syncbn.welford_mean_var(inp_t)
inv_std = 1.0 / torch.sqrt(var_biased + eps)
bn = torch.nn.BatchNorm2d(feature_size).cuda()
bn.momentum = 1.0
bn.weight.data = weight_t.clone()
bn.bias.data = bias_t.clone()
if args.fp16:
bn.half()
if args.fp64:
bn.double()
bn = DDP(bn)
inp_bn = inp_t.clone().requires_grad_()
grad_bn = grad_output_t.clone().detach()
out_bn = bn(inp_bn)
out_bn.backward(grad_bn)
# compensating the averaging over processes done by DDP
# in order to produce mathematically equivalent result
# https://github.com/NVIDIA/apex/issues/134#issuecomment-458307368
for param in bn.parameters():
param.grad = param.grad / args.group_size
bn_opt = optim.SGD(bn.parameters(), lr=1.0)
sbn = apex.parallel.SyncBatchNorm(feature_size, process_group=apex.parallel.create_syncbn_process_group(args.group_size)).cuda()
sbn.momentum = 1.0
sbn.weight.data = weight_t.clone()
sbn.bias.data = bias_t.clone()
if args.fp16:
sbn.half()
if args.fp64:
sbn.double()
sbn = DDP(sbn)
sbn_opt = optim.SGD(sbn.parameters(), lr=1.0)
inp_sbn = inp_t.clone().requires_grad_()
grad_sbn = grad_output_t.clone().detach()
out_sbn = sbn(inp_sbn[start:finish])
out_sbn.backward(grad_sbn[start:finish])
sbn_result = True
bn_result = True
if args.local_rank == 0:
sbn_result = compare("comparing mean: ", mean, m, error) and sbn_result
sbn_result = compare("comparing biased variance: ", var_biased, b_v, error) and sbn_result
out = syncbn.batchnorm_forward(inp_t, mean, inv_std, weight_t, bias_t)
out_r = weight_r * (inp2_r - m.view(-1, 1, 1)) * torch.rsqrt(b_v.view(-1,1,1) + eps) + bias_r
if args.local_rank == 0:
sbn_result = compare("comparing output: ", out, out_r, error) and sbn_result
compare("comparing bn output: ", out_bn, out_r, error)
grad_output_t = type_tensor(grad)
grad_output_r = ref_tensor(grad.transpose(1, 0, 2, 3).reshape(feature_size, -1))
grad_output2_r = ref_tensor(grad)
grad_bias_r = grad_output_r.sum(1)
grad_weight_r = ((inp2_r - m.view(-1, 1, 1)) * torch.rsqrt(b_v.view(-1,1,1) + eps) * grad_output2_r).transpose(1,0).contiguous().view(feature_size, -1).sum(1)
mean_dy_r = grad_output_r.mean(1)
mean_dy_xmu_r = ((inp2_r - m.view(-1, 1, 1)) * grad_output2_r).transpose(1,0).contiguous().view(feature_size, -1).mean(1)
grad_input_r = (grad_output2_r - mean_dy_r.view(-1, 1, 1) - (inp2_r - m.view(-1, 1, 1)) / (b_v.view(-1,1,1) + eps) * mean_dy_xmu_r.view(-1, 1, 1) ) * torch.rsqrt(b_v.view(-1,1,1) + eps) * weight_r.view(-1,1,1)
mean_dy, mean_dy_xmu, grad_weight, grad_bias = syncbn.reduce_bn(grad_output_t, inp_t, mean, inv_std, weight_t)
grad_input = syncbn.batchnorm_backward(grad_output_t, inp_t, mean, inv_std, weight_t, mean_dy, mean_dy_xmu)
if args.local_rank == 0:
sbn_result = compare("comparing bias grad: ", grad_bias, grad_bias_r, error) and sbn_result
sbn_result = compare("comparing weight grad: ", grad_weight, grad_weight_r, error) and sbn_result
sbn_result = compare("comparing mean_dy grad: ", mean_dy, mean_dy_r, error) and sbn_result
sbn_result = compare("comparing mean_dy_xmu grad: ", mean_dy_xmu, mean_dy_xmu_r, error) and sbn_result
sbn_result = compare("comparing input grad: ", grad_input, grad_input_r, error) and sbn_result
compare("comparing bn input grad: ", inp_bn.grad, grad_input_r, error)
if args.local_rank == 0:
sbn_result = compare("comparing running_mean: ", bn.module.running_mean.data, sbn.module.running_mean.data, error) and sbn_result
sbn_result = compare("comparing running_variance: ", bn.module.running_var.data, sbn.module.running_var.data, error) and sbn_result
# execute by both
compare("comparing layers output: ", out_bn[start:finish], out_sbn, error) and sbn_result
compare("comparing layers grad_input: ", inp_bn.grad[start:finish], inp_sbn.grad[start:finish], error) and sbn_result
bn_opt.step()
sbn_opt.step()
if args.local_rank == 0:
compare("comparing bn vs sbn bias: ", bn.module.bias, sbn.module.bias, error)
compare("comparing bn vs sbn weight: ", bn.module.weight, sbn.module.weight, error)
if sbn_result:
print("====SBN group test passed")
else:
print("*SBN group test failed*")
|
import unittest
import sys
test_dirs = ["run_amp", "run_fp16util", "run_mixed_adam", "run_fused_layer_norm"]
runner = unittest.TextTestRunner(verbosity=2)
errcode = 0
for test_dir in test_dirs:
suite = unittest.TestLoader().discover(test_dir)
print("\nExecuting tests from " + test_dir)
result = runner.run(suite)
if not result.wasSuccessful():
errcode = 1
sys.exit(errcode)
|
import unittest
import os
import random
import torch
import apex
class TestFusedAdam(unittest.TestCase):
def setUp(self, max_abs_diff=1e-3, max_rel_diff=1, iters=7):
self.max_abs_diff = max_abs_diff
self.max_rel_diff = max_rel_diff
self.iters = iters
torch.cuda.manual_seed(9876)
def tearDown(self):
pass
def gen_param_optim(self, tensors, adam_option):
ref_param = []
tst_param = []
for tensor in tensors:
ref_param.append(torch.nn.Parameter(tensor.clone()))
tst_param.append(torch.nn.Parameter(tensor.clone()))
ref_optim = torch.optim.Adam(ref_param, **adam_option)
tst_optim = apex.optimizers.FusedAdam(tst_param, **adam_option)
return (ref_param, tst_param, ref_optim, tst_optim)
def gen_grad(self, ref_param, tst_param):
for p_ref, p_tst in zip(ref_param, tst_param):
p_ref.grad = torch.rand_like(p_ref)
p_tst.grad = p_ref.grad
def gen_mixed_grad(self, ref_param, tst_param, scale=1.0):
half_grads = []
for p_ref, p_tst in zip(ref_param, tst_param):
half_grads.append(torch.rand_like(p_ref).half())
p_ref.grad = half_grads[-1].float() / scale
return half_grads
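# Note on the convention exercised below (inferred from how these tests call it): the
# fp16 grads handed to FusedAdam.step(grads=..., scale=...) are treated as loss-scaled,
# so the fp32 reference optimizer is given grad / scale while FusedAdam is expected to
# perform the equivalent unscaling internally.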
def get_max_diff(self, ref_param, tst_param):
max_abs_diff = max_rel_diff = 0
for p_ref, p_tst in zip(ref_param, tst_param):
max_abs_diff_p = (p_ref - p_tst).abs().max().item()
max_rel_diff_p = ((p_ref - p_tst) / p_ref).abs().max().item()
if max_abs_diff_p > max_abs_diff: max_abs_diff = max_abs_diff_p
if max_rel_diff_p > max_rel_diff: max_rel_diff = max_rel_diff_p
return max_abs_diff, max_rel_diff
def gen_single_type_test(self, param_type=torch.float):
nelem = 278011
adam_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08,
'weight_decay':0, 'amsgrad':False}
tensor = torch.rand(nelem, dtype=param_type, device='cuda')
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], adam_option)
for i in range(self.iters):
self.gen_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_double(self):
self.gen_single_type_test(param_type=torch.double)
def test_float(self):
self.gen_single_type_test(param_type=torch.float)
def test_half(self):
nelem = 278011
adam_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08,
'weight_decay':0, 'amsgrad':False}
tensor = torch.rand(nelem, dtype=torch.float, device='cuda')
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], adam_option)
for i in range(self.iters):
half_grads = self.gen_mixed_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step(grads=half_grads)
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_multi_params(self):
sizes = [[4096, 1024], [4096], [4096, 2048], [32320, 1024], [1]]
adam_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08,
'weight_decay':0, 'amsgrad':False}
tensors = []
for size in sizes:
tensors.append(torch.rand(size, dtype=torch.float, device='cuda'))
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim(tensors, adam_option)
for i in range(self.iters):
half_grads = self.gen_mixed_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step(grads=half_grads)
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_scale(self):
nelem = 278011
adam_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08,
'weight_decay':0, 'amsgrad':False}
tensor = torch.rand(nelem, dtype=torch.float, device='cuda')
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], adam_option)
for i in range(self.iters):
scale = random.random() * 1000
half_grads = self.gen_mixed_grad(ref_param, tst_param, scale)
ref_optim.step()
tst_optim.step(grads=half_grads, scale=scale)
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_fp16_output(self):
nelem = 278011
adam_option = {'lr':5e-4, 'betas':(0.9, 0.999), 'eps':1e-08,
'weight_decay':0, 'amsgrad':False}
tensor = torch.rand(nelem, dtype=torch.float, device='cuda')
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], adam_option)
fp16_param = torch.nn.Parameter(tensor.clone().half())
for i in range(self.iters):
half_grads = self.gen_mixed_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step(grads=half_grads, output_params=[fp16_param])
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
max_abs_diff, max_rel_diff = self.get_max_diff(tst_param, \
[fp16_param.float()])
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_adam_option(self):
nelem = 1
adam_option = {'lr':0.01, 'betas':(0.6, 0.9), 'eps':3e-06,
'weight_decay':0, 'amsgrad':False}
tensor = torch.rand(nelem, dtype=torch.float, device='cuda')
ref_param, tst_param, ref_optim, tst_optim = \
self.gen_param_optim([tensor], adam_option)
for i in range(self.iters):
self.gen_grad(ref_param, tst_param)
ref_optim.step()
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(ref_param, tst_param)
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
if __name__ == '__main__':
script_path = os.path.dirname(os.path.realpath(__file__))
unittest.main()
|
import unittest
import os
import torch
import apex
class TestFP16Optimizer(unittest.TestCase):
def setUp(self, max_abs_diff=1e-3, max_rel_diff=1, iters=7):
self.max_abs_diff = max_abs_diff
self.max_rel_diff = max_rel_diff
self.iters = iters
torch.cuda.manual_seed(13337)
N, D_in, D_out = 64, 1024, 16
self.N = N
self.D_in = D_in
self.D_out = D_out
self.x = torch.randn((N, D_in), dtype=torch.float16, device='cuda')
self.ref_model = torch.nn.Linear(D_in, D_out).cuda().half()
self.tst_model = torch.nn.Linear(D_in, D_out).cuda().half()
for p,q in zip(self.tst_model.parameters(), self.ref_model.parameters()):
p.data.copy_(q.data)
def get_max_diff(self, ref_param, tst_param):
max_abs_diff = max_rel_diff = 0
for p_ref, p_tst in zip(ref_param, tst_param):
max_abs_diff_p = (p_ref - p_tst).abs().max().item()
max_rel_diff_p = ((p_ref - p_tst) / p_ref).abs().max().item()
if max_abs_diff_p > max_abs_diff: max_abs_diff = max_abs_diff_p
if max_rel_diff_p > max_rel_diff: max_rel_diff = max_rel_diff_p
return max_abs_diff, max_rel_diff
def test_fp16_optimizer(self):
ref_optim = torch.optim.Adam(self.ref_model.parameters())
ref_optim = apex.fp16_utils.FP16_Optimizer(ref_optim, verbose=False)
tst_optim = apex.optimizers.FusedAdam(self.tst_model.parameters())
tst_optim = apex.optimizers.FP16_Optimizer(tst_optim)
for i in range(self.iters):
ref_loss = self.ref_model(self.x).sum()
ref_optim.backward(ref_loss)
ref_optim.step()
tst_loss = self.tst_model(self.x).sum()
tst_optim.backward(tst_loss)
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(self.ref_model.parameters(), self.tst_model.parameters())
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_loss_scaling(self):
ref_optim = torch.optim.Adam(self.ref_model.parameters())
ref_optim = apex.fp16_utils.FP16_Optimizer(ref_optim, static_loss_scale=128.0, verbose=False)
tst_optim = apex.optimizers.FusedAdam(self.tst_model.parameters())
tst_optim = apex.optimizers.FP16_Optimizer(tst_optim, static_loss_scale=128.0)
for i in range(self.iters):
ref_loss = self.ref_model(self.x).sum()
ref_optim.backward(ref_loss)
ref_optim.step()
tst_loss = self.tst_model(self.x).sum()
tst_optim.backward(tst_loss)
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(self.ref_model.parameters(), self.tst_model.parameters())
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_parameter_groups(self):
ref_groups = [{'params': [self.ref_model.weight]},{'params': [self.ref_model.bias]}]
ref_optim = torch.optim.Adam(ref_groups)
ref_optim = apex.fp16_utils.FP16_Optimizer(ref_optim, verbose=False)
tst_groups = [{'params': [self.tst_model.weight]},{'params': [self.tst_model.bias]}]
tst_optim = apex.optimizers.FusedAdam(tst_groups)
tst_optim = apex.optimizers.FP16_Optimizer(tst_optim)
for i in range(self.iters):
ref_loss = self.ref_model(self.x).sum()
ref_optim.backward(ref_loss)
ref_optim.step()
tst_loss = self.tst_model(self.x).sum()
tst_optim.backward(tst_loss)
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(self.ref_model.parameters(), self.tst_model.parameters())
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
def test_grad_clip(self):
ref_optim = torch.optim.Adam(self.ref_model.parameters())
ref_optim = apex.fp16_utils.FP16_Optimizer(ref_optim, verbose=False)
tst_optim = apex.optimizers.FusedAdam(self.tst_model.parameters(), max_grad_norm=0.01)
tst_optim = apex.optimizers.FP16_Optimizer(tst_optim)
for i in range(self.iters):
ref_loss = self.ref_model(self.x).sum()
ref_optim.backward(ref_loss)
ref_optim.clip_master_grads(0.01)
ref_optim.step()
tst_loss = self.tst_model(self.x).sum()
tst_optim.backward(tst_loss)
tst_optim.step()
max_abs_diff, max_rel_diff = self.get_max_diff(self.ref_model.parameters(), self.tst_model.parameters())
self.assertLessEqual(max_abs_diff, self.max_abs_diff)
self.assertLessEqual(max_rel_diff, self.max_rel_diff)
@unittest.skip('Does not support grad being None')
def test_grad_None(self):
self.fail()
@unittest.skip('Does not support the same weight decay as PyTorch')
def test_weight_decay(self):
self.fail()
@unittest.skip('Does not support empty parameter groups')
def test_group_empty(self):
self.fail()
if __name__ == '__main__':
script_path = os.path.dirname(os.path.realpath(__file__))
unittest.main()
|
import unittest
import os
import random
import torch
import apex
class TestFusedLayerNorm(unittest.TestCase):
def setUp(self):
self.module = apex.normalization.FusedLayerNorm(normalized_shape=[32, 64], elementwise_affine=False)
self.input_ = torch.randn(16, 32, 64)
torch.cuda.manual_seed(42)
def forward_cpu(self, input_):
self.module.cpu()
return self.module(input_.cpu())
def forward_cuda(self, input_):
self.module.cuda()
return self.module(input_.cuda())
def test_forward_cuda(self):
out_ = self.forward_cuda(self.input_)
assert out_.is_cuda
def test_forward_cpu(self):
out_ = self.forward_cpu(self.input_)
assert not out_.is_cuda
def test_same_output(self):
out_cpu = self.forward_cpu(self.input_)
out_cuda = self.forward_cuda(self.input_)
torch.testing.assert_allclose(out_cpu, out_cuda.cpu())
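# Reference sketch (added; the tests above only check CPU/CUDA agreement): with
# elementwise_affine=False, FusedLayerNorm is expected to match plain normalization
# over the trailing `normalized_shape` dimensions.
def _reference_layer_norm(x, normalized_shape, eps=1e-5):
    # flatten the normalized dims, normalize, then restore the original shape
    lead = x.shape[:x.dim() - len(normalized_shape)]
    flat = x.contiguous().view(*lead, -1)
    mean = flat.mean(-1, keepdim=True)
    var = flat.var(-1, unbiased=False, keepdim=True)
    return ((flat - mean) / torch.sqrt(var + eps)).view_as(x)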
class TestFusedLayerNormElemWise(TestFusedLayerNorm):
def setUp(self):
self.module = apex.normalization.FusedLayerNorm(normalized_shape=[32, 64], elementwise_affine=True)
self.input_ = torch.randn(16, 32, 64)
torch.cuda.manual_seed(42)
|
import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
class MyModel(torch.nn.Module):
def __init__(self, unique):
super(MyModel, self).__init__()
self.weight0 = Parameter(unique +
torch.arange(2, device='cuda', dtype=torch.float32))
self.weight1 = Parameter(1. + unique + torch.arange(2, device='cuda', dtype=torch.float16))
@staticmethod
def ops(input, weight0, weight1):
return ((input*(weight0.float()))*(weight1.float())).sum()
def forward(self, input):
return self.ops(input, self.weight0, self.weight1)
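# Added note: weight0 is deliberately fp32 and weight1 fp16 so that one model exercises
# both gradient paths -- the tests below inject inf into weight0.grad for the "fp32"
# case and into weight1.grad for the "fp16" case.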
# Abandon all hope, ye who enter here.
# This is hands down the ugliest code I have ever written, but it succeeds in testing
# multiple models/optimizers/losses fairly thoroughly. Many of the different test cases
# require slightly divergent code in a way that seems near-impossible to genericize into a simple
# cross product or nested loops.
class TestMultipleModelsOptimizersLosses(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2), device='cuda', dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
def test_2models2losses1optimizer(self):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.125)
reference_grads = []
for i in range(2):
optimizer.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
reference_grads.append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
else:
iters = 2
model0 = MyModel(1)
model1 = MyModel(2)
models = [model0, model1]
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.125)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], optimizer = amp.initialize(
[model0, model1],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
with amp.scale_loss(loss0, optimizer, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if inject_inf_loc == "fp32":
model0.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model0.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if inject_inf_loc == "fp32":
model1.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model1.weight1.grad[0] = float('inf')
if i != inject_inf:
for param, reference_grad in zip(amp.master_params(optimizer),
reference_grads[unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer.step()
model_params = [p for p in model0.parameters()] + [p for p in model1.parameters()]
for model, master, reference in zip(
model_params,
amp.master_params(optimizer),
final_params):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
def test_3models2losses1optimizer(self):
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5},
{'params' : model2.parameters(), 'lr' : 0.125}],
momentum=0.125)
reference_grads = []
for i in range(2):
optimizer.zero_grad()
loss0 = model0(self.x) + model2(self.x)
loss1 = model1(self.x) + model2(self.x)
loss0.backward()
loss1.backward()
reference_grads.append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()] +
[param.grad.data.clone() for param in model2.parameters()])
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
if which_backward == 0:
which_models = (0, 2)
elif which_backward == 1:
which_models = (1, 2)
else:
iters = 2
which_models = (None,)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
models = [model0, model1, model2]
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 0.5},
{'params' : model2.parameters(), 'lr' : 0.125}],
momentum=0.125)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1, model2], optimizer = amp.initialize(
[model0, model1, model2],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer.zero_grad()
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} which_model {} use_multiple_loss_scalers {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, which_model, use_multiple_loss_scalers))
loss0 = model0(self.x) + model2(self.x)
loss1 = model1(self.x) + model2(self.x)
with amp.scale_loss(loss0, optimizer, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if which_model == 0:
inj_model = model0
elif which_model == 2:
inj_model = model2
else:
raise RuntimeError("{} invalid for loss 0".format(which_model))
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if which_model == 1:
inj_model = model1
elif which_model == 2:
inj_model = model2
else:
raise RuntimeError("{} invalid for loss 1".format(which_model))
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
if i != inject_inf:
for param, reference_grad in zip(amp.master_params(optimizer),
reference_grads[unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer.step()
model_params = [p for p in model0.parameters()] + \
[p for p in model1.parameters()] + \
[p for p in model2.parameters()]
for model, master, reference in zip(
model_params,
amp.master_params(optimizer),
final_params):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
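# Two independent model/loss/optimizer pairs. Reference runs are recorded for the
# no-skip case and for every (iteration, backward) combination in which one optimizer's
# step is skipped; the Amp runs must match the corresponding reference when an inf
# is injected into that backward pass.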
def test_2models2losses2optimizers(self):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
# Don't do it like this: reference_grads = [[]]*5
# because that creates a list of 5 references to the same "[]", so appending
# to any of them appends to all of them, which makes each sublist 5x longer
# than intended and, needless to say, makes the test fail.
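# A minimal illustration of the aliasing pitfall (kept as a comment, not executed):
#     bad = [[]] * 3
#     bad[0].append(1)     # bad is now [[1], [1], [1]] -- all entries are the same list
#     good = [[] for _ in range(3)]
#     good[0].append(1)    # good is [[1], [], []]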
reference_grads = [[], [], [], [], []]
final_params = [None, None, None, None, None]
for i in range(2):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
reference_grads[0].append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer0.step()
optimizer1.step()
final_params[0] = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
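# Maps (which_iter, which_backward) to an index into reference_grads/final_params;
# index 0 is the baseline run in which nothing is skipped.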
def what_got_skipped(which_iter, which_backward):
if which_iter == 0 and which_backward == 0:
return 1
if which_iter == 0 and which_backward == 1:
return 2
if which_iter == 1 and which_backward == 0:
return 3
if which_iter == 1 and which_backward == 1:
return 4
return 0
for which_iter in (0,1):
for which_backward in (0,1):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
for i in range(3):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
loss0.backward()
loss1.backward()
if i != which_iter:
reference_grads[what_got_skipped(which_iter, which_backward)].append(
[param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
if i == which_iter:
if which_backward == 0:
optimizer1.step()
else:
optimizer0.step()
else:
optimizer0.step()
optimizer1.step()
final_params[what_got_skipped(which_iter, which_backward)] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
else:
iters = 2
model0 = MyModel(1)
model1 = MyModel(2)
models = [model0, model1]
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer1 = torch.optim.SGD([{'params' : model1.parameters(), 'lr' : 0.5}],
momentum=0.25)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], [optimizer0, optimizer1] = amp.initialize(
[model0, model1],
[optimizer0, optimizer1],
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x)
loss1 = model1(self.x)
with amp.scale_loss(loss0, optimizer0, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if inject_inf_loc == "fp32":
model0.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model0.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, optimizer1, loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if inject_inf_loc == "fp32":
model1.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
model1.weight1.grad[0] = float('inf')
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} use_multiple_loss_scalers {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, use_multiple_loss_scalers))
if i != inject_inf:
master_params = list(amp.master_params(optimizer0)) + \
list(amp.master_params(optimizer1))
for param, reference_grad in zip(master_params,
reference_grads[what_got_skipped(inject_inf, which_backward)][unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer0.step()
optimizer1.step()
model_params = [p for p in model0.parameters()] + [p for p in model1.parameters()]
master_params = [p for p in amp.master_params(optimizer0)] + \
[p for p in amp.master_params(optimizer1)]
for model, master, reference in zip(
model_params,
master_params,
final_params[what_got_skipped(inject_inf, which_backward)]):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
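# Three models split across two optimizers (model0 and model1 under optimizer0, model2
# under optimizer1), with model1 contributing to both losses. Reference runs cover the
# no-skip case and each combination of skipped iteration, backward pass, and model that
# receives the injected inf.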
def test_3models2losses2optimizers(self):
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
# Again, can't do this: reference_grads = [[]]*9
reference_grads = [[], [], [], [], [], [], [], [], []]
final_params = [None, None, None, None, None, None, None, None, None]
for i in range(2):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
loss0.backward()
loss1.backward()
reference_grads[0].append([param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
optimizer0.step()
optimizer1.step()
final_params[0] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
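# Maps (which_iter, which_backward, which_model) to an index into
# reference_grads/final_params; index 0 is the baseline run in which nothing is skipped.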
def what_got_skipped(which_iter, which_backward, which_model):
if which_iter == 0:
if which_backward == 0:
if which_model == 0:
return 1
if which_model == 1:
return 2
if which_backward == 1:
if which_model == 2:
return 3
if which_model == 1:
return 4
if which_iter == 1:
if which_backward == 0:
if which_model == 0:
return 5
if which_model == 1:
return 6
if which_backward == 1:
if which_model == 2:
return 7
if which_model == 1:
return 8
return 0
for which_iter in (0,1):
for which_backward in (0,1):
if which_backward == 0:
which_models = (0,1)
if which_backward == 1:
which_models = (2,1)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
for i in range(3):
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
loss0.backward()
loss1.backward()
if i != which_iter:
reference_grads[what_got_skipped(which_iter,
which_backward, which_model)].append(
[param.grad.data.clone() for param in model0.parameters()] +
[param.grad.data.clone() for param in model1.parameters()])
if i == which_iter:
if which_backward == 0:
# if which_model == 0:
optimizer1.step()
# if which_model == 1:
# optimizer1.step()
if which_backward == 1:
# if which_model == 2:
# optimizer0.step()
# if which_model == 1:
continue
else:
optimizer0.step()
optimizer1.step()
final_params[what_got_skipped(which_iter, which_backward, which_model)] = \
[param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()] + \
[param.data.clone() for param in model2.parameters()]
for opt_level in ("O0", "O1", "O2", "O3"):
for how_to_zero in ("none", "model", "optimizer"):
for use_multiple_loss_scalers in (True, False):
if opt_level == "O1" or opt_level == "O2":
inject_inf_iters = (-1, 0, 1)
else:
inject_inf_iters = (-1,)
for inject_inf in inject_inf_iters:
if inject_inf >= 0:
inject_inf_locs = ("fp16", "fp32")
which_backwards = (0, 1)
else:
inject_inf_locs = ("fdsa",)
which_backwards = (None,)
for inject_inf_loc in inject_inf_locs:
for which_backward in which_backwards:
if use_multiple_loss_scalers:
num_losses = 2
loss_ids = [0, 1]
else:
num_losses = 1
loss_ids = [0, 0]
if inject_inf >= 0:
iters = 3
if which_backward == 0:
which_models = (0, 1)
elif which_backward == 1:
which_models = (2, 1)
else:
iters = 2
which_models = (None,)
for which_model in which_models:
model0 = MyModel(1)
model1 = MyModel(2)
model2 = MyModel(3)
models = [model0, model1, model2]
optimizer0 = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25},
{'params' : model1.parameters(), 'lr' : 1.0}],
momentum=0.5)
optimizer1 = torch.optim.SGD([{'params' : model2.parameters(), 'lr' : 0.5}],
momentum=0.25)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1, model2], [optimizer0, optimizer1] = amp.initialize(
[model0, model1, model2],
[optimizer0, optimizer1],
opt_level=opt_level,
verbosity=0,
cast_model_type=False,
num_losses=num_losses)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
if use_multiple_loss_scalers:
_amp_state.loss_scalers[1]._loss_scale = 16.0
unskipped = 0
for i in range(iters):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
else:
optimizer0.zero_grad()
optimizer1.zero_grad()
loss0 = model0(self.x) + model1(self.x)
loss1 = model2(self.x) + model1(self.x)
with amp.scale_loss(loss0, optimizer0, loss_id=loss_ids[0]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 0:
if which_model == 0:
inj_model = model0
elif which_model == 1:
inj_model = model1
else:
raise RuntimeError("{} invalid for loss 0".format(which_model))
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
with amp.scale_loss(loss1, [optimizer0, optimizer1], loss_id=loss_ids[1]) as scaled_loss:
scaled_loss.backward()
if i == inject_inf and which_backward == 1:
if which_model == 2:
inj_model = model2
elif which_model == 1:
inj_model = model1
else:
raise RuntimeError("{} invalid for loss 1".format(which_model))
if inject_inf_loc == "fp32":
inj_model.weight0.grad[0] = float('inf')
elif inject_inf_loc == "fp16":
inj_model.weight1.grad[0] = float('inf')
if i != inject_inf:
master_params = list(amp.master_params(optimizer0)) + \
list(amp.master_params(optimizer1))
for param, reference_grad in zip(master_params,
reference_grads[what_got_skipped(inject_inf,
which_backward, which_model)][unskipped]):
self.assertTrue(torch.allclose(param.grad.float(), reference_grad.float()))
unskipped += 1
optimizer0.step()
optimizer1.step()
model_params = [p for p in model0.parameters()] + \
[p for p in model1.parameters()] + \
[p for p in model2.parameters()]
master_params = [p for p in amp.master_params(optimizer0)] + \
[p for p in amp.master_params(optimizer1)]
# print("opt_level {} i {} inject_inf {} which_backward {} inject_inf_loc {} use_multiple_loss_scalers {} which_model {}".format(opt_level, i, inject_inf, which_backward, inject_inf_loc, use_multiple_loss_scalers, which_model))
for model, master, reference in zip(
model_params,
master_params,
final_params[what_got_skipped(inject_inf, which_backward, which_model)]):
self.assertTrue(torch.allclose(model, reference))
self.assertTrue(torch.allclose(model, master.to(model.dtype)))
if opt_level == "O1":
_amp_state.handle._deactivate()
if __name__ == '__main__':
unittest.main()
|
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
from amp_C import multi_tensor_l2norm
from apex.multi_tensor_apply import MultiTensorApply
disabled = False
except ImportError as err:
print("amp_C fused kernels unavailable, disabling TestMultiTensorApply. ImportError was ", err)
disabled = True
class TestMultiTensorL2Norm(unittest.TestCase):
def setUp(self):
common_init(self)
self.val = 4.0
self.overflow_buf = torch.cuda.IntTensor(1).zero_()
def tearDown(self):
pass
# The tensor creation here is written for convenience, not speed.
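# Builds repeat_tensors pairs of tensors of two sizes, runs multi_tensor_l2norm through
# the applier, and checks the fused norm (and, if requested, the per-tensor norms)
# against references computed with Tensor.norm().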
def l2norm(self, sizea, sizeb, applier, repeat_tensors, in_type, per_tensor):
self.overflow_buf.zero_()
a = torch.cuda.FloatTensor(sizea).fill_(self.val)
b = torch.cuda.FloatTensor(sizeb).fill_(self.val)
in_list = []
for i in range(repeat_tensors):
in_list += [a.clone().to(in_type), b.clone().to(in_type)]
if per_tensor:
norm, norm_per_tensor = applier(multi_tensor_l2norm, self.overflow_buf, [in_list], True)
normab = torch.cat((a.norm().view(1), b.norm().view(1)))
norm_per_tensor = norm_per_tensor.view(-1, 2)
else:
norm, _ = applier(multi_tensor_l2norm, self.overflow_buf, [in_list], True)
reference = torch.cuda.FloatTensor((sizea + sizeb)*repeat_tensors).fill_(self.val).norm()
self.assertTrue(torch.allclose(norm, reference))
if per_tensor:
self.assertTrue(torch.allclose(norm_per_tensor, normab))
self.assertTrue(self.overflow_buf.item() == 0)
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_fuzz(self):
input_size_pairs = (
(7777*77, 555*555),
(777, 555),
(555, 2048*32+1),
(2048*32+1, 555),
(555, 2048*32),
(2048*32, 555),
(33333, 555),
(555, 33333))
appliers = (
MultiTensorApply(2048*32),
MultiTensorApply(333),
MultiTensorApply(33333))
repeat_tensors = (
1,
55)
for sizea, sizeb in input_size_pairs:
for applier in appliers:
for repeat in repeat_tensors:
for in_type in (torch.float32, torch.float16):
for per_tensor in (False, True):
self.l2norm(sizea, sizeb, applier, repeat, in_type, per_tensor)
if __name__ == '__main__':
unittest.main()
|
import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from torch.nn import Parameter
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
class MyModel(torch.nn.Module):
def __init__(self, unique):
super(MyModel, self).__init__()
self.weight0 = Parameter(unique +
torch.arange(2, device='cuda', dtype=torch.float32))
self.weight1 = Parameter(1. + unique + torch.arange(2, device='cuda', dtype=torch.float16))
@staticmethod
def ops(input, weight0, weight1):
return ((input*(weight0.float()))*(weight1.float())).sum()
def forward(self, input):
return self.ops(input, self.weight0, self.weight1)
# Abandon all hope, ye who enter here.
class TestAddParamGroup(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2), device='cuda', dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
def zero_grad(self, models, optimizer, how_to_zero):
if how_to_zero == "none":
for model in models:
for param in model.parameters():
param.grad = None
elif how_to_zero == "model":
for model in models:
model.zero_grad()
elif how_to_zero == "optimizer":
optimizer.zero_grad()
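# A plain-PyTorch reference run adds model1's params to the optimizer mid-training
# (optionally zeroing grads before or after the add, and optionally accumulating an
# extra backward); the Amp runs under each opt_level and zeroing scheme must reach
# the same final params.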
def test_add_param_group(self):
for opt_level in ("O0", "O1", "O2", "O3"):
for zero_before_add in (True, False):
for try_accumulation in (True, False):
model0 = MyModel(1)
model1 = MyModel(2)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
optimizer.zero_grad()
loss = model0(self.x)
loss.backward()
optimizer.step()
if zero_before_add:
optimizer.zero_grad()
optimizer.add_param_group({'params' : model1.parameters(), 'lr' : 0.5})
if not zero_before_add:
optimizer.zero_grad()
loss = model0(self.x) + model1(self.x)
loss.backward(retain_graph=try_accumulation)
if try_accumulation:
loss.backward()
optimizer.step()
# Once more to make sure the new params pick up momentums properly
optimizer.zero_grad()
loss = model0(self.x) + model1(self.x)
loss.backward(retain_graph=try_accumulation)
if try_accumulation:
loss.backward()
optimizer.step()
reference_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for how_to_zero in "none", "model", "optimizer":
model0 = MyModel(1)
model1 = MyModel(2)
optimizer = torch.optim.SGD([{'params' : model0.parameters(), 'lr' : 0.25}],
momentum=0.125)
_amp_state.allow_incoming_model_not_fp32 = True
[model0, model1], optimizer = amp.initialize([model0, model1],
optimizer,
opt_level=opt_level,
verbosity=0,
cast_model_type=False)
_amp_state.allow_incoming_model_not_fp32 = False
_amp_state.loss_scalers[0]._loss_scale = 4.0
self.zero_grad([model0, model1], optimizer, how_to_zero)
loss = model0(self.x)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
if zero_before_add:
self.zero_grad([model0, model1], optimizer, how_to_zero)
optimizer.add_param_group({'params' : model1.parameters(), 'lr' : 0.5})
if not zero_before_add:
self.zero_grad([model0, model1], optimizer, how_to_zero)
loss = model0(self.x) + model1(self.x)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(retain_graph=try_accumulation)
if try_accumulation:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
# Once more to make sure the new params pick up momentums properly
self.zero_grad([model0, model1], optimizer, how_to_zero)
loss = model0(self.x) + model1(self.x)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward(retain_graph=try_accumulation)
if try_accumulation:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
final_params = [param.data.clone() for param in model0.parameters()] + \
[param.data.clone() for param in model1.parameters()]
for reference, final in zip(reference_params, final_params):
self.assertTrue(torch.allclose(reference.to(final.dtype), final),
"opt_level = {}, how_to_zero = {}, zero_before_add = {}".format(
opt_level, how_to_zero, zero_before_add))
if __name__ == '__main__':
unittest.main()
|
import unittest
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT, DTYPES
class TestPromotion(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def run_binary_promote_test(self, fns, input_shape, x_inplace=False):
type_pairs = it.product(DTYPES, DTYPES)
for fn, (xtype, ytype) in it.product(fns, type_pairs):
x = torch.randn(input_shape, dtype=xtype).requires_grad_()
x_leaf = x
if x_inplace:
# We need a non-leaf to call in place on
x = x.clone()
y = torch.randn(input_shape, dtype=ytype)
out = fn(x, y)
if x_inplace:
# In place: always match xtype
self.assertEqual(out.type(), x.type())
else:
# Out of place: match widest type
if xtype == torch.float or ytype == torch.float:
self.assertEqual(out.type(), FLOAT)
else:
self.assertEqual(out.type(), HALF)
out.float().sum().backward()
self.assertEqual(x_leaf.grad.dtype, xtype)
def test_atan2_matches_widest(self):
fns = [lambda x, y : torch.atan2(x, y),
lambda x, y : x.atan2(y)]
self.run_binary_promote_test(fns, (self.b,))
def test_mul_matches_widest(self):
fns = [lambda x, y : torch.mul(x, y),
lambda x, y: x.mul(y)]
self.run_binary_promote_test(fns, (self.b,))
def test_cat_matches_widest(self):
shape = self.b
ys = [torch.randn(shape, dtype=torch.half) for _ in range(5)]
x_float = torch.randn(shape)
out = torch.cat(ys + [x_float])
self.assertEqual(out.type(), FLOAT)
x_half = torch.randn(shape, dtype=torch.half)
out = torch.cat(ys + [x_half])
self.assertEqual(out.type(), HALF)
def test_inplace_exp_is_error_for_half(self):
xs = torch.randn(self.b)
xs.exp_()
self.assertEqual(xs.type(), FLOAT)
xs = torch.randn(self.b, dtype=torch.half)
with self.assertRaises(NotImplementedError):
xs.exp_()
def test_inplace_add_matches_self(self):
fn = lambda x, y: x.add_(y)
self.run_binary_promote_test([fn], (self.b,), x_inplace=True)
if __name__ == '__main__':
unittest.main()
|
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
from amp_C import multi_tensor_axpby
from apex.multi_tensor_apply import MultiTensorApply
disabled = False
except ImportError as err:
print("amp_C fused kernels unavailable, disabling TestMultiTensorApply. ImportError was ", err)
disabled = True
class TestMultiTensorAxpby(unittest.TestCase):
def setUp(self):
common_init(self)
self.a = 2.0
self.b = 8.0
self.xval = 4.0
self.yval = 16.0
self.overflow_buf = torch.cuda.IntTensor(1).zero_()
self.ref = torch.cuda.FloatTensor([136.0])
def tearDown(self):
pass
# The tensor creation here is written for convenience, not speed.
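# Builds x/y lists such that a*x + b*y is 2*4 + 8*16 = 136 elementwise (self.ref),
# runs multi_tensor_axpby, and checks every output tensor and the overflow flag.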
def axpby(self, sizea, sizeb, applier, repeat_tensors,
x_type, y_type, out_type, inplace=False):
self.overflow_buf.zero_()
t1 = torch.cuda.FloatTensor(sizea).fill_(1.0)
t2 = torch.cuda.FloatTensor(sizeb).fill_(1.0)
y_list = []
for i in range(repeat_tensors):
y_list += [t1.clone().to(y_type)*self.yval, t2.clone().to(y_type)*self.yval]
x_list = [x.clone().to(x_type)*(self.xval/self.yval) for x in y_list]
if inplace:
out_list = y_list
else:
out_list = [out.clone().to(out_type)*3.0 for out in y_list]
applier(multi_tensor_axpby, self.overflow_buf, [x_list, y_list, out_list], self.a, self.b, -1)
self.assertTrue(all([torch.allclose(out, self.ref.to(out_type)) for out in out_list]),
msg="{} {} {} {} {} {} {}".format(sizea, sizeb, repeat_tensors,
x_type, y_type, out_type, inplace))
self.assertTrue(self.overflow_buf.item() == 0,
msg="{} {} {} {} {} {} {}".format(sizea, sizeb, repeat_tensors,
x_type, y_type, out_type, inplace))
# def find_inf(self, sizea, sizeb, applier, repeat_tensors, in_type, out_type, t, ind, val, inplace=False):
# self.overflow_buf.zero_()
# a = torch.cuda.FloatTensor(sizea).fill_(self.scale)
# b = torch.cuda.FloatTensor(sizeb).fill_(self.scale)
# out_list = []
# for i in range(repeat_tensors):
# out_list += [a.clone().to(out_type), b.clone().to(out_type)]
# if inplace:
# in_list = out_list
# else:
# in_list = [out.clone().to(in_type) for out in out_list]
# applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
# self.overflow_buf.zero_()
# in_list[t][ind] = val
# applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
# self.assertTrue(self.overflow_buf.item())
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_fuzz(self):
input_size_pairs = (
(7777*77, 555*555),
(777, 555),
(555, 2048*32+1),
(2048*32+1, 555),
(555, 2048*32),
(2048*32, 555),
(33333, 555),
(555, 33333))
appliers = (
MultiTensorApply(2048*32),
MultiTensorApply(333),
MultiTensorApply(33333))
repeat_tensors = (
1,
55)
for sizea, sizeb in input_size_pairs:
for applier in appliers:
for repeat in repeat_tensors:
for x_type in (torch.float32, torch.float16):
for y_type in (torch.float32, torch.float16):
for out_type in (torch.float32, torch.float16):
for inplace in (True, False):
if inplace is True and (y_type is not out_type):
continue
else:
self.axpby(sizea, sizeb, applier, repeat,
x_type, y_type, out_type, inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 0, 0, float('nan'), inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 2*repeat-1, sizeb-1, float('inf'), inplace=inplace)
# self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
# 2*(repeat//2), sizea//2, float('inf'), inplace=inplace)
if __name__ == '__main__':
unittest.main()
|
import torch
HALF = 'torch.cuda.HalfTensor'
FLOAT = 'torch.cuda.FloatTensor'
DTYPES = [torch.half, torch.float]
ALWAYS_HALF = {torch.float: HALF,
torch.half: HALF}
ALWAYS_FLOAT = {torch.float: FLOAT,
torch.half: FLOAT}
MATCH_INPUT = {torch.float: FLOAT,
torch.half: HALF}
def common_init(test_case):
test_case.h = 64
test_case.b = 16
test_case.c = 16
test_case.k = 3
test_case.t = 10
torch.set_default_tensor_type(torch.cuda.FloatTensor)
|
import unittest
from apex import amp
import random
import torch
from torch import nn
from utils import common_init, HALF
class TestRnnCells(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def run_cell_test(self, cell, state_tuple=False):
shape = (self.b, self.h)
for typ in [torch.float, torch.half]:
xs = [torch.randn(shape, dtype=typ).requires_grad_()
for _ in range(self.t)]
hidden_fn = lambda: torch.zeros(shape, dtype=typ)
if state_tuple:
hidden = (hidden_fn(), hidden_fn())
else:
hidden = hidden_fn()
outputs = []
for i in range(self.t):
hidden = cell(xs[i], hidden)
if state_tuple:
output = hidden[0]
else:
output = hidden
outputs.append(output)
for y in outputs:
self.assertEqual(y.type(), HALF)
outputs[-1].float().sum().backward()
for i, x in enumerate(xs):
self.assertEqual(x.grad.dtype, x.dtype)
def test_rnn_cell_is_half(self):
cell = nn.RNNCell(self.h, self.h)
self.run_cell_test(cell)
def test_gru_cell_is_half(self):
cell = nn.GRUCell(self.h, self.h)
self.run_cell_test(cell)
def test_lstm_cell_is_half(self):
cell = nn.LSTMCell(self.h, self.h)
self.run_cell_test(cell, state_tuple=True)
class TestRnns(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def run_rnn_test(self, rnn, layers, bidir, state_tuple=False):
for typ in [torch.float, torch.half]:
x = torch.randn((self.t, self.b, self.h), dtype=typ).requires_grad_()
hidden_fn = lambda: torch.zeros((layers + (layers * bidir),
self.b, self.h), dtype=typ)
if state_tuple:
hidden = (hidden_fn(), hidden_fn())
else:
hidden = hidden_fn()
output, _ = rnn(x, hidden)
self.assertEqual(output.type(), HALF)
output[-1, :, :].float().sum().backward()
self.assertEqual(x.grad.dtype, x.dtype)
def test_rnn_is_half(self):
configs = [(1, False), (2, False), (2, True)]
for layers, bidir in configs:
rnn = nn.RNN(input_size=self.h, hidden_size=self.h, num_layers=layers,
nonlinearity='relu', bidirectional=bidir)
self.run_rnn_test(rnn, layers, bidir)
def test_gru_is_half(self):
configs = [(1, False), (2, False), (2, True)]
for layers, bidir in configs:
rnn = nn.GRU(input_size=self.h, hidden_size=self.h, num_layers=layers,
bidirectional=bidir)
self.run_rnn_test(rnn, layers, bidir)
def test_lstm_is_half(self):
configs = [(1, False), (2, False), (2, True)]
for layers, bidir in configs:
rnn = nn.LSTM(input_size=self.h, hidden_size=self.h, num_layers=layers,
bidirectional=bidir)
self.run_rnn_test(rnn, layers, bidir, state_tuple=True)
def test_rnn_packed_sequence(self):
num_layers = 2
rnn = nn.RNN(input_size=self.h, hidden_size=self.h, num_layers=num_layers)
for typ in [torch.float, torch.half]:
x = torch.randn((self.t, self.b, self.h), dtype=typ).requires_grad_()
lens = sorted([random.randint(self.t // 2, self.t) for _ in range(self.b)],
reverse=True)
# `pack_padded_sequence` breaks if default tensor type is non-CPU
torch.set_default_tensor_type(torch.FloatTensor)
lens = torch.tensor(lens, dtype=torch.int64, device=torch.device('cpu'))
packed_seq = nn.utils.rnn.pack_padded_sequence(x, lens)
torch.set_default_tensor_type(torch.cuda.FloatTensor)
hidden = torch.zeros((num_layers, self.b, self.h), dtype=typ)
output, _ = rnn(packed_seq, hidden)
self.assertEqual(output.data.type(), HALF)
output.data.float().sum().backward()
self.assertEqual(x.grad.dtype, x.dtype)
if __name__ == '__main__':
unittest.main()
|
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
try:
import amp_C
from amp_C import multi_tensor_scale
from apex.multi_tensor_apply import MultiTensorApply
disabled = False
except ImportError as err:
print("amp_C fused kernels unavailable, disabling TestMultiTensorApply. ImportError was ", err)
disabled = True
class TestMultiTensorScale(unittest.TestCase):
def setUp(self):
common_init(self)
self.scale = 4.0
self.overflow_buf = torch.cuda.IntTensor(1).zero_()
self.ref = torch.cuda.FloatTensor([1.0])
def tearDown(self):
pass
# The tensor creation here is written for convenience, not speed.
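# Fills the inputs with self.scale, applies multi_tensor_scale with factor 1/self.scale,
# and checks that every output equals 1.0 and that the overflow buffer stays zero.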
def downscale(self, sizea, sizeb, applier, repeat_tensors, in_type, out_type, inplace=False):
self.overflow_buf.zero_()
a = torch.cuda.FloatTensor(sizea).fill_(self.scale)
b = torch.cuda.FloatTensor(sizeb).fill_(self.scale)
out_list = []
for i in range(repeat_tensors):
out_list += [a.clone().to(out_type), b.clone().to(out_type)]
if inplace:
in_list = out_list
else:
in_list = [out.clone().to(in_type) for out in out_list]
applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
self.assertTrue(all([torch.allclose(out, self.ref.to(out_type)) for out in out_list]))
self.assertTrue(self.overflow_buf.item() == 0)
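# After a clean downscale, plants a nan/inf at tensor index t, element ind, and checks
# that the next applier call sets the overflow buffer.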
def find_inf(self, sizea, sizeb, applier, repeat_tensors, in_type, out_type, t, ind, val, inplace=False):
self.overflow_buf.zero_()
a = torch.cuda.FloatTensor(sizea).fill_(self.scale)
b = torch.cuda.FloatTensor(sizeb).fill_(self.scale)
out_list = []
for i in range(repeat_tensors):
out_list += [a.clone().to(out_type), b.clone().to(out_type)]
if inplace:
in_list = out_list
else:
in_list = [out.clone().to(in_type) for out in out_list]
applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
self.overflow_buf.zero_()
in_list[t][ind] = val
applier(multi_tensor_scale, self.overflow_buf, [in_list, out_list], 1./self.scale)
self.assertTrue(self.overflow_buf.item())
# Currently, the fused kernel gives a hard error if you attempt to downscale
# into fp16 output, which, in my opinion, is the desired behavior. Maybe someday we
# will learn otherwise.
# @unittest.skipIf(disabled, "amp_C is unavailable")
# def test_fp16_to_fp16(self):
# self.downscale(self.fp16, self.fp16, self.fp16_ref)
#
# @unittest.skipIf(disabled, "amp_C is unavailable")
# def test_fp32_to_fp16(self):
# self.downscale(self.fp32, self.fp16, self.fp16_ref)
@unittest.skipIf(disabled, "amp_C is unavailable")
def test_fuzz(self):
input_size_pairs = (
(7777*77, 555*555),
(777, 555),
(555, 2048*32+1),
(2048*32+1, 555),
(555, 2048*32),
(2048*32, 555),
(33333, 555),
(555, 33333))
appliers = (
MultiTensorApply(2048*32),
MultiTensorApply(333),
MultiTensorApply(33333))
repeat_tensors = (
1,
55)
for sizea, sizeb in input_size_pairs:
for applier in appliers:
for repeat in repeat_tensors:
for in_type in (torch.float32, torch.float16):
for out_type in (torch.float32, torch.float16):
for inplace in (True, False):
if inplace is True and (out_type is not in_type):
continue
else:
self.downscale(sizea, sizeb, applier, repeat, in_type, out_type, inplace=inplace)
self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
0, 0, float('nan'), inplace=inplace)
self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
2*repeat-1, sizeb-1, float('inf'), inplace=inplace)
self.find_inf(sizea, sizeb, applier, repeat, in_type, out_type,
2*(repeat//2), sizea//2, float('inf'), inplace=inplace)
if __name__ == '__main__':
unittest.main()
|
import unittest
import functools as ft
import itertools as it
from apex import amp
from apex.amp import _amp_state
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
def get_reference_grad(i, w, ops):
# Create fresh tensors so the reference computation never touches Amp's cast cache.
# They are guaranteed not to use the cache because they are not torch.nn.Parameters.
fp32_i = i.detach().clone().float()
fp32_w = w.detach().clone().float().requires_grad_()
loss = ops(fp32_i, fp32_w)
loss.backward()
return fp32_w.grad
class WhitelistModule(torch.nn.Module):
def __init__(self, dtype):
super(WhitelistModule, self).__init__()
self.weight = torch.nn.Parameter(torch.arange(8*8, device='cuda', dtype=dtype).view(8,8))
@staticmethod
def ops(input, weight):
return (input.mm(weight)).mm(weight).sum()
def forward(self, input):
return self.ops(input, self.weight)
class BlacklistModule(torch.nn.Module):
def __init__(self, dtype):
super(BlacklistModule, self).__init__()
self.weight = torch.nn.Parameter(torch.arange(2*8, device='cuda', dtype=dtype).view(2,8))
@staticmethod
def ops(input, weight):
return (input + torch.pow(weight, 2) + torch.pow(weight, 2)).sum()
def forward(self, input):
return self.ops(input, self.weight)
class PromoteModule(torch.nn.Module):
def __init__(self, dtype):
super(PromoteModule, self).__init__()
self.weight = torch.nn.Parameter(torch.arange(2*8, device='cuda', dtype=dtype).view(2,8))
@staticmethod
def ops(input, weight):
return ((input*weight)*weight).sum()
def forward(self, input):
return self.ops(input, self.weight)
class TestCache(unittest.TestCase):
def setUp(self):
self.x = torch.ones((2, 8), device='cuda', dtype=torch.float32)
common_init(self)
def tearDown(self):
pass
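# Trains for one step, runs a no_grad eval forward, then trains again, checking the
# grads against an fp32 reference each time. This exercises Amp's weight cast cache
# across the train/eval/train transition for whitelist, blacklist, and promote ops.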
def train_eval_train_test(self, module, t):
model = module(t).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
_amp_state.allow_incoming_model_not_fp32 = True
model, optimizer = amp.initialize(model, optimizer, opt_level="O1", verbosity=0)
_amp_state.allow_incoming_model_not_fp32 = False
def training_step():
for param in model.parameters():
param.grad = None
loss = model(self.x).sum()
_amp_state.loss_scalers[0]._loss_scale = 4.0
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
self.assertEqual(len([p.grad for p in model.parameters() if p.grad is not None]), 1)
self.assertEqual(model.weight.grad.type(), model.weight.type())
reference_grad = get_reference_grad(self.x, model.weight, model.ops)
# Currently there's no difference in the allclose calls, so no need for branching,
# but I'm keeping this in case we want different tolerances for fp16 and fp32 checks.
if model.weight.grad.type() == "torch.cuda.HalfTensor":
self.assertTrue(torch.allclose(model.weight.grad.float(), reference_grad))
elif model.weight.grad.type() == "torch.cuda.FloatTensor":
self.assertTrue(torch.allclose(model.weight.grad.float(), reference_grad))
else:
raise RuntimeError("model.weight.grad.type = {}".format(model.weight.grad.type()))
model.weight.data -= 1.
# Simulates first epoch
training_step()
# Simulates eval
with torch.no_grad():
loss = model(self.x).sum()
# Simulates resuming training after eval
training_step()
_amp_state.handle._deactivate()
# I could easily have these as a set of for loops in a single test,
# instead of going for granularity.
def test_whitelist_module_fp16_weight(self):
self.train_eval_train_test(WhitelistModule, torch.float16)
def test_whitelist_module_fp32_weight(self):
self.train_eval_train_test(WhitelistModule, torch.float32)
def test_blacklist_module_fp16_weight(self):
self.train_eval_train_test(BlacklistModule, torch.float16)
def test_blacklist_module_fp32_weight(self):
self.train_eval_train_test(BlacklistModule, torch.float32)
def test_promote_module_fp16_weight(self):
self.train_eval_train_test(PromoteModule, torch.float16)
def test_promote_module_fp32_weight(self):
self.train_eval_train_test(PromoteModule, torch.float32)
if __name__ == '__main__':
unittest.main()
|
import unittest
import functools as ft
import itertools as it
from apex import amp
import torch
from torch import nn
import torch.nn.functional as F
from utils import common_init, HALF, FLOAT,\
ALWAYS_HALF, ALWAYS_FLOAT, MATCH_INPUT
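# Applies each fn to an input of every dtype in `expected`, checks that the output
# tensor type matches, and (optionally) that the grad type matches the input dtype.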
def run_layer_test(test_case, fns, expected, input_shape, test_backward=True):
for fn, typ in it.product(fns, expected.keys()):
x = torch.randn(input_shape, dtype=typ).requires_grad_()
y = fn(x)
test_case.assertEqual(y.type(), expected[typ])
if test_backward:
y.float().sum().backward()
test_case.assertEqual(x.grad.type(), MATCH_INPUT[typ])
class TestBasicCasts(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def test_linear_is_half(self):
m = nn.Linear(self.h, self.h)
f = ft.partial(F.linear, weight=m.weight, bias=m.bias)
run_layer_test(self, [m, f], ALWAYS_HALF, (self.b, self.h))
def test_conv2d_is_half(self):
m = nn.Conv2d(self.c, self.c, self.k)
f = ft.partial(F.conv2d, weight=m.weight, bias=m.bias)
run_layer_test(self, [m, f], ALWAYS_HALF, (self.b, self.c, self.h, self.h))
def test_softmax_is_float(self):
m = nn.Softmax(dim=1)
f = ft.partial(F.softmax, dim=1)
run_layer_test(self, [m, f], ALWAYS_FLOAT, (self.b, self.h))
def test_group_norm_is_float(self):
m = nn.GroupNorm(num_groups=4, num_channels=self.c)
run_layer_test(self, [m], ALWAYS_FLOAT, (self.b, self.c, self.h, self.h))
def test_mse_loss_is_float(self):
shape = (self.b, self.h)
target = torch.randn(shape)
mod = nn.MSELoss()
m = lambda x: mod(x, target)
f = ft.partial(F.mse_loss, target=target)
run_layer_test(self, [m], ALWAYS_FLOAT, shape)
def test_relu_is_match(self):
run_layer_test(self, [nn.ReLU(), F.relu], MATCH_INPUT, (self.b, self.h))
def test_batch_norm_is_match(self):
m = nn.BatchNorm2d(num_features=self.c)
f = ft.partial(F.batch_norm, running_mean=m.running_mean, running_var=m.running_var,
weight=m.weight, bias=m.bias, training=True)
run_layer_test(self, [m], MATCH_INPUT, (self.b, self.c, self.h, self.h))
# Test forward-only for BN inference
m.eval()
f = ft.partial(F.batch_norm, running_mean=m.running_mean, running_var=m.running_var,
weight=m.weight, bias=m.bias, training=False)
run_layer_test(self, [m, f], MATCH_INPUT, (self.b, self.c, self.h, self.h),
test_backward=False)
class TestBannedMethods(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def bce_common(self, assertion):
shape = (self.b, self.h)
target = torch.rand(shape)
mod = nn.BCELoss()
m = lambda x: mod(x, target)
f = ft.partial(F.binary_cross_entropy, target=target)
for fn in [m, f]:
x = torch.rand(shape, dtype=torch.half)
assertion(fn, x)
def test_bce_raises_by_default(self):
assertion = lambda fn, x: self.assertRaises(NotImplementedError, fn, x)
self.bce_common(assertion)
def test_bce_is_float_with_allow_banned(self):
self.handle._deactivate()
self.handle = amp.init(enabled=True, allow_banned=True)
assertion = lambda fn, x: self.assertEqual(fn(x).type(), FLOAT)
self.bce_common(assertion)
class TestTensorCasts(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=True)
common_init(self)
def tearDown(self):
self.handle._deactivate()
def test_matmul_method_is_half(self):
other = torch.randn(self.h, self.h)
lhs = lambda x: x.matmul(other)
rhs = lambda x: other.matmul(x)
run_layer_test(self, [lhs, rhs], ALWAYS_HALF, (self.h, self.h))
def test_matmul_op_is_half(self):
other = torch.randn(self.h, self.h)
lhs = lambda x: x @ other
rhs = lambda x: other @ x
run_layer_test(self, [lhs, rhs], ALWAYS_HALF, (self.h, self.h))
def test_pow_method_is_float(self):
fn = lambda x: x.pow(2.)
run_layer_test(self, [fn], ALWAYS_FLOAT, (self.b, self.h))
def test_pow_op_is_float(self):
fn = lambda x: x ** 2.
run_layer_test(self, [fn], ALWAYS_FLOAT, (self.b, self.h))
def test_cpu_is_float(self):
fn = lambda x: x.cpu()
always_cpu_float = {torch.float: 'torch.FloatTensor',
torch.half: 'torch.FloatTensor'}
run_layer_test(self, [fn], always_cpu_float, (self.b, self.h))
def test_sum_is_float(self):
fn = lambda x: x.sum()
run_layer_test(self, [fn], ALWAYS_FLOAT, (self.b, self.h))
class TestDisabledCasts(unittest.TestCase):
def setUp(self):
self.handle = amp.init(enabled=False)
common_init(self)
def test_disabled_linear(self):
m = nn.Linear(self.h, self.h)
f = ft.partial(F.linear, weight=m.weight, bias=m.bias)
input_shape = (self.b, self.h)
for fn in [m, f]:
x = torch.randn(input_shape, dtype=torch.float).requires_grad_()
y = fn(x)
self.assertEqual(y.type(), FLOAT)
y.sum().backward()
self.assertEqual(x.grad.type(), FLOAT)
x = torch.randn(input_shape, dtype=torch.half).requires_grad_()
self.assertRaises(RuntimeError, fn, x)
# TODO: maybe more tests on disabled casting?
if __name__ == '__main__':
unittest.main()
|
import unittest
import torch
import torch.nn as nn
from apex.fp16_utils import FP16Model
class DummyBlock(nn.Module):
def __init__(self):
super(DummyBlock, self).__init__()
self.conv = nn.Conv2d(10, 10, 2)
self.bn = nn.BatchNorm2d(10, affine=True)
def forward(self, x):
return self.conv(self.bn(x))
class DummyNet(nn.Module):
def __init__(self):
super(DummyNet, self).__init__()
self.conv1 = nn.Conv2d(3, 10, 2)
self.bn1 = nn.BatchNorm2d(10, affine=False)
self.db1 = DummyBlock()
self.db2 = DummyBlock()
def forward(self, x):
out = x
out = self.conv1(out)
out = self.bn1(out)
out = self.db1(out)
out = self.db2(out)
return out
class DummyNetWrapper(nn.Module):
def __init__(self):
super(DummyNetWrapper, self).__init__()
self.bn = nn.BatchNorm2d(3, affine=True)
self.dn = DummyNet()
def forward(self, x):
return self.dn(self.bn(x))
class TestFP16Model(unittest.TestCase):
def setUp(self):
self.N = 64
self.C_in = 3
self.H_in = 16
self.W_in = 32
self.in_tensor = torch.randn((self.N, self.C_in, self.H_in, self.W_in)).cuda()
self.orig_model = DummyNetWrapper().cuda()
self.fp16_model = FP16Model(self.orig_model)
def test_params_and_buffers(self):
exempted_modules = [
self.fp16_model.network.bn,
self.fp16_model.network.dn.db1.bn,
self.fp16_model.network.dn.db2.bn,
]
for m in self.fp16_model.modules():
expected_dtype = torch.float if (m in exempted_modules) else torch.half
for p in m.parameters(recurse=False):
assert p.dtype == expected_dtype
for b in m.buffers(recurse=False):
assert b.dtype in (expected_dtype, torch.int64)
def test_output_is_half(self):
out_tensor = self.fp16_model(self.in_tensor)
assert out_tensor.dtype == torch.half
|
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate. Will be scaled by <global batch size>/256: args.lr = args.lr*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--prof', dest='prof', action='store_true',
help='Only run 10 iterations for profiling.')
parser.add_argument('--deterministic', action='store_true')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--has-ext', action='store_true')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--fused-adam', action='store_true')
parser.add_argument('--prints-to-process', type=int, default=10)
cudnn.benchmark = True
def fast_collate(batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
best_prec1 = 0
args = parser.parse_args()
# Let multi_tensor_applier be the canary in the coal mine
# that verifies the backend is what we think it is
assert multi_tensor_applier.available == args.has_ext
print("opt_level = {}".format(args.opt_level))
print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), type(args.keep_batchnorm_fp32))
print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
print("\nCUDNN VERSION: {}\n".format(torch.backends.cudnn.version()))
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.local_rank)
torch.set_printoptions(precision=10)
def main():
global best_prec1, args
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank % torch.cuda.device_count()
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model = model.cuda()
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
if args.fused_adam:
optimizer = optimizers.FusedAdam(model.parameters())
else:
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
model, optimizer = amp.initialize(
model, optimizer,
# enabled=False,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# model = DDP(model)
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# Optionally resume from a checkpoint
if args.resume:
# Use a local scope to avoid dangling references
def resume():
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
resume()
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
if(args.arch == "inception_v3"):
crop_size = 299
val_size = 320 # This value was chosen arbitrarily; we can adjust.
else:
crop_size = 224
val_size = 256
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
# transforms.ToTensor(), Too slow
# normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(val_size),
transforms.CenterCrop(crop_size),
]))
train_sampler = None
val_sampler = None
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=val_sampler,
collate_fn=fast_collate)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
if args.prof:
break
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
if args.local_rank == 0:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
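# Loads the next batch on its own CUDA stream and normalizes it on the GPU so
# host-to-device copies can overlap with compute; next() makes the default stream
# wait on the copy stream before handing the batch over.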
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
self.preload()
return input, target
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
run_info_dict = {"Iteration" : [],
"Loss" : [],
"Speed" : []}
prefetcher = data_prefetcher(train_loader)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
# No learning rate warmup for this test, to expose bitwise inaccuracies more quickly
# adjust_learning_rate(optimizer, epoch, i, len(train_loader))
if args.prof:
if i > 10:
break
# measure data loading time
data_time.update(time.time() - end)
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
# for param in model.parameters():
# print(param.data.double().sum().item(), param.grad.data.double().sum().item())
# torch.cuda.synchronize()
torch.cuda.nvtx.range_push("step")
optimizer.step()
torch.cuda.nvtx.range_pop()
torch.cuda.synchronize()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# If you decide to refactor this test, like examples/imagenet, to sample the loss every
# print_freq iterations, make sure to move this prefetching below the accuracy calculation.
input, target = prefetcher.next()
if i % args.print_freq == 0 and i > 1:
if args.local_rank == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5))
run_info_dict["Iteration"].append(i)
run_info_dict["Loss"].append(losses.val)
run_info_dict["Speed"].append(args.world_size * args.batch_size / batch_time.val)
if len(run_info_dict["Loss"]) == args.prints_to_process:
if args.local_rank == 0:
torch.save(run_info_dict,
str(args.has_ext) + "_" + str(args.opt_level) + "_" +
str(args.loss_scale) + "_" + str(args.keep_batchnorm_fp32) + "_" +
str(args.fused_adam))
quit()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
prefetcher = data_prefetcher(val_loader)
input, target = prefetcher.next()
i = -1
while input is not None:
i += 1
# compute output
with torch.no_grad():
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if args.local_rank == 0 and i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
input, target = prefetcher.next()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 30
if epoch >= 80:
factor = factor + 1
lr = args.lr*(0.1**factor)
"""Warmup"""
if epoch < 5:
lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)
# if(args.local_rank == 0):
# print("epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def reduce_tensor(tensor):
rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)  # ReduceOp replaces the deprecated reduce_op alias
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
|
import argparse
import torch
parser = argparse.ArgumentParser(description='Compare')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
parser.add_argument('--fused-adam', action='store_true')
parser.add_argument('--use_baseline', action='store_true')
args = parser.parse_args()
base_file = str(args.opt_level) + "_" +\
str(args.loss_scale) + "_" +\
str(args.keep_batchnorm_fp32) + "_" +\
str(args.fused_adam)
file_e = "True_" + base_file
file_p = "False_" + base_file
if args.use_baseline:
file_b = "baselines/True_" + base_file
dict_e = torch.load(file_e)
dict_p = torch.load(file_p)
if args.use_baseline:
dict_b = torch.load(file_b)
torch.set_printoptions(precision=10)
print(file_e)
print(file_p)
if args.use_baseline:
print(file_b)
# ugly duplication here...
if not args.use_baseline:
for n, (i_e, i_p) in enumerate(zip(dict_e["Iteration"], dict_p["Iteration"])):
assert i_e == i_p, "i_e = {}, i_p = {}".format(i_e, i_p)
loss_e = dict_e["Loss"][n]
loss_p = dict_p["Loss"][n]
assert loss_e == loss_p, "Iteration {}, loss_e = {}, loss_p = {}".format(i_e, loss_e, loss_p)
print("{:4} {:15.10f} {:15.10f} {:15.10f} {:15.10f}".format(
i_e,
loss_e,
loss_p,
dict_e["Speed"][n],
dict_p["Speed"][n]))
else:
for n, (i_e, i_p) in enumerate(zip(dict_e["Iteration"], dict_p["Iteration"])):
assert i_e == i_p, "i_e = {}, i_p = {}".format(i_e, i_p)
loss_e = dict_e["Loss"][n]
loss_p = dict_p["Loss"][n]
loss_b = dict_b["Loss"][n]
assert loss_e == loss_p, "Iteration {}, loss_e = {}, loss_p = {}".format(i_e, loss_e, loss_p)
assert loss_e == loss_b, "Iteration {}, loss_e = {}, loss_b = {}".format(i_e, loss_e, loss_b)
print("{:4} {:15.10f} {:15.10f} {:15.10f} {:15.10f} {:15.10f} {:15.10f}".format(
i_e,
loss_b,
loss_e,
loss_p,
dict_b["Speed"][n],
dict_e["Speed"][n],
dict_p["Speed"][n]))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# PyTorch documentation build configuration file, created by
# sphinx-quickstart on Fri Dec 23 13:31:47 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# sys.path.insert(0, os.path.abspath('../../apex/parallel/'))
import apex
# import multiproc
import sphinx_rtd_theme
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'sphinx.ext.extlinks',
]
napoleon_use_ivar = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Apex'
copyright = '2018'
author = 'Christian Sarofeen, Natalia Gimelshein, Michael Carilli, Raul Puri'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# TODO: change to [:2] at v1.0
# version = 'master (' + torch.__version__ + ' )'
version = '0.1'
# The full version, including alpha/beta/rc tags.
# TODO: verify this works as expected
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'collapse_navigation': False,
'display_version': True,
'logo_only': True,
}
# html_logo = '_static/img/nv-pytorch2.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_style_path = 'css/pytorch_theme.css'
html_context = {
'css_files': [
'https://fonts.googleapis.com/css?family=Lato',
'_static/css/pytorch_theme.css'
],
}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyTorchdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'apex.tex', 'Apex Documentation',
'Torch Contributors', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'Apex', 'Apex Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Apex', 'Apex Documentation',
author, 'Apex', 'One line description of project.',
'Miscellaneous'),
]
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'numpy': ('http://docs.scipy.org/doc/numpy/', None),
}
# -- A patch that prevents Sphinx from cross-referencing ivar tags -------
# See http://stackoverflow.com/a/41184353/3343043
from docutils import nodes
from sphinx.util.docfields import TypedField
from sphinx import addnodes
def patched_make_field(self, types, domain, items, **kw):
# `kw` catches `env=None` needed for newer sphinx while maintaining
# backwards compatibility when passed along further down!
# type: (List, unicode, Tuple) -> nodes.field
def handle_item(fieldarg, content):
par = nodes.paragraph()
par += addnodes.literal_strong('', fieldarg) # Patch: this line added
# par.extend(self.make_xrefs(self.rolename, domain, fieldarg,
# addnodes.literal_strong))
if fieldarg in types:
par += nodes.Text(' (')
            # NOTE: using .pop() here to prevent a single type node from being
            # inserted twice into the doctree, which would lead to
            # inconsistencies later when references are resolved
fieldtype = types.pop(fieldarg)
if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text):
typename = u''.join(n.astext() for n in fieldtype)
typename = typename.replace('int', 'python:int')
typename = typename.replace('long', 'python:long')
typename = typename.replace('float', 'python:float')
typename = typename.replace('type', 'python:type')
par.extend(self.make_xrefs(self.typerolename, domain, typename,
addnodes.literal_emphasis, **kw))
else:
par += fieldtype
par += nodes.Text(')')
par += nodes.Text(' -- ')
par += content
return par
fieldname = nodes.field_name('', self.label)
if len(items) == 1 and self.can_collapse:
fieldarg, content = items[0]
bodynode = handle_item(fieldarg, content)
else:
bodynode = self.list_type()
for fieldarg, content in items:
bodynode += nodes.list_item('', handle_item(fieldarg, content))
fieldbody = nodes.field_body('', bodynode)
return nodes.field('', fieldname, fieldbody)
TypedField.make_field = patched_make_field
|
import torch
import argparse
import os
from apex import amp
# FOR DISTRIBUTED: (can also use torch.nn.parallel.DistributedDataParallel instead)
from apex.parallel import DistributedDataParallel
parser = argparse.ArgumentParser()
# FOR DISTRIBUTED: Parse for the local_rank argument, which will be supplied
# automatically by torch.distributed.launch.
parser.add_argument("--local_rank", default=0, type=int)
args = parser.parse_args()
# FOR DISTRIBUTED: If we are running under torch.distributed.launch,
# the 'WORLD_SIZE' environment variable will also be set automatically.
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
if args.distributed:
# FOR DISTRIBUTED: Set the device according to local_rank.
torch.cuda.set_device(args.local_rank)
# FOR DISTRIBUTED: Initialize the backend. torch.distributed.launch will provide
# environment variables, and requires that you use init_method=`env://`.
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
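# For reference, a typical way to launch this script (the script name
# "distributed_example.py" is hypothetical) so that torch.distributed.launch
# spawns one process per GPU and supplies --local_rank and WORLD_SIZE:
#
#   python -m torch.distributed.launch --nproc_per_node=2 distributed_example.py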
torch.backends.cudnn.benchmark = True
N, D_in, D_out = 64, 1024, 16
# Each process receives its own batch of "fake input data" and "fake target data."
# The "training loop" in each process just uses this fake batch over and over.
# https://github.com/NVIDIA/apex/tree/master/examples/imagenet provides a more realistic
# example of distributed data sampling for both training and validation.
x = torch.randn(N, D_in, device='cuda')
y = torch.randn(N, D_out, device='cuda')
model = torch.nn.Linear(D_in, D_out).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")
if args.distributed:
# FOR DISTRIBUTED: After amp.initialize, wrap the model with
# apex.parallel.DistributedDataParallel.
model = DistributedDataParallel(model)
# torch.nn.parallel.DistributedDataParallel is also fine, with some added args:
# model = torch.nn.parallel.DistributedDataParallel(model,
# device_ids=[args.local_rank],
# output_device=args.local_rank)
loss_fn = torch.nn.MSELoss()
for t in range(500):
optimizer.zero_grad()
y_pred = model(x)
loss = loss_fn(y_pred, y)
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
optimizer.step()
if args.local_rank == 0:
print("final loss = ", loss)
|
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import numpy as np
try:
from apex.parallel import DistributedDataParallel as DDP
from apex.fp16_utils import *
from apex import amp, optimizers
from apex.multi_tensor_apply import multi_tensor_applier
except ImportError:
raise ImportError("Please install apex from https://www.github.com/nvidia/apex to run this example.")
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('data', metavar='DIR',
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N', help='mini-batch size per process (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
metavar='LR', help='Initial learning rate. Will be scaled by <global batch size>/256: args.lr = args.lr*float(args.batch_size*args.world_size)/256. A warmup schedule will also be applied over the first 5 epochs.')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int,
metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
help='use pre-trained model')
parser.add_argument('--prof', default=-1, type=int,
                    help='Iteration at which profiling begins; 10 iterations are profiled, then the run exits. -1 (default) disables profiling.')
parser.add_argument('--deterministic', action='store_true')
parser.add_argument("--local_rank", default=0, type=int)
parser.add_argument('--sync_bn', action='store_true',
help='enabling apex sync BN.')
parser.add_argument('--opt-level', type=str)
parser.add_argument('--keep-batchnorm-fp32', type=str, default=None)
parser.add_argument('--loss-scale', type=str, default=None)
cudnn.benchmark = True
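# fast_collate builds a uint8 NCHW batch on the CPU without normalizing it; the
# conversion to float and the mean/std normalization happen later on the GPU in
# data_prefetcher, which is why ToTensor()/Normalize() are left out of the
# dataset transforms further down.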
def fast_collate(batch):
imgs = [img[0] for img in batch]
targets = torch.tensor([target[1] for target in batch], dtype=torch.int64)
w = imgs[0].size[0]
h = imgs[0].size[1]
tensor = torch.zeros( (len(imgs), 3, h, w), dtype=torch.uint8 )
for i, img in enumerate(imgs):
nump_array = np.asarray(img, dtype=np.uint8)
if(nump_array.ndim < 3):
nump_array = np.expand_dims(nump_array, axis=-1)
nump_array = np.rollaxis(nump_array, 2)
tensor[i] += torch.from_numpy(nump_array)
return tensor, targets
best_prec1 = 0
args = parser.parse_args()
print("opt_level = {}".format(args.opt_level))
print("keep_batchnorm_fp32 = {}".format(args.keep_batchnorm_fp32), type(args.keep_batchnorm_fp32))
print("loss_scale = {}".format(args.loss_scale), type(args.loss_scale))
print("\nCUDNN VERSION: {}\n".format(torch.backends.cudnn.version()))
if args.deterministic:
cudnn.benchmark = False
cudnn.deterministic = True
torch.manual_seed(args.local_rank)
torch.set_printoptions(precision=10)
def main():
global best_prec1, args
args.distributed = False
if 'WORLD_SIZE' in os.environ:
args.distributed = int(os.environ['WORLD_SIZE']) > 1
args.gpu = 0
args.world_size = 1
if args.distributed:
args.gpu = args.local_rank
torch.cuda.set_device(args.gpu)
torch.distributed.init_process_group(backend='nccl',
init_method='env://')
args.world_size = torch.distributed.get_world_size()
assert torch.backends.cudnn.enabled, "Amp requires cudnn backend to be enabled."
# create model
if args.pretrained:
print("=> using pre-trained model '{}'".format(args.arch))
model = models.__dict__[args.arch](pretrained=True)
else:
print("=> creating model '{}'".format(args.arch))
model = models.__dict__[args.arch]()
if args.sync_bn:
import apex
print("using apex synced BN")
model = apex.parallel.convert_syncbn_model(model)
model = model.cuda()
# Scale learning rate based on global batch size
args.lr = args.lr*float(args.batch_size*args.world_size)/256.
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay)
# Initialize Amp. Amp accepts either values or strings for the optional override arguments,
# for convenient interoperation with argparse.
model, optimizer = amp.initialize(model, optimizer,
opt_level=args.opt_level,
keep_batchnorm_fp32=args.keep_batchnorm_fp32,
loss_scale=args.loss_scale
)
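    # For example (illustrative, not a default): a command-line override such as
    # --keep-batchnorm-fp32 True arrives here as the string "True", and Amp
    # treats it the same as the Python bool True.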
# For distributed training, wrap the model with apex.parallel.DistributedDataParallel.
# This must be done AFTER the call to amp.initialize. If model = DDP(model) is called
# before model, ... = amp.initialize(model, ...), the call to amp.initialize may alter
# the types of model's parameters in a way that disrupts or destroys DDP's allreduce hooks.
if args.distributed:
# By default, apex.parallel.DistributedDataParallel overlaps communication with
# computation in the backward pass.
# model = DDP(model)
# delay_allreduce delays all communication to the end of the backward pass.
model = DDP(model, delay_allreduce=True)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
# Optionally resume from a checkpoint
if args.resume:
# Use a local scope to avoid dangling references
def resume():
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location = lambda storage, loc: storage.cuda(args.gpu))
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
resume()
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
if(args.arch == "inception_v3"):
raise RuntimeError("Currently, inception_v3 is not supported by this example.")
# crop_size = 299
# val_size = 320 # I chose this value arbitrarily, we can adjust.
else:
crop_size = 224
val_size = 256
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(crop_size),
transforms.RandomHorizontalFlip(),
# transforms.ToTensor(), Too slow
# normalize,
]))
val_dataset = datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(val_size),
transforms.CenterCrop(crop_size),
]))
train_sampler = None
val_sampler = None
if args.distributed:
train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
num_workers=args.workers, pin_memory=True, sampler=train_sampler, collate_fn=fast_collate)
val_loader = torch.utils.data.DataLoader(
val_dataset,
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True,
sampler=val_sampler,
collate_fn=fast_collate)
if args.evaluate:
validate(val_loader, model, criterion)
return
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion)
# remember best prec@1 and save checkpoint
if args.local_rank == 0:
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer' : optimizer.state_dict(),
}, is_best)
class data_prefetcher():
def __init__(self, loader):
self.loader = iter(loader)
self.stream = torch.cuda.Stream()
self.mean = torch.tensor([0.485 * 255, 0.456 * 255, 0.406 * 255]).cuda().view(1,3,1,1)
self.std = torch.tensor([0.229 * 255, 0.224 * 255, 0.225 * 255]).cuda().view(1,3,1,1)
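        # These are the standard ImageNet per-channel mean/std values, scaled by
        # 255 because the batches produced by fast_collate are uint8 in [0, 255].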
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.mean = self.mean.half()
# self.std = self.std.half()
self.preload()
def preload(self):
try:
self.next_input, self.next_target = next(self.loader)
except StopIteration:
self.next_input = None
self.next_target = None
return
# if record_stream() doesn't work, another option is to make sure device inputs are created
# on the main stream.
# self.next_input_gpu = torch.empty_like(self.next_input, device='cuda')
# self.next_target_gpu = torch.empty_like(self.next_target, device='cuda')
# Need to make sure the memory allocated for next_* is not still in use by the main stream
# at the time we start copying to next_*:
# self.stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(self.stream):
self.next_input = self.next_input.cuda(non_blocking=True)
self.next_target = self.next_target.cuda(non_blocking=True)
# more code for the alternative if record_stream() doesn't work:
# copy_ will record the use of the pinned source tensor in this side stream.
# self.next_input_gpu.copy_(self.next_input, non_blocking=True)
# self.next_target_gpu.copy_(self.next_target, non_blocking=True)
# self.next_input = self.next_input_gpu
# self.next_target = self.next_target_gpu
# With Amp, it isn't necessary to manually convert data to half.
# if args.fp16:
# self.next_input = self.next_input.half()
# else:
self.next_input = self.next_input.float()
self.next_input = self.next_input.sub_(self.mean).div_(self.std)
def next(self):
torch.cuda.current_stream().wait_stream(self.stream)
input = self.next_input
target = self.next_target
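        # record_stream tells the caching allocator that these tensors are now
        # also in use on the current (main) stream, so their memory is not
        # recycled for the side stream's next preload while still being read.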
if input is not None:
input.record_stream(torch.cuda.current_stream())
if target is not None:
target.record_stream(torch.cuda.current_stream())
self.preload()
return input, target
def train(train_loader, model, criterion, optimizer, epoch):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
prefetcher = data_prefetcher(train_loader)
input, target = prefetcher.next()
i = 0
while input is not None:
i += 1
if args.prof >= 0 and i == args.prof:
print("Profiling begun at iteration {}".format(i))
torch.cuda.cudart().cudaProfilerStart()
if args.prof >= 0: torch.cuda.nvtx.range_push("Body of iteration {}".format(i))
adjust_learning_rate(optimizer, epoch, i, len(train_loader))
# compute output
if args.prof >= 0: torch.cuda.nvtx.range_push("forward")
output = model(input)
if args.prof >= 0: torch.cuda.nvtx.range_pop()
loss = criterion(output, target)
# compute gradient and do SGD step
optimizer.zero_grad()
if args.prof >= 0: torch.cuda.nvtx.range_push("backward")
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
# for param in model.parameters():
# print(param.data.double().sum().item(), param.grad.data.double().sum().item())
if args.prof >= 0: torch.cuda.nvtx.range_push("optimizer.step()")
optimizer.step()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
if i%args.print_freq == 0:
# Every print_freq iterations, check the loss, accuracy, and speed.
# For best performance, it doesn't make sense to print these metrics every
# iteration, since they incur an allreduce and some host<->device syncs.
# Measure accuracy
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
# Average loss and accuracy across processes for logging
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
# to_python_float incurs a host<->device sync
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
torch.cuda.synchronize()
batch_time.update((time.time() - end)/args.print_freq)
end = time.time()
if args.local_rank == 0:
print('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {3:.3f} ({4:.3f})\t'
'Loss {loss.val:.10f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader),
args.world_size*args.batch_size/batch_time.val,
args.world_size*args.batch_size/batch_time.avg,
batch_time=batch_time,
loss=losses, top1=top1, top5=top5))
if args.prof >= 0: torch.cuda.nvtx.range_push("prefetcher.next()")
input, target = prefetcher.next()
if args.prof >= 0: torch.cuda.nvtx.range_pop()
# Pop range "Body of iteration {}".format(i)
if args.prof >= 0: torch.cuda.nvtx.range_pop()
if args.prof >= 0 and i == args.prof + 10:
print("Profiling ended at iteration {}".format(i))
torch.cuda.cudart().cudaProfilerStop()
quit()
def validate(val_loader, model, criterion):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
prefetcher = data_prefetcher(val_loader)
input, target = prefetcher.next()
i = 0
while input is not None:
i += 1
# compute output
with torch.no_grad():
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
if args.distributed:
reduced_loss = reduce_tensor(loss.data)
prec1 = reduce_tensor(prec1)
prec5 = reduce_tensor(prec5)
else:
reduced_loss = loss.data
losses.update(to_python_float(reduced_loss), input.size(0))
top1.update(to_python_float(prec1), input.size(0))
top5.update(to_python_float(prec5), input.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# TODO: Change timings to mirror train().
if args.local_rank == 0 and i % args.print_freq == 0:
print('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Speed {2:.3f} ({3:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader),
args.world_size * args.batch_size / batch_time.val,
args.world_size * args.batch_size / batch_time.avg,
batch_time=batch_time, loss=losses,
top1=top1, top5=top5))
input, target = prefetcher.next()
print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
.format(top1=top1, top5=top5))
return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, 'model_best.pth.tar')
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch, step, len_epoch):
"""LR schedule that should yield 76% converged accuracy with batch size 256"""
factor = epoch // 30
if epoch >= 80:
factor = factor + 1
lr = args.lr*(0.1**factor)
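    # Worked example, assuming args.lr works out to 0.1 and ignoring the 5-epoch
    # warmup below: epochs 0-29 use 0.1, 30-59 use 0.01, 60-79 use 0.001, and
    # 80-89 use 0.0001.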
"""Warmup"""
if epoch < 5:
lr = lr*float(1 + step + epoch*len_epoch)/(5.*len_epoch)
# if(args.local_rank == 0):
# print("epoch = {}, step = {}, lr = {}".format(epoch, step, lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def reduce_tensor(tensor):
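    # All-reduce a copy of the tensor and divide by the world size so every rank
    # logs the same averaged loss/accuracy value.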
rt = tensor.clone()
    dist.all_reduce(rt, op=dist.ReduceOp.SUM)  # ReduceOp replaces the deprecated reduce_op alias
rt /= args.world_size
return rt
if __name__ == '__main__':
main()
|
from pprint import pprint
with open('cachegrind.out', 'rb') as f:
events = None
while True:
l = f.readline()
        if not l:  # readline() returns b'' at EOF, never None
break
if l.startswith(b"events: "):
events = l.decode('ascii').split()[1:]
break
assert events is not None
    f.seek(-200, 2) # SEEK_END
    summary = None  # so the assert below fails cleanly if no summary line is found
while True:
l = f.readline()
        if not l:  # readline() returns b'' at EOF, never None
break
if l.startswith(b"summary: "):
summary = map(int, l.decode('ascii').split()[1:])
break
assert summary is not None
r = dict(zip(events, summary))
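# The constants subtracted below appear to be a fixed baseline (counts measured
# for an empty/no-op run), so the remaining numbers reflect only the workload
# under test; their exact provenance is not recorded in this file.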
r['Ir'] -= 2068041
r['I1mr'] -= 1130
r['D1mr'] -= 13017
r['D1mw'] -= 2117
r['ILmr'] -= 1098
r['DLmr'] -= 7828
r['DLmw'] -= 1325
r['Bcm'] -= 14810
r['Bim'] -= 264
r['L1m'] = r['I1mr'] + r['D1mr'] + r['D1mw']
r['LLm'] = r['ILmr'] + r['DLmr'] + r['DLmw']
r['Bm'] = r['Bcm'] + r['Bim']
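# Rough cycle estimate: each branch miss and L1 miss is weighted at ~10 cycles
# and each last-level cache miss at ~100 cycles on top of the raw instruction
# count. These weights are a heuristic, not a measured machine model.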
r['CEst'] = r['Ir'] + 10 * r.get('Bm', 0) + 10 * r['L1m'] + 100 * r['LLm']
print('')
print('instrs: {: 10,d}'.format(r['Ir']))
print('l1 misses: {: 10,d} (i {:,d} + dr {:,d} + dw {:,d})'.format(r['L1m'], r['I1mr'], r['D1mr'], r['D1mw']))
print('ll misses: {: 10,d} (i {:,d} + dr {:,d} + dw {:,d})'.format(r['LLm'], r['ILmr'], r['DLmr'], r['DLmw']))
print('b misses: {: 10,d} (cond {:,d} + indirect {:,d})'.format(r['Bm'], r['Bcm'], r['Bim']))
print('cycle estimate: {:,d}'.format(r['CEst']))
|
"""test_bench.py
Runs hub models in benchmark mode using pytest-benchmark. Run setup separately first.
Usage:
python install.py
pytest test_bench.py
See pytest-benchmark help (pytest test_bench.py -h) for additional options
e.g. --benchmark-autosave
--benchmark-compare
-k <filter expression>
...
"""
import os
import pytest
import time
import torch
from components._impl.workers import subprocess_worker
from torchbenchmark import _list_model_paths, ModelTask, get_metadata_from_yaml
from torchbenchmark.util.machine_config import get_machine_state
from torchbenchmark.util.metadata_utils import skip_by_metadata
def pytest_generate_tests(metafunc):
# This is where the list of models to test can be configured
# e.g. by using info in metafunc.config
devices = ['cpu', 'cuda']
if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
devices.append('mps')
if metafunc.config.option.cpu_only:
devices = ['cpu']
if metafunc.config.option.cuda_only:
devices = ['cuda']
if metafunc.config.option.mps_only:
devices = ['mps']
if metafunc.cls and metafunc.cls.__name__ == "TestBenchNetwork":
paths = _list_model_paths()
metafunc.parametrize(
'model_path', paths,
ids=[os.path.basename(path) for path in paths],
scope="class")
metafunc.parametrize('device', devices, scope='class')
metafunc.parametrize('compiler', ['jit', 'eager'], scope='class')
@pytest.mark.benchmark(
warmup=True,
warmup_iterations=3,
disable_gc=False,
timer=time.perf_counter,
group='hub',
)
class TestBenchNetwork:
def test_train(self, model_path, device, compiler, benchmark):
try:
if skip_by_metadata(test="train", device=device, jit=(compiler == 'jit'), \
extra_args=[], metadata=get_metadata_from_yaml(model_path)):
raise NotImplementedError("Test skipped by its metadata.")
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
# api, enable after PyTorch 1.13 release
if "quantized" in model_path:
return
task = ModelTask(model_path)
if not task.model_details.exists:
return # Model is not supported.
task.make_model_instance(test="train", device=device, jit=(compiler == 'jit'))
benchmark(task.invoke)
benchmark.extra_info['machine_state'] = get_machine_state()
benchmark.extra_info['batch_size'] = task.get_model_attribute('batch_size')
benchmark.extra_info['precision'] = task.get_model_attribute("dargs", "precision")
benchmark.extra_info['test'] = 'train'
except NotImplementedError:
print(f'Test train on {device} is not implemented, skipping...')
def test_eval(self, model_path, device, compiler, benchmark, pytestconfig):
try:
if skip_by_metadata(test="eval", device=device, jit=(compiler == 'jit'), \
extra_args=[], metadata=get_metadata_from_yaml(model_path)):
raise NotImplementedError("Test skipped by its metadata.")
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
# api, enable after PyTorch 1.13 release
if "quantized" in model_path:
return
task = ModelTask(model_path)
if not task.model_details.exists:
return # Model is not supported.
task.make_model_instance(test="eval", device=device, jit=(compiler == 'jit'))
with task.no_grad(disable_nograd=pytestconfig.getoption("disable_nograd")):
benchmark(task.invoke)
benchmark.extra_info['machine_state'] = get_machine_state()
benchmark.extra_info['batch_size'] = task.get_model_attribute('batch_size')
benchmark.extra_info['precision'] = task.get_model_attribute("dargs", "precision")
benchmark.extra_info['test'] = 'eval'
if pytestconfig.getoption("check_opt_vs_noopt_jit"):
task.check_opt_vs_noopt_jit()
except NotImplementedError:
print(f'Test eval on {device} is not implemented, skipping...')
@pytest.mark.benchmark(
warmup=True,
warmup_iterations=3,
disable_gc=False,
timer=time.perf_counter,
group='hub',
)
class TestWorker:
"""Benchmark SubprocessWorker to make sure we aren't skewing results."""
def test_worker_noop(self, benchmark):
worker = subprocess_worker.SubprocessWorker()
benchmark(lambda: worker.run("pass"))
def test_worker_store(self, benchmark):
worker = subprocess_worker.SubprocessWorker()
benchmark(lambda: worker.store("x", 1))
def test_worker_load(self, benchmark):
worker = subprocess_worker.SubprocessWorker()
worker.store("x", 1)
benchmark(lambda: worker.load("x"))
|
import os
import traceback
import argparse
import importlib
from pathlib import Path
from typing import Dict
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
def list_benchmarks() -> Dict[str, str]:
benchmarks = {}
import userbenchmark
bdir = Path(userbenchmark.__file__).parent.resolve()
fb_bdir = bdir.joinpath("fb")
if fb_bdir.exists():
for fb_bm in filter(lambda x: x.is_dir(), fb_bdir.iterdir()):
benchmarks[fb_bm.name] = f"fb.{fb_bm.name}"
for bm in filter(lambda x: x.is_dir() and not x.name == "fb", bdir.iterdir()):
benchmarks[bm.name] = bm.name
return benchmarks
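# A typical invocation (the entry-point file name and benchmark name here are
# assumptions, not taken from this file):
#   python run_benchmark.py my_benchmark --some-extra-flag
# Everything after the benchmark name is forwarded untouched to that
# benchmark's run(bm_args).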
def run():
available_benchmarks = list_benchmarks()
parser = argparse.ArgumentParser(description="Run a TorchBench user benchmark")
parser.add_argument("bm_name", choices=available_benchmarks.keys(), help='name of the user benchmark')
args, bm_args = parser.parse_known_args()
try:
benchmark = importlib.import_module(f"userbenchmark.{available_benchmarks[args.bm_name]}")
benchmark.run(bm_args)
except ImportError as e:
print(f"Failed to import user benchmark module {args.bm_name}, error: {str(e)}")
traceback.print_exc()
if __name__ == "__main__":
run()
|
"""
A lightweight runner that just sets up a model and runs one of its functions in a particular configuration.
Intended for debugging/exploration/profiling use cases, where the full test/measurement harness would just be overhead.
DANGER: make sure to `python install.py` first or otherwise make sure the benchmark you are going to run
has been installed. This script intentionally does not automate or enforce setup steps.
Wall time is reported as a sanity check, but it is not a rigorous benchmark measurement.
"""
import argparse
import logging
import time
import numpy as np
import torch
import torch.profiler as profiler
import traceback
from torchbenchmark import load_canary_model_by_name, load_model_by_name, ModelNotFoundError
from torchbenchmark.util.experiment.metrics import get_peak_memory
WARMUP_ROUNDS = 3
SUPPORT_DEVICE_LIST = ["cpu", "cuda"]
if hasattr(torch.backends, "mps") and torch.backends.mps.is_available():
SUPPORT_DEVICE_LIST.append("mps")
SUPPORT_PROFILE_LIST = [
"record_shapes",
"profile_memory",
"with_stack",
"with_flops",
"with_modules",
]
def run_one_step_with_cudastreams(func, streamcount):
print("Running Utilization Scaling Using Cuda Streams")
streamlist = []
for i in range(1, streamcount + 1, 1):
# create additional streams and prime with load
while len(streamlist) < i :
s = torch.cuda.Stream()
streamlist.append(s)
for s in streamlist:
with torch.cuda.stream(s):
func()
        torch.cuda.synchronize()  # wait for the priming work queued on all streams to finish
# now run benchmark using streams
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
start_event.record()
for s in streamlist:
with torch.cuda.stream(s):
func()
end_event.record()
torch.cuda.synchronize()
print(f"Cuda StreamCount:{len(streamlist)}")
print('{:<20} {:>20}'.format("GPU Time:", "%.3f milliseconds" % start_event.elapsed_time(end_event)), sep='')
def printResultSummaryTime(result_summary, metrics_needed=[], model=None, flops_model_analyzer=None, cpu_peak_mem=None, mem_device_id=None, gpu_peak_mem=None):
if args.device == "cuda":
gpu_time = np.median(list(map(lambda x: x[0], result_summary)))
cpu_walltime = np.median(list(map(lambda x: x[1], result_summary)))
if hasattr(model, "NUM_BATCHES"):
print('{:<20} {:>20}'.format("GPU Time per batch:", "%.3f milliseconds" %
(gpu_time / model.NUM_BATCHES), sep=''))
print('{:<20} {:>20}'.format("CPU Wall Time per batch:", "%.3f milliseconds" %
(cpu_walltime / model.NUM_BATCHES), sep=''))
else:
print('{:<20} {:>20}'.format("GPU Time:", "%.3f milliseconds" % gpu_time, sep=''))
print('{:<20} {:>20}'.format("CPU Total Wall Time:", "%.3f milliseconds" % cpu_walltime, sep=''))
else:
cpu_walltime = np.median(list(map(lambda x: x[0], result_summary)))
print('{:<20} {:>20}'.format("CPU Total Wall Time:", "%.3f milliseconds" % cpu_walltime, sep=''))
    # If flops were requested, output the achieved TFLOPs per second.
    if 'flops' in metrics_needed:
        if flops_model_analyzer.metrics_backend_mapping['flops'] == 'dcgm':
            tflops_device_id, tflops = flops_model_analyzer.calculate_flops()
            print('{:<20} {:>20}'.format("GPU %d FLOPS:" % tflops_device_id, "%.4f TFLOPs per second" % tflops, sep=''))
        else:
            # The non-dcgm path has no per-device id, so derive TFLOPs from the
            # model's reported flops and the measured wall time instead.
            flops, batch_size = model.get_flops()
            tflops = flops * batch_size / (cpu_walltime / 1.0e3) / 1.0e12
            print('{:<20} {:>20}'.format("FLOPS:", "%.4f TFLOPs per second" % tflops, sep=''))
if gpu_peak_mem is not None:
print('{:<20} {:>20}'.format("GPU %d Peak Memory:" % mem_device_id, "%.4f GB" % gpu_peak_mem, sep=''))
if cpu_peak_mem is not None:
print('{:<20} {:>20}'.format("CPU Peak Memory:", "%.4f GB" % cpu_peak_mem, sep=''))
def run_one_step(func, nwarmup=WARMUP_ROUNDS, num_iter=10, model=None, export_metrics_file=None, stress=0, metrics_needed=[], metrics_gpu_backend=None):
# Warm-up `nwarmup` rounds
for _i in range(nwarmup):
func()
result_summary = []
flops_model_analyzer = None
if 'flops' in metrics_needed:
from components.model_analyzer.TorchBenchAnalyzer import ModelAnalyzer
flops_model_analyzer = ModelAnalyzer(export_metrics_file, ['flops'], metrics_gpu_backend)
flops_model_analyzer.start_monitor()
if stress:
cur_time = time.time_ns()
start_time = cur_time
target_time = stress * 1e9 + start_time
num_iter = -1
last_time = start_time
_i = 0
last_it = 0
first_print_out = True
while (not stress and _i < num_iter) or (stress and cur_time < target_time) :
if args.device == "cuda":
torch.cuda.synchronize()
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
            # Use time_ns() rather than time(), which may not provide better than
            # 1-second precision; see https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
start_event.record()
func()
end_event.record()
torch.cuda.synchronize()
t1 = time.time_ns()
result_summary.append((start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000))
elif args.device == "mps":
t0 = time.time_ns()
func()
t1 = time.time_ns()
wall_latency = t1 - t0
# TODO: modify this to add GPU time as well
result_summary.append([(t1 - t0) / 1_000_000])
else:
t0 = time.time_ns()
func()
t1 = time.time_ns()
result_summary.append([(t1 - t0) / 1_000_000])
if stress:
cur_time = time.time_ns()
# print out the status every 10s.
if (cur_time - last_time) >= 10 * 1e9:
if first_print_out:
                    print('|{:^20}|{:^20}|{:^20}|'.format("Iterations", "Time/Iteration(ms)", "Remaining Time(s)"))
first_print_out = False
est = (target_time - cur_time) / 1e9
time_per_it = (cur_time - last_time) / (_i - last_it) / 1e6
print('|{:^20}|{:^20}|{:^20}|'.format("%d" % _i, "%.2f" % time_per_it , "%d" % int(est)))
last_time = cur_time
last_it = _i
_i += 1
if flops_model_analyzer is not None:
flops_model_analyzer.stop_monitor()
flops_model_analyzer.aggregate()
cpu_peak_mem = None
gpu_peak_mem = None
mem_device_id = None
if 'cpu_peak_mem' in metrics_needed or 'gpu_peak_mem' in metrics_needed:
cpu_peak_mem, mem_device_id, gpu_peak_mem = get_peak_memory(func, model.device, export_metrics_file=export_metrics_file, metrics_needed=metrics_needed, metrics_gpu_backend=metrics_gpu_backend)
printResultSummaryTime(result_summary, metrics_needed, model, flops_model_analyzer, cpu_peak_mem, mem_device_id, gpu_peak_mem)
def profile_one_step(func, nwarmup=WARMUP_ROUNDS):
activity_groups = []
result_summary = []
device_to_activity = {'cuda': profiler.ProfilerActivity.CUDA, 'cpu': profiler.ProfilerActivity.CPU}
if args.profile_devices:
activity_groups = [
device_to_activity[device] for device in args.profile_devices if (device in device_to_activity)
]
else:
if args.device == 'cuda':
activity_groups = [
profiler.ProfilerActivity.CUDA,
profiler.ProfilerActivity.CPU,
]
elif args.device == 'cpu':
activity_groups = [profiler.ProfilerActivity.CPU]
profile_opts = {}
for opt in SUPPORT_PROFILE_LIST:
profile_opts[opt] = True if args.profile_options is not None and opt in args.profile_options else False
if args.profile_eg:
from datetime import datetime
import os
from torch.profiler import ExecutionGraphObserver
start_time = datetime.now()
timestamp = int(datetime.timestamp(start_time))
eg_file = f"{args.model}_{timestamp}_eg.json"
eg = ExecutionGraphObserver()
if not os.path.exists(args.profile_eg_folder):
os.makedirs(args.profile_eg_folder)
eg.register_callback(f"{args.profile_eg_folder}/{eg_file}")
nwarmup = 0
eg.start()
with profiler.profile(
schedule=profiler.schedule(wait=0, warmup=nwarmup, active=1, repeat=1),
activities=activity_groups,
record_shapes=args.profile_detailed if args.profile_detailed else profile_opts["record_shapes"],
profile_memory=args.profile_detailed if args.profile_detailed else profile_opts["profile_memory"],
with_stack=args.profile_detailed if args.profile_detailed else profile_opts["with_stack"],
with_flops=args.profile_detailed if args.profile_detailed else profile_opts["with_flops"],
with_modules=args.profile_detailed if args.profile_detailed else profile_opts["with_modules"],
on_trace_ready=profiler.tensorboard_trace_handler(args.profile_folder)
) as prof:
if args.device == "cuda":
start_event = torch.cuda.Event(enable_timing=True)
end_event = torch.cuda.Event(enable_timing=True)
for i in range(nwarmup + 1):
t0 = time.time_ns()
start_event.record()
func()
torch.cuda.synchronize() # Need to sync here to match run_one_step()'s timed run.
end_event.record()
t1 = time.time_ns()
if i >= nwarmup:
result_summary.append((start_event.elapsed_time(end_event), (t1 - t0) / 1_000_000))
prof.step()
else:
for i in range(nwarmup + 1):
t0 = time.time_ns()
func()
t1 = time.time_ns()
if i >= nwarmup:
result_summary.append([(t1 - t0) / 1_000_000])
prof.step()
if args.profile_eg and eg:
eg.stop()
eg.unregister_callback()
print(f"Save Exeution Graph to : {args.profile_eg_folder}/{eg_file}")
print(prof.key_averages(group_by_input_shape=True).table(sort_by="cpu_time_total", row_limit=30))
print(f"Saved TensorBoard Profiler traces to {args.profile_folder}.")
printResultSummaryTime(result_summary)
def _validate_devices(devices: str):
devices_list = devices.split(",")
valid_devices = SUPPORT_DEVICE_LIST
for d in devices_list:
if d not in valid_devices:
raise ValueError(f'Invalid device {d} passed into --profile-devices. Expected devices: {valid_devices}.')
return devices_list
def _validate_profile_options(profile_options: str):
profile_options_list = profile_options.split(",")
for opt in profile_options_list:
if opt not in SUPPORT_PROFILE_LIST:
raise ValueError(f'Invalid profile option {opt} passed into --profile-options. Expected options: {SUPPORT_PROFILE_LIST}.')
return profile_options_list
if __name__ == "__main__":
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("model", help="Full or partial name of a model to run. If partial, picks the first match.")
parser.add_argument("-d", "--device", choices=SUPPORT_DEVICE_LIST, default="cpu", help="Which device to use.")
parser.add_argument("-m", "--mode", choices=["eager", "jit"], default="eager", help="Which mode to run.")
parser.add_argument("-t", "--test", choices=["eval", "train"], default="eval", help="Which test to run.")
parser.add_argument("--profile", action="store_true", help="Run the profiler around the function")
parser.add_argument("--profile-options", type=_validate_profile_options, help=f"Select which profile options to enable. Valid options: {SUPPORT_PROFILE_LIST}.")
parser.add_argument("--amp", action="store_true", help="enable torch.autocast()")
parser.add_argument("--profile-folder", default="./logs", help="Save profiling model traces to this directory.")
parser.add_argument("--profile-detailed", action="store_true",
help=f"Enable all profile options, including {SUPPORT_PROFILE_LIST}. Overrides --profile-options.")
parser.add_argument("--profile-devices", type=_validate_devices,
help="Profile comma separated list of activities such as cpu,cuda.")
parser.add_argument("--profile-eg", action="store_true", help="Collect execution graph by PARAM")
parser.add_argument("--profile-eg-folder", default="./eg_logs",
help="Save execution graph traces to this directory.")
parser.add_argument("--cudastreams", action="store_true",
help="Utilization test using increasing number of cuda streams.")
parser.add_argument("--bs", type=int, help="Specify batch size to the test.")
parser.add_argument("--export-metrics", action="store_true",
help="Export all specified metrics records to a csv file. The default csv file name is [model_name]_all_metrics.csv.")
parser.add_argument("--stress", type=float, default=0, help="Specify execution time (seconds) to stress devices.")
parser.add_argument("--metrics", type=str, default="cpu_peak_mem,gpu_peak_mem",
help="Specify metrics [cpu_peak_mem,gpu_peak_mem,flops]to be collected. You can also set `none` to disable all metrics. The metrics are separated by comma such as cpu_peak_mem,gpu_peak_mem.")
parser.add_argument("--metrics-gpu-backend", choices=["dcgm", "default"], default="default", help="""Specify the backend [dcgm, default] to collect metrics. \nIn default mode, the latency(execution time) is collected by time.time_ns() and it is always enabled. Optionally,
\n - you can specify cpu peak memory usage by --metrics cpu_peak_mem, and it is collected by psutil.Process(). \n - you can specify gpu peak memory usage by --metrics gpu_peak_mem, and it is collected by nvml library.\n - you can specify flops by --metrics flops, and it is collected by fvcore.\nIn dcgm mode, the latency(execution time) is collected by time.time_ns() and it is always enabled. Optionally,\n - you can specify cpu peak memory usage by --metrics cpu_peak_mem, and it is collected by psutil.Process().\n - you can specify cpu and gpu peak memory usage by --metrics cpu_peak_mem,gpu_peak_mem, and they are collected by dcgm library.""")
parser.add_argument("--channels-last", action="store_true", help="enable torch.channels_last()")
args, extra_args = parser.parse_known_args()
if args.cudastreams and not args.device == "cuda":
print("cuda device required to use --cudastreams option!")
exit(-1)
found = False
Model = None
try:
Model = load_model_by_name(args.model)
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
except ModelNotFoundError:
print(f"Warning: The model {args.model} cannot be found at core set.")
if not Model:
try:
Model = load_canary_model_by_name(args.model)
except ModuleNotFoundError:
traceback.print_exc()
exit(-1)
except ModelNotFoundError:
print(f"Error: The model {args.model} cannot be found at either core or canary model set.")
exit(-1)
m = Model(device=args.device, test=args.test, jit=(args.mode == "jit"), batch_size=args.bs, extra_args=extra_args)
if m.dynamo:
mode = f"dynamo {m.opt_args.torchdynamo}"
elif m.opt_args.backend:
mode = f"{m.opt_args.backend}"
else:
mode = "eager"
print(f"Running {args.test} method from {Model.name} on {args.device} in {mode} mode with input batch size {m.batch_size} and precision {m.dargs.precision}.")
if args.channels_last:
m.enable_channels_last()
test = m.invoke
if args.amp:
test = torch.autocast(m.device)(test)
metrics_needed = [_ for _ in args.metrics.split(',') if _.strip()] if args.metrics else []
if 'none' in metrics_needed:
metrics_needed = []
    # gpu_peak_mem is only supported on the cuda device
if args.device != 'cuda' and 'gpu_peak_mem' in metrics_needed:
metrics_needed.remove('gpu_peak_mem')
metrics_needed = list(set(metrics_needed))
metrics_gpu_backend = args.metrics_gpu_backend
if metrics_needed:
if metrics_gpu_backend == 'dcgm':
from components.model_analyzer.TorchBenchAnalyzer import check_dcgm
check_dcgm()
elif 'gpu_peak_mem' in metrics_needed:
from components.model_analyzer.TorchBenchAnalyzer import check_nvml
check_nvml()
if 'gpu_peak_mem' in metrics_needed or ('flops' in metrics_needed and metrics_gpu_backend == 'dcgm'):
assert args.device == 'cuda', "gpu_peak_mem and flops:dcgm are only available for cuda device."
if 'flops' in metrics_needed and metrics_gpu_backend == 'default':
assert hasattr(m, "get_flops"), f"The model {args.model} does not support calculating flops."
m.get_flops()
if args.export_metrics:
if not args.metrics:
print("You have to specifiy at least one metrics to export.")
exit(-1)
export_metrics_file = "%s_all_metrics.csv" % args.model
else:
export_metrics_file = None
if args.profile:
profile_one_step(test)
elif args.cudastreams:
run_one_step_with_cudastreams(test, 10)
else:
run_one_step(test, model=m, export_metrics_file=export_metrics_file,
stress=args.stress, metrics_needed=metrics_needed, metrics_gpu_backend=args.metrics_gpu_backend)
if hasattr(m, 'correctness'):
print('{:<20} {:>20}'.format("Correctness: ", str(m.correctness)), sep='')
# Print dynamo compilation metrics, if there are any.
try:
from torch._dynamo.utils import compile_times
compile_time = dict(zip(*compile_times(repr="csv", aggregate=True)))["_compile"]
print('{:<20} {:>18}'.format("PT2 Compilation time: ", "%.3f seconds" % float(compile_time)), sep='')
except:
pass
|
import os
import pytest
import torch
from torchbenchmark.util.machine_config import get_machine_config, check_machine_configured
def pytest_addoption(parser):
parser.addoption("--fuser", help="Use one of the available fusers: te, old, nvfuser", default="te", choices=["te", "old", "nvfuser"])
parser.addoption("--ignore_machine_config",
action='store_true',
help="Disable checks/assertions for machine configuration for stable benchmarks")
parser.addoption("--disable_nograd", action='store_true',
help="Disable no_grad for eval() runs")
parser.addoption("--check_opt_vs_noopt_jit",
action='store_true',
help="The best attempt to check results for inference runs. Not all models support this!")
parser.addoption("--cpu_only", action='store_true',
help="Run benchmarks on cpu only and ignore machine configuration checks")
parser.addoption("--cuda_only", action='store_true',
help="Run benchmarks on cuda only and ignore machine configuration checks")
parser.addoption("--mps_only", action='store_true',
help="Run benchmarks on mps only and ignore machine configuration checks")
def set_fuser(fuser):
if fuser == "te":
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_override_can_fuse_on_cpu(True)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(True)
elif fuser == "old":
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
torch._C._jit_override_can_fuse_on_gpu(True)
torch._C._jit_set_texpr_fuser_enabled(False)
elif fuser == "nvfuser":
os.environ['PYTORCH_NVFUSER_DISABLE_FALLBACK'] = '1'
torch._C._jit_set_texpr_fuser_enabled(False)
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(True)
torch._C._jit_can_fuse_on_cpu()
torch._C._jit_can_fuse_on_gpu()
torch._C._jit_override_can_fuse_on_cpu(False)
torch._C._jit_override_can_fuse_on_gpu(False)
torch._C._jit_set_nvfuser_guard_mode(True)
torch._C._jit_set_nvfuser_enabled(True)
else:
# pytest_addoption should always set the default fuser
assert(False)
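# For example, to run the benchmarks in this repo under the nvFuser backend
# (other flags omitted):
#   pytest test_bench.py --fuser nvfuser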
def pytest_sessionstart(session):
try:
check_machine_configured()
except Exception as e:
if not session.config.getoption('ignore_machine_config'):
pytest.exit(f"{e}\nSee README.md for machine tuning script usage, or use --ignore_machine_config")
def pytest_configure(config):
set_fuser(config.getoption("fuser"))
def pytest_benchmark_update_machine_info(config, machine_info):
machine_info['pytorch_version'] = torch.__version__
machine_info['pytorch_git_version'] = torch.version.git_version
machine_info['cuda_version'] = torch.version.cuda
try:
import torchtext
machine_info['torchtext_version'] = torchtext.__version__
except ImportError:
machine_info['torchtext_version'] = '*not-installed*'
try:
import torchvision
machine_info['torchvision_version'] = torchvision.__version__
except ImportError:
machine_info['torchvision_version'] = '*not-installed*'
machine_info['github_run_id'] = os.environ.get("GITHUB_RUN_ID")
machine_info['torchbench_score_version'] = os.environ.get("TORCHBENCH_VER")
try:
# if running on unexpected machine/os, get_machine_config _may_ not work
machine_info['torchbench_machine_config'] = get_machine_config()
except Exception:
if not config.getoption('ignore_machine_config'):
raise
|
#!/usr/bin/env python
import argparse
import gc
import logging
import os
import re
import warnings
from torchbenchmark import list_models
import torch
NO_JIT = {"demucs", "dlrm", "maml", "yolov3", "moco", "pytorch_CycleGAN_and_pix2pix", "tacotron2"}
NO_GET_MODULE = {"Background_Matting"}
def get_dump_filename(name, device, args):
if args.no_profiling:
return f"{name}.{device}.last_executed_graph.noprofile.log"
if args.inlined_graph:
return f"{name}.{device}.inlined_graph.log"
return f"{name}.{device}.last_executed_graph.log"
def iter_models(args):
device = "cpu"
for benchmark_cls in list_models():
bench_name = benchmark_cls.name
if args.benchmark and args.benchmark != bench_name:
continue
if bench_name in NO_GET_MODULE:
print(f"{bench_name} has no get_module, skipped")
continue
if bench_name in NO_JIT:
print(f"{bench_name} has no scripted module, skipped")
continue
try:
            # Disable profiling mode so that the collected graph does not contain
            # profiling nodes.
if args.no_profiling:
torch._C._jit_set_profiling_mode(False)
benchmark = benchmark_cls(device=device, jit=True)
model, example_inputs = benchmark.get_module()
# extract ScriptedModule object for BERT model
if bench_name == "BERT_pytorch":
model = model.bert
fname = get_dump_filename(bench_name, device, args)
print(f"Dump Graph IR for {bench_name} to {fname}")
            # The default (profiling) mode needs a warm-up run so the profiling executor records an optimized graph
if not (args.no_profiling or args.inlined_graph):
model.graph_for(*example_inputs)
with open(fname, 'w') as dump_file:
if args.inlined_graph:
print(model.inlined_graph, file=dump_file)
else:
print(model.graph_for(*example_inputs), file=dump_file)
        except NotImplementedError:
            print(f"Cannot collect graph IR dump for {bench_name}")
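# Standalone sketch of the warm-up-then-dump pattern used above (hypothetical toy
# function, not a benchmark model): under the profiling executor, graph_for() only
# returns the optimized graph after at least one profiled run.
#
#     def _toy(x):
#         return torch.relu(x) + 1
#     scripted = torch.jit.script(_toy)
#     example = torch.randn(8)
#     scripted(example)                   # warm up the profiling executor
#     print(scripted.graph_for(example))  # last executed / optimized graph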
def main(args=None):
parser = argparse.ArgumentParser(description="dump last_executed graph for all benchmarks with JIT implementation")
parser.add_argument("--benchmark", "-b",
help="dump graph for <BENCHMARK>")
parser.add_argument("--no_profiling", action="store_true",
help="dump last_executed graphs w/o profiling executor")
parser.add_argument("--inlined_graph", action="store_true",
help="dump graphs dumped by module.inlined_graph")
args = parser.parse_args(args)
iter_models(args)
if __name__ == '__main__':
main()
|
"""
Compute the benchmark score given a frozen score configuration and current benchmark data.
"""
import argparse
import json
import math
import yaml
import sys
import os
from torchbenchmark.score.compute_score import TorchBenchScore
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--score_version", choices=['v1', 'v2'], default="v1",
help="which version of score to use - choose from v1 or v2")
parser.add_argument("--benchmark_data_file",
help="pytest-benchmark json file with current benchmark data")
parser.add_argument("--benchmark_data_dir",
help="directory containing multiple .json files for each of which to compute a score")
parser.add_argument("--relative", action='store_true',
help="use the first json file in benchmark data dir instead of the reference yaml")
parser.add_argument("--output-norm-only", action='store_true',
help="use the benchmark data file specified to output reference norm yaml")
args = parser.parse_args()
if args.benchmark_data_file is None and args.benchmark_data_dir is None:
parser.print_help(sys.stderr)
raise ValueError("Invalid command-line arguments. You must specify a data file or a data dir.")
files = []
benchmark_data = []
scores = []
if args.benchmark_data_file is not None:
with open(args.benchmark_data_file) as data_file:
data = json.load(data_file)
files.append(args.benchmark_data_file)
benchmark_data.append(data)
elif args.benchmark_data_dir is not None:
for f in sorted(os.listdir(args.benchmark_data_dir)):
path = os.path.join(args.benchmark_data_dir, f)
if os.path.isfile(path) and os.path.splitext(path)[1] == '.json':
with open(path) as data_file:
data = json.load(data_file)
files.append(f)
benchmark_data.append(data)
if args.output_norm_only:
score_config = TorchBenchScore(ref_data=benchmark_data[0], version=args.score_version)
print(yaml.dump(score_config.get_norm(benchmark_data[0])))
exit(0)
if args.relative:
score_config = TorchBenchScore(ref_data=benchmark_data[0], version=args.score_version)
else:
score_config = TorchBenchScore(version=args.score_version)
results = []
for fname, data in zip(files, benchmark_data):
try:
result = {}
score = score_config.compute_score(data)
result["file"] = fname
result["pytorch_version"] = data['machine_info']['pytorch_version']
result["score"] = score
results.append(result)
except ValueError as e:
print(f"Error when analyzing file {fname}: {e}")
print(json.dumps(results, indent=4))
|
import time
import torch
import argparse
import json
from dataclasses import asdict
from torchbenchmark.e2e import E2EBenchmarkResult, load_e2e_model_by_name
from typing import Dict
SUPPORT_DEVICE_LIST = ["cpu", "cuda"]
def run(func) -> Dict[str, float]:
if torch.cuda.is_available():
torch.cuda.synchronize()
result = {}
    # Use time_ns() instead of time(): time() may not provide better precision than
    # 1 second, per https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
func()
if torch.cuda.is_available():
torch.cuda.synchronize()
t2 = time.time_ns()
result["latency_ms"] = (t2 - t0) / 1_000_000.0
return result
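def _example_run_usage():
    # Illustrative only (never called by this script): time a trivial tensor op with
    # run() above. The workload below is an arbitrary assumption for demonstration.
    x = torch.randn(512, 512)
    stats = run(lambda: torch.mm(x, x))
    return stats["latency_ms"]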
def gen_result(m, run_result):
num_epochs = getattr(m, "num_epochs", 1)
r = E2EBenchmarkResult(device=m.device, device_num=m.device_num,
test=m.test, num_examples=m.num_examples,
num_epochs=num_epochs, batch_size=m.batch_size, result=dict())
r.result["latency"] = run_result["latency_ms"] / 1000.0
r.result["qps"] = r.num_examples / r.result["latency"] * r.num_epochs
# add accuracy result if available
if hasattr(m, "accuracy"):
r.result["accuracy"] = m.accuracy
return r
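# Worked example of the qps formula above (illustrative numbers): with
# num_examples=1000, num_epochs=1 and a measured latency of 2.0 s,
# qps = 1000 / 2.0 * 1 = 500 examples per second.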
if __name__ == "__main__":
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("model", help="Full name of the end-to-end model.")
parser.add_argument("-t", "--test", choices=["eval", "train"], default="eval", help="Which test to run.")
parser.add_argument("--bs", type=int, help="Specify batch size.")
args, extra_args = parser.parse_known_args()
Model = load_e2e_model_by_name(args.model)
if not Model:
print(f"Unable to find model matching {args.model}.")
exit(-1)
m = Model(test=args.test, batch_size=args.bs, extra_args=extra_args)
test = getattr(m, args.test)
result = gen_result(m, run(test))
result_json = json.dumps(asdict(result))
print(result_json)
|
"""
Run a config of benchmarking with a list of models.
If unspecified, run a sweep of all models.
"""
import argparse
import dataclasses
import itertools
import json
import os
import pathlib
import sys
import time

import numpy
import torch
from typing import List, Optional, Dict, Any, Tuple
from torchbenchmark import ModelTask
WARMUP_ROUNDS = 3
WORKER_TIMEOUT = 600 # seconds
MODEL_DIR = ['torchbenchmark', 'models']
NANOSECONDS_PER_MILLISECONDS = 1_000_000.0
def run_one_step(func, device: str, nwarmup=WARMUP_ROUNDS, num_iter=10) -> float:
    "Run the model for `num_iter` timed iterations (after `nwarmup` warm-up rounds) and return the median latency in milliseconds."
# Warm-up `nwarmup` rounds
for _i in range(nwarmup):
func()
result_summary = []
for _i in range(num_iter):
if device == "cuda":
torch.cuda.synchronize()
            # Use time_ns() instead of time(): time() may not provide better precision
            # than 1 second, per https://docs.python.org/3/library/time.html#time.time.
t0 = time.time_ns()
func()
torch.cuda.synchronize() # Wait for the events to be recorded!
t1 = time.time_ns()
else:
t0 = time.time_ns()
func()
t1 = time.time_ns()
result_summary.append((t1 - t0) / NANOSECONDS_PER_MILLISECONDS)
wall_latency = numpy.median(result_summary)
return wall_latency
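def _example_run_one_step_usage():
    # Illustrative only (never called by this script): measure the median CPU latency
    # of a small matmul with run_one_step() above. Sizes and iteration counts are
    # arbitrary assumptions for demonstration.
    x = torch.randn(256, 256)
    return run_one_step(lambda: torch.mm(x, x), device="cpu", nwarmup=1, num_iter=5)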
@dataclasses.dataclass
class ModelTestResult:
name: str
test: str
device: str
extra_args: List[str]
status: str
batch_size: Optional[int]
precision: str
results: Dict[str, Any]
def _list_model_paths(models: List[str]) -> List[pathlib.Path]:
p = pathlib.Path(__file__).parent.joinpath(*MODEL_DIR)
model_paths = sorted(child for child in p.iterdir() if child.is_dir())
valid_model_paths = sorted(filter(lambda x: x.joinpath("__init__.py").exists(), model_paths))
if models:
valid_model_paths = sorted(filter(lambda x: x.name in models, valid_model_paths))
return valid_model_paths
def _validate_tests(tests: str) -> List[str]:
tests_list = list(map(lambda x: x.strip(), tests.split(",")))
valid_tests = ['train', 'eval']
for t in tests_list:
if t not in valid_tests:
raise ValueError(f'Invalid test {t} passed into --tests. Expected tests: {valid_tests}.')
return tests_list
def _validate_devices(devices: str) -> List[str]:
devices_list = list(map(lambda x: x.strip(), devices.split(",")))
valid_devices = ['cpu', 'cuda']
for d in devices_list:
if d not in valid_devices:
raise ValueError(f'Invalid device {d} passed into --devices. Expected devices: {valid_devices}.')
return devices_list
def _run_model_test(model_path: pathlib.Path, test: str, device: str, jit: bool, batch_size: Optional[int], extra_args: List[str]) -> ModelTestResult:
assert test == "train" or test == "eval", f"Test must be either 'train' or 'eval', but get {test}."
result = ModelTestResult(name=model_path.name, test=test, device=device, extra_args=extra_args, batch_size=None, precision="fp32",
status="OK", results={})
# Run the benchmark test in a separate process
print(f"Running model {model_path.name} ... ", end='', flush=True)
status: str = "OK"
bs_name = "batch_size"
correctness_name = "correctness"
error_message: Optional[str] = None
try:
task = ModelTask(os.path.basename(model_path), timeout=WORKER_TIMEOUT)
if not task.model_details.exists:
status = "NotExist"
return
task.make_model_instance(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
# Check the batch size in the model matches the specified value
result.batch_size = task.get_model_attribute(bs_name)
result.precision = task.get_model_attribute("dargs", "precision")
        if batch_size and (result.batch_size != batch_size):
            raise ValueError(f"User specified batch size {batch_size}, but model {result.name} ran with batch size {result.batch_size}. Please report a bug.")
result.results["latency_ms"] = run_one_step(task.invoke, device)
# if NUM_BATCHES is set, update to per-batch latencies
num_batches = task.get_model_attribute("NUM_BATCHES")
if num_batches:
result.results["latency_ms"] = result.results["latency_ms"] / num_batches
# if the model provides eager eval result, save it for cosine similarity
correctness = task.get_model_attribute(correctness_name)
if correctness is not None:
result.results[correctness_name] = str(correctness)
except NotImplementedError as e:
status = "NotImplemented"
error_message = str(e)
except TypeError as e: # TypeError is raised when the model doesn't support variable batch sizes
status = "TypeError"
error_message = str(e)
except KeyboardInterrupt as e:
status = "UserInterrupted"
error_message = str(e)
except Exception as e:
status = f"{type(e).__name__}"
error_message = str(e)
finally:
print(f"[ {status} ]")
result.status = status
if error_message:
result.results["error_message"] = error_message
if status == "UserInterrupted":
sys.exit(1)
return result
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--models", nargs='+', default=[],
help="Specify one or more models to run. If not set, trigger a sweep-run on all models.")
parser.add_argument("-t", "--tests", required=True, type=_validate_tests, help="Specify tests, choice of train, or eval.")
parser.add_argument("-d", "--devices", required=True, type=_validate_devices, help="Specify devices, choice of cpu, or cuda.")
parser.add_argument("-b", "--bs", type=int, help="Specify batch size.")
parser.add_argument("--jit", action='store_true', help="Turn on torchscript.")
parser.add_argument("-o", "--output", type=str, default="tb-output.json", help="The default output json file.")
parser.add_argument("--proper-bs", action='store_true', help="Find the best batch_size for current devices.")
args, extra_args = parser.parse_known_args()
args.models = _list_model_paths(args.models)
results = []
for element in itertools.product(*[args.models, args.tests, args.devices]):
model_path, test, device = element
if args.proper_bs:
if test != 'eval':
print("Error: Only batch size of eval test is tunable.")
sys.exit(1)
from scripts.proper_bs import _run_model_test_proper_bs
r = _run_model_test_proper_bs(model_path, test, device, args.jit, batch_size=args.bs, extra_args=extra_args)
else:
r = _run_model_test(model_path, test, device, args.jit, batch_size=args.bs, extra_args=extra_args)
results.append(r)
results_to_export = list(map(lambda x: dataclasses.asdict(x), results))
parent_dir = pathlib.Path(args.output).parent
parent_dir.mkdir(exist_ok=True, parents=True)
with open(args.output, "w") as outfile:
json.dump(results_to_export, outfile, indent=4)
|
"""
A Benchmark Summary Metadata tool to extract and generate metadata from models at runtime.
"""
import argparse
from copy import deepcopy
import os
import yaml
from typing import Any, Dict, List, Tuple
import torch
from torchbenchmark import list_models, load_model_by_name, _list_model_paths, ModelTask, ModelDetails, str_to_bool
TIMEOUT = 300 # seconds
torchbench_dir = 'torchbenchmark'
model_dir = 'models'
_DEFAULT_METADATA_ = {
'train_benchmark': True,
'train_deterministic': False,
'eval_benchmark': True,
'eval_deterministic': False,
'eval_nograd': True,
# 'origin': None,
# 'train_dtype': 'float32',
# 'eval_dtype': 'float32',
}
def _parser_helper(input):
return None if input is None else str_to_bool(str(input))
def _process_model_details_to_metadata(train_detail: ModelDetails, eval_detail: ModelDetails) -> Dict[str, Any]:
metadata = {}
for k, v in _DEFAULT_METADATA_.items():
if hasattr(train_detail, k):
metadata[k] = getattr(train_detail, k)
elif train_detail and k in train_detail.metadata:
metadata[k] = train_detail.metadata[k]
elif eval_detail and k in eval_detail.metadata:
metadata[k] = eval_detail.metadata[k]
else:
metadata[k] = v
return metadata
def _extract_detail(path: str) -> Dict[str, Any]:
name = os.path.basename(path)
device = "cuda"
t_detail = None
e_detail = None
# Separate train and eval to isolated processes.
task_t = ModelTask(path, timeout=TIMEOUT)
try:
task_t.make_model_instance(device=device, jit=False)
task_t.set_train()
task_t.train()
task_t.extract_details_train()
task_t.del_model_instance()
t_detail = deepcopy(task_t._details)
except NotImplementedError:
        print(f'Model {name} train is not fully implemented, skipping...')
del task_t
task_e = ModelTask(path, timeout=TIMEOUT)
try:
task_e.make_model_instance(device=device, jit=False)
task_e.set_eval()
task_e.eval()
task_e.extract_details_eval()
task_e.del_model_instance()
e_detail = deepcopy(task_e._details)
except NotImplementedError:
        print(f'Model {name} eval is not fully implemented, skipping...')
del task_e
return _process_model_details_to_metadata(t_detail, e_detail)
def _extract_all_details(model_names: List[str]) -> List[Tuple[str, Dict[str, Any]]]:
details = []
for model_path in _list_model_paths():
model_name = os.path.basename(model_path)
if model_name not in model_names:
continue
ed = _extract_detail(model_path)
details.append((model_path, ed))
return details
def _print_extracted_details(extracted_details: List[Tuple[str, Dict[str, Any]]]):
for path, ex_detail in extracted_details:
name = os.path.basename(path)
print(f'Model: {name} , Details: {ex_detail}')
def _maybe_override_extracted_details(args, extracted_details: List[Tuple[str, Dict[str, Any]]]):
for _path, ex_detail in extracted_details:
        # Apply each override independently (not elif) so multiple flags can be set at once.
        if args.train_benchmark is not None:
            ex_detail['train_benchmark'] = args.train_benchmark
        if args.train_deterministic is not None:
            ex_detail['train_deterministic'] = args.train_deterministic
        if args.eval_benchmark is not None:
            ex_detail['eval_benchmark'] = args.eval_benchmark
        if args.eval_deterministic is not None:
            ex_detail['eval_deterministic'] = args.eval_deterministic
        if args.eval_nograd is not None:
            ex_detail['eval_nograd'] = args.eval_nograd
def _write_metadata_yaml_files(extracted_details: List[Tuple[str, Dict[str, Any]]]):
for path, ex_detail in extracted_details:
metadata_path = path + "/metadata.yaml"
with open(metadata_path, 'w') as file:
yaml.dump(ex_detail, file)
print(f"Processed file: {metadata_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(__doc__)
parser.add_argument("--model", default=None,
help="Full name of a model to update. If absent, applies to all models.")
parser.add_argument("--extract-only", default=False, action="store_true",
help="Only extract model details.")
parser.add_argument("--train-benchmark", default=None, type=_parser_helper,
help="Whether to enable PyTorch benchmark mode during train.")
parser.add_argument("--train-deterministic", default=None, type=_parser_helper,
help="Whether to enable deterministic during train.")
parser.add_argument("--eval-benchmark", default=None, type=_parser_helper,
help="Whether to enable PyTorch benchmark mode during eval.")
parser.add_argument("--eval-deterministic", default=None, type=_parser_helper,
help="Whether to enable deterministic during eval.")
parser.add_argument("--eval-nograd", default=None, type=_parser_helper,
help="Whether to enable no_grad during eval.")
# parser.add_argument("--origin", default=None,
# help="Location of benchmark's origin. Such as torchtext or torchvision.")
# parser.add_argument("--train-dtype", default=None,
# choices=['float32', 'float16', 'bfloat16', 'amp'], help="Which fp type to perform training.")
# parser.add_argument("--eval-dtype", default=None,
# choices=['float32', 'float16', 'bfloat16', 'amp'], help="Which fp type to perform eval.")
args = parser.parse_args()
# Only allow this script for cuda for now.
if not torch.cuda.is_available():
print("This tool is currently only supported when the system has a cuda device.")
exit(1)
# Find the matching model, or use all models.
models = []
model_names = []
if args.model is not None:
Model = load_model_by_name(args.model)
if not Model:
print(f"Unable to find model matching: {args.model}.")
exit(-1)
models.append(Model)
model_names.append(Model.name)
print(f"Generating metadata to select model: {model_names}.")
else:
models.extend(list_models(model_match=args.model))
model_names.extend([m.name for m in models])
print("Generating metadata to all models.")
# Extract all model details from models.
extracted_details = _extract_all_details(model_names)
print("Printing extracted metadata.")
_print_extracted_details(extracted_details)
# Stop here for extract-only.
if args.extract_only:
print("--extract-only is set. Stop here.")
exit(0)
# Apply details passed in by flags.
_maybe_override_extracted_details(args, extracted_details)
print("Printing metadata after applying any modifications.")
_print_extracted_details(extracted_details)
# TODO: Modify and update the model to apply metadata changes by the user.
    # Generate metadata files for each matching model.
_write_metadata_yaml_files(extracted_details)
|
"""test.py
Setup and Run hub models.
Make sure to enable an https proxy if necessary, or the setup steps may hang.
"""
# This file shows how to use the benchmark suite from user end.
import gc
import functools
import os
import traceback
import unittest
from unittest.mock import patch
import yaml
import torch
from torchbenchmark import _list_model_paths, ModelTask, get_metadata_from_yaml
from torchbenchmark.util.metadata_utils import skip_by_metadata
# Some of the models have very heavyweight setup, so we have to set a very
# generous limit. That said, we don't want the entire test suite to hang if
# a single test encounters an extreme failure, so we give up after a test is
# unresponsive for 5 minutes. (Note: this does not require that the entire
# test case completes in 5 minutes. It requires that if the worker is
# unresponsive for 5 minutes the parent will presume it dead / incapacitated.)
TIMEOUT = 300 # Seconds
class TestBenchmark(unittest.TestCase):
def setUp(self):
gc.collect()
def tearDown(self):
gc.collect()
def test_fx_profile(self):
try:
from torch.fx.interpreter import Interpreter
except ImportError: # older versions of PyTorch
raise unittest.SkipTest("Requires torch>=1.8")
from fx_profile import main, ProfileAggregate
with patch.object(ProfileAggregate, "save") as mock_save:
# just run one model to make sure things aren't completely broken
main(["--repeat=1", "--filter=pytorch_struct", "--device=cpu"])
self.assertGreaterEqual(mock_save.call_count, 1)
def _create_example_model_instance(task: ModelTask, device: str):
skip = False
try:
task.make_model_instance(test="eval", device=device, jit=False)
except NotImplementedError:
try:
task.make_model_instance(test="train", device=device, jit=False)
except NotImplementedError:
skip = True
finally:
if skip:
raise NotImplementedError(f"Model is not implemented on the device {device}")
def _load_test(path, device):
def _skip_cuda_memory_check_p(metadata):
if device != "cuda":
return True
if "skip_cuda_memory_leak" in metadata and metadata["skip_cuda_memory_leak"]:
return True
return False
def example_fn(self):
task = ModelTask(path, timeout=TIMEOUT)
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
_create_example_model_instance(task, device)
task.check_example()
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method `get_module()` on {device} is not implemented, skipping...')
def train_fn(self):
metadata = get_metadata_from_yaml(path)
task = ModelTask(path, timeout=TIMEOUT)
allow_customize_batch_size = task.get_model_attribute("ALLOW_CUSTOMIZE_BSIZE", classattr=True)
# to speedup test, use batch size 1 if possible
batch_size = 1 if allow_customize_batch_size else None
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
task.make_model_instance(test="train", device=device, jit=False, batch_size=batch_size)
task.invoke()
task.check_details_train(device=device, md=metadata)
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method train on {device} is not implemented, skipping...')
def eval_fn(self):
metadata = get_metadata_from_yaml(path)
task = ModelTask(path, timeout=TIMEOUT)
allow_customize_batch_size = task.get_model_attribute("ALLOW_CUSTOMIZE_BSIZE", classattr=True)
# to speedup test, use batch size 1 if possible
batch_size = 1 if allow_customize_batch_size else None
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
task.make_model_instance(test="eval", device=device, jit=False, batch_size=batch_size)
task.invoke()
task.check_details_eval(device=device, md=metadata)
task.check_eval_output()
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method eval on {device} is not implemented, skipping...')
def check_device_fn(self):
task = ModelTask(path, timeout=TIMEOUT)
with task.watch_cuda_memory(skip=_skip_cuda_memory_check_p(metadata), assert_equal=self.assertEqual):
try:
task.make_model_instance(test="eval", device=device, jit=False)
task.check_device()
task.del_model_instance()
except NotImplementedError:
self.skipTest(f'Method check_device on {device} is not implemented, skipping...')
name = os.path.basename(path)
metadata = get_metadata_from_yaml(path)
for fn, fn_name in zip([example_fn, train_fn, eval_fn, check_device_fn],
["example", "train", "eval", "check_device"]):
# set exclude list based on metadata
setattr(TestBenchmark, f'test_{name}_{fn_name}_{device}',
(unittest.skipIf(skip_by_metadata(test=fn_name, device=device,\
jit=False, extra_args=[], metadata=metadata), "This test is skipped by its metadata")(fn)))
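# Sketch of the dynamic test generation above (hypothetical model name): for a model
# directory ".../models/foo" and device "cpu", _load_test attaches methods named
# test_foo_example_cpu, test_foo_train_cpu, test_foo_eval_cpu and
# test_foo_check_device_cpu to TestBenchmark, each wrapped in unittest.skipIf driven
# by the model's metadata.yaml.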
def _load_tests():
devices = ['cpu']
if torch.cuda.is_available():
devices.append('cuda')
if hasattr(torch.backends, 'mps') and torch.backends.mps.is_available():
devices.append('mps')
for path in _list_model_paths():
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
# api, enable after PyTorch 1.13 release
if "quantized" in path:
continue
for device in devices:
_load_test(path, device)
_load_tests()
if __name__ == '__main__':
unittest.main()
|
import argparse
import json
import os
import yaml
import itertools
from bokeh.palettes import Category10
from bokeh.models import HoverTool, Div, Range1d
from bokeh.plotting import figure, output_file, show
from collections import defaultdict
from datetime import datetime as dt
from torchbenchmark.util.data import load_data_dir, load_data_files
from torchbenchmark.score.compute_score import TorchBenchScore
TORCHBENCH_SCORE_VERSION = "v1"
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("data_dir", nargs='+',
help="One or more directories containing benchmark json files. "
"Each directory will be plotted as a separate series. "
"By default, the first file in the first directory will be used"
" to generate a score configuration with a target of 1000,"
" and everything else will be relative to that.")
parser.add_argument("--output_html", default='plot.html', help="html file to write")
parser.add_argument("--plot_all", action='store_true',
help="Plots the scores for each configuration")
parser.add_argument("--reference_json", required=True,
help="file defining score norm values, usually first json in first data_dir")
args = parser.parse_args()
plot_height = 800
plot_width = 1000
assert len(args.data_dir) > 0, "Must provide at least one data directory"
compare_datasets = [load_data_dir(d, most_recent_files=-1) for d in args.data_dir]
with open(args.reference_json) as f:
ref_data = json.load(f)
plot_all = args.plot_all
score_config = TorchBenchScore(ref_data=ref_data, version=TORCHBENCH_SCORE_VERSION)
p = figure(plot_width=plot_width, plot_height=plot_height,
x_axis_type='datetime')
xs = []
ys = []
zs = []
max_score = 0
for d in compare_datasets:
scores = {}
scores_db = defaultdict(list)
for i in range(len(d._json_raw)):
data = d._json_raw[i]
pytorch_ver = data['machine_info']['pytorch_version']
        # Drop everything from '+' onward (e.g. the local CUDA build suffix)
pytorch_ver_cuda_loc = pytorch_ver.rfind('+')
pytorch_ver = pytorch_ver[:pytorch_ver_cuda_loc]
date = dt.strptime(pytorch_ver[pytorch_ver.index("dev") + len("dev"):], "%Y%m%d")
score = score_config.compute_score(data)
scores[date] = score
dates = []
total_scores = []
all_scores = []
for date in sorted(scores.keys()):
dates.append(date)
total_scores.append(scores[date]["total"])
max_score = max(max_score, max(total_scores))
all_scores.append(scores[date])
xs.append(dates)
ys.append(total_scores)
if plot_all:
zs.append(all_scores)
colors = itertools.cycle(Category10[10])
basenames = map(os.path.basename, args.data_dir)
if plot_all:
for x, z in zip(xs, zs):
basename = next(basenames)
color = next(colors)
configs = z[0].keys()
for config in configs:
if not ("subscore" in config or "total" in config):
continue
color = next(colors)
scores = []
for s in z:
scores.append(s[config])
p.line(x, scores, color=color, line_width=2, legend_label=basename + '-' + config)
p.legend.click_policy = "hide"
else:
for x, y, color in zip(xs, ys, colors):
p.line(x, y, color=color, line_width=2, legend_label=next(basenames))
for x, y, color in zip(xs, ys, colors):
p.circle(x, y, color=color)
p.legend.location = "bottom_right"
p.y_range = Range1d(0, max_score * 1.25)
p.add_tools(HoverTool(
tooltips=[
('date', '@x{%F}'),
('score', '@y{0.00 a}'),
],
formatters={
'@x': 'datetime',
'@y': 'numeral',
},
))
output_file(args.output_html)
show(p)
|
import argparse
import subprocess
import os
import sys
import yaml
import tarfile
from utils import TORCH_DEPS, proxy_suggestion, get_pkg_versions, _test_https
from pathlib import Path
REPO_ROOT = Path(__file__).parent
def s3_checkout():
S3_URL_BASE = "https://ossci-datasets.s3.amazonaws.com/torchbench"
data_dir = REPO_ROOT.joinpath("torchbenchmark", "data")
model_dir = REPO_ROOT.joinpath("torchbenchmark", "models")
index_file = REPO_ROOT.joinpath("torchbenchmark", "data", "index.yaml")
import requests
with open(index_file, "r") as ind:
index = yaml.safe_load(ind)
for input_file in index["INPUT_TARBALLS"]:
s3_url = f"{S3_URL_BASE}/data/{input_file}"
r = requests.get(s3_url, allow_redirects=True)
with open(str(data_dir.joinpath(input_file)), "wb") as output:
print(f"Checking out {s3_url} to {data_dir.joinpath(input_file)}")
output.write(r.content)
for model_file in index["MODEL_PKLS"]:
s3_url = f"{S3_URL_BASE}/models/{model_file}"
r = requests.get(s3_url, allow_redirects=True)
with open(str(model_dir.joinpath(model_file)), "wb") as output:
print(f"Checking out {s3_url} to {model_dir.joinpath(model_file)}")
output.write(r.content)
def decompress_input():
tb_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(tb_dir, "torchbenchmark", "data")
    # Hide decompressed files in the .data directory so that they won't be checked in
decompress_dir = os.path.join(data_dir, ".data")
os.makedirs(decompress_dir, exist_ok=True)
# Decompress every tar.gz file
for tarball in filter(lambda x: x.endswith(".tar.gz"), os.listdir(data_dir)):
tarball_path = os.path.join(data_dir, tarball)
print(f"decompressing input tarball: {tarball}...", end="", flush=True)
tar = tarfile.open(tarball_path)
tar.extractall(path=decompress_dir)
tar.close()
print("OK")
def pip_install_requirements(requirements_txt="requirements.txt"):
if not _test_https():
print(proxy_suggestion)
sys.exit(-1)
try:
subprocess.run([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_txt],
check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
return (False, e.output)
except Exception as e:
return (False, e)
return True, None
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("models", nargs='*', default=[],
help="Specify one or more models to install. If not set, install all models.")
parser.add_argument("--test-mode", action="store_true", help="Run in test mode and check package versions")
parser.add_argument("--canary", action="store_true", help="Install canary model.")
parser.add_argument("--continue_on_fail", action="store_true")
parser.add_argument("--verbose", "-v", action="store_true")
parser.add_argument("--component", choices=["distributed"], help="Install requirements for optional components.")
args = parser.parse_args()
os.chdir(os.path.realpath(os.path.dirname(__file__)))
print(f"checking packages {', '.join(TORCH_DEPS)} are installed...", end="", flush=True)
try:
versions = get_pkg_versions(TORCH_DEPS)
except ModuleNotFoundError as e:
print("FAIL")
print(f"Error: Users must first manually install packages {TORCH_DEPS} before installing the benchmark.")
sys.exit(-1)
print("OK")
print("checking out input files from Amazon S3 ...", end="", flush=True)
s3_checkout()
print("OK")
decompress_input()
if args.component == "distributed":
success, errmsg = pip_install_requirements(requirements_txt="torchbenchmark/util/distributed/requirements.txt")
if not success:
print("Failed to install torchbenchmark distributed requirements:")
print(errmsg)
if not args.continue_on_fail:
sys.exit(-1)
sys.exit(0)
success, errmsg = pip_install_requirements()
if not success:
print("Failed to install torchbenchmark requirements:")
print(errmsg)
if not args.continue_on_fail:
sys.exit(-1)
new_versions = get_pkg_versions(TORCH_DEPS)
if versions != new_versions:
print(f"The torch packages are re-installed after installing the benchmark deps. \
Before: {versions}, after: {new_versions}")
sys.exit(-1)
from torchbenchmark import setup
success &= setup(models=args.models, verbose=args.verbose, continue_on_fail=args.continue_on_fail, test_mode=args.test_mode, allow_canary=args.canary)
if not success:
if args.continue_on_fail:
print("Warning: some benchmarks were not installed due to failure")
else:
raise RuntimeError("Failed to complete setup")
|
"""
The regression detector of TorchBench Userbenchmark.
"""
import json
import argparse
import importlib
from dataclasses import dataclass, asdict
import os
import yaml
from pathlib import Path
import time
from datetime import datetime
from typing import Any, List, Dict, Optional
from userbenchmark.utils import PLATFORMS, USERBENCHMARK_OUTPUT_PREFIX, REPO_PATH, \
TorchBenchABTestResult, get_date_from_metrics, \
get_ub_name, get_latest_jsons_in_s3_from_last_n_days, get_date_from_metrics_s3_key
from utils.s3_utils import S3Client, USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT
GITHUB_ISSUE_TEMPLATE = """
TorchBench CI has detected a performance signal or runtime regression.
Base PyTorch commit: {start}
Affected PyTorch commit: {end}
Affected Tests:
{test_details}
Tests that were no longer run on affected commit:
{control_only_tests}
Tests that were newly added on affected commit:
{treatment_only_tests}
Runtime regressions found?
{runtime_regressions_msg}
GitHub workflow that triggered this issue: {github_run_url}
cc {owner}
"""
DEFAULT_GH_ISSUE_OWNER = "@xuzhao9"
def call_userbenchmark_detector(detector, start_file: str, end_file: str) -> Optional[TorchBenchABTestResult]:
return detector(start_file, end_file)
def get_default_output_path(bm_name: str) -> str:
# By default, write result to $REPO_DIR/.userbenchmark/<userbenchmark-name>/regression-<time>.json
output_path = os.path.join(REPO_PATH, USERBENCHMARK_OUTPUT_PREFIX, bm_name)
fname = "regression-{}.yaml".format(datetime.fromtimestamp(time.time()).strftime("%Y%m%d%H%M%S"))
return os.path.join(output_path, fname)
def generate_regression_dict(control, treatment) -> Dict[Any, Any]:
assert control["name"] == treatment["name"], f'Expected the same userbenchmark name from metrics files, \
but getting {control["name"]} and {treatment["name"]}.'
bm_name = control["name"]
detector = importlib.import_module(f"userbenchmark.{bm_name}.regression_detector").run
# Process control and treatment to include only shared keys
filtered_control_metrics = {}
control_only_metrics = {}
filtered_treatment_metrics = {}
treatment_only_metrics = {}
for control_name, control_metric in control["metrics"].items():
if control_name in treatment["metrics"]:
filtered_control_metrics[control_name] = control_metric
else:
control_only_metrics[control_name] = control_metric
for treatment_name, treatment_metric in treatment["metrics"].items():
if treatment_name in control["metrics"]:
filtered_treatment_metrics[treatment_name] = treatment_metric
else:
treatment_only_metrics[treatment_name] = treatment_metric
control["metrics"] = filtered_control_metrics
treatment["metrics"] = filtered_treatment_metrics
assert filtered_control_metrics.keys() == filtered_treatment_metrics.keys()
# Local file comparison, return the regression detection object
result = call_userbenchmark_detector(detector, control, treatment)
    # organize metrics into a JSON-serializable dictionary
    result_dict = asdict(result) if result else {}
    if result_dict or control_only_metrics or treatment_only_metrics:
        result_dict["control_only_metrics"] = control_only_metrics
        result_dict["treatment_only_metrics"] = treatment_only_metrics
    return result_dict
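def _example_shared_key_filtering():
    # Minimal sketch (hypothetical metric names) of the shared-key filtering performed
    # by generate_regression_dict() above: only metrics present in both runs are
    # compared; the remainder are reported separately in the output dictionary.
    control = {"m1": 1.0, "m2": 2.0}
    treatment = {"m1": 1.1, "m3": 3.0}
    shared = sorted(control.keys() & treatment.keys())          # ["m1"]
    control_only = sorted(control.keys() - treatment.keys())    # ["m2"]
    treatment_only = sorted(treatment.keys() - control.keys())  # ["m3"]
    return shared, control_only, treatment_only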
def process_regressions_into_yaml(regressions_dict, output_path: str, control_file: str, treatment_file: str) -> None:
if regressions_dict == {}:
print(f"No performance signal detected between file {control_file} and {treatment_file}.")
return
# create the output directory if doesn't exist
output_dir = Path(os.path.dirname(output_path))
output_dir.mkdir(parents=True, exist_ok=True)
output_yaml_str = yaml.safe_dump(regressions_dict, sort_keys=False)
print(output_yaml_str)
with open(output_path, "w") as ofptr:
ofptr.write(output_yaml_str)
print(f"Wrote above yaml to {output_path}.")
def process_regressions_into_gh_issue(regressions_dict, owner: str, output_path: str, errors_path: str) -> None:
troubled_tests = ""
for test, stats in regressions_dict["details"].items():
delta = stats["delta"]
if delta != 0:
sign = "+" if delta > 0 else ""
troubled_tests += f"- {test}: {sign}{delta:.5f}%\n"
control_only_tests = ""
for test, stat in regressions_dict["control_only_metrics"].items():
control_only_tests += f"- {test}: {stat}\n"
treatment_only_tests = ""
for test, stat in regressions_dict["treatment_only_metrics"].items():
treatment_only_tests += f"- {test}: {stat}\n"
control_commit = regressions_dict["control_env"]["pytorch_git_version"]
treatment_commit = regressions_dict["treatment_env"]["pytorch_git_version"]
runtime_regressions_msg = "No runtime errors were found in the " + \
"new benchmarks run--you are all good there!"
errors_log_exists = Path(errors_path).exists()
if errors_log_exists:
runtime_regressions_msg = "An errors log was found. Please investigate runtime " + \
"errors by looking into the logs of the workflow linked."
if troubled_tests == "" and control_only_tests == "" and treatment_only_tests == "" and not errors_log_exists:
print(f"No regressions found between {control_commit} and {treatment_commit}.")
return
if "GITHUB_ENV" in os.environ:
fname = os.environ["GITHUB_ENV"]
content = f"TORCHBENCH_REGRESSION_DETECTED='{treatment_commit}'\n"
with open(fname, 'a') as fo:
fo.write(content)
github_run_id = os.environ.get("GITHUB_RUN_ID", None)
github_run_url = "No URL found, please look for the failing action in " + \
"https://github.com/pytorch/benchmark/actions"
if github_run_id is not None:
github_run_url = f"https://github.com/pytorch/benchmark/actions/runs/{github_run_id}"
issue_config: Dict[str, str] = {
"start": control_commit,
"end": treatment_commit,
"test_details": troubled_tests,
"control_only_tests": control_only_tests,
"treatment_only_tests": treatment_only_tests,
"runtime_regressions_msg": runtime_regressions_msg,
"github_run_url": github_run_url,
"owner": owner
}
issue_body = GITHUB_ISSUE_TEMPLATE.format(**issue_config)
print(issue_body)
with open(output_path, "w") as f:
f.write(issue_body)
def get_best_start_date(latest_metrics_jsons: List[str], end_date: datetime) -> Optional[datetime]:
"""Get the date closest to `end_date` from `latest_metrics_jsons`"""
for metrics_json in latest_metrics_jsons:
start_datetime = get_date_from_metrics_s3_key(metrics_json)
if start_datetime < end_date:
return start_datetime
return None
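# Example (hypothetical S3 keys): if latest_metrics_jsons is ordered newest-first,
# e.g. ["metrics-2023-05-03.json", "metrics-2023-05-01.json"], and end_date is
# 2023-05-02, the first key strictly older than end_date wins, i.e. the 2023-05-01 entry.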
def get_metrics_by_date(latest_metrics_jsons: List[str], pick_date: datetime):
pick_metrics_json_key: Optional[str] = None
for metrics_json_key in latest_metrics_jsons:
metric_datetime = get_date_from_metrics_s3_key(metrics_json_key)
        # Use the latest metric file on the same day
if metric_datetime.date() == pick_date.date():
pick_metrics_json_key = metrics_json_key
break
assert pick_metrics_json_key, f"Selected date {pick_date} is not found in the latest_metrics_jsons: {latest_metrics_jsons}"
s3 = S3Client(USERBENCHMARK_S3_BUCKET, USERBENCHMARK_S3_OBJECT)
metrics_json = s3.get_file_as_json(pick_metrics_json_key)
return metrics_json
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Local metrics file comparison
parser.add_argument("--control", default=None, help="The control group metrics file for comparison. "
"If unprovided, will attempt to download and compare the previous JSON from S3 "
"within the past week. The platform flag must be specified in this case.")
parser.add_argument("--treatment", default=None, help="The treatment metrics file for comparison.")
# S3 metrics file comparison
parser.add_argument("--name", help="Name of the userbenchmark to detect regression.")
parser.add_argument("--platform", choices=PLATFORMS, default=None, help="The name of platform of the regression.")
parser.add_argument("--start-date", default=None, help="The start date to detect regression.")
parser.add_argument("--end-date", default=None, help="The latest date to detect regression.")
# output file path
parser.add_argument("--output", default=None, help="Output path to print the regression detection file.")
# GitHub issue details
parser.add_argument("--owner", nargs="*", default=["@xuzhao9"], help="Owner(s) to cc on regression issues, e.g., @janeyx99.")
parser.add_argument("--gh-issue-path", default="gh-issue.md", help="Output path to print the issue body")
parser.add_argument("--errors-path", default="errors.txt",
help="Path to errors log generated by the benchmarks run. " +
"Its existence ONLY is used to detect whether runtime regressions occurred.")
args = parser.parse_args()
# User provided both control and treatment files
if args.control and args.treatment:
with open(args.control, "r") as cfptr:
control = json.load(cfptr)
with open(args.treatment, "r") as tfptr:
treatment = json.load(tfptr)
output_path = args.output if args.output else get_default_output_path(control["name"])
regressions_dict = generate_regression_dict(control, treatment)
process_regressions_into_yaml(regressions_dict, output_path, args.control, args.treatment)
exit(0)
# Query S3 to get control and treatment json files
if not args.platform:
raise ValueError("A platform must be specified with the --platform flag to retrieve the "
"previous metrics JSONs as control from S3.")
    # The user only provided the treatment file; download the control file from S3
control, treatment = None, None
if not args.control and args.treatment:
json_path = Path(args.treatment)
assert json_path.exists(), f"Specified result json path {args.treatment} does not exist."
end_date: datetime = datetime.strptime(get_date_from_metrics(json_path.stem), "%Y-%m-%d")
userbenchmark_name: str = get_ub_name(args.treatment)
with open(json_path, "r") as cfptr:
treatment = json.load(cfptr)
else:
assert args.name, f"To detect regression with S3, you must specify a userbenchmark name."
userbenchmark_name = args.name
end_date = datetime.strptime(args.end_date, "%Y-%m-%d")
available_metrics_jsons = get_latest_jsons_in_s3_from_last_n_days(userbenchmark_name, args.platform, end_date, ndays=7)
# Download control from S3
if len(available_metrics_jsons) == 0:
raise RuntimeWarning(f"No previous JSONS in a week found to compare towards the end date {end_date}. No regression info has been generated.")
print(f"Found metrics json files on S3: {available_metrics_jsons}")
start_date = args.start_date if args.start_date else get_best_start_date(available_metrics_jsons, end_date)
if not start_date:
raise RuntimeWarning(f"No start date in previous JSONS found to compare towards the end date {end_date}. User specified start date: {args.start_date}. " +
f"Available JSON dates: {available_metrics_jsons.keys()}. No regression info has been generated.")
print(f"[TorchBench Regression Detector] Detecting regression of {userbenchmark_name} on platform {args.platform}, start date: {start_date}, end date: {end_date}.")
control = get_metrics_by_date(available_metrics_jsons, start_date) if not control else control
treatment = get_metrics_by_date(available_metrics_jsons, end_date) if not treatment else treatment
regressions_dict = generate_regression_dict(control, treatment)
output_path = args.output if args.output else get_default_output_path(control["name"])
process_regressions_into_yaml(regressions_dict, output_path, args.control, args.treatment)
owner = " ".join(args.owner) if args.owner else DEFAULT_GH_ISSUE_OWNER
process_regressions_into_gh_issue(regressions_dict, owner, args.gh_issue_path, args.errors_path)
|
import argparse
import json
from collections import namedtuple
Result = namedtuple("Result", ["name", "base_time", "diff_time"])
def get_times(pytest_data):
return {b["name"]: b["stats"]["mean"] for b in pytest_data["benchmarks"]}
parser = argparse.ArgumentParser("compare two pytest jsons")
parser.add_argument('base', help="base json file")
parser.add_argument('diff', help='diff json file')
parser.add_argument('--format', default='md', type=str, help='output format (csv, md, json, table)')
args = parser.parse_args()
with open(args.base, "r") as base:
base_times = get_times(json.load(base))
with open(args.diff, "r") as diff:
diff_times = get_times(json.load(diff))
all_keys = set(base_times.keys()).union(diff_times.keys())
results = [
Result(name, base_times.get(name, float("nan")), diff_times.get(name, float("nan")))
for name in sorted(all_keys)
]
header_fmt = {'table' : '{:48s} {:>13s} {:>15s} {:>10s}',
'md' : '| {:48s} | {:>13s} | {:>15s} | {:>10s} |',
'csv' : '{:s}, {:s}, {:s}, {:s}'}
data_fmt = {'table' : '{:48s} {:13.6f} {:15.6f} {:9.1f}%',
'md' : '| {:48s} | {:13.6f} | {:15.6f} | {:9.1f}% |',
'csv' : '{:s}, {:.6f}, {:.6f}, {:.2f}%'}
if args.format in ['table', 'md', 'csv']:
header_fmt_str = header_fmt[args.format]
data_fmt_str = data_fmt[args.format]
print(header_fmt_str.format("name", "base time (s)", "diff time (s)", "% change"))
if args.format == 'md':
print(header_fmt_str.format(":---", "---:", "---:", "---:"))
for r in results:
print(data_fmt_str.format(r.name, r.base_time, r.diff_time, (r.diff_time / r.base_time - 1.0) * 100.0))
elif args.format == 'json':
    # serialize as objects so that field names are preserved in the JSON output
    print(json.dumps([r._asdict() for r in results]))
else:
raise ValueError('Unknown output format: ' + args.format)
|
#!/usr/bin/env python
from collections import Counter, defaultdict
from functools import partial
from torch.cuda import synchronize
from typing import Any, Dict, Callable, Optional
import argparse
import gc
import logging
import os
import pandas as pd
import re
import time
import warnings
os.environ["FX_PATCH_GETITEM"] = "1" # make BERT fx.symbolic_trace
from torchbenchmark import list_models
from torch.fx import symbolic_trace, Node, GraphModule
from torch.fx.interpreter import Interpreter
import torch
# These do not fx.symbolic_trace()
SKIP = {"attention_is_all_you_need_pytorch", "demucs", "dlrm", "maml",
"yolov3", "tacotron2", "moco", "Super_SloMo"}
class ProfileStats(object):
@staticmethod
def _norm(cnt: Counter):
""" Normalize to unit length """
total = sum(cnt.values())
return Counter({k: v / total for k, v in cnt.items()})
def __init__(self, get_name: Optional[Callable]):
super(ProfileStats, self).__init__()
self.times: Dict[str, float] = Counter()
self.counts: Dict[str, int] = Counter()
self.get_name = get_name
def record(self, node: Node, sec: float):
""" Record timings of a single call """
name = self.get_name(node)
self.times[name] += sec
self.counts[name] += 1
def summary(self, n=5):
most_common = self._norm(self.times).most_common(n - 1)
return " ".join([f"{k}:{v:.0%}" for k, v in most_common] +
[f"other:{1.0 - sum(v for k, v in most_common):.0%}"])
class ProfileAggregate(ProfileStats):
def __init__(self, name: str):
super(ProfileAggregate, self).__init__(None)
self.df = pd.DataFrame()
self.name = name
def update(self, other: ProfileStats, name):
""" Merge stats from a finished benchmark run into this """
nt = self._norm(other.times).most_common(None)
self.times.update(nt)
self.counts.update(self._norm(other.counts))
        # DataFrame.append was removed in pandas 2.x; build up the frame with pd.concat instead.
        self.df = pd.concat([self.df, pd.DataFrame(
            [[t for n, t in nt]],
            index=[name],
            columns=[n for n, t in nt],
        )])
def save(self):
df = self.df.fillna(0.0).transpose()
df.insert(0, "AVERAGE", df.mean(axis=1))
df.sort_values("AVERAGE", ascending=False, inplace=True)
df.to_csv(f"{self.name}.csv")
print(f"wrote {self.name}.csv")
PROFILES = [
ProfileAggregate("operators"),
ProfileAggregate("successors1"),
ProfileAggregate("successors2"),
ProfileAggregate("predecessors1"),
ProfileAggregate("predecessors2"),
]
class FXProfiler(Interpreter):
def __init__(self, module: GraphModule):
super(FXProfiler, self).__init__(module)
self.profile_stats = [
ProfileStats(self.get_name),
ProfileStats(partial(self.succ_name, depth=2)),
ProfileStats(partial(self.succ_name, depth=3)),
ProfileStats(partial(self.pred_name, depth=2)),
ProfileStats(partial(self.pred_name, depth=3)),
]
self.successors = defaultdict(list)
self.predecessors = defaultdict(list)
for node in self.module.graph.nodes:
def visit(other_node):
self.successors[other_node].append(node)
self.predecessors[node].append(other_node)
torch.fx.map_arg((node.args, node.kwargs), visit)
def run_node(self, n: Node) -> Any:
""" Timing wrapper around executing an FX Node """
start = time.perf_counter()
result = super().run_node(n)
synchronize()
sec = time.perf_counter() - start
for prof in self.profile_stats:
prof.record(n, sec)
return result
_op_node_to_name = {
"call_function": lambda i, t: t.__name__,
"call_method": lambda i, t: t,
"call_module": lambda i, t: type(i.fetch_attr(t)).__name__,
"get_attr": lambda i, t: "get_attr",
"output": lambda i, t: "output",
"placeholder": lambda i, t: "placeholder",
}
    def get_name(self, n: Node) -> str:
        """ Converts a Node to a string name """
return self._op_node_to_name[n.op](self, n.target).lower()
    def pred_name(self, node: Node, depth: int) -> str:
""" A string name that includes names of predecessor nodes """
if depth <= 1:
return self.get_name(node)
pred_str = ','.join(self.pred_name(x, depth - 1) for x in self.predecessors[node])
return f"{self.get_name(node)}({pred_str})"
    def succ_name(self, node: Node, depth: int) -> str:
""" A string name that includes names of successor nodes """
s = self.successors[node]
if depth <= 1 or len(s) == 0:
return self.get_name(node)
elif len(s) > 1:
succ_str = "MANY"
else:
succ_str = self.succ_name(s[0], depth - 1)
return f"{self.get_name(node)}->{succ_str}"
def profile(device, name, model, example_inputs, args):
model = torch.fx.symbolic_trace(model)
prof = FXProfiler(model)
for _ in range(args.warmup):
model(*example_inputs)
for _ in range(args.repeat):
synchronize()
prof.run(*example_inputs)
for aggregate, stats in zip(PROFILES, prof.profile_stats):
print(f"{device:4} {name:20} {aggregate.name:13} {stats.summary()}")
aggregate.update(stats, name=name)
return model
def short_name(name, limit=20):
""" Truncate a model name to limit chars"""
return name if len(name) <= limit else f"{name[:limit - 3].rstrip('_')}..."
def iter_models(args):
for benchmark_cls in list_models():
if (not re.search("|".join(args.filter), benchmark_cls.name, re.I) or
re.search("|".join(args.exclude), benchmark_cls.name, re.I) or
benchmark_cls.name in SKIP):
continue
try:
benchmark = benchmark_cls(test="eval", device=args.device, jit=False)
model, example_inputs = benchmark.get_module()
model.eval()
gc.collect()
yield short_name(benchmark.name), model, example_inputs
except NotImplementedError:
pass
def noop():
pass
def main(args=None):
parser = argparse.ArgumentParser()
parser.add_argument("--filter", "-k", action="append",
help="filter benchmarks")
parser.add_argument("--exclude", "-x", action="append",
help="filter benchmarks")
parser.add_argument("--device", "-d", help="cpu or cuda")
parser.add_argument("--warmup", type=int, default=1,
help="warmup runs to do")
parser.add_argument("--repeat", "-n", type=int, default=10,
help="number of timing runs")
parser.add_argument("--threads", "-p", type=int,
help="number threads")
parser.add_argument("--cpu-fusion", action="store_true",
help="enable can_fuse_on_cpu")
parser.add_argument("--no-skip", "-a", action="store_true",
help="run models that don't fx cleanly")
args = parser.parse_args(args)
# defaults
args.device = args.device or "cpu"
args.filter = args.filter or [r"."]
args.exclude = args.exclude or [r"^$"]
if args.device == "cpu":
global synchronize
synchronize = noop
if args.no_skip:
SKIP.clear()
if args.cpu_fusion:
torch._C._jit_override_can_fuse_on_cpu(True)
if args.threads:
torch.set_num_threads(args.threads)
for name, model, example_inputs in iter_models(args):
profile(args.device, name, model, example_inputs, args)
for prof in PROFILES:
prof.save()
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
warnings.filterwarnings("ignore")
main()
|
"""bisection.py
Runs bisection to determine PRs that cause performance change.
It assumes that the pytorch, torchbench, torchtext, torchvision, and torchaudio repositories provided are all clean with the latest code.
By default, the torchaudio, torchvision and torchtext packages will be fixed to the latest commit on the same pytorch commit date.
Usage:
python bisection.py --work-dir <WORK-DIR> \
--pytorch-src <PYTORCH_SRC_DIR> \
--torchbench-src <TORCHBENCH_SRC_DIR> \
--config <BISECT_CONFIG> --output <OUTPUT_FILE_PATH>
"""
import os
import json
import shutil
import yaml
import argparse
from tabulate import tabulate
import re
import subprocess
from datetime import datetime
from typing import Optional, List, Dict, Tuple
from torchbenchmark.util import gitutils
from utils.cuda_utils import prepare_cuda_env, DEFAULT_CUDA_VERSION
TORCH_GITREPO="https://github.com/pytorch/pytorch.git"
TORCHBENCH_GITREPO="https://github.com/pytorch/benchmark.git"
TORCHBENCH_DEPS = {
"torchdata": (os.path.expandvars("${HOME}/data"), "main"),
"torchtext": (os.path.expandvars("${HOME}/text"), "main"),
"torchvision": (os.path.expandvars("${HOME}/vision"), "main"),
"torchaudio": (os.path.expandvars("${HOME}/audio"), "main"),
}
def exist_dir_path(string):
if os.path.isdir(string):
return string
else:
raise NotADirectoryError(string)
# Translates test name to filter
# For example, ["test_eval[yolov3-cpu-eager]", "test_train[yolov3-gpu-eager]"]
# -> "((eval and yolov3 and cpu and eager) or (train and yolov3 and gpu and eager))"
# If targets is None, run everything except slomo
def targets_to_bmfilter(targets: List[str], models: List[str]) -> str:
bmfilter_names = []
    if targets is None or len(targets) == 0:
return "(not slomo)"
for test in targets:
regex = re.compile("test_(train|eval)\[([a-zA-Z0-9_]+)-([a-z]+)-([a-z]+)\]")
m = regex.match(test)
if not m:
if test in models:
partial_name = test
else:
print(f"Cannot recognize the TorchBench filter: {test}. Exit.")
exit(1)
else:
partial_name = " and ".join(m.groups())
bmfilter_names.append(f"({partial_name})")
return "(" + " or ".join(bmfilter_names) + ")"
# Find the latest non-empty json file in the directory
def find_latest_json_file(result_dir: str):
json_files = list(filter(lambda x: x.endswith(".json"), os.listdir(result_dir)))
json_files.sort(reverse=True)
for f in json_files:
# Return the first non-empty json file
json_path = os.path.join(result_dir, f)
if os.path.exists(json_path) and os.stat(json_path).st_size:
return json_path
print(f"Can't find non-empty json files in path: {result_dir}")
return str()
def get_delta_str(reference: float, current: float) -> str:
    # Note: the delta is computed relative to the current value, not the reference.
    delta_num = ((current - reference) / current * 100)
    delta_str = "{:+.3f}".format(delta_num) + "%"
if (abs(delta_num) >= 5):
delta_str = delta_str + "*"
return delta_str
def get_means(data):
rc = dict()
for param in data["benchmarks"]:
name = param["name"]
mean = param["stats"]["mean"]
rc[name] = mean
return rc
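# Worked example for get_delta_str() (illustrative numbers): reference=1.00 s and
# current=1.25 s give (1.25 - 1.00) / 1.25 * 100 = +20%, and any delta whose absolute
# value is at least 5% is flagged with a trailing "*".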
def analyze_abtest_result_dir(result_dir: str):
dirs = [ os.path.join(result_dir, name) for name in os.listdir(result_dir) if os.path.isdir(os.path.join(result_dir, name)) ]
delta = False
json_files = list(filter(len, map(find_latest_json_file, dirs)))
out = [['Benchmark']]
assert json_files, f"Don't find benchmark result files in {result_dir}."
    # If there are exactly two json files, treat it as an A/B test and print the delta of the means
if len(json_files) == 2:
delta = True
with open(json_files[0], "r") as fp:
cur_result = json.load(fp)
means = get_means(cur_result)
for key in means:
out.append([])
out[-1].append(key)
for index, json_file in enumerate(json_files):
with open(json_file, "r") as fp:
jsonobj = json.load(fp)
header = f"Run {os.path.basename(os.path.dirname(json_file))}"
out[0].append(header)
means = get_means(jsonobj)
if delta and index == 0:
reference = means
for key_index, key in enumerate(means):
out[key_index+1].append(means[key])
if delta and index == 1:
out[0].append("Delta")
out[key_index+1].append(get_delta_str(reference[key], means[key]))
out_str = tabulate(out, headers='firstrow')
return out_str
class Commit:
sha: str
ctime: str
digest: Dict[str, float]
def __init__(self, sha, ctime):
self.sha = sha
self.ctime = ctime
self.digest = None
def __str__(self):
return self.sha
class TorchSource:
srcpath: str
build_lazy: bool
commits: List[Commit]
build_env: os._Environ
# Map from commit SHA to index in commits
commit_dict: Dict[str, int]
def __init__(self, srcpath: str, build_lazy: bool):
self.srcpath = srcpath
self.build_lazy = build_lazy
self.commits = []
self.commit_dict = dict()
def prep(self, build_env: os._Environ) -> bool:
repo_origin_url = gitutils.get_git_origin(self.srcpath)
        if repo_origin_url != TORCH_GITREPO:
print(f"WARNING: Unmatched repo origin url: {repo_origin_url} with standard {TORCH_GITREPO}")
self.update_repos()
# Clean up the existing packages
self.cleanup()
self.build_env = build_env
return True
# Update pytorch, torchtext, torchvision, and torchaudio repo
def update_repos(self):
repos = [(self.srcpath, "main")]
repos.extend(TORCHBENCH_DEPS.values())
for (repo, branch) in repos:
gitutils.clean_git_repo(repo)
assert gitutils.update_git_repo(repo, branch), f"Failed to update {branch} branch of repository {repo}."
# Get all commits between start and end, save them in self.commits
def init_commits(self, start: str, end: str, abtest: bool) -> bool:
if not abtest:
commits = gitutils.get_git_commits(self.srcpath, start, end)
else:
commits = [start, end]
if not commits or len(commits) < 2:
print(f"Failed to retrieve commits from {start} to {end} in {self.srcpath}.")
return False
for count, commit in enumerate(commits):
ctime = gitutils.get_git_commit_date(self.srcpath, commit)
self.commits.append(Commit(sha=commit, ctime=ctime))
self.commit_dict[commit] = count
return True
def get_mid_commit(self, left: Commit, right: Commit) -> Optional[Commit]:
left_index = self.commit_dict[left.sha]
right_index = self.commit_dict[right.sha]
if right_index == left_index + 1:
return None
else:
return self.commits[int((left_index + right_index) / 2)]
def setup_build_env(self, env) -> Dict[str, str]:
env["USE_CUDA"] = "1"
env["BUILD_CAFFE2_OPS"] = "0"
# Do not build the test
env["BUILD_TEST"] = "0"
env["USE_MKLDNN"] = "1"
env["USE_MKL"] = "1"
env["USE_CUDNN"] = "1"
env["CMAKE_PREFIX_PATH"] = env["CONDA_PREFIX"]
return env
# Checkout the last commit of dependencies on date
def checkout_deps(self, cdate: datetime):
for pkg in TORCHBENCH_DEPS:
pkg_path, branch = TORCHBENCH_DEPS[pkg]
gitutils.checkout_git_branch(pkg_path, branch)
dep_commit = gitutils.get_git_commit_on_date(pkg_path, cdate)
print(f"Checking out {pkg} commit {dep_commit} ...", end="", flush=True)
            assert dep_commit, f"Failed to find the commit on {cdate} of {pkg}"
            assert gitutils.checkout_git_commit(pkg_path, dep_commit), f"Failed to checkout commit {dep_commit} of {pkg}"
print("done.")
# Install dependencies such as torchtext and torchvision
def build_install_deps(self, build_env):
# Build torchdata (required by torchtext)
print(f"Building torchdata ...", end="", flush=True)
command = "python setup.py install"
subprocess.check_call(command, cwd=TORCHBENCH_DEPS["torchdata"][0], env=build_env, shell=True)
print("done")
# Build torchvision
print(f"Building torchvision ...", end="", flush=True)
command = "python setup.py install"
subprocess.check_call(command, cwd=TORCHBENCH_DEPS["torchvision"][0], env=build_env, shell=True)
print("done")
# Build torchtext
print(f"Building torchtext ...", end="", flush=True)
command = "python setup.py clean install"
        subprocess.check_call(command, cwd=TORCHBENCH_DEPS["torchtext"][0], env=build_env, shell=True)
        print("done")
# Build torchaudio
print(f"Building torchaudio ...", end="", flush=True)
command = "python setup.py clean install"
subprocess.check_call(command, cwd=TORCHBENCH_DEPS["torchaudio"][0], env=build_env, shell=True)
print("done")
def _build_lazy_tensor(self, commit: Commit, build_env: Dict[str, str]):
if self.build_lazy:
print(f"Building pytorch lazy tensor on {commit.sha} ...", end="", flush=True)
lazy_tensor_path = os.path.join(self.srcpath, "lazy_tensor_core")
command = "./scripts/apply_patches.sh"
            subprocess.check_call(command, cwd=lazy_tensor_path, env=build_env, shell=True)
            command = "python setup.py install"
            subprocess.check_call(command, cwd=lazy_tensor_path, env=build_env, shell=True)
print("done")
def build(self, commit: Commit):
# checkout pytorch commit
print(f"Checking out pytorch commit {commit.sha} ...", end="", flush=True)
gitutils.checkout_git_commit(self.srcpath, commit.sha)
print("done.")
# checkout pytorch deps commit
ctime = datetime.strptime(commit.ctime.split(" ")[0], "%Y-%m-%d")
self.checkout_deps(ctime)
# setup environment variables
build_env = self.setup_build_env(self.build_env)
# build pytorch
print(f"Building pytorch commit {commit.sha} ...", end="", flush=True)
        # Check if version.py exists; if it does, remove it.
        # This forces pytorch to regenerate torch/version.py during an incremental build.
version_py_path = os.path.join(self.srcpath, "torch/version.py")
if os.path.exists(version_py_path):
os.remove(version_py_path)
try:
command = "python setup.py install"
subprocess.check_call(command, cwd=self.srcpath, env=build_env, shell=True)
command_testbuild = "python -c 'import torch'"
subprocess.check_call(command_testbuild, cwd=os.environ["HOME"], env=build_env, shell=True)
except subprocess.CalledProcessError:
# Remove the build directory, then try build it again
build_path = os.path.join(self.srcpath, "build")
if os.path.exists(build_path):
shutil.rmtree(build_path)
subprocess.check_call(command, cwd=self.srcpath, env=build_env, shell=True)
print("done")
# build pytorch lazy tensor if needed
self._build_lazy_tensor(commit, build_env)
self.build_install_deps(build_env)
def cleanup(self):
packages = ["torch"] + list(TORCHBENCH_DEPS.keys())
CLEANUP_ROUND = 5
# Clean up multiple times to make sure the packages are all uninstalled
for _ in range(CLEANUP_ROUND):
command = "pip uninstall -y " + " ".join(packages) + " || true"
subprocess.check_call(command, shell=True)
print("done")
class TorchBench:
srcpath: str # path to pytorch/benchmark source code
branch: str
timelimit: int # timeout limit in minutes
workdir: str
models: List[str]
first_time: bool
torch_src: TorchSource
bench_env: os._Environ
def __init__(self, srcpath: str,
torch_src: TorchSource,
timelimit: int,
workdir: str):
self.srcpath = srcpath
self.torch_src = torch_src
self.timelimit = timelimit
self.workdir = workdir
self.first_time = True
self.models = list()
def prep(self, bench_env) -> bool:
self.bench_env = bench_env
# Verify the code in srcpath is pytorch/benchmark
repo_origin_url = gitutils.get_git_origin(self.srcpath)
        if repo_origin_url != TORCHBENCH_GITREPO:
print(f"WARNING: Unmatched repo origin url: {repo_origin_url} with standard {TORCHBENCH_GITREPO}")
# get the name of current branch
self.branch = gitutils.get_current_branch(self.srcpath)
# get list of models
self.models = [ model for model in os.listdir(os.path.join(self.srcpath, "torchbenchmark", "models"))
if os.path.isdir(os.path.join(self.srcpath, "torchbenchmark", "models", model)) ]
return True
def _install_benchmark(self):
"Install and build TorchBench dependencies"
command = ["python", "install.py"]
subprocess.check_call(command, cwd=self.srcpath, env=self.bench_env, shell=False)
def run_benchmark(self, commit: Commit, targets: List[str]) -> str:
# Return the result json file path
output_dir = os.path.join(self.workdir, commit.sha)
# If the directory already exists, clear its contents
if os.path.exists(output_dir):
            assert os.path.isdir(output_dir), f"Output path {output_dir} must be a directory."
filelist = [ f for f in os.listdir(output_dir) ]
for f in filelist:
os.remove(os.path.join(output_dir, f))
else:
os.mkdir(output_dir)
bmfilter = targets_to_bmfilter(targets, self.models)
        # If this is the first time running the benchmark, install the dependencies first
if self.first_time:
self._install_benchmark()
self.first_time = False
print(f"Running TorchBench for commit: {commit.sha}, filter {bmfilter} ...", end="", flush=True)
command = f"""bash .github/scripts/run.sh "{output_dir}" "{bmfilter}" 2>&1 | tee {output_dir}/benchmark.log"""
try:
subprocess.check_call(command, cwd=self.srcpath, env=self.bench_env, shell=True, timeout=self.timelimit * 60)
except subprocess.TimeoutExpired:
print(f"Benchmark timeout for {commit.sha}. Result will be None.")
return output_dir
print("done.")
return output_dir
def gen_digest(self, result_dir: str, targets: List[str]) -> Dict[str, float]:
filelist = [ f for f in os.listdir(result_dir) if f.endswith(".json") ]
out = dict()
if not len(filelist):
            print(f"No json file found in {result_dir}. Returning an empty digest.")
return out
# Use the first json as the benchmark data file
data_file = os.path.join(result_dir, filelist[0])
if not os.stat(data_file).st_size:
            print(f"Empty json file {filelist[0]} in {result_dir}. Returning an empty digest.")
return out
with open(data_file, "r") as df:
data = json.load(df)
# Fill in targets if it is None
        if targets is None:
targets = list()
for each in data["benchmarks"]:
targets.append(each["name"])
old_targets = targets.copy()
for t in filter(lambda x: x in self.models, old_targets):
targets.remove(t)
names = filter(lambda y: t in y, map(lambda x: x["name"], data["benchmarks"]))
targets.extend(list(names))
for each in data["benchmarks"]:
if each["name"] in targets:
out[each["name"]] = each["stats"]["mean"]
# Make sure all target tests are available
for target in targets:
            assert target in out, f"Can't find benchmark result of {target} in {filelist[0]}."
return out
def get_digest(self, commit: Commit, targets: List[str], debug: bool) -> Dict[str, float]:
# digest is cached
if commit.digest is not None:
return commit.digest
# if debug mode, skip the build and benchmark run
if debug:
result_dir = os.path.join(self.workdir, commit.sha)
if os.path.isdir(result_dir):
filelist = [ f for f in os.listdir(result_dir) if f.endswith(".json") ]
if len(filelist):
data_file = os.path.join(result_dir, filelist[0])
if os.stat(data_file).st_size:
commit.digest = self.gen_digest(result_dir, targets)
return commit.digest
# Build pytorch and its dependencies
self.torch_src.build(commit)
# Run benchmark
result_dir = self.run_benchmark(commit, targets)
commit.digest = self.gen_digest(result_dir, targets)
print(f"Cleaning up packages from commit {commit.sha} ...", end="", flush=True)
self.torch_src.cleanup()
return commit.digest
class TorchBenchBisection:
workdir: str
start: str
end: str
threshold: float
direction: str
targets: List[str]
# left commit, right commit, targets to test
bisectq: List[Tuple[Commit, Commit, List[str]]]
result: List[Tuple[Commit, Commit]]
torch_src: TorchSource
bench: TorchBench
output_json: str
debug: bool
abtest: bool
def __init__(self,
workdir: str,
torch_src: str,
bench_src: str,
start: str,
end: str,
threshold: float,
direction: str,
timeout: int,
targets: List[str],
output_json: str,
build_lazy: bool = False,
debug: bool = False):
self.workdir = workdir
self.start = start
self.end = end
self.threshold = threshold
self.direction = direction
self.targets = targets
self.bisectq = list()
self.result = list()
self.torch_src = TorchSource(srcpath = torch_src, build_lazy=build_lazy)
self.bench = TorchBench(srcpath = bench_src,
torch_src = self.torch_src,
timelimit = timeout,
workdir = self.workdir)
self.output_json = output_json
self.debug = debug
# Special treatment for abtest
self.abtest = False
if self.threshold == 100.0 and self.direction == "decrease":
self.abtest = True
# Left: older commit; right: newer commit
# Return: List of targets that satisfy the regression rule: <threshold, direction>
def regression(self, left: Commit, right: Commit, targets: List[str]) -> List[str]:
# If uncalculated, commit.digest will be None
        assert left.digest, f"Commit {left.sha} must have a digest"
        assert right.digest, f"Commit {right.sha} must have a digest"
out = []
for target in targets:
            # digest could be empty if the benchmark timed out
            left_mean = left.digest[target] if len(left.digest) else 0
            right_mean = right.digest[target] if len(right.digest) else 0
            # If either left or right timed out, diff is 100. Otherwise use the min mean value to calculate diff.
            diff = abs(left_mean - right_mean) / min(left_mean, right_mean) * 100 if min(left_mean, right_mean) else 100
            # If both timed out, diff is zero
            diff = 0 if not max(left_mean, right_mean) else diff
print(f"Target {target}: left commit {left.sha} mean {left_mean} vs. right commit {right.sha} mean {right_mean}. Diff: {diff}.")
if diff >= self.threshold:
if self.direction == "increase" and left_mean < right_mean:
# Time increase == performance regression
out.append(target)
elif self.direction == "decrease" and left_mean > right_mean:
# Time decrease == performance optimization
out.append(target)
elif self.direction == "both":
out.append(target)
return out
def prep(self) -> bool:
base_build_env = prepare_cuda_env(cuda_version=DEFAULT_CUDA_VERSION)
if not self.torch_src.prep(base_build_env):
return False
if not self.torch_src.init_commits(self.start, self.end, self.abtest):
return False
if not self.bench.prep(base_build_env):
return False
left_commit = self.torch_src.commits[0]
right_commit = self.torch_src.commits[-1]
self.bisectq.append((left_commit, right_commit, self.targets))
return True
def run(self):
while len(self.bisectq):
(left, right, targets) = self.bisectq.pop(0)
self.bench.get_digest(left, targets, self.debug)
self.bench.get_digest(right, targets, self.debug)
            if targets is None and len(left.digest):
                targets = left.digest.keys()
            if targets is None and len(right.digest):
                targets = right.digest.keys()
updated_targets = self.regression(left, right, targets)
if len(updated_targets):
mid = self.torch_src.get_mid_commit(left, right)
                if mid is None:
self.result.append((left, right))
else:
self.bisectq.append((left, mid, updated_targets))
self.bisectq.append((mid, right, updated_targets))
def output(self):
json_obj = dict()
json_obj["start"] = self.start
json_obj["end"] = self.end
json_obj["threshold"] = self.threshold
json_obj["timeout"] = self.bench.timelimit
json_obj["torchbench_branch"] = self.bench.branch
json_obj["result"] = []
for res in self.result:
r = dict()
r["commit1"] = res[0].sha
r["commit1_time"] = res[0].ctime
r["commit1_digest"] = res[0].digest if len(res[0].digest) else "timeout"
r["commit2"] = res[1].sha
r["commit2_time"] = res[1].ctime
r["commit2_digest"] = res[1].digest if len(res[1].digest) else "timeout"
json_obj["result"].append(r)
with open(self.output_json, 'w') as outfile:
json.dump(json_obj, outfile, indent=2)
def output_abtest_result(self):
abtest_result = analyze_abtest_result_dir(self.workdir)
with open(self.output_json, 'w') as outfile:
outfile.write(abtest_result)
print(abtest_result)
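# Worked example of the regression rule above (illustrative numbers): with
# threshold=10 and direction="increase", a target whose mean moves from
# left_mean=1.0s to right_mean=1.2s gives
# diff = |1.0 - 1.2| / min(1.0, 1.2) * 100 = 20 >= 10, so the target is kept
# and the bisection recurses into the two halves around the midpoint commit.
def _example_regression_diff(left_mean=1.0, right_mean=1.2):
    # Mirrors the arithmetic in TorchBenchBisection.regression() for one target.
    if not max(left_mean, right_mean):
        return 0.0
    if not min(left_mean, right_mean):
        return 100.0
    return abs(left_mean - right_mean) / min(left_mean, right_mean) * 100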
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--work-dir",
help="bisection working directory",
type=exist_dir_path)
parser.add_argument("--pytorch-src",
help="the directory of pytorch source code git repository",
type=exist_dir_path)
parser.add_argument("--torchbench-src",
help="the directory of torchbench source code git repository",
type=exist_dir_path)
parser.add_argument("--config",
help="the bisection configuration in YAML format")
parser.add_argument("--output",
help="the output json file")
parser.add_argument("--analyze-result",
help="specify the output result directory to analyze")
# by default, do not build lazy tensor
parser.add_argument("--build-lazy",
action='store_true',
help="build lazy tensor feature in PyTorch")
# by default, debug mode is disabled
parser.add_argument("--debug",
help="run in debug mode, if the result json exists, use it directly",
action='store_true')
args = parser.parse_args()
    # If we only need to print the overview of a test result, there is no need to run the actual bisection
if args.analyze_result:
print(analyze_abtest_result_dir(args.analyze_result))
exit(0)
with open(args.config, "r") as f:
bisect_config = yaml.full_load(f)
# sanity checks
valid_directions = ["increase", "decrease", "both"]
assert("start" in bisect_config), "Illegal bisection config, must specify start commit SHA."
assert("end" in bisect_config), "Illegal bisection config, must specify end commit SHA."
assert("threshold" in bisect_config), "Illegal bisection config, must specify threshold."
assert("direction" in bisect_config), "Illegal bisection config, must specify direction."
assert(bisect_config["direction"] in valid_directions), "We only support increase, decrease, or both directions"
assert("timeout" in bisect_config), "Illegal bisection config, must specify timeout."
targets = None
if "tests" in bisect_config:
targets = bisect_config["tests"]
bisection = TorchBenchBisection(workdir=args.work_dir,
torch_src=args.pytorch_src,
bench_src=args.torchbench_src,
start=bisect_config["start"],
end=bisect_config["end"],
threshold=bisect_config["threshold"],
direction=bisect_config["direction"],
timeout=bisect_config["timeout"],
targets=targets,
output_json=args.output,
build_lazy=args.build_lazy,
debug=args.debug)
    assert bisection.prep(), "The preconditions for bisection are not satisfied."
    print("Preparation steps ok. Commits to bisect: " + " ".join([str(x) for x in bisection.torch_src.commits]))
bisection.run()
if bisection.abtest:
bisection.output_abtest_result()
else:
bisection.output()
|
from enum import Enum
# Enum classes for each model domain and the respective tasks
# that are available in that domain.
class COMPUTER_VISION(Enum):
SEGMENTATION = "segmentation"
CLASSIFICATION = "classification"
DETECTION = "detection"
GENERATION = "generation"
PATTERN_RECOGNITION = "pattern recognition"
VIDEO_INTERPOLATION = "video interpolation"
OTHER_COMPUTER_VISION = "other computer vision"
class NLP(Enum):
TRANSLATION = "translation"
LANGUAGE_MODELING = "language modeling"
OTHER_NLP = "other nlp"
class SPEECH(Enum):
SYNTHESIS = "synthesis"
RECOGNITION = "recognition"
class RECOMMENDATION(Enum):
RECOMMENDATION = "recommendation"
class REINFORCEMENT_LEARNING(Enum):
OTHER_RL = "other rl"
class OTHER(Enum):
OTHER_TASKS = "other tasks"
class GNN(Enum):
CLASSIFICATION = "classification"
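# Usage sketch (illustrative): a benchmark model declares its domain/task pair,
# e.g. `task = GNN.CLASSIFICATION`, and tooling can read the human-readable
# label through the enum value.
def _example_task_label():
    return COMPUTER_VISION.CLASSIFICATION.value  # "classification"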
|
import contextlib
import dataclasses
import gc
import importlib
import io
import os
import pathlib
import subprocess
import sys
import tempfile
import threading
from pathlib import Path
from typing import Any, Callable, Dict, List, NoReturn, Optional, Tuple
from urllib import request
import torch
from components._impl.tasks import base as base_task
from components._impl.workers import subprocess_worker
class ModelNotFoundError(RuntimeError):
pass
REPO_PATH = Path(os.path.abspath(__file__)).parent.parent
DATA_PATH = os.path.join(REPO_PATH, "torchbenchmark", "data", ".data")
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_PATH)):
from utils import TORCH_DEPS, get_pkg_versions, proxy_suggestion
this_dir = pathlib.Path(__file__).parent.absolute()
model_dir = 'models'
internal_model_dir = "fb"
canary_model_dir = "canary_models"
install_file = 'install.py'
def _test_https(test_url: str = 'https://github.com', timeout: float = 0.5) -> bool:
try:
request.urlopen(test_url, timeout=timeout)
except OSError:
return False
return True
def _install_deps(model_path: str, verbose: bool = True) -> Tuple[bool, Any, Any]:
from .util.env_check import get_pkg_versions
run_args = [
[sys.executable, install_file],
]
run_env = os.environ.copy()
run_env["PYTHONPATH"] = this_dir.parent
run_kwargs = {
'cwd': model_path,
'check': True,
'env': run_env,
}
output_buffer = None
_, stdout_fpath = tempfile.mkstemp()
try:
output_buffer = io.FileIO(stdout_fpath, mode="w")
if os.path.exists(os.path.join(model_path, install_file)):
if not verbose:
run_kwargs['stderr'] = subprocess.STDOUT
run_kwargs['stdout'] = output_buffer
versions = get_pkg_versions(TORCH_DEPS)
subprocess.run(*run_args, **run_kwargs) # type: ignore
new_versions = get_pkg_versions(TORCH_DEPS)
if versions != new_versions:
errmsg = f"The torch packages are re-installed after installing the benchmark deps. \
Before: {versions}, after: {new_versions}"
return (False, errmsg, None)
else:
return (True, f"No install.py is found in {model_path}. Skip.", None)
except subprocess.CalledProcessError as e:
return (False, e.output, io.FileIO(stdout_fpath, mode="r").read().decode())
except Exception as e:
return (False, e, io.FileIO(stdout_fpath, mode="r").read().decode())
finally:
del output_buffer
os.remove(stdout_fpath)
return (True, None, None)
def dir_contains_file(dir, file_name) -> bool:
names = map(lambda x: x.name, filter(lambda x: x.is_file(), dir.iterdir()))
return file_name in names
def _list_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(model_dir)
    # Only load the model directories that contain an "__init__.py" file
models = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and \
(not child.name == internal_model_dir) and dir_contains_file(child, "__init__.py"))
p = p.joinpath(internal_model_dir)
if p.exists():
m = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and dir_contains_file(child, "__init__.py"))
models.extend(m)
return models
def _list_canary_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(canary_model_dir)
    # Only load the model directories that contain an "__init__.py" file
models = sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir() and \
(not child.name == internal_model_dir) and dir_contains_file(child, "__init__.py"))
return models
def _is_internal_model(model_name: str) -> bool:
p = pathlib.Path(__file__).parent.joinpath(model_dir).joinpath(internal_model_dir).joinpath(model_name)
if p.exists() and p.joinpath("__init__.py").exists():
return True
return False
def _is_canary_model(model_name: str) -> bool:
p = pathlib.Path(__file__).parent.joinpath(canary_model_dir).joinpath(model_name)
if p.exists() and p.joinpath("__init__.py").exists():
return True
return False
def setup(models: List[str] = [], verbose: bool = True, continue_on_fail: bool = False, test_mode: bool = False, allow_canary: bool = False) -> bool:
if not _test_https():
print(proxy_suggestion)
sys.exit(-1)
failures = {}
models = list(map(lambda p: p.lower(), models))
model_paths = filter(lambda p: True if not models else os.path.basename(p).lower() in models, _list_model_paths())
if allow_canary:
canary_model_paths = filter(lambda p: os.path.basename(p).lower() in models, _list_canary_model_paths())
model_paths = list(model_paths)
model_paths.extend(canary_model_paths)
for model_path in model_paths:
print(f"running setup for {model_path}...", end="", flush=True)
if test_mode:
versions = get_pkg_versions(TORCH_DEPS)
success, errmsg, stdout_stderr = _install_deps(model_path, verbose=verbose)
if test_mode:
new_versions = get_pkg_versions(TORCH_DEPS, reload=True)
if versions != new_versions:
print(f"The torch packages are re-installed after installing the benchmark model {model_path}. \
Before: {versions}, after: {new_versions}")
sys.exit(-1)
if success and errmsg and "No install.py is found" in errmsg:
print("SKIP - No install.py is found")
elif success:
print("OK")
else:
print("FAIL")
try:
errmsg = errmsg.decode()
except Exception:
pass
# If the install was very chatty, we don't want to overwhelm.
# This will not affect verbose mode, which does not catch stdout
# and stderr.
log_lines = (stdout_stderr or "").splitlines(keepends=False)
if len(log_lines) > 40:
log_lines = log_lines[:20] + ["..."] + log_lines[-20:]
stdout_stderr = "\n".join(log_lines)
if stdout_stderr:
errmsg = f"{stdout_stderr}\n\n{errmsg or ''}"
failures[model_path] = errmsg
if not continue_on_fail:
break
for model_path in failures:
print(f"Error for {model_path}:")
print("---------------------------------------------------------------------------")
print(failures[model_path])
print("---------------------------------------------------------------------------")
print()
return len(failures) == 0
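# Usage sketch (hypothetical model name and flags): the repo's install entry
# point is expected to call setup() roughly like this, printing OK / SKIP /
# FAIL per model and returning False when any install fails.
def _example_setup_call():
    return setup(models=["BERT_pytorch"], verbose=False, continue_on_fail=True)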
@dataclasses.dataclass(frozen=True)
class ModelDetails:
"""Static description of what a particular TorchBench model supports.
When parameterizing tests, we only want to generate sensible ones.
(e.g. Those where a model can be imported and supports the feature to be
tested or benchmarked.) This requires us to import the model; however many
of the models are EXTREMELY stateful, and even importing them consumes
significant system resources. As a result, we only want one (or a few)
alive at any given time.
Note that affinity cannot be solved by simply calling `torch.set_num_threads`
in the child process; this will cause PyTorch to use all of the cores but
at a much lower efficiency.
This class describes what a particular model does and does not support, so
that we can release the underlying subprocess but retain any pertinent
metadata.
"""
path: str
exists: bool
_diagnostic_msg: str
metadata: Dict[str, Any]
@property
def name(self) -> str:
return os.path.basename(self.path)
class Worker(subprocess_worker.SubprocessWorker):
"""Run subprocess using taskset if CPU affinity is set.
When GOMP_CPU_AFFINITY is set, importing `torch` in the main process has
the very surprising effect of changing the threading behavior in the
subprocess. (See https://github.com/pytorch/pytorch/issues/49971 for
details.) This is a problem, because it means that the worker is not
hermetic and also tends to force the subprocess torch to run in single
threaded mode which drastically skews results.
This can be ameliorated by calling the subprocess using `taskset`, which
allows the subprocess PyTorch to properly bind threads.
"""
@property
def args(self) -> List[str]:
affinity = os.environ.get("GOMP_CPU_AFFINITY", "")
return (
["taskset", "--cpu-list", affinity] if affinity else []
) + super().args
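# Behavior sketch (assumes GOMP_CPU_AFFINITY="0-7"; not part of the class):
# with the variable set, Worker.args prepends ["taskset", "--cpu-list", "0-7"]
# to the parent worker's argv, so the child PyTorch process keeps the intended
# CPU affinity instead of inheriting the surprising GOMP behavior noted above.
def _example_worker_argv(base_argv, affinity="0-7"):
    return (["taskset", "--cpu-list", affinity] if affinity else []) + list(base_argv)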
class ModelTask(base_task.TaskBase):
# The worker may (and often does) consume significant system resources.
# In order to ensure that runs do not interfere with each other, we only
# allow a single ModelTask to exist at a time.
_lock = threading.Lock()
def __init__(
self,
model_path: str,
timeout: Optional[float] = None,
extra_env: Optional[Dict[str, str]] = None,
) -> None:
gc.collect() # Make sure previous task has a chance to release the lock
assert self._lock.acquire(blocking=False), "Failed to acquire lock."
self._model_path = model_path
self._worker = Worker(timeout=timeout, extra_env=extra_env)
self.worker.run("import torch")
self._details: ModelDetails = ModelDetails(
**self._maybe_import_model(
package=__name__,
model_path=model_path,
)
)
def __del__(self) -> None:
self._lock.release()
@property
def worker(self) -> subprocess_worker.SubprocessWorker:
return self._worker
@property
    def model_details(self) -> ModelDetails:
return self._details
# =========================================================================
# == Import Model in the child process ====================================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
def _maybe_import_model(package: str, model_path: str) -> Dict[str, Any]:
import importlib
import os
model_name = os.path.basename(model_path)
diagnostic_msg = ""
try:
module = importlib.import_module(f'.models.{model_name}', package=package)
Model = getattr(module, 'Model', None)
if Model is None:
diagnostic_msg = f"Warning: {module} does not define attribute Model, skip it"
elif not hasattr(Model, 'name'):
Model.name = model_name
except ModuleNotFoundError as e:
Model = None
diagnostic_msg = f"Warning: Could not find dependent module {e.name} for Model {model_name}, skip it"
# Populate global namespace so subsequent calls to worker.run can access `Model`
globals()["Model"] = Model
# This will be used to populate a `ModelDetails` instance in the parent.
return {
"path": model_path,
"exists": Model is not None,
"_diagnostic_msg": diagnostic_msg,
"metadata": {}
}
# =========================================================================
# == Instantiate a concrete `model` instance ==============================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
def make_model_instance(test: str, device: str, jit: bool, batch_size: Optional[int]=None, extra_args: List[str]=[]) -> None:
Model = globals()["Model"]
model = Model(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
import gc
gc.collect()
if device == 'cuda':
torch.cuda.empty_cache()
maybe_sync = torch.cuda.synchronize
else:
maybe_sync = lambda: None
globals().update({
"model": model,
"maybe_sync": maybe_sync,
})
# =========================================================================
# == Replace the `invoke()` function in `model` instance ==================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
def replace_invoke(module_name: str, func_name: str) -> None:
import importlib
# import function from pkg
model = globals()["model"]
        try:
            module = importlib.import_module(module_name)
            inject_func = getattr(module, func_name, None)
            if inject_func is None:
                print(f"Warning: {module} does not define attribute {func_name}, skip it")
                return
        except ModuleNotFoundError as e:
            print(f"Warning: Could not find dependent module {e.name} for Model {model.name}, skip it")
            return
        model.invoke = inject_func.__get__(model)
# =========================================================================
# == Get Model attribute in the child process =============================
# =========================================================================
@base_task.run_in_worker(scoped=True)
@staticmethod
def get_model_attribute(attr: str, field: str=None, classattr: bool=False) -> Any:
if classattr:
model = globals()["Model"]
else:
model = globals()["model"]
if hasattr(model, attr):
if field:
model_attr = getattr(model, attr)
return getattr(model_attr, field)
else:
return getattr(model, attr)
else:
return None
def gc_collect(self) -> None:
self.worker.run("""
import gc
gc.collect()
""")
def del_model_instance(self):
self.worker.run("""
del model
del maybe_sync
""")
self.gc_collect()
# =========================================================================
# == Forward calls to `model` from parent to worker =======================
# =========================================================================
def set_train(self) -> None:
self.worker.run("model.set_train()")
def invoke(self) -> None:
self.worker.run("""
model.invoke()
maybe_sync()
""")
def set_eval(self) -> None:
self.worker.run("model.set_eval()")
def extract_details_train(self) -> None:
self._details.metadata["train_benchmark"] = self.worker.load_stmt("torch.backends.cudnn.benchmark")
self._details.metadata["train_deterministic"] = self.worker.load_stmt("torch.backends.cudnn.deterministic")
def check_details_train(self, device, md) -> None:
self.extract_details_train()
if device == 'cuda':
            assert md["train_benchmark"] == self._details.metadata["train_benchmark"], \
                "torch.backends.cudnn.benchmark does not match expected metadata during training."
            assert md["train_deterministic"] == self._details.metadata["train_deterministic"], \
                "torch.backends.cudnn.deterministic does not match expected metadata during training."
def extract_details_eval(self) -> None:
self._details.metadata["eval_benchmark"] = self.worker.load_stmt("torch.backends.cudnn.benchmark")
self._details.metadata["eval_deterministic"] = self.worker.load_stmt("torch.backends.cudnn.deterministic")
# FIXME: Models will use context "with torch.no_grad():", so the lifetime of no_grad will end after the eval().
# FIXME: Must incorporate this "torch.is_grad_enabled()" inside of actual eval() func.
# self._details.metadata["eval_nograd"] = not self.worker.load_stmt("torch.is_grad_enabled()")
self._details.metadata["eval_nograd"] = True
def check_details_eval(self, device, md) -> None:
self.extract_details_eval()
if device == 'cuda':
            assert md["eval_benchmark"] == self._details.metadata["eval_benchmark"], \
                "torch.backends.cudnn.benchmark does not match expected metadata during eval."
            assert md["eval_deterministic"] == self._details.metadata["eval_deterministic"], \
                "torch.backends.cudnn.deterministic does not match expected metadata during eval."
            assert md["eval_nograd"] == self._details.metadata["eval_nograd"], \
                "torch.is_grad_enabled does not match expected metadata during eval."
def check_opt_vs_noopt_jit(self) -> None:
self.worker.run("model.check_opt_vs_noopt_jit()")
@base_task.run_in_worker(scoped=True)
@staticmethod
def check_example() -> None:
model = globals()["model"]
module, example_inputs = model.get_module()
if isinstance(example_inputs, dict):
# Huggingface and GNN models pass **kwargs as arguments, not *args
module(**example_inputs)
elif isinstance(example_inputs, tuple) or isinstance(example_inputs, list):
module(*example_inputs)
else:
assert False, "example_inputs from model.get_module() must be dict, tuple, or list"
# If model implements `gen_inputs()` interface, test the first example input it generates
try:
input_iter, _size = model.gen_inputs()
next_inputs = next(input_iter)
for input in next_inputs:
if isinstance(input, dict):
# Huggingface models pass **kwargs as arguments, not *args
module(**input)
else:
module(*input)
except NotImplementedError:
# We allow models that don't implement this interface
pass
@base_task.run_in_worker(scoped=True)
@staticmethod
def check_eval_output() -> None:
instance = globals()["model"]
assert instance.test == "eval", "We only support checking output of an eval test. Please submit a bug report."
out = instance.invoke()
# check output stableness on CUDA device
from torchbenchmark.util.env_check import stableness_check
if instance.device == "cuda":
stableness_check(instance, cos_sim=False, deepcopy=instance.DEEPCOPY)
@base_task.run_in_worker(scoped=True)
@staticmethod
def check_device() -> None:
instance = globals()["model"]
# Check this BenchmarkModel has a device attribute.
current_device = getattr(instance, 'device', None)
if current_device is None:
raise RuntimeError('Missing device in BenchmarkModel.')
model, inputs = instance.get_module()
model_name = getattr(model, 'name', None)
# Check the model tensors are assigned to the expected device.
for t in model.parameters():
model_device = t.device.type
if model_device != current_device:
raise RuntimeError(f'Model {model_name} was not set to the'
f' expected device {current_device},'
f' found device {model_device}.')
# Check the inputs are assigned to the expected device.
def check_inputs(inputs):
if isinstance(inputs, torch.Tensor):
                if not inputs.dim() and current_device == "cuda":
                    # Zero dim Tensors (Scalars) can be captured by CUDA
                    # kernels and need not match device.
                    return
inputs_device = inputs.device.type
if inputs_device != current_device:
raise RuntimeError(f'Model {model_name} inputs were'
f' not set to the expected device'
f' {current_device}, found device'
f' {inputs_device}.')
elif isinstance(inputs, tuple):
# Some inputs are nested inside tuples, such as tacotron2
for i in inputs:
check_inputs(i)
elif isinstance(inputs, dict):
# Huggingface models take inputs as kwargs
for i in inputs.values():
check_inputs(i)
check_inputs(inputs)
# =========================================================================
# == Control `torch` state (in the subprocess) ============================
# =========================================================================
@contextlib.contextmanager
def no_grad(self, disable_nograd: bool) -> None:
# TODO: deduplicate with `torchbenchmark.util.model.no_grad`
initial_value = self.worker.load_stmt("torch.is_grad_enabled()")
eval_in_nograd = (
not disable_nograd and
self.worker.load_stmt("model.eval_in_nograd()"))
try:
self.worker.run(f"torch.set_grad_enabled({not eval_in_nograd})")
yield
finally:
self.worker.run(f"torch.set_grad_enabled({initial_value})")
@contextlib.contextmanager
def watch_cuda_memory(
self,
skip: bool,
assert_equal: Callable[[int, int], NoReturn],
):
# This context manager is used in testing to ensure we're not leaking
# memory; these tests are generally parameterized by device, so in some
# cases we want this (and the outer check) to simply be a no-op.
if skip or os.getenv('PYTORCH_TEST_SKIP_CUDA_MEM_LEAK_CHECK', '0') == '1':
yield
return
if hasattr(torch._C, '_cuda_clearCublasWorkspaces'):
self.worker.load_stmt("torch._C._cuda_clearCublasWorkspaces()")
self.gc_collect()
memory_before = self.worker.load_stmt("torch.cuda.memory_allocated()")
yield
if hasattr(torch._C, '_cuda_clearCublasWorkspaces'):
self.worker.load_stmt("torch._C._cuda_clearCublasWorkspaces()")
self.gc_collect()
assert_equal(
memory_before,
self.worker.load_stmt("torch.cuda.memory_allocated()"),
)
self.worker.run("torch.cuda.empty_cache()")
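# Usage sketch (illustrative; the assert helper below is a stand-in for the
# test suite's assertEqual): the harness wraps a run in watch_cuda_memory so
# that any CUDA memory still allocated afterwards fails the comparison.
def _example_watch_cuda_memory(task: "ModelTask") -> None:
    def assert_equal(before: int, after: int) -> None:
        assert before == after, f"CUDA memory leak: {before} -> {after} bytes"
    with task.watch_cuda_memory(skip=False, assert_equal=assert_equal):
        task.invoke()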
def list_models_details(workers: int = 1) -> List[ModelDetails]:
return [
ModelTask(model_path).model_details
for model_path in _list_model_paths()
]
def list_models(model_match=None):
models = []
for model_path in _list_model_paths():
model_name = os.path.basename(model_path)
model_pkg = model_name if not _is_internal_model(model_name) else f"{internal_model_dir}.{model_name}"
try:
module = importlib.import_module(f'.models.{model_pkg}', package=__name__)
except ModuleNotFoundError as e:
print(f"Warning: Could not find dependent module {e.name} for Model {model_name}, skip it")
continue
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
continue
if not hasattr(Model, 'name'):
Model.name = model_name
# If given model_match, only return full or partial name matches in models.
if model_match is None:
models.append(Model)
else:
if model_match.lower() in Model.name.lower():
models.append(Model)
return models
def load_model_by_name(model):
models = filter(lambda x: model.lower() == x.lower(),
map(lambda y: os.path.basename(y), _list_model_paths()))
models = list(models)
if not models:
raise ModelNotFoundError(f"{model} is not found in the core model list.")
    assert len(models) == 1, f"Found more than one model {models} with the exact name: {model}"
model_name = models[0]
model_pkg = model_name if not _is_internal_model(model_name) else f"{internal_model_dir}.{model_name}"
module = importlib.import_module(f'.models.{model_pkg}', package=__name__)
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
return None
if not hasattr(Model, 'name'):
Model.name = model_name
return Model
def load_canary_model_by_name(model: str):
if not _is_canary_model(model):
raise ModelNotFoundError(f"{model} is not found in the canary model list.")
module = importlib.import_module(f'.canary_models.{model}', package=__name__)
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
return None
if not hasattr(Model, 'name'):
Model.name = model
return Model
def get_metadata_from_yaml(path):
import yaml
    metadata_path = os.path.join(path, "metadata.yaml")
md = None
if os.path.exists(metadata_path):
with open(metadata_path, 'r') as f:
md = yaml.load(f, Loader=yaml.FullLoader)
return md
def str_to_bool(input: Any) -> bool:
if not input:
return False
return str(input).lower() in ("1", "yes", "y", "true", "t", "on")
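# Behavior sketch: truthy spellings are matched case-insensitively, e.g.
# str_to_bool("Yes") and str_to_bool(1) are True, while str_to_bool("0"),
# str_to_bool("") and str_to_bool(None) are False.
def _example_str_to_bool():
    return [str_to_bool(v) for v in ("Yes", 1, "0", "", None)]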
|
import os
import pathlib
import importlib
from dataclasses import dataclass
from typing import List, Dict, Any
E2E_MODEL_DIR = 'e2e_models'
def _list_model_paths() -> List[str]:
p = pathlib.Path(__file__).parent.joinpath(E2E_MODEL_DIR)
return sorted(str(child.absolute()) for child in p.iterdir() if child.is_dir())
@dataclass
class E2EBenchmarkResult:
device: str
device_num: int
test: str
num_examples: int
num_epochs: int
batch_size: int
result: Dict[str, Any]
def load_e2e_model_by_name(model):
models = filter(lambda x: model.lower() == x.lower(),
map(lambda y: os.path.basename(y), _list_model_paths()))
models = list(models)
if not models:
return None
    assert len(models) == 1, f"Found more than one model {models} with the exact name: {model}"
model_name = models[0]
try:
module = importlib.import_module(f'torchbenchmark.e2e_models.{model_name}', package=__name__)
except ModuleNotFoundError as e:
print(f"Warning: Could not find dependent module {e.name} for Model {model_name}, skip it: {e}")
return None
Model = getattr(module, 'Model', None)
if Model is None:
print(f"Warning: {module} does not define attribute Model, skip it")
return None
if not hasattr(Model, 'name'):
Model.name = model_name
return Model
|
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(model_name="gat", test=test, device=device, jit=jit,
batch_size=batch_size, extra_args=extra_args)
if device == 'cuda':
# TODO - Add CUDA support
raise NotImplementedError("GAT doesn't support CUDA")
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.framework.diffusers.model_factory import DiffuserModel
class Model(DiffuserModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 4
DEFAULT_EVAL_BSIZE = 1
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(name="timbrooks/instruct-pix2pix",
test=test, device=device, jit=jit,
batch_size=batch_size, extra_args=extra_args)
|
from torchbenchmark.util.framework.diffusers import install_diffusers
from diffusers import StableDiffusionInstructPix2PixPipeline
import torch
MODEL_NAME = "timbrooks/instruct-pix2pix"
def load_model_checkpoint():
StableDiffusionInstructPix2PixPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safety_checker=None)
if __name__ == '__main__':
install_diffusers()
|
import dataclasses
from typing import List
def cfg_to_str(cfg) -> List[str]:
def rewrite_option(opt: str) -> str:
new_opt = opt.replace("_", "-")
return f"--{new_opt}"
out = []
for fld in dataclasses.fields(cfg):
new_option = rewrite_option(fld.name)
val = getattr(cfg, fld.name)
if isinstance(val, bool):
if val:
out.append(new_option)
else:
out.append(new_option)
out.append(str(getattr(cfg, fld.name)))
return out
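# Usage sketch (illustrative): cfg_to_str flattens a config dataclass into
# dlrm-style CLI flags, e.g. a field mini_batch_size=1024 contributes
# ["--mini-batch-size", "1024"], while boolean fields appear as bare flags only
# when they are True.
def _example_cfg_to_str():
    # FAMBenchTrainConfig is defined right below; the name is resolved at call time.
    return cfg_to_str(FAMBenchTrainConfig())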
# dummy config location:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/run_dlrm_ootb_train.sh#L54
# config: A.1dev-embed32-fp32
@dataclasses.dataclass
class FAMBenchTrainConfig:
mini_batch_size: int = 1024
test_mini_batch_size: int = 1024
test_num_workers: int = 0
data_generation: str = "random"
arch_mlp_bot:str = "2000-1500-1500-1500-192"
arch_mlp_top:str = "4000-4000-4000-4000-4000-4000-4000-4000-4000-1"
arch_sparse_feature_size:int = 192
arch_embedding_size:str = "965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965-965"
num_indices_per_lookup:int = 55
num_indices_per_lookup_fixed:int = 1
numpy_rand_seed:int = 727
weighted_pooling: str = "learned"
# torchbench: run 2 batches only (original 15)
num_batches:int = 2
# torchbench: these items in the original config are disabled
# because they are handled by the framework
# num_batches:int = 15
# warmup_step = 5
# use_gpu: bool = True
# precache_ml_data: bool = True
# dummy config location:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/run_dlrm_ootb_infer.sh#L54
# config: A.1dev-embed4-fp16
@dataclasses.dataclass
class FAMBenchEvalConfig:
mini_batch_size:int = 1024
test_mini_batch_size:int = 1024
test_num_workers:int = 0
data_generation:str = "random"
arch_mlp_bot:str = "1414-1750-1750-1750-1750-1750-1750-1750-1750-96"
arch_mlp_top:str = "1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1450-1"
arch_sparse_feature_size:int = 96
arch_embedding_size:str = "555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693-555693"
num_indices_per_lookup:int = 8
num_indices_per_lookup_fixed:int = 1
numpy_rand_seed:int = 727
weighted_pooling: str = "fixed"
# original number of batches: 15
num_batches:int = 15
# torchbench: these items in the original config are disabled
    # because they are either handled by the framework
    # or require extra dependencies that we don't support yet (such as fbgemm and torch2trt_for_mlp)
# disable warmup
# warmup_step: int = 5
# do not support quantize, torch2trt_for_mlp or fbgemm
# quantize_emb_with_bit: int = 4
# use_fbgemm_gpu: bool = True
# use_gpu: bool = True
# inference_only: bool = True
# precache_ml_data: bool = True
# use_torch2trt_for_mlp: bool = True
# quantize_mlp_with_bit: int = 16
|
import sys
from torch.optim.lr_scheduler import _LRScheduler
class LRPolicyScheduler(_LRScheduler):
def __init__(self, optimizer, num_warmup_steps, decay_start_step, num_decay_steps):
self.num_warmup_steps = num_warmup_steps
self.decay_start_step = decay_start_step
self.decay_end_step = decay_start_step + num_decay_steps
self.num_decay_steps = num_decay_steps
if self.decay_start_step < self.num_warmup_steps:
sys.exit("Learning rate warmup must finish before the decay starts")
super(LRPolicyScheduler, self).__init__(optimizer)
def get_lr(self):
step_count = self._step_count
if step_count < self.num_warmup_steps:
# warmup
scale = 1.0 - (self.num_warmup_steps - step_count) / self.num_warmup_steps
lr = [base_lr * scale for base_lr in self.base_lrs]
self.last_lr = lr
elif self.decay_start_step <= step_count and step_count < self.decay_end_step:
# decay
decayed_steps = step_count - self.decay_start_step
scale = ((self.num_decay_steps - decayed_steps) / self.num_decay_steps) ** 2
min_lr = 0.0000001
lr = [max(min_lr, base_lr * scale) for base_lr in self.base_lrs]
self.last_lr = lr
else:
if self.num_decay_steps > 0:
# freeze at last, either because we're after decay
# or because we're between warmup and decay
lr = self.last_lr
else:
# do not adjust
lr = self.base_lrs
return lr
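# Worked example (illustrative numbers, not from FAMBench): with base_lr=0.1,
# num_warmup_steps=4, decay_start_step=10 and num_decay_steps=5, step 2 warms
# up to 0.1 * (1 - (4 - 2) / 4) = 0.05, steps 4..9 hold the last warmup value,
# and step 12 decays to 0.1 * ((5 - 2) / 5) ** 2 = 0.036 (floored at 1e-7).
def _example_lr_policy(step_count, base_lr=0.1, num_warmup_steps=4,
                       decay_start_step=10, num_decay_steps=5):
    # Stateless re-derivation of get_lr() above for a single step; the real
    # scheduler freezes at the most recently computed value on the plateaus.
    if step_count < num_warmup_steps:
        return base_lr * (1.0 - (num_warmup_steps - step_count) / num_warmup_steps)
    if decay_start_step <= step_count < decay_start_step + num_decay_steps:
        decayed = step_count - decay_start_step
        return max(1e-7, base_lr * ((num_decay_steps - decayed) / num_decay_steps) ** 2)
    return base_lr  # the plateau value in the real scheduler is stateful (see above)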
|
"""
Simplified DLRM model from FAMBench
It doesn't support multi-GPU or fbgemm_gpu.
"""
import torch
import sys
import os
import numpy as np
import torch.nn as nn
from torchbenchmark import REPO_PATH
from typing import Tuple, List
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import RECOMMENDATION
# Import FAMBench model path
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
DLRM_PATH = os.path.join(REPO_PATH, "submodules", "FAMBench", "benchmarks", "dlrm", "ootb")
with add_path(DLRM_PATH):
import optim.rwsadagrad as RowWiseSparseAdagrad
from .dlrmnet import DLRM_Net
from .data import prep_data
from .config import FAMBenchTrainConfig, FAMBenchEvalConfig, cfg_to_str
from .args import parse_fambench_args, validate_fambench_args
from .lrscheduler import LRPolicyScheduler
from .utils import unpack_batch, loss_fn_wrap, dlrm_wrap, prefetch
class Model(BenchmarkModel):
task = RECOMMENDATION.RECOMMENDATION
FAMBENCH_MODEL = True
# config
DEFAULT_EVAL_ARGS = FAMBenchEvalConfig()
DEFAULT_TRAIN_ARGS = FAMBenchTrainConfig()
DEFAULT_EVAL_BSIZE = DEFAULT_EVAL_ARGS.mini_batch_size
DEFAULT_TRAIN_BSIZE = DEFAULT_TRAIN_ARGS.mini_batch_size
DEEPCOPY: bool = False
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test, device, batch_size, jit, extra_args)
if test == "train":
self.fambench_args = parse_fambench_args(cfg_to_str(self.DEFAULT_TRAIN_ARGS))
self.fambench_args.inference_only = False
elif test == "eval":
self.fambench_args = parse_fambench_args(cfg_to_str(self.DEFAULT_EVAL_ARGS))
self.fambench_args.inference_only = True
if device == "cuda":
self.fambench_args.use_gpu = True
self.fambench_args.ndevices = 1
args = self.fambench_args
validate_fambench_args(args)
self.prep(args)
ln_bot, ln_emb, ln_top, m_spa, train_ld, test_ld = prep_data(args)
dlrm = DLRM_Net(
args,
m_spa,
ln_emb,
ln_bot,
ln_top,
args.arch_project_size,
arch_interaction_op=args.arch_interaction_op,
arch_interaction_itself=args.arch_interaction_itself,
sigmoid_bot=-1,
sigmoid_top=ln_top.size - 2,
sync_dense_params=args.sync_dense_params,
loss_threshold=args.loss_threshold,
ndevices=args.ndevices,
qr_flag=args.qr_flag,
qr_operation=args.qr_operation,
qr_collisions=args.qr_collisions,
qr_threshold=args.qr_threshold,
md_flag=args.md_flag,
md_threshold=args.md_threshold,
weighted_pooling=args.weighted_pooling,
loss_function=args.loss_function,
learning_rate=args.learning_rate,
use_gpu=args.use_gpu,
use_fbgemm_gpu=args.use_fbgemm_gpu,
fbgemm_gpu_codegen_pref=args.fbgemm_gpu_codegen_pref,
inference_only=args.inference_only,
quantize_mlp_with_bit=args.quantize_mlp_with_bit,
quantize_emb_with_bit=args.quantize_emb_with_bit,
use_torch2trt_for_mlp=args.use_torch2trt_for_mlp,)
# In dlrm.quantize_embedding called below, the torch quantize calls run
# on cpu tensors only. They cannot quantize tensors stored on the gpu.
# So quantization occurs on cpu tensors before transferring them to gpu if
# use_gpu is enabled.
if args.quantize_emb_with_bit != 32:
dlrm.quantize_embedding(args.quantize_emb_with_bit)
if not args.inference_only:
assert args.quantize_mlp_with_bit == 32, (
"Dynamic quantization for mlp requires "
+ "--inference-only because training is not supported"
)
else:
# Currently only INT8 and FP16 quantized types are supported for quantized MLP inference.
# By default we don't do the quantization: quantize_{mlp,emb}_with_bit == 32 (FP32)
assert args.quantize_mlp_with_bit in [
8,
16,
32,
], "only support 8/16/32-bit but got {}".format(args.quantize_mlp_with_bit)
if not args.use_torch2trt_for_mlp:
if args.quantize_mlp_with_bit == 16 and args.use_gpu:
dlrm.top_l = dlrm.top_l.half()
dlrm.bot_l = dlrm.bot_l.half()
elif args.quantize_mlp_with_bit in [8, 16]:
assert not args.use_gpu, (
"Cannot run PyTorch's built-in dynamic quantization for mlp "
+ "with --use-gpu enabled, because DynamicQuantizedLinear's "
+ "forward function calls 'quantized::linear_dynamic', which does not "
+ "support the 'CUDA' backend. To convert to and run quantized mlp layers "
+ "on the gpu, install torch2trt and enable --use-torch2trt-for-mlp. "
+ "Alternatively, disable --use-gpu to use PyTorch's built-in "
+ "cpu quantization ops for the mlp layers. "
)
if args.quantize_mlp_with_bit == 8:
quantize_dtype = torch.qint8
else:
quantize_dtype = torch.float16
dlrm.top_l = torch.quantization.quantize_dynamic(
dlrm.top_l, {torch.nn.Linear}, quantize_dtype
)
dlrm.bot_l = torch.quantization.quantize_dynamic(
dlrm.bot_l, {torch.nn.Linear}, quantize_dtype
)
# Prep work for embedding tables and model transfer:
# Handling single-cpu and single-gpu modes
# NOTE: This also handles dist-backend modes (CLI args --dist-backend=nccl,
# --dist-backend=ccl, and --dist-backend=mpi) because in these modes each
# process runs in single-gpu mode. For example, if 8 processes are launched
# running dlrm_s_pytorch.py with --dist-backend=nccl --use-gpu, each process
# will run in single-gpu mode, resulting in 8 gpus total running distributed
# training or distributed inference if --inference-only is enabled.
if dlrm.ndevices_available <= 1:
if args.use_fbgemm_gpu:
from .fbgemm_embedding import fbgemm_gpu_emb_bag_wrapper
dlrm.fbgemm_emb_l = nn.ModuleList(
[
fbgemm_gpu_emb_bag_wrapper(
device,
dlrm.emb_l if dlrm.emb_l else dlrm.emb_l_q,
dlrm.m_spa,
dlrm.quantize_bits,
dlrm.learning_rate,
dlrm.fbgemm_gpu_codegen_pref,
dlrm.requires_grad,
)
]
)
if args.use_gpu:
dlrm = dlrm.to(device)
if dlrm.weighted_pooling == "fixed":
for k, w in enumerate(dlrm.v_W_l):
dlrm.v_W_l[k] = w.to(device)
else:
            # Handling multi-gpu mode
dlrm.bot_l = dlrm.bot_l.to(device)
dlrm.top_l = dlrm.top_l.to(device)
dlrm.prepare_parallel_model(args.ndevices)
assert not args.use_torch2trt_for_mlp, "torch2trt is not supported."
if not args.inference_only:
# specify the optimizer algorithm
opts = {
"sgd": torch.optim.SGD,
"rwsadagrad": RowWiseSparseAdagrad.RWSAdagrad,
"adagrad": torch.optim.Adagrad,
}
# removed distributed code here
parameters = (
dlrm.parameters()
)
self.optimizer = opts[args.optimizer](parameters, lr=args.learning_rate)
self.lr_scheduler = LRPolicyScheduler(
self.optimizer,
args.lr_num_warmup_steps,
args.lr_decay_start_step,
args.lr_num_decay_steps,
)
self.model = dlrm.to(self.device)
# torchbench: prefetch the input to device
if test == "train":
self.ld = prefetch(train_ld, self.device)
elif test == "eval":
self.ld = prefetch(test_ld, self.device)
# Guarantee GPU setup has completed before training or inference starts.
if args.use_gpu:
torch.cuda.synchronize()
def prep(self, args):
np.random.seed(args.numpy_rand_seed)
np.set_printoptions(precision=args.print_precision)
torch.set_printoptions(args.print_precision)
torch.manual_seed(args.numpy_rand_seed)
if args.test_mini_batch_size < 0:
# if the parameter is not set, use the training batch size
args.test_mini_batch_size = args.mini_batch_size
if args.test_num_workers < 0:
# if the parameter is not set, use the same parameter for training
args.test_num_workers = args.num_workers
if args.use_gpu:
torch.cuda.manual_seed_all(args.numpy_rand_seed)
torch.backends.cudnn.deterministic = True
# we only support 1 device
args.ndevices = 1
def get_module(self) -> Tuple[torch.nn.Module, List[torch.Tensor]]:
for inputBatch in self.ld:
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch, self.device)
if self.model.quantize_mlp_input_with_half_call:
X = X.half()
return (self.model, (X, lS_o, lS_i))
def train(self):
args = self.fambench_args
for j, inputBatch in enumerate(self.ld):
X, lS_o, lS_i, T, W, CBPP = unpack_batch(inputBatch, self.device)
mbs = T.shape[0] # = args.mini_batch_size except maybe for last
# forward pass
Z = dlrm_wrap(
self.model,
X,
lS_o,
lS_i,
args.use_gpu,
self.device,
ndevices=args.ndevices,
)
# loss
E = loss_fn_wrap(self.model, self.fambench_args, Z, T, args.use_gpu, self.device)
# compute loss and accuracy
L = E.detach().cpu().numpy() # numpy array
self.optimizer.zero_grad()
E.backward()
self.optimizer.step()
self.lr_scheduler.step()
def eval(self) -> Tuple[torch.Tensor]:
result = []
args = self.fambench_args
for i, testBatch in enumerate(self.ld):
X_test, lS_o_test, lS_i_test, T_test, W_test, CBPP_test = unpack_batch(
testBatch, self.device
)
# forward pass
Z_test = dlrm_wrap(
self.model,
X_test,
lS_o_test,
lS_i_test,
args.use_gpu,
self.device,
ndevices=args.ndevices,
)
result = (Z_test, T_test)
return result
|
import torch.nn as nn
import torch
import sys
import numpy as np
import itertools
from torch._ops import ops
from torch.nn.parameter import Parameter
from torch.nn.parallel.replicate import replicate
from torch.nn.parallel.parallel_apply import parallel_apply
from torch.nn.parallel.scatter_gather import gather, scatter
# fambench imports
# projection
import project
# quotient-remainder trick
from tricks.qr_embedding_bag import QREmbeddingBag
# mixed-dimension trick
from tricks.md_embedding_bag import PrEmbeddingBag
class DLRM_Net(nn.Module):
def create_mlp(self, ln, sigmoid_layer):
# build MLP layer by layer
layers = nn.ModuleList()
layers.training = self.requires_grad
for i in range(0, ln.size - 1):
n = ln[i]
m = ln[i + 1]
# construct fully connected operator
LL = nn.Linear(int(n), int(m), bias=True)
# initialize the weights
# with torch.no_grad():
# custom Xavier input, output or two-sided fill
mean = 0.0 # std_dev = np.sqrt(variance)
std_dev = np.sqrt(2 / (m + n)) # np.sqrt(1 / m) # np.sqrt(1 / n)
W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
std_dev = np.sqrt(1 / m) # np.sqrt(2 / (m + 1))
bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
# approach 1
LL.weight.data = torch.tensor(W)
LL.weight.requires_grad = self.requires_grad
LL.bias.data = torch.tensor(bt)
LL.bias.requires_grad = self.requires_grad
# approach 2
# LL.weight.data.copy_(torch.tensor(W))
# LL.bias.data.copy_(torch.tensor(bt))
# approach 3
# LL.weight = Parameter(torch.tensor(W),requires_grad=True)
# LL.bias = Parameter(torch.tensor(bt),requires_grad=True)
layers.append(LL)
# construct sigmoid or relu operator
if i == sigmoid_layer:
layers.append(nn.Sigmoid())
else:
layers.append(nn.ReLU())
# approach 1: use ModuleList
# return layers
# approach 2: use Sequential container to wrap all layers
return torch.nn.Sequential(*layers)
def create_emb(self, m, ln, weighted_pooling=None):
# create_emb parameter description
#
# ln parameter:
# ln is a list of all the tables' row counts. E.g. [10,5,16] would mean
# table 0 has 10 rows, table 1 has 5 rows, and table 2 has 16 rows.
#
# m parameter (when m is a single value):
# m is the length of all embedding vectors. All embedding vectors in all
# embedding tables are created to be the same length. E.g. if ln were [3,2,5]
# and m were 4, table 0 would be dimension 3 x 4, table 1 would be 2 x 4,
# and table 2 would be 5 x 4.
#
# m parameter (when m is a list):
# m is a list of all the tables' column counts. E.g. if m were [4,5,6] and
# ln were [3,2,5], table 0 would be dimension 3 x 4, table 1 would be 2 x 5,
# and table 2 would be 5 x 6.
#
# Key to remember:
# embedding table i has shape: ln[i] rows, m columns, when m is a single value.
# embedding table i has shape: ln[i] rows, m[i] columns, when m is a list.
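# Quick shape check (illustrative only, assuming the plain nn.EmbeddingBag path,
# i.e. the qr/md tricks are disabled):
#   emb_l, _ = self.create_emb(4, np.array([3, 2, 5]))
#   [tuple(e.weight.shape) for e in emb_l]   # -> [(3, 4), (2, 4), (5, 4)]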
emb_l = nn.ModuleList()
v_W_l = []
for i in range(0, ln.size):
# torchbench: comment out distributed
# if ext_dist.my_size > 1:
# if i not in self.local_emb_indices:
# continue
n = ln[i]
# construct embedding operator
if self.qr_flag and n > self.qr_threshold:
EE = QREmbeddingBag(
n,
m,
self.qr_collisions,
operation=self.qr_operation,
mode="sum",
sparse=True,
)
elif self.md_flag and n > self.md_threshold:
base = max(m)
_m = m[i] if n > self.md_threshold else base
EE = PrEmbeddingBag(n, _m, base)
# use np initialization as below for consistency...
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, _m)
).astype(np.float32)
EE.embs.weight.data = torch.tensor(W, requires_grad=self.requires_grad)
else:
EE = nn.EmbeddingBag(n, m, mode="sum", sparse=True)
# initialize embeddings
# nn.init.uniform_(EE.weight, a=-np.sqrt(1 / n), b=np.sqrt(1 / n))
W = np.random.uniform(
low=-np.sqrt(1 / n), high=np.sqrt(1 / n), size=(n, m)
).astype(np.float32)
# approach 1
EE.weight.data = torch.tensor(W, requires_grad=self.requires_grad)
# approach 2
# EE.weight.data.copy_(torch.tensor(W))
# approach 3
# EE.weight = Parameter(torch.tensor(W),requires_grad=True)
if weighted_pooling is None:
v_W_l.append(None)
else:
v_W_l.append(torch.ones(n, dtype=torch.float32))
emb_l.append(EE)
return emb_l, v_W_l
def __init__(
self,
args,
m_spa=None,
ln_emb=None,
ln_bot=None,
ln_top=None,
proj_size=0,
arch_interaction_op=None,
arch_interaction_itself=False,
sigmoid_bot=-1,
sigmoid_top=-1,
sync_dense_params=True,
loss_threshold=0.0,
ndevices=-1,
qr_flag=False,
qr_operation="mult",
qr_collisions=0,
qr_threshold=200,
md_flag=False,
md_threshold=200,
weighted_pooling=None,
loss_function="bce",
learning_rate=0.1,
use_gpu=False,
use_fbgemm_gpu=False,
fbgemm_gpu_codegen_pref="Split",
inference_only=False,
quantize_mlp_with_bit=False,
quantize_emb_with_bit=False,
use_torch2trt_for_mlp=False,
):
super(DLRM_Net, self).__init__()
if (
(m_spa is not None)
and (ln_emb is not None)
and (ln_bot is not None)
and (ln_top is not None)
and (arch_interaction_op is not None)
):
# save arguments
self.ntables = len(ln_emb)
self.m_spa = m_spa
self.proj_size = proj_size
self.use_gpu = use_gpu
self.use_fbgemm_gpu = use_fbgemm_gpu
self.fbgemm_gpu_codegen_pref = fbgemm_gpu_codegen_pref
self.requires_grad = not inference_only
self.ndevices_available = ndevices
self.ndevices_in_use = ndevices
self.output_d = 0
self.add_new_weights_to_params = False
self.arch_interaction_op = arch_interaction_op
self.arch_interaction_itself = arch_interaction_itself
self.sync_dense_params = sync_dense_params and not inference_only
self.loss_threshold = loss_threshold
self.loss_function = loss_function
self.learning_rate = learning_rate
if weighted_pooling is not None and weighted_pooling != "fixed":
self.weighted_pooling = "learned"
else:
self.weighted_pooling = weighted_pooling
# create variables for QR embedding if applicable
self.qr_flag = qr_flag
if self.qr_flag:
self.qr_collisions = qr_collisions
self.qr_operation = qr_operation
self.qr_threshold = qr_threshold
# create variables for MD embedding if applicable
self.md_flag = md_flag
if self.md_flag:
self.md_threshold = md_threshold
# torchbench: comment distributed
# If running distributed, get local slice of embedding tables
# if ext_dist.my_size > 1:
# n_emb = len(ln_emb)
# if n_emb < ext_dist.my_size:
# sys.exit(
# "only (%d) sparse features for (%d) devices, table partitions will fail"
# % (n_emb, ext_dist.my_size)
# )
# self.n_global_emb = n_emb
# self.n_local_emb, self.n_emb_per_rank = ext_dist.get_split_lengths(
# n_emb
# )
# self.local_emb_slice = ext_dist.get_my_slice(n_emb)
# self.local_emb_indices = list(range(n_emb))[self.local_emb_slice]
# create operators
self.emb_l, self.v_W_l = self.create_emb(m_spa, ln_emb, weighted_pooling)
if self.weighted_pooling == "learned":
self.v_W_l = nn.ParameterList(list(map(Parameter, self.v_W_l)))
self.bot_l = self.create_mlp(ln_bot, sigmoid_bot)
self.top_l = self.create_mlp(ln_top, sigmoid_top)
if proj_size > 0:
self.proj_l = project.create_proj(len(ln_emb) + 1, proj_size)
# mlp quantization
self.quantize_mlp_with_bit = quantize_mlp_with_bit
self.use_torch2trt_for_mlp = use_torch2trt_for_mlp
self.quantize_mlp_input_with_half_call = use_gpu and not args.use_torch2trt_for_mlp and args.quantize_mlp_with_bit == 16
# embedding quantization
self.quantize_emb = False
self.emb_l_q = []
self.quantize_bits = 32
# fbgemm_gpu
self.fbgemm_emb_l = []
self.v_W_l_l = [self.v_W_l] if self.weighted_pooling else [None]
self.interact_features_l = []
# specify the loss function
if self.loss_function == "mse":
self.loss_fn = torch.nn.MSELoss(reduction="mean")
elif self.loss_function == "bce":
self.loss_fn = torch.nn.BCELoss(reduction="mean")
elif self.loss_function == "wbce":
self.loss_ws = torch.tensor(
np.fromstring(args.loss_weights, dtype=float, sep="-")
)
self.loss_fn = torch.nn.BCELoss(reduction="none")
else:
sys.exit(
"ERROR: --loss-function=" + self.loss_function + " is not supported"
)
def prepare_parallel_model(self, ndevices):
device_ids = range(ndevices)
# replicate mlp (data parallelism)
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
# distribute embeddings (model parallelism)
if self.weighted_pooling is not None:
for k, w in enumerate(self.v_W_l):
self.v_W_l[k] = Parameter(
w.to(torch.device("cuda:" + str(k % ndevices)))
)
if not self.use_fbgemm_gpu:
for k, w in enumerate(self.emb_l):
self.emb_l[k] = w.to(torch.device("cuda:" + str(k % ndevices)))
else:
from .fbgemm_embedding import fbgemm_gpu_emb_bag_wrapper
self.fbgemm_emb_l, self.v_W_l_l = zip(
*[
(
fbgemm_gpu_emb_bag_wrapper(
torch.device("cuda:" + str(k)),
self.emb_l[k::ndevices]
if self.emb_l
else self.emb_l_q[k::ndevices],
self.m_spa[k::ndevices]
if isinstance(self.m_spa, list)
else self.m_spa,
self.quantize_bits,
self.learning_rate,
self.fbgemm_gpu_codegen_pref,
self.requires_grad,
),
self.v_W_l[k::ndevices] if self.weighted_pooling else None,
)
for k in range(ndevices)
]
)
self.add_new_weights_to_params = True
self.interact_features_l = [self.nn_module_wrapper() for _ in range(ndevices)]
# nn_module_wrapper is used to call functions concurrently across multi-gpus, using parallel_apply,
# which requires an nn.Module subclass.
class nn_module_wrapper(nn.Module):
def __init__(self):
super(DLRM_Net.nn_module_wrapper, self).__init__()
def forward(self, E, x, ly):
return E(x, ly)
def apply_mlp(self, x, layers):
# approach 1: use ModuleList
# for layer in layers:
# x = layer(x)
# return x
# approach 2: use Sequential container to wrap all layers
return layers(x)
def apply_emb(self, lS_o, lS_i):
# WARNING: notice that we are processing the batch at once. We implicitly
# assume that the data is laid out such that:
# 1. each embedding is indexed with a group of sparse indices,
# corresponding to a single lookup
# 2. for each embedding the lookups are further organized into a batch
# 3. for a list of embedding tables there is a list of batched lookups
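# For example, with 2 tables and 2 lookups per table (illustrative values,
# following nn.EmbeddingBag's flat indices/offsets convention):
#   lS_i = [tensor([1, 4, 0]), tensor([2, 2, 3, 1])]   # flat indices per table
#   lS_o = [tensor([0, 2]),    tensor([0, 1])]         # start offset of each lookup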
if self.use_fbgemm_gpu:
# Deinterleave and reshape to 2d, so items are grouped by device
# per row. Then parallel apply.
ndevices = len(self.fbgemm_emb_l)
lS_o_l = [lS_o[k::ndevices] for k in range(ndevices)]
lS_i_l = [lS_i[k::ndevices] for k in range(ndevices)]
ly = parallel_apply(
self.fbgemm_emb_l, list(zip(lS_o_l, lS_i_l, self.v_W_l_l))
)
# Interleave and flatten to match non-fbgemm_gpu ly format.
ly = [ly[i % ndevices][i // ndevices] for i in range(self.ntables)]
else:
ly = []
for k, sparse_index_group_batch in enumerate(lS_i):
sparse_offset_group_batch = lS_o[k]
# embedding lookup
# We are using EmbeddingBag, which implicitly uses sum operator.
# The embeddings are represented as tall matrices, with sum
# happening vertically across 0 axis, resulting in a row vector
# E = emb_l[k]
if self.v_W_l[k] is not None:
per_sample_weights = self.v_W_l[k].gather(
0, sparse_index_group_batch
)
else:
per_sample_weights = None
if self.quantize_emb:
if self.quantize_bits == 4:
E = ops.quantized.embedding_bag_4bit_rowwise_offsets
elif self.quantize_bits == 8:
E = ops.quantized.embedding_bag_byte_rowwise_offsets
QV = E(
self.emb_l_q[k],
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(QV)
else:
E = self.emb_l[k]
V = E(
sparse_index_group_batch,
sparse_offset_group_batch,
per_sample_weights=per_sample_weights,
)
ly.append(V)
# print(ly)
return ly
# using quantizing functions from caffe2/aten/src/ATen/native/quantized/cpu
def quantize_embedding(self, bits):
n = len(self.emb_l)
self.emb_l_q = [None] * n
for k in range(n):
if bits == 4:
self.emb_l_q[k] = ops.quantized.embedding_bag_4bit_prepack(
self.emb_l[k].weight
)
elif bits == 8:
self.emb_l_q[k] = ops.quantized.embedding_bag_byte_prepack(
self.emb_l[k].weight
)
elif bits == 16:
self.emb_l_q[k] = self.emb_l[k].half().weight
else:
return
self.emb_l = None
self.quantize_emb = True
self.quantize_bits = bits
def interact_features(self, x, ly):
if self.arch_interaction_op == "dot":
# concatenate dense and sparse features
(batch_size, d) = x.shape
T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
# perform a dot product
if self.proj_size > 0:
R = project.project(T, x, self.proj_l)
else:
Z = torch.bmm(T, torch.transpose(T, 1, 2))
# append dense feature with the interactions (into a row vector)
# approach 1: all
# Zflat = Z.view((batch_size, -1))
# approach 2: unique
_, ni, nj = Z.shape
# approach 1: tril_indices
# offset = 0 if self.arch_interaction_itself else -1
# li, lj = torch.tril_indices(ni, nj, offset=offset)
# approach 2: custom
offset = 1 if self.arch_interaction_itself else 0
li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
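# e.g. for ni = nj = 3 and offset = 0 (no self-interaction) this yields
# li = [1, 2, 2] and lj = [0, 0, 1], i.e. the strictly lower-triangular
# pairs (1,0), (2,0), (2,1) of the pairwise interaction matrix Z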
Zflat = Z[:, li, lj]
# concatenate dense features and interactions
R = torch.cat([x] + [Zflat], dim=1)
elif self.arch_interaction_op == "cat":
# concatenation features (into a row vector)
R = torch.cat([x] + ly, dim=1)
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ self.arch_interaction_op
+ " is not supported"
)
return R
def forward(self, dense_x, lS_o, lS_i):
# torchbench: only enable sequential forward
return self.sequential_forward(dense_x, lS_o, lS_i)
# if ext_dist.my_size > 1:
# # multi-node multi-device run
# return self.distributed_forward(dense_x, lS_o, lS_i)
# elif self.ndevices_available <= 1:
# # single device run
# return self.sequential_forward(dense_x, lS_o, lS_i)
# else:
# # single-node multi-device run
# return self.parallel_forward(dense_x, lS_o, lS_i)
# torchbench: disable distributed forward
# def distributed_forward(self, dense_x, lS_o, lS_i):
# batch_size = dense_x.size()[0]
# # WARNING: # of ranks must be <= batch size in distributed_forward call
# if batch_size < ext_dist.my_size:
# sys.exit(
# "ERROR: batch_size (%d) must be larger than number of ranks (%d)"
# % (batch_size, ext_dist.my_size)
# )
# if batch_size % ext_dist.my_size != 0:
# sys.exit(
# "ERROR: batch_size %d can not split across %d ranks evenly"
# % (batch_size, ext_dist.my_size)
# )
# dense_x = dense_x[ext_dist.get_my_slice(batch_size)]
# lS_o = lS_o[self.local_emb_slice]
# lS_i = lS_i[self.local_emb_slice]
# if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)):
# sys.exit(
# "ERROR: corrupted model input detected in distributed_forward call"
# )
# # embeddings
# with record_function("DLRM embedding forward"):
# ly = self.apply_emb(lS_o, lS_i)
# # WARNING: Note that at this point we have the result of the embedding lookup
# # for the entire batch on each rank. We would like to obtain partial results
# # corresponding to all embedding lookups, but part of the batch on each rank.
# # Therefore, matching the distribution of output of bottom mlp, so that both
# # could be used for subsequent interactions on each device.
# if self.ntables != len(ly):
# sys.exit("ERROR: corrupted intermediate result in distributed_forward call")
# a2a_req = ext_dist.alltoall(ly, self.n_emb_per_rank)
# with record_function("DLRM bottom mlp forward"):
# x = self.apply_mlp(dense_x, self.bot_l)
# ly = a2a_req.wait()
# ly = list(ly)
# # interactions
# with record_function("DLRM interaction forward"):
# z = self.interact_features(x, ly)
# # top mlp
# with record_function("DLRM top mlp forward"):
# # quantize top mlp's input to fp16 if PyTorch's built-in fp16 quantization is used.
# if self.quantize_mlp_input_with_half_call:
# z = z.half()
# p = self.apply_mlp(z, self.top_l)
# # clamp output if needed
# if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
# z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
# else:
# z = p
# return z
def sequential_forward(self, dense_x, lS_o, lS_i):
# process dense features (using bottom mlp), resulting in a row vector
x = self.apply_mlp(dense_x, self.bot_l)
# debug prints
# print("intermediate")
# print(x.detach().cpu().numpy())
# process sparse features (using embeddings), resulting in a list of row vectors
ly = self.apply_emb(lS_o, lS_i)
# for y in ly:
# print(y.detach().cpu().numpy())
# interact features (dense and sparse)
z = self.interact_features(x, ly)
# print(z.detach().cpu().numpy())
# quantize top mlp's input to fp16 if PyTorch's built-in fp16 quantization is used.
if self.quantize_mlp_input_with_half_call:
z = z.half()
# obtain probability of a click (using top mlp)
p = self.apply_mlp(z, self.top_l)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z = torch.clamp(p, min=self.loss_threshold, max=(1.0 - self.loss_threshold))
else:
z = p
return z
def parallel_forward(self, dense_x, lS_o, lS_i):
### prepare model (overwrite) ###
# WARNING: # of devices must be >= batch size in parallel_forward call
batch_size = dense_x.size()[0]
ndevices = min(self.ndevices_available, batch_size, self.ntables)
device_ids = range(ndevices)
# WARNING: must redistribute the model if the mini-batch size changes (this is
# common for the last mini-batch, when the dataset size is not evenly divisible
# by the batch size)
if self.ndevices_in_use != ndevices:
self.ndevices_in_use = ndevices
self.prepare_parallel_model(ndevices)
elif self.sync_dense_params:
# When training, replicate the new/updated mlp weights each iteration.
# For inference-only, this code should never run.
self.bot_l_replicas = replicate(self.bot_l, device_ids)
self.top_l_replicas = replicate(self.top_l, device_ids)
### prepare input (overwrite) ###
# scatter dense features (data parallelism)
# print(dense_x.device)
dense_x = scatter(dense_x, device_ids, dim=0)
# distribute sparse features (model parallelism)
if (self.ntables != len(lS_o)) or (self.ntables != len(lS_i)):
sys.exit("ERROR: corrupted model input detected in parallel_forward call")
lS_o = [
lS_o[k].to(torch.device("cuda:" + str(k % ndevices)))
for k in range(self.ntables)
]
lS_i = [
lS_i[k].to(torch.device("cuda:" + str(k % ndevices)))
for k in range(self.ntables)
]
### compute results in parallel ###
# bottom mlp
# WARNING: Note that the self.bot_l is a list of bottom mlp modules
# that have been replicated across devices, while dense_x is a tuple of dense
# inputs that has been scattered across devices on the first (batch) dimension.
# The output is a list of tensors scattered across devices according to the
# distribution of dense_x.
x = parallel_apply(self.bot_l_replicas, dense_x, None, device_ids)
# debug prints
# print(x)
# embeddings
ly = self.apply_emb(lS_o, lS_i)
# debug prints
# print(ly)
# butterfly shuffle (implemented inefficiently for now)
# WARNING: Note that at this point we have the result of the embedding lookup
# for the entire batch on each device. We would like to obtain partial results
# corresponding to all embedding lookups, but part of the batch on each device.
# Therefore, matching the distribution of output of bottom mlp, so that both
# could be used for subsequent interactions on each device.
if self.ntables != len(ly):
sys.exit("ERROR: corrupted intermediate result in parallel_forward call")
t_list = [scatter(ly[k], device_ids, dim=0) for k in range(self.ntables)]
# adjust the list to be ordered per device
ly = list(map(lambda y: list(y), zip(*t_list)))
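# e.g. with 2 tables and 2 devices, t_list == [(y0_d0, y0_d1), (y1_d0, y1_d1)]
# (per-table tuples of per-device chunks) is transposed into
# ly == [[y0_d0, y1_d0], [y0_d1, y1_d1]], i.e. one list of table outputs per device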
# debug prints
# print(ly)
# interactions
z = parallel_apply(self.interact_features_l, list(zip(itertools.repeat(self.interact_features),x,ly)))
# debug prints
# print(z)
if self.quantize_mlp_input_with_half_call:
z = [tens.half() for tens in z]
# top mlp
# WARNING: Note that the self.top_l is a list of top mlp modules that
# have been replicated across devices, while z is a list of interaction results
# that by construction are scattered across devices on the first (batch) dim.
# The output is a list of tensors scattered across devices according to the
# distribution of z.
p = parallel_apply(self.top_l_replicas, z, None, device_ids)
### gather the distributed results ###
p0 = gather(p, self.output_d, dim=0)
# clamp output if needed
if 0.0 < self.loss_threshold and self.loss_threshold < 1.0:
z0 = torch.clamp(
p0, min=self.loss_threshold, max=(1.0 - self.loss_threshold)
)
else:
z0 = p0
return z0
def print_weights(self):
if self.use_fbgemm_gpu and len(self.fbgemm_emb_l):
ntables_l = [
len(e.fbgemm_gpu_emb_bag.embedding_specs) for e in self.fbgemm_emb_l
]
for j in range(ntables_l[0] + 1):
for k, e in enumerate(self.fbgemm_emb_l):
if j < ntables_l[k]:
print(
e.fbgemm_gpu_emb_bag.split_embedding_weights()[j]
.detach()
.cpu()
.numpy()
)
elif self.quantize_bits != 32:
for e in self.emb_l_q:
print(e.data.detach().cpu().numpy())
else: # if self.emb_l:
for param in self.emb_l.parameters():
print(param.detach().cpu().numpy())
if isinstance(self.v_W_l, nn.ParameterList):
for param in self.v_W_l.parameters():
print(param.detach().cpu().numpy())
for param in self.bot_l.parameters():
print(param.detach().cpu().numpy())
for param in self.top_l.parameters():
print(param.detach().cpu().numpy()) |
import torch
# The following function is a wrapper to avoid checking the batch format multiple
# times in the main loop below.
def unpack_batch(b, device):
# Experiment with unweighted samples
return b[0], b[1], b[2], b[3], torch.ones(b[3].size()).to(device), None
def dlrm_wrap(dlrm, X, lS_o, lS_i, use_gpu, device, ndevices=1):
if dlrm.quantize_mlp_input_with_half_call:
X = X.half()
if use_gpu:
# lS_i can be either a list of tensors or a stacked tensor.
# Handle each case below:
if ndevices == 1:
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
return dlrm(X.to(device), lS_o, lS_i)
def loss_fn_wrap(dlrm, args, Z, T, use_gpu, device):
if args.loss_function == "mse" or args.loss_function == "bce":
return dlrm.loss_fn(Z, T.to(device))
elif args.loss_function == "wbce":
loss_ws_ = dlrm.loss_ws[T.data.view(-1).long()].view_as(T).to(device)
loss_fn_ = dlrm.loss_fn(Z, T.to(device))
loss_sc_ = loss_ws_ * loss_fn_
return loss_sc_.mean()
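# --- illustrative sketch (not used by the benchmark) ---
# The "wbce" branch of loss_fn_wrap() above weights each sample's BCE term by a
# per-class weight before averaging. A minimal numeric walk-through, relying on
# the module-level `import torch` above; `_wbce_loss_example` is a hypothetical
# helper added only for illustration.
def _wbce_loss_example():
    Z = torch.tensor([0.9, 0.2, 0.7])   # predicted click probabilities
    T = torch.tensor([1.0, 0.0, 1.0])   # binary targets
    loss_ws = torch.tensor([0.5, 2.0])  # weight for class 0, weight for class 1
    per_sample = torch.nn.BCELoss(reduction="none")(Z, T)
    weights = loss_ws[T.view(-1).long()].view_as(T)  # -> tensor([2.0, 0.5, 2.0])
    return (weights * per_sample).mean()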
def prefetch(dl, device):
out = []
for inputBatch in dl:
X, lS_o, lS_i, T = inputBatch
lS_i = (
[S_i.to(device) for S_i in lS_i]
if isinstance(lS_i, list)
else lS_i.to(device)
)
lS_o = (
[S_o.to(device) for S_o in lS_o]
if isinstance(lS_o, list)
else lS_o.to(device)
)
out.append(tuple([X.to(device), lS_o, lS_i, T]))
return out |
# Currently, this file is not used, because torchbench doesn't support fbgemm embedding yet;
# Note that FAMBench does support it.
import torch.nn as nn
import torch
import os
import sys
import numpy as np
from torchbenchmark import REPO_PATH
# This file assumes fbgemm_gpu is installed
import fbgemm_gpu
from fbgemm_gpu import split_table_batched_embeddings_ops
from fbgemm_gpu.split_table_batched_embeddings_ops import (
CacheAlgorithm,
PoolingMode,
OptimType,
SparseType,
SplitTableBatchedEmbeddingBagsCodegen,
IntNBitTableBatchedEmbeddingBagsCodegen,
)
# mixed-dimension trick
from tricks.md_embedding_bag import PrEmbeddingBag
# quantize_fbgemm_gpu_embedding_bag is partially lifted from
# fbgemm_gpu/test/split_embedding_inference_converter.py, def _quantize_split_embs.
# Converts SplitTableBatchedEmbeddingBagsCodegen to IntNBitTableBatchedEmbeddingBagsCodegen
def quantize_fbgemm_gpu_embedding_bag(model, quantize_type, device):
embedding_specs = []
if device.type == "cpu":
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST
else:
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
for (E, D, _, _) in model.embedding_specs:
weights_ty = quantize_type
if D % weights_ty.align_size() != 0:
assert D % 4 == 0
weights_ty = (
SparseType.FP16
) # fall back to FP16 if dimension couldn't be aligned with the required size
embedding_specs.append(("", E, D, weights_ty, emb_location))
q_model = (
split_table_batched_embeddings_ops.IntNBitTableBatchedEmbeddingBagsCodegen(
embedding_specs=embedding_specs,
pooling_mode=model.pooling_mode,
device=device,
)
)
q_model.initialize_weights()
for t, (_, _, _, weight_ty, _) in enumerate(embedding_specs):
if weight_ty == SparseType.FP16:
original_weight = model.split_embedding_weights()[t]
q_weight = original_weight.half()
weights = torch.tensor(q_weight.cpu().numpy().view(np.uint8))
q_model.split_embedding_weights()[t][0].data.copy_(weights)
elif weight_ty == SparseType.INT8:
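# FloatToFused8BitRowwiseQuantized packs each row as uint8 values followed by a
# 4-byte fp32 scale and a 4-byte fp32 bias; the last 8 columns are therefore
# split off below and recast to fp16 scale/shift for the IntN table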
original_weight = model.split_embedding_weights()[t]
q_weight = torch.ops.fbgemm.FloatToFused8BitRowwiseQuantized(
original_weight
)
weights = q_weight[:, :-8]
scale_shift = torch.tensor(
q_weight[:, -8:]
.contiguous()
.cpu()
.numpy()
.view(np.float32)
.astype(np.float16)
.view(np.uint8)
)
q_model.split_embedding_weights()[t][0].data.copy_(weights)
q_model.split_embedding_weights()[t][1].data.copy_(scale_shift)
elif weight_ty == SparseType.INT4 or weight_ty == SparseType.INT2:
original_weight = model.split_embedding_weights()[t]
q_weight = torch.ops.fbgemm.FloatToFusedNBitRowwiseQuantizedSBHalf(
original_weight,
bit_rate=quantize_type.bit_rate(),
)
weights = q_weight[:, :-4]
scale_shift = torch.tensor(
q_weight[:, -4:].contiguous().cpu().numpy().view(np.uint8)
)
q_model.split_embedding_weights()[t][0].data.copy_(weights)
q_model.split_embedding_weights()[t][1].data.copy_(scale_shift)
return q_model
def create_fbgemm_gpu_emb_bag(
device,
emb_l,
m_spa,
quantize_bits,
learning_rate,
codegen_preference=None,
requires_grad=True,
):
if isinstance(emb_l[0], PrEmbeddingBag):
emb_l = [e.embs for e in emb_l]
if isinstance(emb_l[0], nn.EmbeddingBag):
emb_l = [e.weight for e in emb_l]
Es = [e.shape[0] for e in emb_l]
if isinstance(m_spa, list):
Ds = m_spa
else:
Ds = [m_spa for _ in emb_l]
if device.type == "cpu":
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.HOST
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CPU
else:
emb_location = split_table_batched_embeddings_ops.EmbeddingLocation.DEVICE
compute_device = split_table_batched_embeddings_ops.ComputeDevice.CUDA
pooling_mode = PoolingMode.SUM
cache_algorithm = CacheAlgorithm.LRU
sparse_type_dict = {
4: SparseType.INT4,
8: SparseType.INT8,
16: SparseType.FP16,
32: SparseType.FP32,
}
codegen_type_dict = {
4: "IntN",
8: "Split" if codegen_preference != "IntN" else "IntN",
16: "Split" if codegen_preference != "IntN" else "IntN",
32: "Split",
}
codegen_type = codegen_type_dict[quantize_bits]
quantize_type = sparse_type_dict[quantize_bits]
if codegen_type == "IntN":
# Create non-quantized model and then call quantize_fbgemm_gpu_embedding_bag
fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E, # num of rows in the table
D, # num of columns in the table
split_table_batched_embeddings_ops.EmbeddingLocation.HOST,
split_table_batched_embeddings_ops.ComputeDevice.CPU,
)
for (E, D) in zip(Es, Ds)
],
weights_precision=SparseType.FP32,
optimizer=OptimType.EXACT_SGD,
learning_rate=learning_rate,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
).to(device)
if quantize_type == quantize_type.FP16:
weights = fbgemm_gpu_emb_bag.split_embedding_weights()
for i, emb in enumerate(weights):
emb.data.copy_(emb_l[i])
elif quantize_type == quantize_type.INT8:
# copy quantized values upsampled/recasted to FP32
for i in range(len(Es)):
fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_(
torch.ops.fbgemm.Fused8BitRowwiseQuantizedToFloat(emb_l[i])
)
elif quantize_type == quantize_type.INT4:
# copy quantized values upsampled/recasted to FP32
for i in range(len(Es)):
fbgemm_gpu_emb_bag.split_embedding_weights()[i].data.copy_(
torch.ops.fbgemm.FusedNBitRowwiseQuantizedSBHalfToFloat(
emb_l[i],
bit_rate=quantize_type.bit_rate(),
)
)
fbgemm_gpu_emb_bag = quantize_fbgemm_gpu_embedding_bag(
fbgemm_gpu_emb_bag, quantize_type, device
)
else:
fbgemm_gpu_emb_bag = SplitTableBatchedEmbeddingBagsCodegen(
embedding_specs=[
(
E, # num of rows in the table
D, # num of columns in the table
emb_location,
compute_device,
)
for (E, D) in zip(Es, Ds)
],
weights_precision=quantize_type,
optimizer=OptimType.EXACT_SGD,
learning_rate=learning_rate,
cache_algorithm=cache_algorithm,
pooling_mode=pooling_mode,
).to(device)
weights = fbgemm_gpu_emb_bag.split_embedding_weights()
for i, emb in enumerate(weights):
emb.data.copy_(emb_l[i])
if not requires_grad:
torch.no_grad()
torch.set_grad_enabled(False)
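# note: the bare torch.no_grad() call above only constructs a context manager that
# is never entered; it is torch.set_grad_enabled(False) that actually disables
# autograd globally for this inference-only path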
return fbgemm_gpu_emb_bag
# The purpose of this wrapper is to encapsulate the format conversions to/from fbgemm_gpu
# so parallel_apply() executes the format-in -> fbgemm_gpu op -> format-out instructions
# for each respective GPU in parallel.
class fbgemm_gpu_emb_bag_wrapper(nn.Module):
def __init__(
self,
device,
emb_l,
m_spa,
quantize_bits,
learning_rate,
codegen_preference,
requires_grad,
):
super(fbgemm_gpu_emb_bag_wrapper, self).__init__()
self.fbgemm_gpu_emb_bag = create_fbgemm_gpu_emb_bag(
device,
emb_l,
m_spa,
quantize_bits,
learning_rate,
codegen_preference,
requires_grad,
)
self.device = device
self.m_spa = m_spa
# create cumsum array for mixed dimension support
if isinstance(m_spa, list):
self.m_spa_cumsum = np.cumsum([0] + m_spa)
if not requires_grad:
torch.no_grad()
torch.set_grad_enabled(False)
def forward(self, lS_o, lS_i, v_W_l=None):
# convert offsets to fbgemm format
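# e.g. per-table offsets [[0, 2], [0, 1]] with 3 and 4 indices in the two tables
# become the single flattened tensor [0, 2, 3, 4, 7] (cumulative starts plus the
# appended total number of indices)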
lengths_list = list(map(len, lS_i))
indices_lengths_cumsum = np.cumsum([0] + lengths_list)
if isinstance(lS_o, list):
lS_o = torch.stack(lS_o)
lS_o = lS_o.to(self.device)
lS_o += torch.from_numpy(indices_lengths_cumsum[:-1, np.newaxis]).to(
self.device
)
numel = torch.tensor([indices_lengths_cumsum[-1]], dtype=torch.long).to(
self.device
)
lS_o = torch.cat((lS_o.flatten(), numel))
# create per_sample_weights
if v_W_l:
per_sample_weights = torch.cat(
[a.gather(0, b) for a, b in zip(v_W_l, lS_i)]
)
else:
per_sample_weights = None
# convert indices to fbgemm_gpu format
if isinstance(lS_i, torch.Tensor):
lS_i = [lS_i]
lS_i = torch.cat(lS_i, dim=0).to(self.device)
if isinstance(self.fbgemm_gpu_emb_bag, IntNBitTableBatchedEmbeddingBagsCodegen):
lS_o = lS_o.int()
lS_i = lS_i.int()
# gpu embedding bag op
ly = self.fbgemm_gpu_emb_bag(lS_i, lS_o, per_sample_weights)
# convert the results to the next layer's input format.
if isinstance(self.m_spa, list):
# handle mixed dimensions case.
ly = [
ly[:, s:e]
for (s, e) in zip(self.m_spa_cumsum[:-1], self.m_spa_cumsum[1:])
]
else:
# handle case in which all tables share the same column dimension.
cols = self.m_spa
ntables = len(self.fbgemm_gpu_emb_bag.embedding_specs)
ly = ly.reshape(-1, ntables, cols).swapaxes(0, 1)
ly = list(ly)
return ly |
# Original source:
# https://github.com/facebookresearch/FAMBench/blob/a0f12ca4fe8973f4cc65d18b51ce3aa94ceec0ac/benchmarks/dlrm/ootb/dlrm_s_pytorch.py
import sys
import torch
import argparse
def dash_separated_ints(value):
vals = value.split("-")
for val in vals:
try:
int(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of ints" % value
)
return value
def dash_separated_floats(value):
vals = value.split("-")
for val in vals:
try:
float(val)
except ValueError:
raise argparse.ArgumentTypeError(
"%s is not a valid dash separated list of floats" % value
)
return value
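# e.g. dash_separated_ints("13-512-256") and dash_separated_floats("1.0-1.0")
# return the string unchanged, while "13-abc" raises argparse.ArgumentTypeError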
def validate_fambench_args(args):
if args.weighted_pooling is not None:
if args.qr_flag:
sys.exit("ERROR: quotient remainder with weighted pooling is not supported")
if args.md_flag:
sys.exit("ERROR: mixed dimensions with weighted pooling is not supported")
if args.quantize_emb_with_bit in [4, 8]:
if args.qr_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with quotient remainder is not supported"
)
if args.md_flag:
sys.exit(
"ERROR: 4 and 8-bit quantization with mixed dimensions is not supported"
)
if args.quantize_emb_with_bit in [4, 8, 16] and (
not args.use_fbgemm_gpu
):
try:
import fbgemm_gpu
except ImportError:
sys.exit("Failed to import fbgemm_gpu module.\n")
extra_info = ""
if not args.use_fbgemm_gpu:
extra_info += "--use-fbgemm-gpu not set. "
if not args.inference_only:
sys.exit(
"ERROR: Training quantized embeddings requires fbgemm_gpu. "
+ extra_info
)
elif args.use_gpu:
sys.exit(
"ERROR: Quantized embeddings on GPU requires fbgemm_gpu. " + extra_info
)
elif args.quantize_emb_with_bit == 16:
sys.exit(
"ERROR: 16-bit quantized embeddings requires fbgemm_gpu. " + extra_info
)
assert args.quantize_emb_with_bit in [
4,
8,
16,
32,
], "only support 4/8/16/32-bit but got {}".format(args.quantize_emb_with_bit)
if args.use_gpu:
assert torch.cuda.is_available(), "No cuda device is available."
# validations by torchbench (distributed is not supported)
# we don't support fbgemm_gpu
assert not args.use_fbgemm_gpu, "fbgemm_gpu is not supported."
# we don't support torch2trt for mlp
assert not args.use_torch2trt_for_mlp, "torch2trt for mlp is not supported."
# we only support random dataset for now
assert args.data_generation == "random", f"only random data generator is supported right now, but get {args.data_generation}."
def parse_fambench_args(args):
### parse arguments ###
parser = argparse.ArgumentParser(
description="Train Deep Learning Recommendation Model (DLRM)"
)
# model related parameters
parser.add_argument("--arch-sparse-feature-size", type=int, default=2)
parser.add_argument(
"--arch-embedding-size", type=dash_separated_ints, default="4-3-2"
)
parser.add_argument("--arch-project-size", type=int, default=0)
# j will be replaced with the table number
parser.add_argument("--arch-mlp-bot", type=dash_separated_ints, default="4-3-2")
parser.add_argument("--arch-mlp-top", type=dash_separated_ints, default="4-2-1")
parser.add_argument(
"--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot"
)
parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
parser.add_argument(
"--weighted-pooling", type=str, choices=["fixed", "learned", None], default=None
)
# embedding table options
parser.add_argument("--md-flag", action="store_true", default=False)
parser.add_argument("--md-threshold", type=int, default=200)
parser.add_argument("--md-temperature", type=float, default=0.3)
parser.add_argument("--md-round-dims", action="store_true", default=False)
parser.add_argument("--qr-flag", action="store_true", default=False)
parser.add_argument("--qr-threshold", type=int, default=200)
parser.add_argument("--qr-operation", type=str, default="mult")
parser.add_argument("--qr-collisions", type=int, default=4)
# activations and loss
parser.add_argument("--activation-function", type=str, default="relu")
parser.add_argument("--loss-function", type=str, default="mse") # or bce or wbce
parser.add_argument(
"--loss-weights", type=dash_separated_floats, default="1.0-1.0"
) # for wbce
parser.add_argument("--loss-threshold", type=float, default=0.0) # 1.0e-7
parser.add_argument("--round-targets", type=bool, default=False)
# data
parser.add_argument("--data-size", type=int, default=1)
parser.add_argument("--num-batches", type=int, default=0)
parser.add_argument(
"--data-generation", type=str, default="random"
) # synthetic or dataset
parser.add_argument(
"--rand-data-dist", type=str, default="uniform"
) # uniform or gaussian
parser.add_argument("--rand-data-min", type=float, default=0)
parser.add_argument("--rand-data-max", type=float, default=1)
parser.add_argument("--rand-data-mu", type=float, default=-1)
parser.add_argument("--rand-data-sigma", type=float, default=1)
parser.add_argument("--data-trace-file", type=str, default="./input/dist_emb_j.log")
parser.add_argument("--data-set", type=str, default="kaggle") # or terabyte
parser.add_argument("--raw-data-file", type=str, default="")
parser.add_argument("--processed-data-file", type=str, default="")
parser.add_argument("--data-randomize", type=str, default="total") # or day or none
parser.add_argument("--data-trace-enable-padding", type=bool, default=False)
parser.add_argument("--max-ind-range", type=int, default=-1)
parser.add_argument("--data-sub-sample-rate", type=float, default=0.0) # in [0, 1]
parser.add_argument("--num-indices-per-lookup", type=int, default=10)
parser.add_argument("--num-indices-per-lookup-fixed", type=bool, default=False)
parser.add_argument("--num-workers", type=int, default=0)
parser.add_argument("--memory-map", action="store_true", default=False)
# training
parser.add_argument("--mini-batch-size", type=int, default=1)
parser.add_argument("--nepochs", type=int, default=1)
parser.add_argument("--learning-rate", type=float, default=0.01)
parser.add_argument("--print-precision", type=int, default=5)
parser.add_argument("--numpy-rand-seed", type=int, default=123)
parser.add_argument("--sync-dense-params", type=bool, default=True)
parser.add_argument("--optimizer", type=str, default="sgd")
parser.add_argument(
"--dataset-multiprocessing",
action="store_true",
default=False,
help="The Kaggle dataset can be multiprocessed in an environment \
with more than 7 CPU cores and more than 20 GB of memory. \n \
The Terabyte dataset can be multiprocessed in an environment \
with more than 24 CPU cores and at least 1 TB of memory.",
)
# inference
parser.add_argument("--inference-only", action="store_true", default=False)
# quantize
parser.add_argument("--quantize-mlp-with-bit", type=int, default=32)
parser.add_argument("--quantize-emb-with-bit", type=int, default=32)
# onnx
parser.add_argument("--save-onnx", action="store_true", default=False)
# gpu
parser.add_argument("--use-gpu", action="store_true", default=False)
parser.add_argument("--use-fbgemm-gpu", action="store_true", default=False)
parser.add_argument(
"--fbgemm-gpu-codegen-pref",
type=str,
choices=["Split", "IntN"],
default="Split",
)
# torch2trt
parser.add_argument("--use-torch2trt-for-mlp", action="store_true", default=False)
# distributed
parser.add_argument("--local_rank", type=int, default=-1)
parser.add_argument("--dist-backend", type=str, default="")
# debugging and profiling
parser.add_argument("--print-freq", type=int, default=1)
parser.add_argument("--test-freq", type=int, default=-1)
parser.add_argument("--test-mini-batch-size", type=int, default=-1)
parser.add_argument("--test-num-workers", type=int, default=-1)
parser.add_argument("--print-time", action="store_true", default=False)
parser.add_argument("--print-wall-time", action="store_true", default=False)
parser.add_argument("--print-accumulated-time", action="store_true", default=False)
parser.add_argument("--debug-mode", action="store_true", default=False)
parser.add_argument("--enable-profiling", action="store_true", default=False)
parser.add_argument("--plot-compute-graph", action="store_true", default=False)
parser.add_argument("--tensor-board-filename", type=str, default="run_kaggle_pt")
# store/load model
parser.add_argument("--save-model", type=str, default="")
parser.add_argument("--load-model", type=str, default="")
# mlperf logging (disables other output and stops early)
parser.add_argument("--mlperf-logging", action="store_true", default=False)
# stop at target accuracy Kaggle 0.789, Terabyte (sub-sampled=0.875) 0.8107
parser.add_argument("--mlperf-acc-threshold", type=float, default=0.0)
# stop at target AUC Terabyte (no subsampling) 0.8025
parser.add_argument("--mlperf-auc-threshold", type=float, default=0.0)
parser.add_argument("--mlperf-bin-loader", action="store_true", default=False)
parser.add_argument("--mlperf-bin-shuffle", action="store_true", default=False)
# mlperf gradient accumulation iterations
parser.add_argument("--mlperf-grad-accum-iter", type=int, default=1)
# LR policy
parser.add_argument("--lr-num-warmup-steps", type=int, default=0)
parser.add_argument("--lr-decay-start-step", type=int, default=0)
parser.add_argument("--lr-num-decay-steps", type=int, default=0)
parser.add_argument("--precache-ml-data", type=int, nargs='?', default=None, const=sys.maxsize)
parser.add_argument("--warmup-steps", type=int, default=0)
# FB5 Logging
parser.add_argument("--fb5logger", type=str, default=None)
parser.add_argument("--fb5config", type=str, default="tiny")
args = parser.parse_args(args)
return args |
import os
import sys
import torch
import subprocess
from torchbenchmark import REPO_PATH
def update_fambench_submodule():
"Update FAMBench submodule of the benchmark repo"
update_command = ["git", "submodule", "update",
"--init", "--recursive", os.path.join("submodules","FAMBench")]
subprocess.check_call(update_command, cwd=REPO_PATH)
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])
if __name__ == "__main__":
update_fambench_submodule()
pip_install_requirements()
|
import torch
import sys
import numpy as np
# data generation
import dlrm_data_pytorch as dp
# mixed-dimension trick: md_solver is needed when --md-flag is set below
from tricks.md_embedding_bag import md_solver
def prep_data(args):
ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
if args.data_generation == "dataset":
train_data, train_ld, test_data, test_ld = dp.make_criteo_data_and_loaders(args)
table_feature_map = {idx: idx for idx in range(len(train_data.counts))}
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
ln_emb = train_data.counts
# enforce maximum limit on number of vectors per embedding
if args.max_ind_range > 0:
ln_emb = np.array(
list(
map(
lambda x: x if x < args.max_ind_range else args.max_ind_range,
ln_emb,
)
)
)
else:
ln_emb = np.array(ln_emb)
m_den = train_data.m_den
ln_bot[0] = m_den
else:
# input and target at random
ln_emb = np.fromstring(args.arch_embedding_size, dtype=int, sep="-")
m_den = ln_bot[0]
train_data, train_ld, test_data, test_ld = dp.make_random_data_and_loader(
args, ln_emb, m_den, cache_size=args.precache_ml_data
)
nbatches = args.num_batches if args.num_batches > 0 else len(train_ld)
nbatches_test = len(test_ld)
nbatches_in_use = nbatches_test if args.inference_only else nbatches
assert nbatches_in_use > args.warmup_steps, (f"Change --warmup-steps={args.warmup_steps} to be lower than {nbatches_in_use}.")
args.ln_emb = ln_emb.tolist()
### derive model dimensions from the parsed arguments ###
m_spa = args.arch_sparse_feature_size
ln_emb = np.asarray(ln_emb)
num_fea = ln_emb.size + 1 # num sparse + num dense features
if args.use_fbgemm_gpu:
assert m_spa % 4 == 0, (
f"{m_spa} % 4 is not 0, but fbgemm_gpu requires the embedding dim "
+ "(--arch-sparse-feature-size number) to be evenly divisible by 4."
)
m_den_out = ln_bot[ln_bot.size - 1]
if args.arch_interaction_op == "dot":
# approach 1: all
# num_int = num_fea * num_fea + m_den_out
# approach 2: unique
if args.arch_project_size > 0:
num_int = num_fea * args.arch_project_size + m_den_out
else:
if args.arch_interaction_itself:
num_int = (num_fea * (num_fea + 1)) // 2 + m_den_out
else:
num_int = (num_fea * (num_fea - 1)) // 2 + m_den_out
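# e.g. 3 sparse features + 1 dense feature (num_fea = 4) with m_den_out = 2
# gives num_int = (4 * 3) // 2 + 2 = 8 (6 pairwise interactions + 2 dense dims)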
elif args.arch_interaction_op == "cat":
num_int = num_fea * m_den_out
else:
sys.exit(
"ERROR: --arch-interaction-op="
+ args.arch_interaction_op
+ " is not supported"
)
arch_mlp_top_adjusted = str(num_int) + "-" + args.arch_mlp_top
ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
# sanity check: feature sizes and mlp dimensions must match
if m_den != ln_bot[0]:
sys.exit(
"ERROR: arch-dense-feature-size "
+ str(m_den)
+ " does not match first dim of bottom mlp "
+ str(ln_bot[0])
)
if args.qr_flag:
if args.qr_operation == "concat" and 2 * m_spa != m_den_out:
sys.exit(
"ERROR: 2 arch-sparse-feature-size "
+ str(2 * m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
+ " (note that the last dim of bottom mlp must be 2x the embedding dim)"
)
if args.qr_operation != "concat" and m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
else:
if m_spa != m_den_out:
sys.exit(
"ERROR: arch-sparse-feature-size "
+ str(m_spa)
+ " does not match last dim of bottom mlp "
+ str(m_den_out)
)
if num_int != ln_top[0]:
sys.exit(
"ERROR: # of feature interactions "
+ str(num_int)
+ " does not match first dimension of top mlp "
+ str(ln_top[0])
)
# assign mixed dimensions if applicable
if args.md_flag:
m_spa = md_solver(
torch.tensor(ln_emb),
args.md_temperature, # alpha
d0=m_spa,
round_dim=args.md_round_dims,
).tolist()
if args.use_fbgemm_gpu:
for m in m_spa:
assert m % 4 == 0, (
"Found an incompatible embedding dim in m_spa. "
+ f"{m} % 4 is not 0, but fbgemm_gpu requires the "
+ "embedding dim to be evenly divisible by 4."
)
return ln_bot, ln_emb, ln_top, m_spa, train_ld, test_ld |
"""
HuggingFace Stable Diffusion model.
It requires users to specify "HUGGINGFACE_AUTH_TOKEN" in environment variable
to authorize login and agree HuggingFace terms and conditions.
"""
from torchbenchmark.tasks import COMPUTER_VISION
from torchbenchmark.util.model import BenchmarkModel
import torch
from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler
class Model(BenchmarkModel):
task = COMPUTER_VISION.GENERATION
DEFAULT_TRAIN_BSIZE = 1
DEFAULT_EVAL_BSIZE = 1
ALLOW_CUSTOMIZE_BSIZE = False
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit,
batch_size=batch_size, extra_args=extra_args)
assert self.dargs.precision == "fp16", f"Stable Diffusion model only supports fp16 precision."
model_id = "stabilityai/stable-diffusion-2"
scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler")
self.pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16)
self.pipe.to(self.device)
self.example_inputs = "a photo of an astronaut riding a horse on mars"
def enable_fp16_half(self):
pass
def get_module(self):
return self.model, self.example_inputs
def train(self):
raise NotImplementedError("Train test is not implemented for the stable diffusion model.")
def eval(self):
image = self.pipe(self.example_inputs)
return (image, )
|
from torchbenchmark.util.framework.diffusers import install_diffusers
from diffusers import StableDiffusionPipeline
import torch
MODEL_NAME = "stabilityai/stable-diffusion-2"
def load_model_checkpoint():
StableDiffusionPipeline.from_pretrained(MODEL_NAME, torch_dtype=torch.float16, safety_checker=None)
if __name__ == '__main__':
install_diffusers()
load_model_checkpoint()
|
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(model_name="gcn", test=test, device=device, jit=jit,
batch_size=batch_size, extra_args=extra_args)
if device == 'cuda':
# TODO - Add CUDA support
raise NotImplementedError("GCN doesn't support CUDA")
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html'])
if __name__ == '__main__':
pip_install_requirements()
|
from torchbenchmark.util.framework.gnn.model_factory import GNNModel
from torchbenchmark.tasks import GNN
class Model(GNNModel):
task = GNN.CLASSIFICATION
DEFAULT_TRAIN_BSIZE = 64
DEFAULT_EVAL_BSIZE = 64
def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
super().__init__(model_name="sage", test=test, device=device,
batch_size=batch_size, extra_args=extra_args)
if device == 'cuda':
# TODO - Add CUDA support
raise NotImplementedError("Sage doesn't support CUDA")
|
import subprocess
import sys
def pip_install_requirements():
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt', '-f', 'https://data.pyg.org/whl/torch-2.0.0+cpu.html'])
if __name__ == '__main__':
pip_install_requirements()
|
import torch
from contextlib import contextmanager, ExitStack
from typing import ContextManager, List, Optional
class PostInitProcessor(type):
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.__post__init__()
return obj
@contextmanager
def nested(*contexts):
"""
Chain and apply a list of contexts
"""
with ExitStack() as stack:
for ctx in contexts:
stack.enter_context(ctx())
yield contexts
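# Usage sketch (hypothetical contexts): each callable is entered in order and
# exited in reverse when the with-block closes, e.g.
#   with nested(torch.no_grad, lambda: torch.cpu.amp.autocast(dtype=torch.bfloat16)):
#       ...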
class E2EBenchmarkModel(metaclass=PostInitProcessor):
"""
A base class for adding models for all e2e models.
"""
def __init__(self, test: str, batch_size: Optional[int]=None, extra_args: List[str]=[]):
self.test = test
assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but get {self.test}. Please submit a bug report."
self.batch_size = batch_size
if not self.batch_size:
self.batch_size = self.DEFAULT_TRAIN_BSIZE if test == "train" else self.DEFAULT_EVAL_BSIZE
# If the model doesn't implement test or eval test
# its DEFAULT_TRAIN_BSIZE or DEFAULT_EVAL_BSIZE will still be None
if not self.batch_size:
raise NotImplementedError(f"Test {test} is not implemented.")
self.extra_args = extra_args
if "--torchdynamo" in self.extra_args:
self.dynamo = True
from torchbenchmark.util.backends.torchdynamo import parse_torchdynamo_args
self.opt_args, self.extra_args = parse_torchdynamo_args(self, self.extra_args)
else:
self.dynamo = False
# Run the post processing for model acceleration
def __post__init__(self):
# sanity checks of the options
assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but provided {self.test}."
# initialize run contexts
self.run_contexts = []
if self.dynamo:
from torchbenchmark.util.backends.torchdynamo import apply_torchdynamo_args
apply_torchdynamo_args(self, self.opt_args, precision=self.tb_args.fp16)
def add_context(self, context_fn):
ctx = context_fn()
assert isinstance(ctx, ContextManager), f"Expected adding a ContextManager, get {type(ctx)}. Please report a bug."
self.run_contexts.append(context_fn)
def get_optimizer(self):
raise NotImplementedError("Every E2EModel should implement a way to access the optimizer used.")
def set_optimizer(self, optimizer) -> None:
raise NotImplementedError("Every E2EModel should implement a way to swap out the optimizer(s).")
def next_batch(self):
raise NotImplementedError("Every E2EModel should implement a way to retrieve the next batch.")
def run_forward(self, input):
raise NotImplementedError("Every E2EModel should implement a modular forward step.")
def run_backward(self, loss):
raise NotImplementedError("Every E2EModel should implement a modular backward step.")
def run_optimizer_step(self):
raise NotImplementedError("Every E2EModel should implement a modular optimizer step.")
|
import argparse
import enum
from typing import List, Optional, Tuple
from torchbenchmark.util.backends import list_backends, BACKENDS
from torchbenchmark.util.backends.flops import enable_fvcore_flops
from torchbenchmark.util.env_check import is_torchvision_model, is_staged_train_test
TEST_STAGE = enum.Enum('TEST_STAGE', ['FORWARD', 'BACKWARD', 'OPTIMIZER', 'ALL'])
AVAILABLE_PRECISIONS = ["fp32", "tf32", "fp16", "amp", "fx_int8", "bf16", "amp_fp16", "amp_bf16"]
QUANT_ENGINES = ["x86", "fbgemm", "qnnpack", "onednn"]
def check_correctness_p(
model: 'torchbenchmark.util.model.BenchmarkModel',
opt_args: argparse.Namespace,
dargs: argparse.Namespace,
) -> bool:
"If correctness check should be enabled."
# if the model doesn't support correctness check (like detectron2), skip it
if hasattr(model, 'SKIP_CORRECTNESS_CHECK') and model.SKIP_CORRECTNESS_CHECK:
return False
if dargs.skip_correctness:
return False
# always check correctness with torchdynamo
if model.dynamo:
return True
opt_args_dict = vars(opt_args)
for k in opt_args_dict:
if opt_args_dict[k]:
return True
return False
def add_bool_arg(parser: argparse.ArgumentParser, name: str, default_value: bool=True):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('--' + name, dest=name, action='store_true')
group.add_argument('--no-' + name, dest=name, action='store_false')
parser.set_defaults(**{name: default_value})
def check_precision(model: 'torchbenchmark.util.model.BenchmarkModel', precision: str) -> bool:
if precision == "fp16":
return model.device == 'cuda' and hasattr(model, "enable_fp16_half")
if precision == "tf32":
return model.device == "cuda"
if precision == "amp":
return True
if precision == "fx_int8":
return model.device == 'cpu' and hasattr(model, "enable_fx_int8")
if precision == "bf16":
return model.device == 'cpu' and hasattr(model, "enable_bf16")
if precision == "amp_fp16":
if model.test == 'eval' and model.device == 'cuda':
return True
if model.test == 'train' and model.device == 'cuda':
return hasattr(model, 'enable_amp') or is_staged_train_test(model)
if precision == "amp_bf16":
return model.device == 'cpu'
assert precision == "fp32", f"Expected precision to be one of {AVAILABLE_PRECISIONS}, but get {precision}"
return True
def check_memory_layout(model: 'torchbenchmark.util.model.BenchmarkModel', channels_last: bool) -> bool:
if channels_last:
return hasattr(model, 'enable_channels_last')
return True
def check_distributed_trainer(model: 'torchbenchmark.util.model.BenchmarkModel', distributed_trainer: Optional[str]) -> bool:
if not model.test == "train" and distributed_trainer:
return False
return True
def get_precision_default(model: 'torchbenchmark.util.model.BenchmarkModel') -> str:
if hasattr(model, "DEFAULT_EVAL_CUDA_PRECISION") and model.test == 'eval' and model.device == 'cuda':
return model.DEFAULT_EVAL_CUDA_PRECISION
if hasattr(model, "DEFAULT_TRAIN_CUDA_PRECISION") and model.test == 'train' and model.device == 'cuda':
return model.DEFAULT_TRAIN_CUDA_PRECISION
return "fp32"
def parse_decoration_args(model: 'torchbenchmark.util.model.BenchmarkModel', extra_args: List[str]) -> Tuple[argparse.Namespace, List[str]]:
parser = argparse.ArgumentParser()
parser.add_argument(
"--distributed",
choices=["ddp", "ddp_no_static_graph", "fsdp"],
default=None,
help="Enable distributed trainer",
)
parser.add_argument(
"--distributed_wrap_fn",
type=str,
default=None,
help="Path to function that will apply distributed wrapping fn(model, dargs.distributed)",
)
parser.add_argument("--precision", choices=AVAILABLE_PRECISIONS, default=get_precision_default(model), help=f"choose precisions from {AVAILABLE_PRECISIONS}")
parser.add_argument("--channels-last", action='store_true', help="enable channels-last memory layout")
parser.add_argument("--skip_correctness", action='store_true', help="Skip correctness checks")
parser.add_argument("--quant-engine", choices=QUANT_ENGINES, default='x86', help=f"choose quantization engine for fx_int8 precision from {QUANT_ENGINES}")
dargs, opt_args = parser.parse_known_args(extra_args)
if not check_precision(model, dargs.precision):
raise NotImplementedError(f"precision value: {dargs.precision}, "
"fp16 is only supported if the model implements the `enable_fp16_half()` callback function."
"amp is only supported if cuda+eval, or if `enable_amp` implemented,"
"or if model uses staged train interfaces (forward, backward, optimizer).")
if not check_memory_layout(model, dargs.channels_last):
raise NotImplementedError(f"Specified channels_last: {dargs.channels_last} ,"
f" but the model doesn't implement the enable_channels_last() interface.")
if not check_distributed_trainer(model, dargs.distributed):
raise NotImplementedError(f"We only support distributed trainer {dargs.distributed} for train tests, "
f"but get test: {model.test}")
return (dargs, opt_args)
def apply_decoration_args(model: 'torchbenchmark.util.model.BenchmarkModel', dargs: argparse.Namespace):
if dargs.channels_last:
model.enable_channels_last()
if dargs.precision == "fp16":
model.enable_fp16_half()
elif dargs.precision == "tf32":
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
elif dargs.precision == "amp":
# model handles amp itself if it has 'enable_amp' callback function (e.g. pytorch_unet)
if hasattr(model, "enable_amp"):
model.enable_amp()
elif dargs.precision == "fx_int8":
assert model.device == "cpu" and model.test == "eval", f"fx_int8 only work for eval mode on cpu device."
model.enable_fx_int8(dargs.quant_engine)
elif dargs.precision == "bf16":
assert model.device == "cpu", f"bf16 only work on cpu device."
model.enable_bf16()
elif dargs.precision == "amp_fp16":
assert model.device == "cuda", f"{model.device} has no fp16 autocast."
if model.test == "eval":
import torch
model.add_context(lambda: torch.cuda.amp.autocast(dtype=torch.float16))
elif model.test == "train":
# the model must implement staged train test
assert is_staged_train_test(model), f"Expected model implements staged train test (forward, backward, optimizer)."
import torch
model.add_context(lambda: torch.cuda.amp.autocast(dtype=torch.float16), stage=TEST_STAGE.FORWARD)
elif dargs.precision == "amp_bf16":
import torch
model.add_context(lambda: torch.cpu.amp.autocast(dtype=torch.bfloat16))
elif not dargs.precision == "fp32":
assert False, f"Get an invalid precision option: {dargs.precision}. Please report a bug."
# Dispatch arguments based on model type
def parse_opt_args(model: 'torchbenchmark.util.model.BenchmarkModel', opt_args: List[str]) -> argparse.Namespace:
parser = argparse.ArgumentParser()
parser.add_argument("--backend", choices=list_backends(), help="enable backends")
parser.add_argument("--flops", choices=["fvcore", "dcgm"], help="Return the flops result")
parser.add_argument("--use_cosine_similarity", action='store_true', help="use cosine similarity for correctness check")
args, extra_args = parser.parse_known_args(opt_args)
if model.jit:
args.backend = "torchscript"
if args.backend:
backend = BACKENDS[args.backend]
model._enable_backend, extra_args = backend(model, backend_args=extra_args)
return args, extra_args
def apply_opt_args(model: 'torchbenchmark.util.model.BenchmarkModel', args: argparse.Namespace):
if args.flops == "fvcore":
enable_fvcore_flops(model)
if args.backend:
model._enable_backend()
|
import argparse
import re
import torch
from enum import Enum
class OpType(Enum):
POINTWISE = 1
NORMS = 2
REDUCTIONS = 3
VIEWS_EXPANDS = 4
REMOVE = 5
IGNORE = 6
op_types = {
"aten::rsqrt": OpType.POINTWISE,
"aten::abs": OpType.POINTWISE,
"aten::eq": OpType.POINTWISE,
"aten::gelu": OpType.POINTWISE,
"aten::remainder": OpType.POINTWISE,
"aten::_softmax": OpType.POINTWISE,
"aten::clamp": OpType.POINTWISE,
"aten::gt": OpType.POINTWISE,
"aten::mul": OpType.POINTWISE,
"aten::add": OpType.POINTWISE,
"aten::sum": OpType.REDUCTIONS,
"aten::ne": OpType.POINTWISE,
"aten::silu": OpType.POINTWISE,
"aten::pow": OpType.POINTWISE,
"aten::ge": OpType.POINTWISE,
"aten::native_batch_norm": OpType.NORMS,
"aten::sub": OpType.POINTWISE,
"aten::mean": OpType.REDUCTIONS,
"aten::sqrt": OpType.POINTWISE,
"aten::reciprocal": OpType.POINTWISE,
"aten::reshape": OpType.VIEWS_EXPANDS,
"aten::relu": OpType.POINTWISE,
"prim::Constant": OpType.REMOVE,
"prim::TupleConstruct": OpType.IGNORE,
"aten::div": OpType.POINTWISE,
"aten::tanh": OpType.POINTWISE,
"aten::neg": OpType.POINTWISE,
"aten::log": OpType.POINTWISE,
"aten::unsqueeze": OpType.VIEWS_EXPANDS,
"aten::native_layer_norm": OpType.NORMS,
"aten::exp": OpType.POINTWISE,
"aten::sigmoid": OpType.POINTWISE,
}
def type_to_placeholder(op_type: OpType) -> str:
mapping = {
OpType.POINTWISE: "aten::pointwise_placeholder",
OpType.NORMS: "aten::norm_placeholder",
OpType.REDUCTIONS: "aten::reduction_placeholder",
OpType.VIEWS_EXPANDS: "aten::view_expand_placeholder",
OpType.IGNORE: "aten::ignore_placeholder",
OpType.REMOVE: "aten::remove_placeholder",
}
return mapping[op_type]
# get the op type. op_name is expected to be the qualified name.
def get_type(op_name: str) -> OpType:
if op_name in op_types:
return op_types[op_name]
for optype in OpType:
if type_to_placeholder(optype) == op_name:
return optype
raise NotImplementedError(f"No OpType known for op '{op_name}'")
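# Quick sanity sketch (illustrative helper, not used by the tooling below): known aten ops map to a
# coarse category, and each category's placeholder name maps back to the same category via get_type.
def _example_op_classification():
    assert get_type("aten::mul") is OpType.POINTWISE
    assert get_type(type_to_placeholder(OpType.REDUCTIONS)) is OpType.REDUCTIONS
    return True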
def simplify_tensor_type(jit_type):
if isinstance(jit_type, torch._C.TensorType):
return torch._C.TensorType.get()
return jit_type
def remove_inputs(graph):
inputs_size = 0
for n in graph.inputs():
inputs_size += 1
for use in n.uses():
use.user.removeInput(use.offset)
for i in reversed(range(inputs_size)):
graph.eraseInput(i)
return graph
# Remove vertices like x or y below, where x or y are pointwise.
# (pointwise) --> (x) --> (...)
# (...) --> (y) --> (pointwise)
# if remove_all is true, then it doesn't care whether pointwise ops precede/succeed x or y.
def remove_duplicate_pointwise(graph, remove_all=False):
to_remove = []
old_str = str(graph)
def bypass_node(n):
to_remove.append(n)
n.output().replaceAllUsesWith(n.input())
for n in graph.nodes():
if get_type(n.kind()) != OpType.POINTWISE:
continue
if n.inputsSize() != 1 or n.outputsSize() != 1:
continue
if get_type(n.input().node().kind()) == OpType.POINTWISE or remove_all:
bypass_node(n)
continue
uses = [r.user for r in n.output().uses() if r.user.kind() != "prim::Return"]
if len(uses) >= 1 and (all(get_type(r.kind()) == OpType.POINTWISE for r in uses) or remove_all):
bypass_node(n)
continue
for n in reversed(to_remove):
n.destroy()
return graph
def compress_graph(graph):
old_nodes = []
erased_nodes = set()
for n in graph.nodes():
simple_type = get_type(n.kind())
if simple_type == OpType.IGNORE:
continue
old_nodes.append(n)
if simple_type == OpType.REMOVE:
erased_nodes.add(n)
continue
new_node = graph.create(type_to_placeholder(simple_type), n.outputsSize())
new_node.insertBefore(n)
for inp in n.inputs():
if inp.node() not in erased_nodes:
new_node.addInput(inp)
for old_out, new_out in zip(n.outputs(), new_node.outputs()):
new_out.setType(simplify_tensor_type(old_out.type()))
old_out.replaceAllUsesWith(new_out)
for n in reversed(old_nodes):
n.destroy()
graph = remove_inputs(graph)
graph = remove_duplicate_pointwise(graph)
return torch._C._jit_pass_canonicalize(graph, False)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="""
Collection of helper functions for eliminating duplicate subgraphs
Usage:
~~~
import classify_graphs
# some ir string called "ir"
graph = torch._C.parse_ir(ir)
# "hashes" the graph based on categories of ops (pointwise, reductions, views/expands, norms)
compressed_graph = classify_graphs.compress_graph(graph)
# do something with the compressed graph
~~~
Alternatively, call it and it will return one graph per hashed category
Usage:
python3 log_extract.py log.txt --output > log_result.py
python3 classify_graphs.py log_result.py > filtered_logs.py
""", formatter_class = argparse.RawDescriptionHelpFormatter)
parser.add_argument("filename", type=str, help="output from log_extract.py --help")
args = parser.parse_args()
with open(args.filename) as f:
arr = eval(f.read())
# see 73984
for i in range(len(arr)):
if len(re.findall(r'value=annotate\(List\[int', arr[i])) >= 1:
arr[i] = arr[0]
classified = {}
for ir in arr:
graph = torch._C.parse_ir(ir)
graph = compress_graph(graph)
graph_class = str(graph)
if graph_class not in classified:
classified[graph_class] = []
classified[graph_class].append(ir)
final_selection = []
for cl, graphs in classified.items():
# choose the longest graph of this type
        s = sorted(graphs, key=lambda x: -len(str(x)))
        final_selection.append(str(s[0]))
print('[' + ', '.join(f'"""{x}"""' for x in final_selection) + ']')
|
"""Utilities for tuning the machine for better benchmark stability.
Written for Amazon Linux with Intel CPUs and Nvidia GPUs, although many utilities will overlap.
"""
import argparse
import cpuinfo
import distro
import enum
import os
import platform
import psutil
import subprocess
import re
import sys
import typing
from pathlib import Path
def read_sys_file(sysfile: Path):
with open(sysfile, 'r') as f:
return f.read()
def write_sys_file(sysfile: Path, content: str):
print(f"Write {content} to {sysfile}")
with open(sysfile, 'w') as f:
f.write(content)
def check_intel_no_turbo_state(turbo_file='/sys/devices/system/cpu/intel_pstate/no_turbo'):
return int(read_sys_file(turbo_file))
def set_intel_no_turbo_state(state: int, turbo_file='/sys/devices/system/cpu/intel_pstate/no_turbo'):
assert state in [0, 1]
write_sys_file(turbo_file, str(state))
def parse_lscpu_cpu_core_list():
coreinfo = subprocess.check_output("lscpu --all --parse=CPU,CORE,ONLINE", shell=True).strip().decode().split('\n')
matched_cpus = 0
cpu_core = []
for line in coreinfo[2:]:
if line[0] == '#':
continue
cpu, core, online = line.split(',')
cpu = int(cpu)
online = online == "Y"
core = int(core) if online else None
if cpu == core:
matched_cpus += 1
cpu_core.append((cpu, core, online))
assert matched_cpus > 0, "Failed to parse lscpu output"
return cpu_core
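# Illustrative sketch (hypothetical lscpu rows, not used elsewhere): a CSV row such as "4,4,Y"
# from `lscpu --all --parse=CPU,CORE,ONLINE` is parsed above into (4, 4, True), while an offline
# sibling thread like "28,,N" becomes (28, None, False).
def _example_parse_lscpu_row():
    line = "4,4,Y"
    cpu, core, online = line.split(',')
    cpu = int(cpu)
    online = online == "Y"
    core = int(core) if online else None
    return (cpu, core, online)  # -> (4, 4, True)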
def hyper_threading_enabled():
for cpu, core, online in parse_lscpu_cpu_core_list():
if cpu != core and online:
return True
return False
def set_hyper_threading(enabled=False):
for cpu, core, online in parse_lscpu_cpu_core_list():
if cpu != core:
if not online and not enabled:
continue
if online and enabled:
continue
virtual_cpu_online_file = f"/sys/devices/system/cpu/cpu{cpu}/online"
value = "1" if enabled else "0"
write_sys_file(virtual_cpu_online_file, value)
def get_intel_max_cstate():
kernel_args = read_sys_file('/proc/cmdline').split()
for arg in kernel_args:
if arg.find('intel_idle.max_cstate') == 0:
return int(arg.split('=')[1])
return None
def get_isolated_cpus():
"""
Returns a list of cpus marked as isolated from the kernel scheduler for regular tasks.
Only tasks scheduled via taskset command can use these cpus, e.g. benchmarking workload.
"""
kernel_args = read_sys_file('/proc/cmdline').split()
isolcpus = set()
for arg in kernel_args:
if arg.find('isolcpus') == 0:
arg = arg.split('=')[1]
chunks = arg.split(',')
for chunk in chunks:
if '-' in chunk:
start, end = chunk.split('-')
for cpu in range(int(start), int(end) + 1):
isolcpus.add(cpu)
else:
isolcpus.add(int(chunk))
return list(isolcpus)
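# Illustrative sketch (hypothetical kernel cmdline value, not used elsewhere): an argument such as
# "isolcpus=4-7,10" is expanded by the logic above into the cpu ids [4, 5, 6, 7, 10].
def _example_parse_isolcpus_arg():
    arg = "isolcpus=4-7,10"
    isolcpus = set()
    for chunk in arg.split('=')[1].split(','):
        if '-' in chunk:
            start, end = chunk.split('-')
            isolcpus.update(range(int(start), int(end) + 1))
        else:
            isolcpus.add(int(chunk))
    return sorted(isolcpus)  # -> [4, 5, 6, 7, 10]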
def get_process_cpu_affinity():
p = psutil.Process()
return p.cpu_affinity()
def nvidia_smi_query(query: str, device_ids: typing.List[int] = None):
if device_ids:
device_ids = [str(id) for id in device_ids]
device_ids = ",".join(device_ids)
id_selector = f"-i {device_ids}" if device_ids else ""
values = subprocess.check_output(f'nvidia-smi --query-gpu="{query}" {id_selector} --format=csv,noheader,nounits',
shell=True).strip().decode().split("\n")
return values
def has_nvidia_smi():
try:
subprocess.check_output('nvidia-smi', shell=True)
return True
except:
return False
def get_nvidia_gpu_clocks(device_ids: typing.List[int] = None):
clocks = nvidia_smi_query("clocks.applications.graphics", device_ids)
for clock in range(len(clocks)):
clocks[clock] = 0 if clocks[clock] == '[N/A]' else clocks[clock]
return [int(clock) for clock in clocks]
def get_nvidia_gpu_temps(device_ids: typing.List[int] = None):
temps = {}
raw_temps = nvidia_smi_query("temperature.gpu,temperature.memory", device_ids)
temps['gpu'] = [temp.split(',')[0] for temp in raw_temps]
temps['memory'] = [temp.split(',')[1] for temp in raw_temps]
return temps
def set_nvidia_graphics_clock(device_id=0, clock=900):
    # Pin the applications graphics clock on the given device; the memory clock is fixed at 5001 MHz.
    if has_nvidia_smi():
        return subprocess.check_call(['nvidia-smi', '-i', str(device_id), '-ac', f'5001,{clock}'])
return False
def get_nvidia_throttle_reasons(device_ids: typing.List[int] = None):
""" See 'nvidia-smi --help-query-gpu for explanation of throttle reasons
"""
queries = ['gpu_idle', 'applications_clocks_setting', 'sw_power_cap',
'hw_slowdown', 'hw_thermal_slowdown',
'hw_power_brake_slowdown', 'sw_thermal_slowdown', 'sync_boost']
query_str = ','.join(["clocks_throttle_reasons." + q for q in queries])
raw = nvidia_smi_query(query_str, device_ids)
throttle_reasons = []
for line in raw:
gpu_reasons = [q for q, v in zip(queries, line.split(',')) if 'Active' == v]
throttle_reasons.append(gpu_reasons)
return throttle_reasons
MACHINE = enum.Enum('MACHINE', ['AMAZON_LINUX', 'UBUNTU', 'UNKNOWN'])
def get_machine_type():
# It's tricky to write platform setup code that works on different OS/configs.
    # Initially we just aim to identify a few known environments and fall back to a
    # no-op for anything else. Expand functionality over time as needed.
    if platform.system() == 'Linux':
        if distro.name() == "Amazon Linux":
            return MACHINE.AMAZON_LINUX
        if distro.name() == 'Ubuntu':
            return MACHINE.UBUNTU
    return MACHINE.UNKNOWN
def get_cpu_temp():
temps = {}
if not MACHINE.UNKNOWN == get_machine_type():
thermal_path = Path('/sys/class/thermal/')
for zone in filter(lambda x: "thermal_zone" in x, os.listdir(thermal_path)):
temps[zone] = int(read_sys_file(thermal_path / zone / "temp")) / 1000.
return temps
def is_using_isolated_cpus():
isolated_cpus = get_isolated_cpus()
using_cpus = get_process_cpu_affinity()
omp_using_cpus = get_omp_affinity()
lscpu = parse_lscpu_cpu_core_list()
assert len(lscpu) > 0, "unable to parse current CPUs"
for cpu, core, active in lscpu:
# check that all used cpus are isolated ones (more critical)
if (cpu in using_cpus or cpu in omp_using_cpus) and cpu not in isolated_cpus:
return False
# check all isolated cpus are used (less critical)
elif active and cpu in isolated_cpus:
if cpu not in using_cpus:
# currently after importing torch, process cpu affinity mask changes from e.g. 4-47 to 4.
# since we can't assert that all intended cores are being used, we can at least assert that
# the first core in the range of isolated cores is used.
# see https://github.com/pytorch/pytorch/issues/49971
# return False
pass
if cpu not in omp_using_cpus:
return False
return True
def get_omp_affinity():
if 'GOMP_CPU_AFFINITY' not in os.environ:
return []
raw = os.environ['GOMP_CPU_AFFINITY']
affinity = []
def parse_block(block):
if '-' in block:
start, end = block.split('-')
return list(range(int(start), int(end) + 1))
return [int(block)]
if ' ' in raw:
for block in raw.split(' '):
affinity.extend(parse_block(block))
else:
affinity.extend(parse_block(raw))
return affinity
def get_pstate_frequency():
CPU_FREQ_BASE_DIR = '/sys/devices/system/cpu'
CPU_FREQ_FILES = ["scaling_min_freq", "scaling_max_freq", "scaling_cur_freq"]
cpu_dirs = ["cpu" + str(cpu[0]) for cpu in parse_lscpu_cpu_core_list() if cpu[2]]
output = dict()
for cpu_dir in cpu_dirs:
full_path = os.path.join(CPU_FREQ_BASE_DIR, cpu_dir, "cpufreq")
freq_paths = [os.path.join(full_path, x) for x in CPU_FREQ_FILES]
all_exist = True
for path in freq_paths:
all_exist = all_exist and os.path.exists(path)
if all_exist:
output[cpu_dir] = dict()
for i, path in enumerate(freq_paths):
output[cpu_dir][CPU_FREQ_FILES[i]] = int(read_sys_file(path)) / 1000
return output
def set_pstate_frequency(min_freq = 2500, max_freq = 2500):
CPU_FREQ_BASE_DIR = '/sys/devices/system/cpu'
CPU_FREQ_FILES = ["scaling_min_freq", "scaling_max_freq", "scaling_cur_freq"]
cpu_dirs = ["cpu" + str(cpu[0]) for cpu in parse_lscpu_cpu_core_list() if cpu[2]]
for cpu_dir in cpu_dirs:
full_path = os.path.join(CPU_FREQ_BASE_DIR, cpu_dir, "cpufreq")
freq_paths = [os.path.join(full_path, x) for x in CPU_FREQ_FILES]
all_exist = True
for path in freq_paths:
all_exist = all_exist and os.path.exists(path)
if all_exist:
write_sys_file(freq_paths[0], str(min_freq * 1000))
write_sys_file(freq_paths[1], str(max_freq * 1000))
def check_pstate_frequency_pin(pin_freq = 2500):
FREQ_THRESHOLD = 15 # Allow 15 MHz difference maximum
all_freq = get_pstate_frequency()
for cpuid in all_freq:
for attr in all_freq[cpuid]:
freq = all_freq[cpuid][attr]
difference = abs(freq - pin_freq)
if difference > FREQ_THRESHOLD:
print(f"Specify frequency {pin_freq} Mhz, find setting {cpuid} {attr}: {freq}.")
return False
return True
def get_machine_config():
config = {}
machine_type = get_machine_type()
config['machine_type'] = machine_type
config['cpu_brand'] = cpuinfo.get_cpu_info()['brand_raw']
if not MACHINE.UNKNOWN == machine_type:
config['linux_distribution'] = distro.linux_distribution()
config['intel_turbo_disabled'] = check_intel_no_turbo_state()
config['intel_hyper_threading_enabled'] = hyper_threading_enabled()
config['intel_max_cstate'] = get_intel_max_cstate()
config['isolated_cpus'] = get_isolated_cpus()
config['process_cpu_affinity'] = get_process_cpu_affinity()
config['is_using_isolated_cpus'] = is_using_isolated_cpus()
config['cpu_pstate_frequency'] = get_pstate_frequency()
return config
def check_machine_configured(check_process_affinity=True):
check_environment()
if not MACHINE.UNKNOWN == get_machine_type():
assert 1 == check_intel_no_turbo_state(), "Turbo Boost is not disabled"
assert False == hyper_threading_enabled(), "HyperThreading is not disabled"
assert 1 == get_intel_max_cstate(), "Intel max C-State isn't set to 1, which avoids power-saving modes."
assert len(get_isolated_cpus()) > 0, "No cpus are isolated for benchmarking with isolcpus"
        assert 900 == get_nvidia_gpu_clocks()[0], "Nvidia GPU clock isn't limited to 900 MHz; limiting it increases consistency by reducing throttling"
assert is_using_isolated_cpus(), "taskset or GOMP_CPU_AFFINITY not specified or not matching kernel isolated cpus"
assert check_pstate_frequency_pin(), "Must pin CPU frequency to a fixed number in MHz"
else:
raise RuntimeError(f"Unsupported machine type {get_machine_type()}")
def get_machine_state():
state = {}
machine_type = get_machine_type()
state['machine_type'] = machine_type
if not MACHINE.UNKNOWN == machine_type:
state['cpu_temps'] = get_cpu_temp()
if has_nvidia_smi():
state['nvidia_gpu_temps'] = get_nvidia_gpu_temps()
state['nvidia_gpu_clocks'] = get_nvidia_gpu_clocks()
state['nvidia_gpu_throttle_reasons'] = get_nvidia_throttle_reasons()
state['process_cpu_affinity'] = get_process_cpu_affinity()
return state
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--enable_ht", action="store_true", help="Enable HyperThreading")
parser.add_argument("--configure", action="store_true", help="Apply benchmark tuning to this machine")
parser.add_argument("--no_verify", action="store_true", help="Skip verifying machine is configured for benchmarking")
args = parser.parse_args()
machine_type = get_machine_type()
if MACHINE.UNKNOWN == machine_type:
raise RuntimeError(f"Unsupported machine type {machine_type}")
if args.enable_ht:
set_hyper_threading(True)
if args.configure:
set_intel_no_turbo_state(1)
set_hyper_threading(False)
set_nvidia_graphics_clock()
set_pstate_frequency()
if not args.no_verify:
assert 1 == check_intel_no_turbo_state(), "Turbo Boost is not disabled"
assert False == hyper_threading_enabled(), "HyperThreading is not disabled"
assert 1 == get_intel_max_cstate(), "Intel max C-State isn't set to 1, which avoids power-saving modes."
assert len(get_isolated_cpus()) > 0, "No cpus are isolated for benchmarking with isolcpus"
            assert 900 == get_nvidia_gpu_clocks()[0], "Nvidia GPU clock isn't limited to 900 MHz; limiting it increases consistency by reducing throttling"
assert check_pstate_frequency_pin(), "CPU frequency is not correctly pinned, which is required to minimize noise."
        # It doesn't make much sense to require the user to run this configure script on the isolated cpu cores;
        # that check matters more at benchmark runtime and is performed by conftest.py.
#assert is_using_isolated_cpus(), "Not using isolated CPUs for this process"
def check_environment():
checks = [
# VAR_NAME, blacklist
("DEBUG", None),
("MKLDNN_VERBOSE", None),
("PYTORCH_JIT_LOG_LEVEL", None)
]
for check in checks:
        if check[0] in os.environ and (check[1] is None or os.environ[check[0]] in check[1]):
raise RuntimeError(f"{check[0]} is set")
|
"""gitutils.py
Utils for getting git-related information.
"""
import os
import time
from pathlib import Path
import subprocess
from datetime import datetime
from typing import Optional, List
def clean_git_repo(repo: str) -> bool:
try:
command = f"git clean -xdf"
subprocess.check_call(command, cwd=repo, shell=True)
return True
except subprocess.CalledProcessError:
print(f"Failed to cleanup git repo {repo}")
        return False
def update_git_repo_branch(repo: str, branch: str) -> Optional[str]:
try:
command = f"git pull origin {branch}"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to update git repo {repo}, branch {branch}")
return None
def get_git_commit_on_date(repo: str, date: datetime) -> Optional[str]:
try:
# Get the first commit since date
formatted_date = date.strftime("%Y-%m-%d")
command = f"git log --until={formatted_date} -1 --oneline | cut -d ' ' -f 1"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get the last commit on date {formatted_date} in repo {repo}")
return None
def check_git_exist_local_branch(repo: str, branch: str) -> bool:
command = f"git rev-parse --verify {branch} &> /dev/null "
retcode = subprocess.call(command, cwd=repo, shell=True)
return (retcode == 0)
def get_git_commit_date(repo: str, commit: str) -> Optional[str]:
try:
command = f"git show -s --format=%ci {commit}"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get date of commit {commit} in repo {repo}")
return None
def checkout_git_branch(repo: str, branch: str) -> bool:
try:
if check_git_exist_local_branch(repo, branch):
command = f"git checkout {branch} &> /dev/null "
else:
command = f"git checkout --track origin/{branch} &> /dev/null"
retcode = subprocess.call(command, cwd=repo, shell=True)
return (retcode == 0)
except subprocess.CalledProcessError:
print(f"Failed to checkout git repo {repo}, branch {branch}")
        return False
def get_current_branch(repo: str) -> Optional[str]:
try:
command = "git branch --show-current"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get current branch name for repo {repo}")
return None
def get_git_origin(repo: str) -> Optional[str]:
try:
command = "git remote get-url origin"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except:
print(f"git command {command} returns non-zero status in repo {repo}")
return None
def get_git_commits(repo: str, start: str, end: str) -> Optional[List[str]]:
try:
command = f"git log --reverse --oneline --ancestry-path {start}^..{end} | cut -d \" \" -f 1"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip().split("\n")
if out == ['']:
out = None
return out
except subprocess.CalledProcessError:
print(f"git command {command} returns non-zero status in repo {repo}")
return None
def get_current_commit(repo: str) -> Optional[str]:
try:
command = f"git log --reverse --oneline -1 | cut -d \" \" -f 1"
out = subprocess.check_output(command, cwd=repo, shell=True).decode().strip()
return out
except subprocess.CalledProcessError:
print(f"Failed to get the current commit in repo {repo}")
return None
def checkout_git_commit(repo: str, commit: str) -> bool:
try:
assert len(commit) != 0
command = ["git", "checkout", "--recurse-submodules", commit]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
# Sleep 5 seconds for concurrent git process, remove the index.lock file if exists, and try again
try:
time.sleep(5)
index_lock = os.path.join(repo, ".git", "index.lock")
if os.path.exists(index_lock):
os.remove(index_lock)
command = ["git", "checkout", "--recurse-submodules", commit]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
print(f"Failed to checkout commit {commit} in repo {repo}")
return False
def update_git_repo(repo: str, branch: str="main") -> bool:
try:
assert len(branch) != 0
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "pull"]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
# Sleep 5 seconds for concurrent git process, remove the index.lock file if exists, and try again
try:
time.sleep(5)
index_lock = os.path.join(repo, ".git", "index.lock")
if os.path.exists(index_lock):
os.remove(index_lock)
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "pull"]
subprocess.check_call(command, cwd=repo, shell=False)
command = ["git", "checkout", "--recurse-submodules", branch]
subprocess.check_call(command, cwd=repo, shell=False)
return True
except subprocess.CalledProcessError:
print(f"Failed to update to branch {branch} in repo {repo}")
return False
|
import copy
import importlib
import os
import torch
from contextlib import contextmanager, ExitStack
import warnings
import inspect
import yaml
from pathlib import Path
from typing import ContextManager, Optional, List, Tuple, Generator
from torch.utils._pytree import tree_map
from torchbenchmark import REPO_PATH
from torchbenchmark.util.extra_args import check_correctness_p, parse_opt_args, apply_opt_args, \
parse_decoration_args, apply_decoration_args, is_staged_train_test, \
TEST_STAGE
from torchbenchmark.util.env_check import set_random_seed, correctness_check, stableness_check, is_hf_model
from torchbenchmark.util.fx_int8 import get_sub_module, prepare_sub_module, convert_sub_module
class PostInitProcessor(type):
def __call__(cls, *args, **kwargs):
obj = type.__call__(cls, *args, **kwargs)
obj.__post__init__()
return obj
@contextmanager
def no_grad(val):
"""Some meta-learning models (e.g. maml) may need to train a target(another) model
in inference runs
"""
old_state = torch.is_grad_enabled()
try:
torch.set_grad_enabled(not val)
yield
finally:
torch.set_grad_enabled(old_state)
@contextmanager
def nested(*contexts):
"""
Chain and apply a list of contexts
"""
with ExitStack() as stack:
for ctx in contexts:
stack.enter_context(ctx())
yield contexts
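# Usage sketch (hypothetical context factories, not used elsewhere): `nested` takes zero-argument
# factories and enters the contexts they create in order, which is how run_contexts,
# forward_contexts, etc. are applied by BenchmarkModel.invoke() below.
def _example_nested_usage():
    import contextlib
    @contextlib.contextmanager
    def tag(name, log):
        log.append(f"enter {name}")
        yield
        log.append(f"exit {name}")
    log = []
    with nested(lambda: tag("outer", log), lambda: tag("inner", log)):
        log.append("body")
    return log  # -> ["enter outer", "enter inner", "body", "exit inner", "exit outer"]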
# enable JIT profiling executor
@contextmanager
def enable_profiling_executor():
try:
graph_executor = torch._C._get_graph_executor_optimize(True)
profiling_executor = torch._C._jit_set_profiling_executor(True)
profiling_mode = torch._C._jit_set_profiling_mode(True)
yield
finally:
torch._C._jit_set_profiling_mode(profiling_mode)
torch._C._jit_set_profiling_executor(profiling_executor)
torch._C._get_graph_executor_optimize(graph_executor)
class BenchmarkModel(metaclass=PostInitProcessor):
DEFAULT_TRAIN_BSIZE: Optional[int] = None
DEFAULT_EVAL_BSIZE: Optional[int] = None
# by default, deepcopy the model when checking correctness
# because some models are stateful (such as moco)
DEEPCOPY: bool = True
test: str
device: str
jit: bool
batch_size: int
extra_args: List[str]
run_contexts: List[ContextManager]
"""
A base class for adding models to torch benchmark.
See [Adding Models](#../models/ADDING_MODELS.md)
"""
def __init__(self, test: str, device: str, jit: bool=False, batch_size: Optional[int]=None, extra_args: List[str]=[]):
self.metadata = self.load_metadata()
self.test = test
assert self.test == "train" or self.test == "eval", \
f"Test must be 'train' or 'eval', but get {self.test}. Please submit a bug report."
self.device = device
self.jit = jit
self.determine_batch_size(batch_size)
self.extra_args = extra_args
self.opt = None
# contexts to run in the test function
if self.test == "train":
# In train test, there are run contexts that should only be applied for forward/backward/optimizer stage
# For example, amp only applies for the forward stage
self.forward_contexts = []
self.backward_contexts = []
self.optimizer_contexts = []
self.run_contexts = [
enable_profiling_executor # force JIT profiling executor to be enabled by default
]
# taken from torchdynamo benchmarks, this further controls randomness settings
def deterministic_torch_manual_seed(*args, **kwargs):
from torch._C import default_generator
seed = 1337
import torch.cuda
if not torch.cuda._is_in_bad_fork():
torch.cuda.manual_seed_all(seed)
return default_generator.manual_seed(seed)
torch.manual_seed = deterministic_torch_manual_seed
set_random_seed()
# sanity checks of the options
        assert self.test == "train" or self.test == "eval", f"Test must be 'train' or 'eval', but {self.test} was provided."
# parse the args
self.dargs, opt_args = parse_decoration_args(self, self.extra_args)
# if the args contain "--torchdynamo", parse torchdynamo args
if "--torchdynamo" in opt_args:
self.dynamo = True
from torchbenchmark.util.backends.torchdynamo import parse_torchdynamo_args
self.opt_args, self.extra_args = parse_torchdynamo_args(self, opt_args)
else:
self.dynamo = False
self.opt_args, self.extra_args = parse_opt_args(self, opt_args)
# Run the post processing for model acceleration
def __post__init__(self):
# All arguments should be parsed at this point.
assert not self.extra_args, f"Expected no unknown args at this point, found {self.extra_args}"
should_check_correctness = check_correctness_p(self, self.opt_args, self.dargs)
if should_check_correctness:
self.eager_output = stableness_check(self, cos_sim=False, deepcopy=self.DEEPCOPY, rounds=1)
if isinstance(self.eager_output, Tuple):
self.eager_output = tuple((t.detach() if isinstance(t, torch.Tensor) else t) for t in self.eager_output)
elif isinstance(self.eager_output, torch.Tensor):
self.eager_output = self.eager_output.detach()
if self.test == "train":
current_optimizer = self.get_optimizer()
if current_optimizer is not None:
self.set_optimizer(None)
try:
if self.DEEPCOPY:
copy_model = copy.deepcopy(self)
else:
copy_model = self
copy_model.invoke()
self.eager_model_after_one_train_iteration = copy_model.model
except RuntimeError:
warnings.warn(UserWarning("Can't copy the model. Skipping train correctness check."))
if current_optimizer is not None:
self.set_optimizer(current_optimizer)
# apply decoration args
apply_decoration_args(self, self.dargs)
# apply optimization args
if self.dynamo:
from torchbenchmark.util.backends.torchdynamo import apply_torchdynamo_args
apply_torchdynamo_args(self, self.opt_args, self.dargs.precision)
else:
apply_opt_args(self, self.opt_args)
if should_check_correctness:
# tensorrt or fp16 is known to generate less-accurate results
# in this case, use more relaxed cosine similarity instead of torch.allclose
# for correctness testing
# see: https://github.com/pytorch/torchdynamo/pull/438
if (
self.dargs.precision == "fp16"
or self.dargs.precision == "amp"
or (self.dynamo and self.opt_args.torchdynamo == "fx2trt")
or (not self.dynamo and (self.device == "cuda" and self.opt_args.backend == "fx2trt"))
or (not self.dynamo and self.opt_args.use_cosine_similarity)
or self.dargs.precision == "fx_int8"
or self.dargs.precision == "bf16"
or self.dargs.precision == "amp_fp16"
or self.dargs.precision == "amp_bf16"
):
self.correctness = correctness_check(self, cos_sim=True, deepcopy=self.DEEPCOPY)
else:
# get tolerance of correctness check from os.environ
atol = float(os.environ.get("TORCHBENCH_ATOL", "1e-4"))
rtol = float(os.environ.get("TORCHBENCH_RTOL", "1e-4"))
self.correctness = correctness_check(self, cos_sim=False, deepcopy=self.DEEPCOPY, atol=atol, rtol=rtol)
# setup distributed trainer
if self.dargs.distributed:
if self.dargs.distributed_wrap_fn:
pos = self.dargs.distributed_wrap_fn.rfind(".")
module = importlib.import_module(self.dargs.distributed_wrap_fn[:pos])
apply_trainer = getattr(module, self.dargs.distributed_wrap_fn[(pos+1):])
else:
from torchbenchmark.util.distributed.core_model.apply_trainer import apply_trainer
if is_hf_model(self):
# DDP requires to use unwrapped model for huggingface
module, _inputs = self.get_module(wrap_model=False)
else:
module, _inputs = self.get_module()
self.set_module(apply_trainer(module, self.dargs.distributed))
        # Need to clean up the cache because we run deepcopy within the correctness check
if self.device == "cuda":
torch.cuda.empty_cache()
def determine_batch_size(self, batch_size=None):
# batch size priority for eval tests: not ALLOW_CUSTOMIZE_BSIZE > user specified > device specified > default
# batch size priority for train tests: not ALLOW_CUSTOMIZE_BSIZE > user specified > default
self.batch_size = batch_size
if not batch_size:
self.batch_size = self.DEFAULT_TRAIN_BSIZE if self.test == "train" else self.DEFAULT_EVAL_BSIZE
if self.device == "cuda":
current_device_name = torch.cuda.get_device_name()
assert current_device_name, f"torch.cuda.get_device_name() returns None when device is set to cuda, please double check."
elif self.device == "cpu":
current_device_name = "cpu"
elif self.device == "mps":
current_device_name = "mps"
# use the device suggestion on CUDA inference tests, key should be either eval_batch_size or train_batch_size
device_batch_size_key = f"{self.test}_batch_size"
if self.metadata and "devices" in self.metadata and current_device_name in self.metadata["devices"] \
and device_batch_size_key in self.metadata["devices"][current_device_name]:
self.batch_size = self.metadata["devices"][current_device_name][device_batch_size_key]
# If the model doesn't implement test or eval test
# its DEFAULT_TRAIN_BSIZE or DEFAULT_EVAL_BSIZE will still be None
if not self.batch_size:
raise NotImplementedError(f"Test {self.test} is not implemented.")
else:
self.batch_size = batch_size
# Check if specified batch size is supported by the model
if hasattr(self, "ALLOW_CUSTOMIZE_BSIZE") and (not getattr(self, "ALLOW_CUSTOMIZE_BSIZE")):
if self.test == "train" and (not self.batch_size == self.DEFAULT_TRAIN_BSIZE):
raise NotImplementedError("Model doesn't support customizing batch size.")
elif self.test == "eval" and (not self.batch_size == self.DEFAULT_EVAL_BSIZE):
raise NotImplementedError("Model doesn't support customizing batch size.")
def load_metadata(self):
relative_path = self.__class__.__module__.split(".")
self.name = relative_path[-1]
metadata_loc = Path(REPO_PATH).joinpath(*relative_path).joinpath("metadata.yaml")
if not metadata_loc.exists():
return None
with open(metadata_loc, "r") as mf:
metadata = yaml.safe_load(mf)
return metadata
def add_context(self, context_fn, stage=TEST_STAGE.ALL):
ctx = context_fn()
        assert isinstance(ctx, ContextManager), f"Expected a ContextManager, but got {type(ctx)}. Please report a bug."
if stage == TEST_STAGE.ALL:
self.run_contexts.append(context_fn)
elif stage == TEST_STAGE.FORWARD:
self.forward_contexts.append(context_fn)
elif stage == TEST_STAGE.BACKWARD:
self.backward_contexts.append(context_fn)
elif stage == TEST_STAGE.OPTIMIZER:
self.optimizer_contexts.append(context_fn)
# Common interface for all models extending BenchmarkModel to access the optimizer.
# Some models have an opt attribute, others have an optimizer attribute; this
# implementation handles both. This function should not error! Simply return None
# if there's no optimizer in sight.
def get_optimizer(self):
if hasattr(self, "optimizer"):
return self.optimizer
if hasattr(self, "opt"):
return self.opt
warnings.warn("The optimizer for this model is not stored in self.opt nor self.optimizer. "
"Currently returning None! Please override this implementation with your own "
"if there is an optimizer this should be returning instead.")
return None
# Takes in an optimizer and sets that to be the optimizer used from now on.
# There are special models like dcgan that would update multiple optimizers at once,
# so optimizer here is not always strictly a, say, torch.optim.Optimizer.
def set_optimizer(self, optimizer) -> None:
if hasattr(self, "optimizer"):
self.optimizer = optimizer
return
if hasattr(self, "opt"):
self.opt = optimizer
return
raise NotImplementedError("The optimizer for this model is not stored in self.opt nor self.optimizer. "
"Please override this implementation with your own.")
# Default implementation for replacing the model
def set_module(self, new_model):
if hasattr(self, 'model') and isinstance(self.model, torch.nn.Module):
self.model = new_model
else:
raise NotImplementedError("The instance variable 'model' does not exist or is not type 'torch.nn.Module', implement your own `set_module()` function.")
def gen_inputs(self, num_batches: int=1) -> Tuple[Generator, Optional[int]]:
"""Generate a tuple of (iterator of model input, the size of the iterator).
If size is None, the input is randomly generated and has infinite size."""
raise NotImplementedError("Default input generation function is not implemented. "
"Please submit an issue if you need input iterator implementation for the model.")
def invoke_staged_train_test(self) -> None:
optimizer = self.get_optimizer()
if optimizer is not None:
optimizer.zero_grad()
with nested(*self.forward_contexts):
losses = self.forward()
with nested(*self.backward_contexts):
self.backward(losses)
if optimizer is not None:
with nested(*self.optimizer_contexts):
self.optimizer_step()
return None
def invoke(self) -> Optional[Tuple[torch.Tensor]]:
out = None
if self.test == "train" and is_staged_train_test(self):
self.invoke_staged_train_test()
return out
with nested(*self.run_contexts):
if self.test == "train":
self.train()
elif self.test == "eval":
out = self.eval()
return out
def eval_in_nograd(self):
return True
def check_opt_vs_noopt_jit(self):
if not self.jit:
return
model_name = inspect.getfile(self.__class__).split(os.sep)[-2]
print(f"model_name={model_name} , {inspect.getfile(self.__class__)}")
model_blacklist = [
'demucs', # set up issue
'yolov3', # set up issue
'BERT_pytorch', # set up issue
'moco', # set up issue
'Super_SloMo', # results don't match, might be due to the way TE CUDA handles rand?
'attention_is_all_you_need_pytorch', # results don't match, might be due to the way TE CUDA handles rand?
]
if model_name in model_blacklist:
warnings.warn(UserWarning(f"{model_name}.get_module() doesn't support `check_results` yet!"))
return
# if a model doesn't support `get_module`
# we should let it throw and then
# override `check_results` for that model
try:
model, inputs = self.get_module()
except NotImplementedError:
warnings.warn(UserWarning(f"{model_name}.get_module() doesn't support `check_results` yet!"))
return
def bench_allclose(a, b):
if isinstance(a, torch.Tensor):
assert(isinstance(b, torch.Tensor))
assert(a.allclose(b))
            elif isinstance(a, (tuple, list)):
assert(type(a) == type(b))
assert(len(a) == len(b))
for i in range(len(a)):
bench_allclose(a[i], b[i])
else:
raise RuntimeError("Encountered an supported type.\n" +
"Please add the type or override `bench_allclose`")
try:
opt = model(*inputs)
except Exception as e:
print(e)
warnings.warn(UserWarning(f"{model_name}.eval() doesn't support `check_results` yet!"))
return
# disable optimizations and force a recompilation
# to a baseline version
fwd = model._c._get_method("forward")
fwd._debug_flush_compilation_cache()
torch._C._set_graph_executor_optimize(False)
base = model(*inputs)
torch._C._set_graph_executor_optimize(True)
bench_allclose(base, opt)
def enable_channels_last(self):
model_name = self.name
try:
model, _ = self.get_module()
model = model.to(memory_format=torch.channels_last)
except RuntimeError:
warnings.warn(UserWarning(f"{model_name} doesn't support `channels_last` yet!"))
return
self.set_module(model)
def inputs_convert(example_inputs):
if isinstance(example_inputs, torch.Tensor) and example_inputs.dim()==4:
return example_inputs.to(memory_format=torch.channels_last)
elif isinstance(example_inputs, (tuple, list, dict)):
return tree_map(lambda x: inputs_convert(x), example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `channels_last`!"))
return example_inputs
if hasattr(self, 'example_inputs'):
self.example_inputs = inputs_convert(self.example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `channels_last`!"))
def enable_fx_int8(self, quant_engine:str='x86'):
torch.backends.quantized.engine = quant_engine
try:
model, _ = self.get_module()
# Get sub modules
model, sub_module_list = get_sub_module(model, dict(model.named_modules()), '')
if not len(sub_module_list):
warnings.warn(UserWarning(f"{self.name} doesn't have submodule can ben quantized!"))
model = prepare_sub_module(sub_module_list, model, '', quant_engine)
self.set_module(model)
# Calibration
self.eval()
model, _ = self.get_module()
model = convert_sub_module(sub_module_list, model, '')
self.set_module(model)
except Exception as e:
print(e)
raise RuntimeError(f"{self.name} doesn't support `fx_int8` yet!")
def enable_bf16(self):
model_name = self.name
try:
model, _ = self.get_module()
model = model.to(torch.bfloat16)
except RuntimeError:
warnings.warn(UserWarning(f"{model_name} doesn't support `to(torch.bfloat16)` yet!"))
return
self.set_module(model)
def inputs_convert(example_inputs):
if isinstance(example_inputs, torch.Tensor) and example_inputs.dtype == torch.float32:
return example_inputs.to(torch.bfloat16)
elif isinstance(example_inputs, (tuple, list, dict)):
return tree_map(lambda x: inputs_convert(x), example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `torch.bfloat16`!"))
return example_inputs
if hasattr(self, 'example_inputs'):
self.example_inputs = inputs_convert(self.example_inputs)
else:
warnings.warn(UserWarning(f"{model_name} example inputs doesn't convert to `torch.bfloat16`!"))
def enable_amp(self):
if not self.dynamo and self.opt_args.backend == 'cudagraph':
return NotImplementedError("AMP not implemented for cudagraphs")
if not hasattr(self, "amp_context"):
raise RuntimeError(f"{self.name} doesn't have amp_context support!")
if self.device == "cpu":
self.amp_context = lambda: torch.cpu.amp.autocast()
elif self.device == "cuda":
self.amp_context = lambda: torch.cuda.amp.autocast()
|
"""
Return a list of recent PyTorch wheels published on download.pytorch.org.
Users can specify package name, python version, platform, and the number of days to return.
If one of the specified packages is missing on a given day, the script will skip outputting the results for that day.
"""
import os
import re
import requests
import argparse
import urllib.parse
from datetime import date, timedelta
from bs4 import BeautifulSoup
from collections import defaultdict
import sys
from pathlib import Path
import subprocess
from typing import Dict, List, Optional
REPO_ROOT = Path(__file__).parent.parent.parent.resolve()
class add_path():
def __init__(self, path):
self.path = path
def __enter__(self):
sys.path.insert(0, self.path)
def __exit__(self, exc_type, exc_value, traceback):
try:
sys.path.remove(self.path)
except ValueError:
pass
with add_path(str(REPO_ROOT)):
from utils.cuda_utils import DEFAULT_CUDA_VERSION, CUDA_VERSION_MAP
from utils.python_utils import DEFAULT_PYTHON_VERSION, PYTHON_VERSION_MAP
PYTORCH_CUDA_VERSION = CUDA_VERSION_MAP[DEFAULT_CUDA_VERSION]["pytorch_url"]
PYTORCH_PYTHON_VERSION = PYTHON_VERSION_MAP[DEFAULT_PYTHON_VERSION]["pytorch_url"]
torch_wheel_nightly_base = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERSION}/"
torch_nightly_wheel_index = f"https://download.pytorch.org/whl/nightly/{PYTORCH_CUDA_VERSION}/torch_nightly.html"
torch_nightly_wheel_index_override = "torch_nightly.html"
def memoize(function):
"""
"""
call_cache = {}
def memoized_function(*f_args):
if f_args in call_cache:
return call_cache[f_args]
call_cache[f_args] = result = function(*f_args)
return result
return memoized_function
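# Usage sketch (hypothetical function, not used elsewhere): repeated calls with the same positional
# arguments are served from call_cache instead of re-running the body.
def _example_memoize_usage():
    calls = []
    @memoize
    def square(x):
        calls.append(x)
        return x * x
    square(3)
    square(3)
    return len(calls)  # -> 1, the second call hits the cache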
@memoize
def get_wheel_index_data(py_version, platform_version, url=torch_nightly_wheel_index, override_file=torch_nightly_wheel_index_override):
"""
"""
if os.path.isfile(override_file) and os.stat(override_file).st_size:
with open(override_file) as f:
data = f.read()
else:
r = requests.get(url)
r.raise_for_status()
data = r.text
soup = BeautifulSoup(data, 'html.parser')
data = defaultdict(dict)
for link in soup.find_all('a'):
        group_match = re.search(r"([a-z]*)-(.*)-(.*)-(.*)-(.*)\.whl", link.text)
        # some packages (e.g., torch-rec) don't follow this naming convention
if not group_match:
continue
pkg, version, py, py_m, platform = group_match.groups()
version = urllib.parse.unquote(version)
if py == py_version and platform == platform_version:
full_url = os.path.join(torch_wheel_nightly_base, link.text)
data[pkg][version] = full_url
return data
def get_nightly_wheel_urls(packages:list, date:date,
py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64'):
"""Gets urls to wheels for specified packages matching the date, py_version, platform_version
"""
date_str = f"{date.year}{date.month:02}{date.day:02}"
data = get_wheel_index_data(py_version, platform_version)
rc = {}
for pkg in packages:
pkg_versions = data[pkg]
# multiple versions could happen when bumping the pytorch version number
# e.g., both torch-1.11.0.dev20220211%2Bcu113-cp38-cp38-linux_x86_64.whl and
# torch-1.12.0.dev20220212%2Bcu113-cp38-cp38-linux_x86_64.whl exist in the download link
keys = sorted([key for key in pkg_versions if date_str in key], reverse=True)
if len(keys) > 1:
print(f"Warning: multiple versions matching a single date: {keys}, using {keys[0]}")
if len(keys) == 0:
return None
full_url = pkg_versions[keys[0]]
rc[pkg] = {
"version": keys[0],
"wheel": full_url,
}
return rc
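# Usage note (illustrative; a real call performs a network request): for example,
# get_nightly_wheel_urls(["torch"], date(2022, 2, 12)) would return a dict shaped like
#   {"torch": {"version": "<matching version string>", "wheel": "<download url>"}}
# or None when any requested package has no wheel published for that date.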
def get_nightly_wheels_in_range(packages:list, start_date:date, end_date:date,
py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64', reverse=False):
rc = []
curr_date = start_date
while curr_date <= end_date:
curr_wheels = get_nightly_wheel_urls(packages, curr_date,
py_version=py_version,
platform_version=platform_version)
if curr_wheels is not None:
rc.append(curr_wheels)
curr_date += timedelta(days=1)
if reverse:
rc.reverse()
return rc
def get_n_prior_nightly_wheels(packages:list, n:int,
py_version=PYTORCH_PYTHON_VERSION, platform_version='linux_x86_64', reverse=False):
end_date = date.today()
start_date = end_date - timedelta(days=n)
return get_nightly_wheels_in_range(packages, start_date, end_date,
py_version=py_version, platform_version=platform_version, reverse=reverse)
def get_most_recent_successful_wheels(packages: list, pyver: str, platform: str) -> Optional[Dict[str, Dict[str, str]]]:
    """Get the most recent successful nightly wheels as {package: {"version": ..., "wheel": ...}}, or None if none are found."""
curr_date = date.today()
date_limit = curr_date - timedelta(days=365)
while curr_date >= date_limit:
wheels = get_nightly_wheel_urls(packages, curr_date, py_version=pyver, platform_version=platform)
if wheels:
return wheels
curr_date = curr_date - timedelta(days=1)
# Can't find any valid pytorch package
return None
def install_wheels(wheels):
"""Install the wheels specified in the wheels."""
wheel_urls = list(map(lambda x: wheels[x]["wheel"], wheels.keys()))
work_dir = Path(__file__).parent.joinpath(".data")
work_dir.mkdir(parents=True, exist_ok=True)
requirements_file = work_dir.joinpath("requirements.txt").resolve()
with open(requirements_file, "w") as rf:
rf.write("\n".join(wheel_urls))
command = ["pip", "install", "-r", str(requirements_file)]
print(f"Installing pytorch nightly packages command: {command}")
subprocess.check_call(command)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("--pyver", type=str, default=PYTORCH_PYTHON_VERSION, help="PyTorch Python version")
parser.add_argument("--platform", type=str, default="linux_x86_64", help="PyTorch platform")
parser.add_argument("--priordays", type=int, default=1, help="Number of days")
parser.add_argument("--reverse", action="store_true", help="Return reversed result")
parser.add_argument("--packages", required=True, type=str, nargs="+", help="List of package names")
parser.add_argument("--install-nightlies", action="store_true",
help="Install the most recent successfully built nightly packages")
args = parser.parse_args()
if args.install_nightlies:
wheels = get_most_recent_successful_wheels(args.packages, args.pyver, args.platform)
assert wheels, f"We do not find any successful pytorch nightly build of packages: {args.packages}."
print(f"Found pytorch nightly wheels: {wheels} ")
install_wheels(wheels)
exit(0)
wheels = get_n_prior_nightly_wheels(packages=args.packages,
n=args.priordays,
py_version=args.pyver,
platform_version=args.platform,
reverse=args.reverse)
for wheelset in wheels:
for pkg in wheelset:
print(f"{pkg}-{wheelset[pkg]['version']}: {wheelset[pkg]['wheel']}")
|
"""
Utils for model metadata
"""
from typing import Any, List, Dict
def match_item(item_name: str, item_val: str, skip_item: Dict[str, Any]) -> bool:
if item_name not in skip_item:
return True
return skip_item[item_name] == item_val
def skip_by_metadata(test: str, device:str, jit: bool, extra_args: List[str], metadata: Dict[str, Any]) -> bool:
"Check if the test should be skipped based on model metadata."
if not "not_implemented" in metadata:
return False
for skip_item in metadata["not_implemented"]:
match = match_item("test", test, skip_item) and \
match_item("device", device, skip_item) and \
match_item("jit", jit, skip_item) and \
match_item("extra_args", extra_args, skip_item)
if match:
return True
return False |
def prefetch_loader(loader, device):
result = []
for data in loader:
items = []
for item in data:
items.append(item.to(device))
result.append(tuple(items))
return result |
import argparse
import os
import subprocess
from datetime import date, timedelta
from pathlib import Path
from torch_nightly import get_n_prior_nightly_wheels
def run_step(cmd, cwd=None, conda_env=None, verbose=True):
if verbose:
print(f" # running step: {cmd}")
if conda_env:
cmd = f'conda run --prefix {conda_env} {cmd}'
return subprocess.check_output(
cmd,
cwd=cwd,
stderr=subprocess.STDOUT,
shell=True)
def create_env(env, benchmark, wheelset, py_ver=3.7, verbose=True):
run_step(f"conda create -y -q -p {env} python={py_ver}")
run_step(f"pip install -q {wheelset['torch']['wheel']} {wheelset['torchvision']['wheel']}", conda_env=env)
run_step(f"pip install -q --no-deps {wheelset['torchtext']['wheel']}", conda_env=env)
run_step(f"python install.py", conda_env=env, cwd=benchmark)
def check_env(env):
torch_ver = run_step(f'python -c "import torch; print(torch.__version__)"', conda_env=env)
torchvision_ver = run_step(f'python -c "import torchvision; print(torchvision.__version__)"', conda_env=env)
torchtext_ver = run_step(f'python -c "import torchtext; print(torchtext.__version__)"', conda_env=env)
print(torch_ver, torchvision_ver, torchtext_ver)
return True
def prepare_envs(num_prior, env_root, benchmark, py_ver=3.7):
wheelsets = get_n_prior_nightly_wheels(['torch', 'torchvision', 'torchtext'], num_prior)
for wheelset in wheelsets:
version = wheelset['torch']['version']
env = Path(env_root) / f"torch-{version}-env"
print(f"### Creating env for {version} with py{py_ver}")
try:
            create_env(env, benchmark, wheelset, py_ver=py_ver)
check_env(env)
except Exception as e:
print(f"### Failed creating env for {version}: {e}")
continue
def run_benchmark(conda_env_path, benchmark_repo, output_file, coreset="4-47", filter=None, min_rounds=20):
cmd = [
f'conda run --prefix {conda_env_path}',
f'taskset -c "{coreset}"',
f'pytest test_bench.py -k "{filter}"',
f'--benchmark-min-rounds {min_rounds}',
f'--benchmark-json {output_file}'
]
prepared_env = os.environ
prepared_env['GOMP_CPU_AFFINITY'] = f"{coreset}"
output = subprocess.run(
        " ".join(cmd),  # shell=True expects a single command string, not a list
env=prepared_env,
cwd=benchmark_repo,
shell=True,
)
print(output)
if __name__ == "__main__":
# ' --benchmark-json {output_file}/$(date +"%Y%m%d_%H%M%S")_${c}.json'
# prepare_envs(num_prior=60, env_root="/home/ec2-user/sweep_conda_envs")
# prepare_envs(num_prior=60, env_root="/home/ec2-user/sweep_conda_envs", benchmark="/home/ec2-user/benchmark")
run_benchmark('/home/ec2-user/sweep_conda_envs/torch-1.8.0.dev20201219-env',
'/home/ec2-user/benchmark',
'/home/ec2-user/test_benchmark.json',
filter="(bert and cpu and eval and eager)") |
"""
PyTorch benchmark env check utils.
This file may be loaded without torch packages installed, e.g., in OnDemand CI.
"""
import importlib
import copy
import warnings
from typing import List, Dict, Tuple, Optional
MAIN_RANDOM_SEED = 1337
# rounds for stableness tests
STABLENESS_CHECK_ROUNDS: int = 3
# rounds for correctness tests
CORRECTNESS_CHECK_ROUNDS: int = 2
def set_random_seed():
import torch
import random
import numpy
torch.manual_seed(MAIN_RANDOM_SEED)
random.seed(MAIN_RANDOM_SEED)
numpy.random.seed(MAIN_RANDOM_SEED)
def get_pkg_versions(packages: List[str]) -> Dict[str, str]:
versions = {}
    for module_name in packages:
        module = importlib.import_module(module_name)
        versions[module_name] = module.__version__
return versions
def has_native_amp() -> bool:
import torch
try:
if getattr(torch.cuda.amp, 'autocast') is not None:
return True
except AttributeError:
pass
return False
def is_timm_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'TIMM_MODEL') and model.TIMM_MODEL
def is_torchvision_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'TORCHVISION_MODEL') and model.TORCHVISION_MODEL
def is_hf_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'HF_MODEL') and model.HF_MODEL
def is_fambench_model(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'FAMBENCH_MODEL') and model.FAMBENCH_MODEL
def is_staged_train_test(model: 'torchbenchmark.util.model.BenchmarkModel') -> bool:
return hasattr(model, 'forward') and hasattr(model, 'backward') and hasattr(model, 'optimizer_step')
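# Illustrative sketch (hypothetical stub class, not a real benchmark): a model counts as a staged
# train test when it exposes separate forward/backward/optimizer_step callables, which is what
# allows per-stage contexts (e.g. autocast only around forward) to be attached.
def _example_is_staged_train_test():
    class _StagedStub:
        def forward(self): ...
        def backward(self, losses): ...
        def optimizer_step(self): ...
    return is_staged_train_test(_StagedStub())  # -> True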
def stableness_check(model: 'torchbenchmark.util.model.BenchmarkModel', cos_sim=True, deepcopy=True, rounds=STABLENESS_CHECK_ROUNDS) -> Tuple['torch.Tensor']:
"""Get the eager output. Run eager mode a couple of times to guarantee stableness.
If the result is not stable, raise RuntimeError. """
old_test = model.test
model.test = "eval"
opt_saved = None
if hasattr(model, "opt"):
opt_saved = model.opt
model.opt = None
previous_result = None
for _i in range(rounds):
set_random_seed()
# some models are stateful and will give different outputs
# on the same input if called multiple times
try:
if deepcopy:
copy_model = copy.deepcopy(model)
else:
copy_model = model
except RuntimeError:
# if the model is not copy-able, don't copy it
copy_model = model
        if previous_result is None:
previous_result = copy_model.invoke()
else:
cur_result = copy_model.invoke()
if not same(previous_result, cur_result, cos_similarity=cos_sim):
raise RuntimeError("Model returns unstable result. Please report a bug.")
del cur_result
model.test = old_test
if opt_saved:
model.opt = opt_saved
return previous_result
def correctness_check(model: 'torchbenchmark.util.model.BenchmarkModel', cos_sim=True, deepcopy=True, rounds=CORRECTNESS_CHECK_ROUNDS, atol=1e-4, rtol=1e-4) -> bool:
import torch
old_test = model.test
model.test = "eval"
opt_saved = None
opt_saved = model.opt
model.opt = None
    # It looks like we don't run backward here, and dynamo may also have
# an issue with memory usage: https://fburl.com/workplace/cgxzsdhz
with torch.no_grad():
for _i in range(rounds):
# some models are stateful and will give different outputs
# on the same input if called multiple times
set_random_seed()
try:
if deepcopy:
copy_model = copy.deepcopy(model)
else:
copy_model = model
except RuntimeError:
# if the model is not copy-able, don't copy it
copy_model = model
cur_result = copy_model.invoke()
equal_nan = hasattr(model, "EQUAL_NAN") and model.EQUAL_NAN
if not same(model.eager_output, cur_result, cos_similarity=cos_sim, atol=atol, rtol=rtol, equal_nan=equal_nan):
# Restore the original model test if eval correctness doesn't pass
model.test = old_test
model.opt = opt_saved if opt_saved else model.opt
return False
del cur_result
model.test = old_test
model.opt = opt_saved if opt_saved else model.opt
if model.test == "train":
if not hasattr(model, "model") or not hasattr(model.model, "named_parameters"):
warnings.warn(UserWarning("model doesn't have model or model.named_parameters. Skipping train correctness check."))
return True
if not hasattr(model, "eager_model_after_one_train_iteration"):
warnings.warn(UserWarning("model doesn't have eager_model_after_one_train_iteration. Skipping train correctness check."))
return True
model.invoke()
for name, param in model.model.named_parameters():
if not param.requires_grad:
continue
found = False
for name_ref, param_ref in model.eager_model_after_one_train_iteration.named_parameters():
if name_ref == name:
found = True
# backward typically requires higher error margin.
                    # 40 times bigger may sound too big to be useful, but it is still better than not checking at all.
if not same(param_ref.grad, param.grad, cos_similarity=cos_sim, atol=atol*40, rtol=rtol*40):
import torch
if not isinstance(param.grad, torch.Tensor):
print(f"model with dynamo does not have grad of param {name}")
else:
print(f"grad of param {name} after running with dynamo doesn't have gradient matching with eager mode")
print(f"grad of param:\n{param.grad}\neager grad:\n{param_ref.grad}")
return False
break
if not found:
print(f"param {name} in model with dynamo not found in the eager model")
return False
return True
def istype(obj, allowed_types):
"""isinstance() without subclasses"""
if isinstance(allowed_types, (tuple, list, set)):
return type(obj) in allowed_types
return type(obj) is allowed_types
def is_numpy_int_type(value):
import numpy as np
return istype(
value,
(
np.int8,
np.int16,
np.int32,
np.int64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
),
)
def is_numpy_float_type(value):
import numpy as np
return istype(
value,
(
np.float16,
np.float32,
np.float64,
),
)
def is_numpy_ndarray(value):
import numpy as np
return istype(
value,
(np.ndarray, ),
)
# copied from https://github.com/pytorch/torchdynamo/blob/main/torchdynamo/utils.py#L411
def same(a, b, cos_similarity=False, atol=1e-4, rtol=1e-4, equal_nan=False):
"""Check correctness to see if a and b match"""
import torch
import math
if isinstance(a, (list, tuple, torch.nn.ParameterList, torch.Size)):
assert isinstance(b, (list, tuple)), f"type mismatch {type(a)} {type(b)}"
return len(a) == len(b) and all(
same(ai, bi, cos_similarity, atol, rtol, equal_nan) for ai, bi in zip(a, b)
)
elif isinstance(a, dict):
assert isinstance(b, dict)
assert set(a.keys()) == set(
b.keys()
), f"keys mismatch {set(a.keys())} == {set(b.keys())}"
for k in a.keys():
if not (same(a[k], b[k], cos_similarity, atol, rtol, equal_nan=equal_nan)):
print("Accuracy failed for key name", k)
return False
return True
    elif isinstance(a, torch.Tensor):
        if not isinstance(b, torch.Tensor):
            return False
        if a.is_sparse:
            assert b.is_sparse
            a = a.to_dense()
            b = b.to_dense()
if cos_similarity:
            # TRT can introduce error larger than the current threshold; use cosine similarity as a replacement
a = a.flatten().to(torch.float32)
b = b.flatten().to(torch.float32)
res = torch.nn.functional.cosine_similarity(a, b, dim=0, eps=1e-6)
if res < 0.99:
print(f"Similarity score={res.cpu().detach().item()}")
return res >= 0.99
else:
return torch.allclose(a, b, atol=atol, rtol=rtol, equal_nan=equal_nan)
elif isinstance(a, (str, int, type(None), bool, torch.device)):
return a == b
elif isinstance(a, float):
return math.isclose(a, b, rel_tol=rtol, abs_tol=atol)
elif is_numpy_int_type(a) or is_numpy_float_type(a):
return (type(a) is type(b)) and (a == b)
elif is_numpy_ndarray(a):
return (type(a) is type(b)) and same(torch.from_numpy(a),
torch.from_numpy(b),
cos_similarity,
atol, rtol, equal_nan)
elif type(a).__name__ in (
"MaskedLMOutput",
"Seq2SeqLMOutput",
"CausalLMOutputWithCrossAttentions",
"LongformerMaskedLMOutput",
"Instances",
"SquashedNormal",
"Boxes",
"Normal",
"TanhTransform",
"Foo",
"Variable",
):
assert type(a) is type(b)
return all(
same(getattr(a, key), getattr(b, key), cos_similarity, atol, rtol, equal_nan)
for key in a.__dict__.keys()
)
else:
raise RuntimeError(f"unsupported type: {type(a).__name__}")
|
import re
import torch
from torch.ao.quantization import QuantWrapper, get_default_qconfig_mapping, get_default_qconfig_propagation_list
from torch.ao.quantization.quantize_fx import _fuse_fx, prepare_fx, convert_fx
from torchbenchmark.util.env_check import is_hf_model
def _append_attr(fx_module, module, fx_white_list=[]):
fx_attr = dir(fx_module)
org_attr = dir(module)
ignore_match_patterns = [r"_", r"quant", r"dequant", r"weight",
r"bias", r'activation_post_process']
ignore_search_patterns = [r"_scale_", r"_zero_point_",
r'_activation_post_process_']
add_special_patterns = [r"_forward_hooks", r"_forward_pre_hooks", r"_backward_hooks"]
attr_names = []
for i in org_attr:
if type(module) in fx_white_list and type(module) != torch.nn.Sequential \
and any([re.search(p, i) for p in add_special_patterns]):
continue
if any([re.search(p, i) for p in add_special_patterns]) \
or (i not in fx_attr \
and not any([re.match(p, i) for p in ignore_match_patterns]) \
and not any([re.search(p, i) for p in ignore_search_patterns])) :
attr_names.append(i)
for name in attr_names:
attr = getattr(module, name, None)
if isinstance(attr, torch.nn.Module) or \
isinstance(attr, torch.quantization.qconfig.QConfig):
continue
setattr(fx_module, name, attr)
return fx_module
def get_sub_module(model, module_dict, prefix):
fx_white_list = get_default_qconfig_propagation_list()
ignore_list = []
    # assuming is_hf_model expects the model; a bare function object is always truthy
    if is_hf_model(model):
import transformers
ignore_list.extend([transformers.models.gpt2.modeling_gpt2.GPT2Attention, transformers.models.t5.modeling_t5.T5DenseActDense])
def _get_sub_module(model, module_dict, prefix, sub_module_list):
for name, module in model.named_children():
quant_wrap_flag = False
if type(module) in ignore_list:
continue
op_name = prefix + "." + name if prefix != "" else name
if op_name not in module_dict:
continue
if type(module) in fx_white_list and type(module) != torch.nn.Sequential:
module = QuantWrapper(module)
quant_wrap_flag = True
try:
graph_module = torch.fx.symbolic_trace(module)
if not quant_wrap_flag and str(module.get_submodule).count("\n") != str(graph_module.get_submodule).count("\n"):
continue
_fuse_fx(graph_module, False)
setattr(model, name, module)
sub_module_list.append(op_name)
            except Exception:
                # symbolic tracing failed for this module; recurse into its children instead
module = _get_sub_module(module, module_dict, op_name, sub_module_list)
setattr(model, name, module)
return model
sub_module_list = []
model = _get_sub_module(model, module_dict, prefix, sub_module_list)
return model, sub_module_list
def prepare_sub_module(sub_module_list, model, prefix, quant_engine:str='x86'):
qconfig_mapping = get_default_qconfig_mapping(quant_engine)
for name, module in model.named_children():
op_name = prefix + '.' + name if prefix != '' else name
if op_name in sub_module_list:
prepared_module = prepare_fx(module, qconfig_mapping, None)
_append_attr(prepared_module, module)
setattr(model, name, prepared_module)
else:
prepared_module = prepare_sub_module(sub_module_list, module, op_name, quant_engine)
_append_attr(prepared_module, module)
setattr(model, name, prepared_module)
return model
def convert_sub_module(sub_module_list, model, prefix):
for name, module in model.named_children():
op_name = prefix + '.' + name if prefix != '' else name
if op_name in sub_module_list:
convert_module = convert_fx(module)
setattr(model, name, convert_module)
else:
convert_module = convert_sub_module(sub_module_list, module, op_name)
setattr(model, name, convert_module)
return model
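# Illustrative sketch (assumed workflow, not part of the original file): the helpers
# above are meant to be chained -- split the model into traceable sub-modules,
# prepare them for static quantization, run calibration inputs through the prepared
# model, then convert. `module_dict` and `calib_inputs` are placeholders supplied
# by the caller.
def _quantize_sketch(model, module_dict, calib_inputs, quant_engine: str = 'x86'):
    model, sub_module_list = get_sub_module(model, module_dict, prefix="")
    model = prepare_sub_module(sub_module_list, model, prefix="", quant_engine=quant_engine)
    with torch.no_grad():
        for inputs in calib_inputs:  # calibration pass populates the observers
            model(*inputs)
    return convert_sub_module(sub_module_list, model, prefix="")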
|
import json
import os
import pandas as pd
import typing
class BenchmarkData:
def __init__(self):
self._benchmark_data = {}
self._machine_info = {}
self._commit_info = {}
self._names_all = set()
self._names_common = set()
self._tags = []
self._json_raw = []
def add_json_data(self, tag, json_data):
names = set([b['name'] for b in json_data['benchmarks']])
self._names_all.update(names)
if len(self._benchmark_data) == 0:
self._names_common.update(names)
else:
self._names_common.intersection_update(names)
self._benchmark_data[tag] = {b['name']: b for b in json_data['benchmarks']}
self._machine_info[tag] = json_data['machine_info']
self._commit_info[tag] = json_data['commit_info']
self._tags.append(tag)
self._json_raw.append(json_data)
def tags(self):
return list(self._benchmark_data.keys())
def benchmark_names(self, mode='common', keyword_filter=None):
"""
Return the names of benchmarks across the dataset.
mode:
'common': intersection across dataset files - useful for comparison plot
'all': union across dataset files
'outliers': union - intersection across dataset files
"""
        if mode == 'common':
            names = self._names_common
        elif mode == 'all':
            names = self._names_all
        elif mode == 'outliers':
            names = self._names_all - self._names_common
        else:
            raise ValueError(f"Unknown mode {mode!r}; expected 'common', 'all', or 'outliers'")
if keyword_filter is not None:
if isinstance(keyword_filter, str):
keyword_filter = [keyword_filter]
for kw in keyword_filter:
names = [n for n in names if kw in n]
return names
    def as_dataframe(self, name, max_data=100):
        frames = []
        for i, tag in enumerate(self._benchmark_data):
            benchmark = self._benchmark_data[tag][name]
            frames.append(pd.DataFrame()
                          .assign(time=benchmark['stats']['data'][:max_data])
                          .assign(tag=tag)
                          .assign(file_idx=i)
                          .assign(git_repo=self._commit_info[tag]['project'])
                          .assign(git_commit=self._commit_info[tag]['id'])
                          .assign(torch=self._machine_info[tag]['pytorch_version'])
                          .assign(torchtext=self._machine_info[tag]['torchtext_version'])
                          .assign(torchvision=self._machine_info[tag]['torchvision_version'])
                          .assign(date=self._commit_info[tag]['time']))
        # DataFrame.append was removed in pandas 2.0; concat builds the same frame.
        return pd.concat(frames, ignore_index=True) if frames else pd.DataFrame()
def load_data_dir(data_dir, most_recent_files:int =None, use_history_file=True):
"""
    Load all the files in the given data dir, up to the N most recent.
    If use_history_file is True, the most recent files are determined by their order in the history file.
"""
history_file = os.path.join(data_dir, 'history')
    if use_history_file and os.path.isfile(history_file):
with open(history_file) as hf:
history = hf.read().splitlines()
files = [os.path.join(data_dir, f) for f in history]
else:
files = sorted([os.path.join(data_dir, f) for f in os.listdir(data_dir) if os.path.splitext(f)[1] == '.json'])
if most_recent_files is not None:
files = files[:most_recent_files]
return load_data_files(files)
def load_data_files(files: typing.List[str]):
data = BenchmarkData()
for fname in files:
try:
with open(fname) as f:
data.add_json_data(fname, json.load(f))
        except Exception:
print(f"Error loading {fname}")
raise
return data
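# Illustrative usage sketch: load the most recent benchmark JSON files from a
# directory (the './data' path is a placeholder) and inspect one benchmark as a
# DataFrame.
def _load_demo(data_dir="./data"):
    data = load_data_dir(data_dir, most_recent_files=5)
    common = sorted(data.benchmark_names(mode='common'))
    return data.as_dataframe(common[0]) if common else None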
|
# coding: utf8
from collections import Counter, OrderedDict
from itertools import chain
import torch
from tqdm import tqdm
from .data import Dataset
from .pipeline import Pipeline
from torchtext.data.utils import get_tokenizer, dtype_to_attr, is_tokenizer_serializable
from .vocab import Vocab
class RawField(object):
""" Defines a general datatype.
Every dataset consists of one or more types of data. For instance, a text
classification dataset contains sentences and their classes, while a
machine translation dataset contains paired examples of text in two
languages. Each of these types of data is represented by a RawField object.
A RawField object does not assume any property of the data type and
it holds parameters relating to how a datatype should be processed.
Attributes:
preprocessing: The Pipeline that will be applied to examples
using this field before creating an example.
Default: None.
postprocessing: A Pipeline that will be applied to a list of examples
using this field before assigning to a batch.
Function signature: (batch(list)) -> object
Default: None.
is_target: Whether this field is a target variable.
Affects iteration over batches. Default: False
"""
def __init__(self, preprocessing=None, postprocessing=None, is_target=False):
self.preprocessing = preprocessing
self.postprocessing = postprocessing
self.is_target = is_target
def preprocess(self, x):
""" Preprocess an example if the `preprocessing` Pipeline is provided. """
if self.preprocessing is not None:
return self.preprocessing(x)
else:
return x
def process(self, batch, *args, **kwargs):
""" Process a list of examples to create a batch.
Postprocess the batch with user-provided Pipeline.
Args:
batch (list(object)): A list of object from a batch of examples.
Returns:
object: Processed object given the input and custom
postprocessing Pipeline.
"""
if self.postprocessing is not None:
batch = self.postprocessing(batch)
return batch
class Field(RawField):
"""Defines a datatype together with instructions for converting to Tensor.
Field class models common text processing datatypes that can be represented
by tensors. It holds a Vocab object that defines the set of possible values
for elements of the field and their corresponding numerical representations.
The Field object also holds other parameters relating to how a datatype
should be numericalized, such as a tokenization method and the kind of
Tensor that should be produced.
If a Field is shared between two columns in a dataset (e.g., question and
answer in a QA dataset), then they will have a shared vocabulary.
Attributes:
sequential: Whether the datatype represents sequential data. If False,
no tokenization is applied. Default: True.
use_vocab: Whether to use a Vocab object. If False, the data in this
field should already be numerical. Default: True.
init_token: A token that will be prepended to every example using this
field, or None for no initial token. Default: None.
eos_token: A token that will be appended to every example using this
field, or None for no end-of-sentence token. Default: None.
fix_length: A fixed length that all examples using this field will be
padded to, or None for flexible sequence lengths. Default: None.
dtype: The torch.dtype class that represents a batch of examples
of this kind of data. Default: torch.long.
preprocessing: The Pipeline that will be applied to examples
using this field after tokenizing but before numericalizing. Many
Datasets replace this attribute with a custom preprocessor.
Default: None.
postprocessing: A Pipeline that will be applied to examples using
this field after numericalizing but before the numbers are turned
into a Tensor. The pipeline function takes the batch as a list, and
the field's Vocab.
Default: None.
lower: Whether to lowercase the text in this field. Default: False.
tokenize: The function used to tokenize strings using this field into
sequential examples. If "spacy", the SpaCy tokenizer is
used. If a non-serializable function is passed as an argument,
the field will not be able to be serialized. Default: string.split.
tokenizer_language: The language of the tokenizer to be constructed.
Various languages currently supported only in SpaCy.
include_lengths: Whether to return a tuple of a padded minibatch and
            a list containing the lengths of the examples, or just a padded
minibatch. Default: False.
batch_first: Whether to produce tensors with the batch dimension first.
Default: False.
pad_token: The string token used as padding. Default: "<pad>".
unk_token: The string token used to represent OOV words. Default: "<unk>".
pad_first: Do the padding of the sequence at the beginning. Default: False.
truncate_first: Do the truncating of the sequence at the beginning. Default: False
stop_words: Tokens to discard during the preprocessing step. Default: None
is_target: Whether this field is a target variable.
Affects iteration over batches. Default: False
"""
vocab_cls = Vocab
# Dictionary mapping PyTorch tensor dtypes to the appropriate Python
# numeric type.
dtypes = {
torch.float32: float,
torch.float: float,
torch.float64: float,
torch.double: float,
torch.float16: float,
torch.half: float,
torch.uint8: int,
torch.int8: int,
torch.int16: int,
torch.short: int,
torch.int32: int,
torch.int: int,
torch.int64: int,
torch.long: int,
}
ignore = ['dtype', 'tokenize']
def __init__(self, sequential=True, use_vocab=True, init_token=None,
eos_token=None, fix_length=None, dtype=torch.long,
preprocessing=None, postprocessing=None, lower=False,
tokenize=None, tokenizer_language='en', include_lengths=False,
batch_first=False, pad_token="<pad>", unk_token="<unk>",
pad_first=False, truncate_first=False, stop_words=None,
is_target=False):
self.sequential = sequential
self.use_vocab = use_vocab
self.init_token = init_token
self.eos_token = eos_token
self.unk_token = unk_token
self.fix_length = fix_length
self.dtype = dtype
self.preprocessing = preprocessing
self.postprocessing = postprocessing
self.lower = lower
# store params to construct tokenizer for serialization
# in case the tokenizer isn't picklable (e.g. spacy)
self.tokenizer_args = (tokenize, tokenizer_language)
self.tokenize = get_tokenizer(tokenize, tokenizer_language)
self.include_lengths = include_lengths
self.batch_first = batch_first
self.pad_token = pad_token if self.sequential else None
self.pad_first = pad_first
self.truncate_first = truncate_first
try:
self.stop_words = set(stop_words) if stop_words is not None else None
except TypeError:
raise ValueError("Stop words must be convertible to a set")
self.is_target = is_target
def __getstate__(self):
str_type = dtype_to_attr(self.dtype)
if is_tokenizer_serializable(*self.tokenizer_args):
tokenize = self.tokenize
else:
# signal to restore in `__setstate__`
tokenize = None
attrs = {k: v for k, v in self.__dict__.items() if k not in self.ignore}
attrs['dtype'] = str_type
attrs['tokenize'] = tokenize
return attrs
def __setstate__(self, state):
state['dtype'] = getattr(torch, state['dtype'])
if not state['tokenize']:
state['tokenize'] = get_tokenizer(*state['tokenizer_args'])
self.__dict__.update(state)
def __hash__(self):
# we don't expect this to be called often
return 42
def __eq__(self, other):
if not isinstance(other, RawField):
return False
return self.__dict__ == other.__dict__
def preprocess(self, x):
"""Load a single example using this field, tokenizing if necessary.
If `sequential=True`, the input will be tokenized. Then the input
will be optionally lowercased and passed to the user-provided
`preprocessing` Pipeline."""
if self.sequential and isinstance(x, str):
x = self.tokenize(x.rstrip('\n'))
if self.lower:
x = Pipeline(str.lower)(x)
if self.sequential and self.use_vocab and self.stop_words is not None:
x = [w for w in x if w not in self.stop_words]
if self.preprocessing is not None:
return self.preprocessing(x)
else:
return x
def process(self, batch, device=None):
""" Process a list of examples to create a torch.Tensor.
Pad, numericalize, and postprocess a batch and create a tensor.
Args:
batch (list(object)): A list of object from a batch of examples.
Returns:
torch.autograd.Variable: Processed object given the input
and custom postprocessing Pipeline.
"""
padded = self.pad(batch)
tensor = self.numericalize(padded, device=device)
return tensor
def pad(self, minibatch):
"""Pad a batch of examples using this field.
Pads to self.fix_length if provided, otherwise pads to the length of
the longest example in the batch. Prepends self.init_token and appends
self.eos_token if those attributes are not None. Returns a tuple of the
padded list and a list containing lengths of each example if
`self.include_lengths` is `True` and `self.sequential` is `True`, else just
returns the padded list. If `self.sequential` is `False`, no padding is applied.
"""
minibatch = list(minibatch)
if not self.sequential:
return minibatch
if self.fix_length is None:
max_len = max(len(x) for x in minibatch)
else:
max_len = self.fix_length + (
self.init_token, self.eos_token).count(None) - 2
padded, lengths = [], []
for x in minibatch:
if self.pad_first:
padded.append(
[self.pad_token] * max(0, max_len - len(x))
+ ([] if self.init_token is None else [self.init_token])
+ list(x[-max_len:] if self.truncate_first else x[:max_len])
+ ([] if self.eos_token is None else [self.eos_token]))
else:
padded.append(
([] if self.init_token is None else [self.init_token])
+ list(x[-max_len:] if self.truncate_first else x[:max_len])
+ ([] if self.eos_token is None else [self.eos_token])
+ [self.pad_token] * max(0, max_len - len(x)))
lengths.append(len(padded[-1]) - max(0, max_len - len(x)))
if self.include_lengths:
return (padded, lengths)
return padded
def build_vocab(self, *args, **kwargs):
"""Construct the Vocab object for this field from one or more datasets.
Arguments:
Positional arguments: Dataset objects or other iterable data
sources from which to construct the Vocab object that
represents the set of possible values for this field. If
a Dataset object is provided, all columns corresponding
to this field are used; individual columns can also be
provided directly.
Remaining keyword arguments: Passed to the constructor of Vocab.
"""
counter = Counter()
sources = []
for arg in args:
if isinstance(arg, Dataset):
sources += [getattr(arg, name) for name, field in
arg.fields.items() if field is self]
else:
sources.append(arg)
for data in sources:
for x in data:
if not self.sequential:
x = [x]
try:
counter.update(x)
except TypeError:
counter.update(chain.from_iterable(x))
specials = list(OrderedDict.fromkeys(
tok for tok in [self.unk_token, self.pad_token, self.init_token,
self.eos_token] + kwargs.pop('specials', [])
if tok is not None))
self.vocab = self.vocab_cls(counter, specials=specials, **kwargs)
def numericalize(self, arr, device=None):
"""Turn a batch of examples that use this field into a Variable.
If the field has include_lengths=True, a tensor of lengths will be
included in the return value.
Arguments:
arr (List[List[str]], or tuple of (List[List[str]], List[int])): List of tokenized
and padded examples, or tuple of List of
tokenized and padded examples and List of lengths of each
example if self.include_lengths is True.
device (str or torch.device): A string or instance of `torch.device`
specifying which device the Variables are going to be created on.
If left as default, the tensors will be created on cpu. Default: None.
"""
if self.include_lengths and not isinstance(arr, tuple):
raise ValueError("Field has include_lengths set to True, but "
"input data is not a tuple of "
"(data batch, batch lengths).")
if isinstance(arr, tuple):
arr, lengths = arr
lengths = torch.tensor(lengths, dtype=self.dtype, device=device)
if self.use_vocab:
if self.sequential:
arr = [[self.vocab.stoi[x] for x in ex] for ex in arr]
else:
arr = [self.vocab.stoi[x] for x in arr]
if self.postprocessing is not None:
arr = self.postprocessing(arr, self.vocab)
else:
if self.dtype not in self.dtypes:
raise ValueError(
"Specified Field dtype {} can not be used with "
"use_vocab=False because we do not know how to numericalize it. "
"Please raise an issue at "
"https://github.com/pytorch/text/issues".format(self.dtype))
numericalization_func = self.dtypes[self.dtype]
# It doesn't make sense to explicitly coerce to a numeric type if
# the data is sequential, since it's unclear how to coerce padding tokens
# to a numeric type.
if not self.sequential:
arr = [numericalization_func(x) if isinstance(x, str)
else x for x in arr]
if self.postprocessing is not None:
arr = self.postprocessing(arr, None)
var = torch.tensor(arr, dtype=self.dtype, device=device)
if self.sequential and not self.batch_first:
var.t_()
if self.sequential:
var = var.contiguous()
if self.include_lengths:
return var, lengths
return var
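# Illustrative usage sketch: tokenize raw strings with a Field, build its
# vocabulary, and turn them into a padded LongTensor batch. The sample
# sentences are placeholders.
def _field_demo():
    text_field = Field(lower=True, batch_first=True)  # default whitespace tokenizer
    tokenized = [text_field.preprocess(s) for s in ["Hello world", "A longer example sentence"]]
    text_field.build_vocab(tokenized)
    return text_field.process(tokenized)  # LongTensor of shape (batch, max_len)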
|
import torch
from collections import defaultdict
# Vectors and the pretrained_aliases registry are referenced by load_vectors() below.
from torchtext.vocab import Vectors, pretrained_aliases
class Vocab(object):
"""Defines a vocabulary object that will be used to numericalize a field.
Attributes:
freqs: A collections.Counter object holding the frequencies of tokens
in the data used to build the Vocab.
stoi: A collections.defaultdict instance mapping token strings to
numerical identifiers.
itos: A list of token strings indexed by their numerical identifiers.
"""
    # TODO (@mttk): Populate class with default values of special symbols
UNK = '<unk>'
def __init__(self, counter, max_size=None, min_freq=1, specials=('<unk>', '<pad>'),
vectors=None, unk_init=None, vectors_cache=None, specials_first=True):
"""Create a Vocab object from a collections.Counter.
Args:
counter: collections.Counter object holding the frequencies of
each value found in the data.
max_size: The maximum size of the vocabulary, or None for no
maximum. Default: None.
min_freq: The minimum frequency needed to include a token in the
vocabulary. Values less than 1 will be set to 1. Default: 1.
specials: The list of special tokens (e.g., padding or eos) that
                will be prepended to the vocabulary. Default: ['<unk>', '<pad>']
vectors: One of either the available pretrained vectors
or custom pretrained vectors (see Vocab.load_vectors);
or a list of aforementioned vectors
unk_init (callback): by default, initialize out-of-vocabulary word vectors
to zero vectors; can be any function that takes in a Tensor and
returns a Tensor of the same size. Default: 'torch.zeros'
vectors_cache: directory for cached vectors. Default: '.vector_cache'
specials_first: Whether to add special tokens into the vocabulary at first.
If it is False, they are added into the vocabulary at last.
Default: True.
"""
self.freqs = counter
counter = counter.copy()
min_freq = max(min_freq, 1)
self.itos = list()
self.unk_index = None
if specials_first:
self.itos = list(specials)
# only extend max size if specials are prepended
max_size = None if max_size is None else max_size + len(specials)
# frequencies of special tokens are not counted when building vocabulary
# in frequency order
for tok in specials:
del counter[tok]
# sort by frequency, then alphabetically
words_and_frequencies = sorted(counter.items(), key=lambda tup: tup[0])
words_and_frequencies.sort(key=lambda tup: tup[1], reverse=True)
for word, freq in words_and_frequencies:
if freq < min_freq or len(self.itos) == max_size:
break
self.itos.append(word)
if Vocab.UNK in specials: # hard-coded for now
unk_index = specials.index(Vocab.UNK) # position in list
# account for ordering of specials, set variable
self.unk_index = unk_index if specials_first else len(self.itos) + unk_index
self.stoi = defaultdict(self._default_unk_index)
else:
self.stoi = defaultdict()
if not specials_first:
self.itos.extend(list(specials))
# stoi is simply a reverse dict for itos
self.stoi.update({tok: i for i, tok in enumerate(self.itos)})
self.vectors = None
if vectors is not None:
self.load_vectors(vectors, unk_init=unk_init, cache=vectors_cache)
else:
assert unk_init is None and vectors_cache is None
def _default_unk_index(self):
return self.unk_index
def __getitem__(self, token):
return self.stoi.get(token, self.stoi.get(Vocab.UNK))
def __getstate__(self):
        # avoid pickling the defaultdict
attrs = dict(self.__dict__)
# cast to regular dict
attrs['stoi'] = dict(self.stoi)
return attrs
def __setstate__(self, state):
if state.get("unk_index", None) is None:
stoi = defaultdict()
else:
stoi = defaultdict(self._default_unk_index)
stoi.update(state['stoi'])
state['stoi'] = stoi
self.__dict__.update(state)
def __eq__(self, other):
if self.freqs != other.freqs:
return False
if self.stoi != other.stoi:
return False
if self.itos != other.itos:
return False
if self.vectors != other.vectors:
return False
return True
def __len__(self):
return len(self.itos)
def lookup_indices(self, tokens):
indices = [self.__getitem__(token) for token in tokens]
return indices
def extend(self, v, sort=False):
words = sorted(v.itos) if sort else v.itos
for w in words:
if w not in self.stoi:
self.itos.append(w)
self.stoi[w] = len(self.itos) - 1
def load_vectors(self, vectors, **kwargs):
"""
Args:
vectors: one of or a list containing instantiations of the
GloVe, CharNGram, or Vectors classes. Alternatively, one
of or a list of available pretrained vectors:
charngram.100d
fasttext.en.300d
fasttext.simple.300d
glove.42B.300d
glove.840B.300d
glove.twitter.27B.25d
glove.twitter.27B.50d
glove.twitter.27B.100d
glove.twitter.27B.200d
glove.6B.50d
glove.6B.100d
glove.6B.200d
glove.6B.300d
Remaining keyword arguments: Passed to the constructor of Vectors classes.
"""
if not isinstance(vectors, list):
vectors = [vectors]
for idx, vector in enumerate(vectors):
if isinstance(vector, str):
# Convert the string pretrained vector identifier
# to a Vectors object
if vector not in pretrained_aliases:
raise ValueError(
"Got string input vector {}, but allowed pretrained "
"vectors are {}".format(
vector, list(pretrained_aliases.keys())))
vectors[idx] = pretrained_aliases[vector](**kwargs)
elif not isinstance(vector, Vectors):
raise ValueError(
"Got input vectors of type {}, expected str or "
"Vectors object".format(type(vector)))
tot_dim = sum(v.dim for v in vectors)
self.vectors = torch.Tensor(len(self), tot_dim)
for i, token in enumerate(self.itos):
start_dim = 0
for v in vectors:
end_dim = start_dim + v.dim
self.vectors[i][start_dim:end_dim] = v[token.strip()]
start_dim = end_dim
assert(start_dim == tot_dim)
def set_vectors(self, stoi, vectors, dim, unk_init=torch.Tensor.zero_):
"""
Set the vectors for the Vocab instance from a collection of Tensors.
Args:
stoi: A dictionary of string to the index of the associated vector
in the `vectors` input argument.
vectors: An indexed iterable (or other structure supporting __getitem__) that
given an input index, returns a FloatTensor representing the vector
for the token associated with the index. For example,
vector[stoi["string"]] should return the vector for "string".
dim: The dimensionality of the vectors.
unk_init (callback): by default, initialize out-of-vocabulary word vectors
to zero vectors; can be any function that takes in a Tensor and
returns a Tensor of the same size. Default: 'torch.zeros'
"""
self.vectors = torch.Tensor(len(self), dim)
for i, token in enumerate(self.itos):
wv_index = stoi.get(token, None)
if wv_index is not None:
self.vectors[i] = vectors[wv_index]
else:
self.vectors[i] = unk_init(self.vectors[i])
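# Illustrative usage sketch: build a Vocab straight from a Counter; unknown
# tokens resolve to the index of the '<unk>' special.
def _vocab_demo():
    from collections import Counter
    v = Vocab(Counter(["hello", "hello", "world"]))
    return v.lookup_indices(["hello", "never-seen"])  # second index is the <unk> index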
|
import torch
class Batch(object):
"""Defines a batch of examples along with its Fields.
Attributes:
batch_size: Number of examples in the batch.
dataset: A reference to the dataset object the examples come from
(which itself contains the dataset's Field objects).
train: Deprecated: this attribute is left for backwards compatibility,
however it is UNUSED as of the merger with pytorch 0.4.
input_fields: The names of the fields that are used as input for the model
target_fields: The names of the fields that are used as targets during
model training
Also stores the Variable for each column in the batch as an attribute.
"""
def __init__(self, data=None, dataset=None, device=None):
"""Create a Batch from a list of examples."""
if data is not None:
self.batch_size = len(data)
self.dataset = dataset
self.fields = dataset.fields.keys() # copy field names
self.input_fields = [k for k, v in dataset.fields.items() if
v is not None and not v.is_target]
self.target_fields = [k for k, v in dataset.fields.items() if
v is not None and v.is_target]
for (name, field) in dataset.fields.items():
if field is not None:
batch = [getattr(x, name) for x in data]
setattr(self, name, field.process(batch, device=device))
@classmethod
def fromvars(cls, dataset, batch_size, train=None, **kwargs):
"""Create a Batch directly from a number of Variables."""
batch = cls()
batch.batch_size = batch_size
batch.dataset = dataset
batch.fields = dataset.fields.keys()
for k, v in kwargs.items():
setattr(batch, k, v)
return batch
def __repr__(self):
return str(self)
def __str__(self):
if not self.__dict__:
return 'Empty {} instance'.format(torch.typename(self))
fields_to_index = filter(lambda field: field is not None, self.fields)
var_strs = '\n'.join(['\t[.' + name + ']' + ":" + _short_str(getattr(self, name))
for name in fields_to_index if hasattr(self, name)])
data_str = (' from {}'.format(self.dataset.name.upper())
if hasattr(self.dataset, 'name')
and isinstance(self.dataset.name, str) else '')
strt = '[{} of size {}{}]\n{}'.format(torch.typename(self),
self.batch_size, data_str, var_strs)
return '\n' + strt
def __len__(self):
return self.batch_size
def _get_field_values(self, fields):
if len(fields) == 0:
return None
elif len(fields) == 1:
return getattr(self, fields[0])
else:
return tuple(getattr(self, f) for f in fields)
def __iter__(self):
yield self._get_field_values(self.input_fields)
yield self._get_field_values(self.target_fields)
def _short_str(tensor):
# unwrap variable to tensor
if not torch.is_tensor(tensor):
# (1) unpack variable
if hasattr(tensor, 'data'):
tensor = tensor.data
# (2) handle include_lengths
elif isinstance(tensor, tuple):
return str(tuple(_short_str(t) for t in tensor))
# (3) fallback to default str
else:
return str(tensor)
# copied from torch _tensor_str
size_str = 'x'.join(str(size) for size in tensor.size())
device_str = '' if not tensor.is_cuda else \
' (GPU {})'.format(tensor.get_device())
strt = '[{} of size {}{}]'.format(torch.typename(tensor),
size_str, device_str)
return strt
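# Illustrative sketch: Batch.fromvars attaches already-built tensors to a Batch
# without running Field.process; this is how BPTTIterator assembles its batches.
# `dataset` is any object exposing a `.fields` mapping; the tensors are placeholders.
def _fromvars_demo(dataset, batch_size, text, target):
    return Batch.fromvars(dataset, batch_size, text=text, target=target)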
|
from .data import Dataset
from .example import Example
class SequenceTaggingDataset(Dataset):
"""Defines a dataset for sequence tagging. Examples in this dataset
contain paired lists -- paired list of words and tags.
For example, in the case of part-of-speech tagging, an example is of the
form
[I, love, PyTorch, .] paired with [PRON, VERB, PROPN, PUNCT]
See torchtext/test/sequence_tagging.py on how to use this class.
"""
@staticmethod
def sort_key(example):
for attr in dir(example):
if not callable(getattr(example, attr)) and \
not attr.startswith("__"):
return len(getattr(example, attr))
return 0
def __init__(self, path, fields, encoding="utf-8", separator="\t", **kwargs):
examples = []
columns = []
with open(path, encoding=encoding) as input_file:
for line in input_file:
line = line.strip()
if line == "":
if columns:
examples.append(Example.fromlist(columns, fields))
columns = []
else:
for i, column in enumerate(line.split(separator)):
if len(columns) < i + 1:
columns.append([])
columns[i].append(column)
if columns:
examples.append(Example.fromlist(columns, fields))
super(SequenceTaggingDataset, self).__init__(examples, fields,
**kwargs)
class UDPOS(SequenceTaggingDataset):
# Universal Dependencies English Web Treebank.
# Download original at http://universaldependencies.org/
# License: http://creativecommons.org/licenses/by-sa/4.0/
urls = ['https://bitbucket.org/sivareddyg/public/downloads/en-ud-v2.zip']
dirname = 'en-ud-v2'
name = 'udpos'
@classmethod
def splits(cls, fields, root=".data", train="en-ud-tag.v2.train.txt",
validation="en-ud-tag.v2.dev.txt",
test="en-ud-tag.v2.test.txt", **kwargs):
"""Downloads and loads the Universal Dependencies Version 2 POS Tagged
data.
"""
return super(UDPOS, cls).splits(
fields=fields, root=root, train=train, validation=validation,
test=test, **kwargs)
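# Illustrative sketch: a CoNLL-style file (one token<TAB>tag per line, blank line
# between sentences) becomes a dataset whose examples hold parallel word/tag lists.
# `path` and the two fields are supplied by the caller.
def _tagging_demo(path, word_field, tag_field):
    return SequenceTaggingDataset(path, fields=[('word', word_field), ('tag', tag_field)])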
|
import math
import random
import logging
import torch
from torchtext.data.utils import RandomShuffler
from .batch import Batch
from .data import Dataset
logger = logging.getLogger(__name__)
class Iterator(object):
"""Defines an iterator that loads batches of data from a Dataset.
Attributes:
dataset: The Dataset object to load Examples from.
batch_size: Batch size.
batch_size_fn: Function of three arguments (new example to add, current
count of examples in the batch, and current effective batch size)
that returns the new effective batch size resulting from adding
that example to a batch. This is useful for dynamic batching, where
this function would add to the current effective batch size the
number of tokens in the new example.
sort_key: A key to use for sorting examples in order to batch together
examples with similar lengths and minimize padding. The sort_key
provided to the Iterator constructor overrides the sort_key
attribute of the Dataset, or defers to it if None.
train: Whether the iterator represents a train set.
repeat: Whether to repeat the iterator for multiple epochs. Default: False.
shuffle: Whether to shuffle examples between epochs.
sort: Whether to sort examples according to self.sort_key.
Note that shuffle and sort default to train and (not train).
sort_within_batch: Whether to sort (in descending order according to
self.sort_key) within each batch. If None, defaults to self.sort.
If self.sort is True and this is False, the batch is left in the
original (ascending) sorted order.
device (str or `torch.device`): A string or instance of `torch.device`
specifying which device the Variables are going to be created on.
If left as default, the tensors will be created on cpu. Default: None.
"""
def __init__(self, dataset, batch_size, sort_key=None, device=None,
batch_size_fn=None, train=True,
repeat=False, shuffle=None, sort=None,
sort_within_batch=None):
self.batch_size, self.train, self.dataset = batch_size, train, dataset
self.batch_size_fn = batch_size_fn
self.iterations = 0
self.repeat = repeat
self.shuffle = train if shuffle is None else shuffle
self.sort = not train if sort is None else sort
if sort_within_batch is None:
self.sort_within_batch = self.sort
else:
self.sort_within_batch = sort_within_batch
if sort_key is None:
self.sort_key = dataset.sort_key
else:
self.sort_key = sort_key
if isinstance(device, int):
logger.warning("The `device` argument should be set by using `torch.device`"
+ " or passing a string as an argument. This behavior will be"
+ " deprecated soon and currently defaults to cpu.")
device = None
if device is None:
device = torch.device('cpu')
elif isinstance(device, str):
device = torch.device(device)
self.device = device
self.random_shuffler = RandomShuffler()
# For state loading/saving only
self._iterations_this_epoch = 0
self._random_state_this_epoch = None
self._restored_from_state = False
@classmethod
def splits(cls, datasets, batch_sizes=None, **kwargs):
"""Create Iterator objects for multiple splits of a dataset.
Arguments:
datasets: Tuple of Dataset objects corresponding to the splits. The
first such object should be the train set.
batch_sizes: Tuple of batch sizes to use for the different splits,
or None to use the same batch_size for all splits.
Remaining keyword arguments: Passed to the constructor of the
iterator class being used.
"""
if batch_sizes is None:
batch_sizes = [kwargs.pop('batch_size')] * len(datasets)
ret = []
for i in range(len(datasets)):
train = i == 0
ret.append(cls(
datasets[i], batch_size=batch_sizes[i], train=train, **kwargs))
return tuple(ret)
def data(self):
"""Return the examples in the dataset in order, sorted, or shuffled."""
if self.sort:
xs = sorted(self.dataset, key=self.sort_key)
elif self.shuffle:
xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))]
else:
xs = self.dataset
return xs
def init_epoch(self):
"""Set up the batch generator for a new epoch."""
if self._restored_from_state:
self.random_shuffler.random_state = self._random_state_this_epoch
else:
self._random_state_this_epoch = self.random_shuffler.random_state
self.create_batches()
if self._restored_from_state:
self._restored_from_state = False
else:
self._iterations_this_epoch = 0
if not self.repeat:
self.iterations = 0
def create_batches(self):
self.batches = batch(self.data(), self.batch_size, self.batch_size_fn)
@property
def epoch(self):
return math.floor(self.iterations / len(self))
def __len__(self):
if self.batch_size_fn is not None:
raise NotImplementedError
return math.ceil(len(self.dataset) / self.batch_size)
def __iter__(self):
while True:
self.init_epoch()
for idx, minibatch in enumerate(self.batches):
# fast-forward if loaded from state
if self._iterations_this_epoch > idx:
continue
self.iterations += 1
self._iterations_this_epoch += 1
if self.sort_within_batch:
# NOTE: `rnn.pack_padded_sequence` requires that a minibatch
# be sorted by decreasing order, which requires reversing
# relative to typical sort keys
if self.sort:
minibatch.reverse()
else:
minibatch.sort(key=self.sort_key, reverse=True)
yield Batch(minibatch, self.dataset, self.device)
if not self.repeat:
return
def state_dict(self):
return {
"iterations": self.iterations,
"iterations_this_epoch": self._iterations_this_epoch,
"random_state_this_epoch": self._random_state_this_epoch}
def load_state_dict(self, state_dict):
self.iterations = state_dict["iterations"]
self._iterations_this_epoch = state_dict["iterations_this_epoch"]
self._random_state_this_epoch = state_dict["random_state_this_epoch"]
self._restored_from_state = True
class BPTTIterator(Iterator):
"""Defines an iterator for language modeling tasks that use BPTT.
Provides contiguous streams of examples together with targets that are
one timestep further forward, for language modeling training with
backpropagation through time (BPTT). Expects a Dataset with a single
example and a single field called 'text' and produces Batches with text and
target attributes.
Attributes:
dataset: The Dataset object to load Examples from.
batch_size: Batch size.
bptt_len: Length of sequences for backpropagation through time.
sort_key: A key to use for sorting examples in order to batch together
examples with similar lengths and minimize padding. The sort_key
provided to the Iterator constructor overrides the sort_key
attribute of the Dataset, or defers to it if None.
train: Whether the iterator represents a train set.
repeat: Whether to repeat the iterator for multiple epochs. Default: False.
shuffle: Whether to shuffle examples between epochs.
sort: Whether to sort examples according to self.sort_key.
Note that shuffle and sort default to train and (not train).
device (str or torch.device): A string or instance of `torch.device`
specifying which device the Variables are going to be created on.
If left as default, the tensors will be created on cpu. Default: None.
"""
def __init__(self, dataset, batch_size, bptt_len, **kwargs):
self.bptt_len = bptt_len
super(BPTTIterator, self).__init__(dataset, batch_size, **kwargs)
def __len__(self):
return math.ceil((len(self.dataset[0].text) / self.batch_size - 1)
/ self.bptt_len)
def __iter__(self):
text = self.dataset[0].text
TEXT = self.dataset.fields['text']
TEXT.eos_token = None
text = text + ([TEXT.pad_token] * int(math.ceil(len(text) / self.batch_size)
* self.batch_size - len(text)))
data = TEXT.numericalize(
[text], device=self.device)
data = data.view(self.batch_size, -1).t().contiguous()
dataset = Dataset(examples=self.dataset.examples, fields=[
('text', TEXT), ('target', TEXT)])
while True:
for i in range(0, len(self) * self.bptt_len, self.bptt_len):
self.iterations += 1
seq_len = min(self.bptt_len, len(data) - i - 1)
batch_text = data[i:i + seq_len]
batch_target = data[i + 1:i + 1 + seq_len]
if TEXT.batch_first:
batch_text = batch_text.t().contiguous()
batch_target = batch_target.t().contiguous()
yield Batch.fromvars(
dataset, self.batch_size,
text=batch_text,
target=batch_target)
if not self.repeat:
return
class BucketIterator(Iterator):
"""Defines an iterator that batches examples of similar lengths together.
Minimizes amount of padding needed while producing freshly shuffled
batches for each new epoch. See pool for the bucketing procedure used.
"""
def create_batches(self):
if self.sort:
self.batches = batch(self.data(), self.batch_size,
self.batch_size_fn)
else:
self.batches = pool(self.data(), self.batch_size,
self.sort_key, self.batch_size_fn,
random_shuffler=self.random_shuffler,
shuffle=self.shuffle,
sort_within_batch=self.sort_within_batch)
def batch(data, batch_size, batch_size_fn=None):
"""Yield elements from data in chunks of batch_size."""
if batch_size_fn is None:
def batch_size_fn(new, count, sofar):
return count
minibatch, size_so_far = [], 0
for ex in data:
minibatch.append(ex)
size_so_far = batch_size_fn(ex, len(minibatch), size_so_far)
if size_so_far == batch_size:
yield minibatch
minibatch, size_so_far = [], 0
elif size_so_far > batch_size:
yield minibatch[:-1]
minibatch, size_so_far = minibatch[-1:], batch_size_fn(ex, 1, 0)
if minibatch:
yield minibatch
def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count,
random_shuffler=None, shuffle=False, sort_within_batch=False):
"""Sort within buckets, then batch, then shuffle batches.
Partitions data into chunks of size 100*batch_size, sorts examples within
each chunk using sort_key, then batch these examples and shuffle the
batches.
"""
if random_shuffler is None:
random_shuffler = random.shuffle
for p in batch(data, batch_size * 100, batch_size_fn):
p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn) \
if sort_within_batch \
else batch(p, batch_size, batch_size_fn)
if shuffle:
for b in random_shuffler(list(p_batch)):
yield b
else:
for b in list(p_batch):
yield b
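# Illustrative sketch: batch a Dataset with length-based bucketing to minimize
# padding. `dataset` is assumed to expose examples with a `text` attribute.
def _bucket_demo(dataset, batch_size=32):
    iterator = BucketIterator(dataset, batch_size=batch_size,
                              sort_key=lambda ex: len(ex.text),
                              sort_within_batch=True, shuffle=True)
    return next(iter(iterator))  # a Batch whose .text holds the processed tensor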
|
import json
from functools import reduce
class Example(object):
"""Defines a single training or test example.
Stores each column of the example as an attribute.
"""
@classmethod
def fromJSON(cls, data, fields):
ex = cls()
obj = json.loads(data)
for key, vals in fields.items():
if vals is not None:
if not isinstance(vals, list):
vals = [vals]
for val in vals:
# for processing the key likes 'foo.bar'
name, field = val
ks = key.split('.')
def reducer(obj, key):
if isinstance(obj, list):
results = []
for data in obj:
if key not in data:
# key error
raise ValueError("Specified key {} was not found in "
"the input data".format(key))
else:
results.append(data[key])
return results
else:
# key error
if key not in obj:
raise ValueError("Specified key {} was not found in "
"the input data".format(key))
else:
return obj[key]
v = reduce(reducer, ks, obj)
setattr(ex, name, field.preprocess(v))
return ex
@classmethod
def fromdict(cls, data, fields):
ex = cls()
for key, vals in fields.items():
if key not in data:
raise ValueError("Specified key {} was not found in "
"the input data".format(key))
if vals is not None:
if not isinstance(vals, list):
vals = [vals]
for val in vals:
name, field = val
setattr(ex, name, field.preprocess(data[key]))
return ex
@classmethod
def fromCSV(cls, data, fields, field_to_index=None):
if field_to_index is None:
return cls.fromlist(data, fields)
else:
assert(isinstance(fields, dict))
data_dict = {f: data[idx] for f, idx in field_to_index.items()}
return cls.fromdict(data_dict, fields)
@classmethod
def fromlist(cls, data, fields):
ex = cls()
for (name, field), val in zip(fields, data):
if field is not None:
if isinstance(val, str):
val = val.rstrip('\n')
# Handle field tuples
if isinstance(name, tuple):
for n, f in zip(name, field):
setattr(ex, n, f.preprocess(val))
else:
setattr(ex, name, field.preprocess(val))
return ex
@classmethod
def fromtree(cls, data, fields, subtrees=False):
try:
from nltk.tree import Tree
except ImportError:
print("Please install NLTK. "
"See the docs at http://nltk.org for more information.")
raise
tree = Tree.fromstring(data)
if subtrees:
return [cls.fromlist(
[' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]
return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)
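# Illustrative sketch: pair raw column values with (name, Field) tuples; each value
# is passed through the corresponding field's preprocess(). The fields are supplied
# by the caller.
def _example_demo(text_field, label_field):
    ex = Example.fromlist(["a short sentence", "positive"],
                          [("text", text_field), ("label", label_field)])
    return ex.text, ex.label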
|
class Pipeline(object):
"""Defines a pipeline for transforming sequence data.
The input is assumed to be utf-8 encoded `str`.
Attributes:
convert_token: The function to apply to input sequence data.
pipes: The Pipelines that will be applied to input sequence
data in order.
"""
def __init__(self, convert_token=None):
"""Create a pipeline.
Arguments:
convert_token: The function to apply to input sequence data.
If None, the identity function is used. Default: None
"""
if convert_token is None:
self.convert_token = Pipeline.identity
elif callable(convert_token):
self.convert_token = convert_token
else:
raise ValueError("Pipeline input convert_token {} is not None "
"or callable".format(convert_token))
self.pipes = [self]
def __call__(self, x, *args):
"""Apply the the current Pipeline(s) to an input.
Arguments:
x: The input to process with the Pipeline(s).
Positional arguments: Forwarded to the `call` function
of the Pipeline(s).
"""
for pipe in self.pipes:
x = pipe.call(x, *args)
return x
def call(self, x, *args):
"""Apply _only_ the convert_token function of the current pipeline
to the input. If the input is a list, a list with the results of
applying the `convert_token` function to all input elements is
returned.
Arguments:
x: The input to apply the convert_token function to.
Positional arguments: Forwarded to the `convert_token` function
of the current Pipeline.
"""
if isinstance(x, list):
return [self.convert_token(tok, *args) for tok in x]
return self.convert_token(x, *args)
def add_before(self, pipeline):
"""Add a Pipeline to be applied before this processing pipeline.
Arguments:
pipeline: The Pipeline or callable to apply before this
Pipeline.
"""
if not isinstance(pipeline, Pipeline):
pipeline = Pipeline(pipeline)
self.pipes = pipeline.pipes[:] + self.pipes[:]
return self
def add_after(self, pipeline):
"""Add a Pipeline to be applied after this processing pipeline.
Arguments:
pipeline: The Pipeline or callable to apply after this
Pipeline.
"""
if not isinstance(pipeline, Pipeline):
pipeline = Pipeline(pipeline)
self.pipes = self.pipes[:] + pipeline.pipes[:]
return self
@staticmethod
def identity(x):
"""Return a copy of the input.
This is here for serialization compatibility with pickle.
"""
return x
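# Illustrative sketch: Pipelines apply their stages in order, and add_after/
# add_before splice another Pipeline's stages into this one. Lists are processed
# element-wise.
def _pipeline_demo():
    cleaned = Pipeline(str.strip).add_after(Pipeline(str.lower))
    return cleaned(["  Hello ", "  WORLD "])  # -> ['hello', 'world']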
|
import os
try:
import defusedxml.ElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
import glob
import io
import codecs
from .data import Dataset
from .example import Example
# interleave_keys is used by TranslationDataset.sort_key; the other vendored modules
# import their helpers from torchtext.data.utils as well.
from torchtext.data.utils import interleave_keys
class TranslationDataset(Dataset):
"""Defines a dataset for machine translation."""
@staticmethod
def sort_key(ex):
        return interleave_keys(len(ex.src), len(ex.trg))
def __init__(self, path, exts, fields, **kwargs):
"""Create a TranslationDataset given paths and fields.
Args:
path: Common prefix of paths to the data files for both languages.
exts: A tuple containing the extension to path for each language.
fields: A tuple containing the fields that will be used for data
in each language.
Remaining keyword arguments: Passed to the constructor of
data.Dataset.
"""
if not isinstance(fields[0], (tuple, list)):
fields = [('src', fields[0]), ('trg', fields[1])]
src_path, trg_path = tuple(os.path.expanduser(path + x) for x in exts)
examples = []
with io.open(src_path, mode='r', encoding='utf-8') as src_file, \
io.open(trg_path, mode='r', encoding='utf-8') as trg_file:
for src_line, trg_line in zip(src_file, trg_file):
src_line, trg_line = src_line.strip(), trg_line.strip()
if src_line != '' and trg_line != '':
examples.append(Example.fromlist(
[src_line, trg_line], fields))
super(TranslationDataset, self).__init__(examples, fields, **kwargs)
@classmethod
def splits(cls, exts, fields, path=None, root='.data',
train='train', validation='val', test='test', **kwargs):
"""Create dataset objects for splits of a TranslationDataset.
Args:
exts: A tuple containing the extension to path for each language.
fields: A tuple containing the fields that will be used for data
in each language.
path (str): Common prefix of the splits' file paths, or None to use
the result of cls.download(root).
root: Root dataset storage directory. Default is '.data'.
train: The prefix of the train data. Default: 'train'.
validation: The prefix of the validation data. Default: 'val'.
test: The prefix of the test data. Default: 'test'.
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
if path is None:
path = cls.download(root)
train_data = None if train is None else cls(
os.path.join(path, train), exts, fields, **kwargs)
val_data = None if validation is None else cls(
os.path.join(path, validation), exts, fields, **kwargs)
test_data = None if test is None else cls(
os.path.join(path, test), exts, fields, **kwargs)
return tuple(d for d in (train_data, val_data, test_data)
if d is not None)
class Multi30k(TranslationDataset):
"""The small-dataset WMT 2016 multimodal task, also known as Flickr30k"""
urls = ['http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/training.tar.gz',
'http://www.quest.dcs.shef.ac.uk/wmt16_files_mmt/validation.tar.gz',
'http://www.quest.dcs.shef.ac.uk/'
'wmt17_files_mmt/mmt_task1_test2016.tar.gz']
name = 'multi30k'
dirname = ''
@classmethod
def splits(cls, exts, fields, root='.data',
train='train', validation='val', test='test2016', **kwargs):
"""Create dataset objects for splits of the Multi30k dataset.
Args:
exts: A tuple containing the extension to path for each language.
fields: A tuple containing the fields that will be used for data
in each language.
root: Root dataset storage directory. Default is '.data'.
train: The prefix of the train data. Default: 'train'.
validation: The prefix of the validation data. Default: 'val'.
test: The prefix of the test data. Default: 'test'.
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
# TODO: This is a _HORRIBLE_ patch related to #208
# 'path' can be passed as a kwarg to the translation dataset constructor
# or has to be set (so the download wouldn't be duplicated). A good idea
# seems to rename the existence check variable from path to something else
if 'path' not in kwargs:
expected_folder = os.path.join(root, cls.name)
path = expected_folder if os.path.exists(expected_folder) else None
else:
path = kwargs['path']
del kwargs['path']
return super(Multi30k, cls).splits(
exts, fields, path, root, train, validation, test, **kwargs)
class IWSLT(TranslationDataset):
"""The IWSLT 2016 TED talk translation task"""
base_url = 'https://wit3.fbk.eu/archive/2016-01//texts/{}/{}/{}.tgz'
name = 'iwslt'
base_dirname = '{}-{}'
@classmethod
def splits(cls, exts, fields, root='.data',
train='train', validation='IWSLT16.TED.tst2013',
test='IWSLT16.TED.tst2014', **kwargs):
"""Create dataset objects for splits of the IWSLT dataset.
Args:
exts: A tuple containing the extension to path for each language.
fields: A tuple containing the fields that will be used for data
in each language.
root: Root dataset storage directory. Default is '.data'.
train: The prefix of the train data. Default: 'train'.
validation: The prefix of the validation data. Default: 'val'.
test: The prefix of the test data. Default: 'test'.
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
cls.dirname = cls.base_dirname.format(exts[0][1:], exts[1][1:])
cls.urls = [cls.base_url.format(exts[0][1:], exts[1][1:], cls.dirname)]
check = os.path.join(root, cls.name, cls.dirname)
path = cls.download(root, check=check)
train = '.'.join([train, cls.dirname])
validation = '.'.join([validation, cls.dirname])
if test is not None:
test = '.'.join([test, cls.dirname])
if not os.path.exists(os.path.join(path, train) + exts[0]):
cls.clean(path)
train_data = None if train is None else cls(
os.path.join(path, train), exts, fields, **kwargs)
val_data = None if validation is None else cls(
os.path.join(path, validation), exts, fields, **kwargs)
test_data = None if test is None else cls(
os.path.join(path, test), exts, fields, **kwargs)
return tuple(d for d in (train_data, val_data, test_data)
if d is not None)
@staticmethod
def clean(path):
for f_xml in glob.iglob(os.path.join(path, '*.xml')):
print(f_xml)
f_txt = os.path.splitext(f_xml)[0]
with codecs.open(f_txt, mode='w', encoding='utf-8') as fd_txt:
root = ET.parse(f_xml).getroot()[0]
for doc in root.findall('doc'):
for e in doc.findall('seg'):
fd_txt.write(e.text.strip() + '\n')
xml_tags = ['<url', '<keywords', '<talkid', '<description',
'<reviewer', '<translator', '<title', '<speaker']
for f_orig in glob.iglob(os.path.join(path, 'train.tags*')):
print(f_orig)
f_txt = f_orig.replace('.tags', '')
with codecs.open(f_txt, mode='w', encoding='utf-8') as fd_txt, \
io.open(f_orig, mode='r', encoding='utf-8') as fd_orig:
for line in fd_orig:
if not any(tag in line for tag in xml_tags):
fd_txt.write(line.strip() + '\n')
class WMT14(TranslationDataset):
"""The WMT 2014 English-German dataset, as preprocessed by Google Brain.
Though this download contains test sets from 2015 and 2016, the train set
differs slightly from WMT 2015 and 2016 and significantly from WMT 2017."""
urls = [('https://drive.google.com/uc?export=download&'
'id=0B_bZck-ksdkpM25jRUN2X2UxMm8', 'wmt16_en_de.tar.gz')]
name = 'wmt14'
dirname = ''
@classmethod
def splits(cls, exts, fields, root='.data',
train='train.tok.clean.bpe.32000',
validation='newstest2013.tok.bpe.32000',
test='newstest2014.tok.bpe.32000', **kwargs):
"""Create dataset objects for splits of the WMT 2014 dataset.
Args:
exts: A tuple containing the extensions for each language. Must be
either ('.en', '.de') or the reverse.
fields: A tuple containing the fields that will be used for data
in each language.
root: Root dataset storage directory. Default is '.data'.
train: The prefix of the train data. Default:
'train.tok.clean.bpe.32000'.
validation: The prefix of the validation data. Default:
'newstest2013.tok.bpe.32000'.
test: The prefix of the test data. Default:
'newstest2014.tok.bpe.32000'.
Remaining keyword arguments: Passed to the splits method of
Dataset.
"""
# TODO: This is a _HORRIBLE_ patch related to #208
# 'path' can be passed as a kwarg to the translation dataset constructor
# or has to be set (so the download wouldn't be duplicated). A good idea
# seems to rename the existence check variable from path to something else
if 'path' not in kwargs:
expected_folder = os.path.join(root, cls.name)
path = expected_folder if os.path.exists(expected_folder) else None
else:
path = kwargs['path']
del kwargs['path']
return super(WMT14, cls).splits(
exts, fields, path, root, train, validation, test, **kwargs)
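# Illustrative sketch: the splits() classmethods download the corpus if needed and
# return train/validation/test TranslationDatasets that share the given fields.
# `src_field` and `trg_field` are supplied by the caller.
def _multi30k_demo(src_field, trg_field, root='.data'):
    return Multi30k.splits(exts=('.de', '.en'),
                           fields=(src_field, trg_field), root=root)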
|
import io
import os
import zipfile
import tarfile
import gzip
import sys
import csv
import shutil
from functools import partial
import torch.utils.data
from torchtext.data.utils import RandomShuffler
from .example import Example
from torchtext.utils import download_from_url
def unicode_csv_reader(unicode_csv_data, **kwargs):
r"""Since the standard csv library does not handle unicode in Python 2, we need a wrapper.
Borrowed and slightly modified from the Python docs:
https://docs.python.org/2/library/csv.html#csv-examples
Args:
unicode_csv_data: unicode csv data (see example below)
Examples:
>>> from torchtext.utils import unicode_csv_reader
>>> import io
>>> with io.open(data_path, encoding="utf8") as f:
>>> reader = unicode_csv_reader(f)
"""
# Fix field larger than field limit error
maxInt = sys.maxsize
while True:
# decrease the maxInt value by factor 10
# as long as the OverflowError occurs.
try:
csv.field_size_limit(maxInt)
break
except OverflowError:
maxInt = int(maxInt / 10)
csv.field_size_limit(maxInt)
for line in csv.reader(unicode_csv_data, **kwargs):
yield line
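# Illustrative sketch: stream rows from a UTF-8 CSV file through the wrapper above,
# which also raises csv.field_size_limit for very wide fields. `path` is a placeholder.
def _csv_demo(path):
    with io.open(path, encoding='utf-8') as f:
        return list(unicode_csv_reader(f))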
class Dataset(torch.utils.data.Dataset):
"""Defines a dataset composed of Examples along with its Fields.
Attributes:
sort_key (callable): A key to use for sorting dataset examples for batching
together examples with similar lengths to minimize padding.
examples (list(Example)): The examples in this dataset.
fields (dict[str, Field]): Contains the name of each column or field, together
with the corresponding Field object. Two fields with the same Field object
will have a shared vocabulary.
"""
sort_key = None
def __init__(self, examples, fields, filter_pred=None):
"""Create a dataset from a list of Examples and Fields.
Arguments:
examples: List of Examples.
fields (List(tuple(str, Field))): The Fields to use in this tuple. The
string is a field name, and the Field is the associated field.
filter_pred (callable or None): Use only examples for which
filter_pred(example) is True, or use all examples if None.
Default is None.
"""
if filter_pred is not None:
make_list = isinstance(examples, list)
examples = filter(filter_pred, examples)
if make_list:
examples = list(examples)
self.examples = examples
self.fields = dict(fields)
# Unpack field tuples
for n, f in list(self.fields.items()):
if isinstance(n, tuple):
self.fields.update(zip(n, f))
del self.fields[n]
@classmethod
def splits(cls, path=None, root='.data', train=None, validation=None,
test=None, **kwargs):
"""Create Dataset objects for multiple splits of a dataset.
Arguments:
path (str): Common prefix of the splits' file paths, or None to use
the result of cls.download(root).
root (str): Root dataset storage directory. Default is '.data'.
train (str): Suffix to add to path for the train set, or None for no
train set. Default is None.
validation (str): Suffix to add to path for the validation set, or None
for no validation set. Default is None.
test (str): Suffix to add to path for the test set, or None for no test
set. Default is None.
Remaining keyword arguments: Passed to the constructor of the
Dataset (sub)class being used.
Returns:
Tuple[Dataset]: Datasets for train, validation, and
test splits in that order, if provided.
"""
if path is None:
path = cls.download(root)
train_data = None if train is None else cls(
os.path.join(path, train), **kwargs)
val_data = None if validation is None else cls(
os.path.join(path, validation), **kwargs)
test_data = None if test is None else cls(
os.path.join(path, test), **kwargs)
return tuple(d for d in (train_data, val_data, test_data)
if d is not None)
def split(self, split_ratio=0.7, stratified=False, strata_field='label',
random_state=None):
"""Create train-test(-valid?) splits from the instance's examples.
Arguments:
split_ratio (float or List of floats): a number [0, 1] denoting the amount
of data to be used for the training split (rest is used for test),
or a list of numbers denoting the relative sizes of train, test and valid
splits respectively. If the relative size for valid is missing, only the
train-test split is returned. Default is 0.7 (for the train set).
stratified (bool): whether the sampling should be stratified.
Default is False.
strata_field (str): name of the examples Field stratified over.
Default is 'label' for the conventional label field.
random_state (tuple): the random seed used for shuffling.
A return value of `random.getstate()`.
Returns:
Tuple[Dataset]: Datasets for train, validation, and
test splits in that order, if the splits are provided.
"""
train_ratio, test_ratio, val_ratio = check_split_ratio(split_ratio)
# For the permutations
rnd = RandomShuffler(random_state)
if not stratified:
train_data, test_data, val_data = rationed_split(self.examples, train_ratio,
test_ratio, val_ratio, rnd)
else:
if strata_field not in self.fields:
raise ValueError("Invalid field name for strata_field {}"
.format(strata_field))
strata = stratify(self.examples, strata_field)
train_data, test_data, val_data = [], [], []
for group in strata:
# Stratify each group and add together the indices.
group_train, group_test, group_val = rationed_split(group, train_ratio,
test_ratio, val_ratio,
rnd)
train_data += group_train
test_data += group_test
val_data += group_val
splits = tuple(Dataset(d, self.fields)
for d in (train_data, val_data, test_data) if d)
        # In case the parent sort key isn't None
if self.sort_key:
for subset in splits:
subset.sort_key = self.sort_key
return splits
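    # Hedged usage sketch: a three-way split. Note that the ratio list is
    # ordered (train, test, valid) while the returned datasets are ordered
    # (train, valid, test).
    #
    # >>> train, valid, test = ds.split(split_ratio=[0.8, 0.1, 0.1],
    # ...                               stratified=True, strata_field='label')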
def __getitem__(self, i):
return self.examples[i]
def __len__(self):
try:
return len(self.examples)
except TypeError:
return 2**32
def __iter__(self):
for x in self.examples:
yield x
def __getattr__(self, attr):
if attr in self.fields:
for x in self.examples:
yield getattr(x, attr)
@classmethod
def download(cls, root, check=None):
"""Download and unzip an online archive (.zip, .gz, or .tgz).
Arguments:
root (str): Folder to download data to.
check (str or None): Folder whose existence indicates
that the dataset has already been downloaded, or
None to check the existence of root/{cls.name}.
Returns:
str: Path to extracted dataset.
"""
path = os.path.join(root, cls.name)
check = path if check is None else check
if not os.path.isdir(check):
for url in cls.urls:
if isinstance(url, tuple):
url, filename = url
else:
filename = os.path.basename(url)
zpath = os.path.join(path, filename)
if not os.path.isfile(zpath):
if not os.path.exists(os.path.dirname(zpath)):
os.makedirs(os.path.dirname(zpath))
print('downloading {}'.format(filename))
download_from_url(url, zpath)
zroot, ext = os.path.splitext(zpath)
_, ext_inner = os.path.splitext(zroot)
if ext == '.zip':
with zipfile.ZipFile(zpath, 'r') as zfile:
print('extracting')
zfile.extractall(path)
# tarfile cannot handle bare .gz files
                elif ext == '.tgz' or (ext == '.gz' and ext_inner == '.tar'):
with tarfile.open(zpath, 'r:gz') as tar:
dirs = [member for member in tar.getmembers()]
tar.extractall(path=path, members=dirs)
elif ext == '.gz':
with gzip.open(zpath, 'rb') as gz:
with open(zroot, 'wb') as uncompressed:
shutil.copyfileobj(gz, uncompressed)
return os.path.join(path, cls.dirname)
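    # Hedged usage sketch: download() relies on the class attributes `name`,
    # `urls`, and `dirname` that concrete subclasses define, so it is normally
    # invoked through splits() rather than directly.
    #
    # >>> extracted = MyDataset.download('.data')   # MyDataset is hypothetical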
def filter_examples(self, field_names):
"""Remove unknown words from dataset examples with respect to given field.
Arguments:
            field_names (list(str)): Only the parts of each example whose field names
                appear in field_names will have their unknown words removed.
"""
for i, example in enumerate(self.examples):
for field_name in field_names:
vocab = set(self.fields[field_name].vocab.stoi)
text = getattr(example, field_name)
example_part = [word for word in text if word in vocab]
setattr(example, field_name, example_part)
self.examples[i] = example
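    # Hedged usage sketch: after building the vocabulary for the 'text' field,
    # drop out-of-vocabulary tokens from every example. TEXT is a placeholder
    # Field object.
    #
    # >>> TEXT.build_vocab(ds)
    # >>> ds.filter_examples(['text'])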
class TabularDataset(Dataset):
"""Defines a Dataset of columns stored in CSV, TSV, or JSON format."""
def __init__(self, path, format, fields, skip_header=False,
csv_reader_params=None, **kwargs):
"""Create a TabularDataset given a path, file format, and field list.
        Arguments:
path (str): Path to the data file.
format (str): The format of the data file. One of "CSV", "TSV", or
"JSON" (case-insensitive).
            fields (list(tuple(str, Field)) or dict[str: tuple(str, Field)]): If using a list,
the format must be CSV or TSV, and the values of the list
should be tuples of (name, field).
The fields should be in the same order as the columns in the CSV or TSV
file, while tuples of (name, None) represent columns that will be ignored.
If using a dict, the keys should be a subset of the JSON keys or CSV/TSV
columns, and the values should be tuples of (name, field).
Keys not present in the input dictionary are ignored.
This allows the user to rename columns from their JSON/CSV/TSV key names
and also enables selecting a subset of columns to load.
skip_header (bool): Whether to skip the first line of the input file.
            csv_reader_params (dict): Parameters to pass to the csv reader.
Only relevant when format is csv or tsv.
See
https://docs.python.org/3/library/csv.html#csv.reader
for more details.
kwargs (dict): passed to the Dataset parent class.
"""
if csv_reader_params is None:
csv_reader_params = {}
format = format.lower()
make_example = {
'json': Example.fromJSON, 'dict': Example.fromdict,
'tsv': Example.fromCSV, 'csv': Example.fromCSV}[format]
with io.open(os.path.expanduser(path), encoding="utf8") as f:
if format == 'csv':
reader = unicode_csv_reader(f, **csv_reader_params)
elif format == 'tsv':
reader = unicode_csv_reader(f, delimiter='\t', **csv_reader_params)
else:
reader = f
if format in ['csv', 'tsv'] and isinstance(fields, dict):
if skip_header:
                raise ValueError('When using a dict to specify fields with a {} file, '
                                 'skip_header must be False and '
                                 'the file must have a header.'.format(format))
header = next(reader)
field_to_index = {f: header.index(f) for f in fields.keys()}
make_example = partial(make_example, field_to_index=field_to_index)
if skip_header:
next(reader)
examples = [make_example(line, fields) for line in reader]
if isinstance(fields, dict):
fields, field_dict = [], fields
for field in field_dict.values():
if isinstance(field, list):
fields.extend(field)
else:
fields.append(field)
super(TabularDataset, self).__init__(examples, fields, **kwargs)
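# Hedged usage sketches for TabularDataset; paths and Field objects are
# placeholders.
#
# A TSV file whose columns are (text, label), described with a list of tuples:
# >>> ds = TabularDataset(path='data/train.tsv', format='tsv',
# ...                     fields=[('text', TEXT), ('label', LABEL)])
#
# A JSON-lines file, where a dict maps input keys to (new_name, field) and
# unlisted keys are ignored:
# >>> ds = TabularDataset(path='data/train.jsonl', format='json',
# ...                     fields={'review_body': ('text', TEXT),
# ...                             'stars': ('label', LABEL)})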
def check_split_ratio(split_ratio):
"""Check that the split ratio argument is not malformed"""
valid_ratio = 0.
if isinstance(split_ratio, float):
# Only the train set relative ratio is provided
# Assert in bounds, validation size is zero
assert 0. < split_ratio < 1., (
"Split ratio {} not between 0 and 1".format(split_ratio))
test_ratio = 1. - split_ratio
return (split_ratio, test_ratio, valid_ratio)
elif isinstance(split_ratio, list):
# A list of relative ratios is provided
length = len(split_ratio)
assert length == 2 or length == 3, (
"Length of split ratio list should be 2 or 3, got {}".format(split_ratio))
# Normalize if necessary
ratio_sum = sum(split_ratio)
if not ratio_sum == 1.:
split_ratio = [float(ratio) / ratio_sum for ratio in split_ratio]
if length == 2:
return tuple(split_ratio + [valid_ratio])
return tuple(split_ratio)
else:
raise ValueError('Split ratio must be float or a list, got {}'
.format(type(split_ratio)))
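# Hedged examples of the normalization above (values illustrative, up to
# floating-point rounding):
#   check_split_ratio(0.7)        -> (0.7, 0.3, 0.0)
#   check_split_ratio([7, 2, 1])  -> (0.7, 0.2, 0.1)   # normalized to sum to 1
#   check_split_ratio([0.8, 0.2]) -> (0.8, 0.2, 0.0)   # no validation split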
def stratify(examples, strata_field):
# The field has to be hashable otherwise this doesn't work
# There's two iterations over the whole dataset here, which can be
# reduced to just one if a dedicated method for stratified splitting is used
unique_strata = set(getattr(example, strata_field) for example in examples)
strata_maps = {s: [] for s in unique_strata}
for example in examples:
strata_maps[getattr(example, strata_field)].append(example)
return list(strata_maps.values())
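# Hedged sketch: group examples by label value before splitting so each split
# keeps roughly the same label distribution.
#
# >>> groups = stratify(ds.examples, 'label')
# >>> [len(g) for g in groups]   # one bucket per distinct label value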
def rationed_split(examples, train_ratio, test_ratio, val_ratio, rnd):
"""Create a random permutation of examples, then split them by ratios
Arguments:
examples: a list of data
train_ratio, test_ratio, val_ratio: split fractions.
rnd: a random shuffler
Examples:
>>> examples = []
>>> train_ratio, test_ratio, val_ratio = 0.7, 0.2, 0.1
>>> rnd = torchtext.data.dataset.RandomShuffler(None)
>>> train_examples, test_examples, valid_examples = \
torchtext.data.dataset.rationed_split(examples, train_ratio,
test_ratio, val_ratio,
rnd)
"""
N = len(examples)
randperm = rnd(range(N))
train_len = int(round(train_ratio * N))
# Due to possible rounding problems
if not val_ratio:
test_len = N - train_len
else:
test_len = int(round(test_ratio * N))
indices = (randperm[:train_len], # Train
randperm[train_len:train_len + test_len], # Test
randperm[train_len + test_len:]) # Validation
# There's a possibly empty list for the validation set
data = tuple([examples[i] for i in index] for index in indices)
return data
|
import subprocess
import os
import sys
from pathlib import Path
CURRENT_DIR = Path(os.path.dirname(os.path.realpath(__file__)))
def pip_install_requirements():
requirements_file = os.path.join(CURRENT_DIR, "requirements.txt")
subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', requirements_file])
def install_diffusers():
    pip_install_requirements()
 |
import torch
from torchbenchmark.util.model import BenchmarkModel
from diffusers import StableDiffusionInstructPix2PixPipeline, EulerAncestralDiscreteScheduler
from typing import Optional, List
class DiffuserModel(BenchmarkModel):
DIFFUSER_MODEL = True
    def __init__(self, name: str, test: str, device: str, jit: bool = False, batch_size: Optional[int] = None, extra_args: Optional[List[str]] = None):
        super().__init__(test, device, jit, batch_size, extra_args if extra_args is not None else [])
if self.device == "cpu":
raise NotImplementedError(f"Model {self.name} does not support CPU device.")
if not self.dargs.precision == "fp16":
raise NotImplementedError(f"Model {self.name} only supports fp16 precision.")
pipe = StableDiffusionInstructPix2PixPipeline.from_pretrained(name, torch_dtype=torch.float16, safety_checker=None)
pipe.to(self.device)
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
self.pipe = pipe
prompt = "turn him into cyborg"
# use the same size as the example image
# https://raw.githubusercontent.com/timothybrooks/instruct-pix2pix/main/imgs/example.jpg
self.example_inputs = (prompt, torch.randn(self.batch_size, 3, 32, 32).to(self.device))
def enable_fp16_half(self):
pass
def get_module(self):
return self.pipe, self.example_inputs
def train(self):
raise NotImplementedError(f"Train is not implemented for model {self.name}")
def eval(self):
with torch.no_grad():
images = self.pipe(*self.example_inputs).images
return images
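# Hedged usage sketch: how a benchmark harness might drive this wrapper. The
# Hugging Face model id and extra_args are illustrative; the harness normally
# supplies them, and dargs.precision must resolve to "fp16".
#
# >>> m = DiffuserModel(name="timbrooks/instruct-pix2pix",
# ...                   test="eval", device="cuda", batch_size=1,
# ...                   extra_args=[...])
# >>> images = m.eval()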
|
import torch
from typing import Tuple
def enable_cudagraph(model: 'torchbenchmark.util.model.BenchmarkModel', example_inputs: Tuple[torch.Tensor, ...]):
optimizer = model.optimizer
loss_fn = model.loss_fn
# warmup
s = torch.cuda.Stream()
s.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s):
for _ in range(3):
optimizer.zero_grad(set_to_none=True)
y_pred = model.model(*example_inputs)
loss = loss_fn(y_pred, model.example_outputs)
loss.backward()
optimizer.step()
torch.cuda.current_stream().wait_stream(s)
# capture
g = torch.cuda.CUDAGraph()
optimizer.zero_grad(set_to_none=True)
with torch.cuda.graph(g):
static_y_pred = model.model(*example_inputs)
static_loss = loss_fn(static_y_pred, model.example_outputs)
static_loss.backward()
optimizer.step()
model.g = g
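# Hedged usage sketch: enable_cudagraph() expects a wrapper exposing .model,
# .optimizer, .loss_fn, .example_outputs, and writable example inputs; after
# capture, a step is replayed by copying fresh data into the captured tensors
# and calling model.g.replay() (see cudagraph_train() in the vision model
# factory below). The benchmark_model name is a placeholder.
#
# >>> enable_cudagraph(benchmark_model, benchmark_model.example_inputs)
# >>> benchmark_model.g.replay()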
|
import os
import torch
import typing
import torch.optim as optim
import torchvision.models as models
from contextlib import nullcontext
from torchbenchmark.util.model import BenchmarkModel
from typing import Tuple, Generator, Optional
class TorchVisionModel(BenchmarkModel):
# To recognize this is a torchvision model
TORCHVISION_MODEL = True
# These two variables should be defined by subclasses
DEFAULT_TRAIN_BSIZE = None
DEFAULT_EVAL_BSIZE = None
# Default eval precision on CUDA device is fp16
DEFAULT_EVAL_CUDA_PRECISION = "fp16"
# Whether to skip the opt zero grad
SKIP_ZERO_GRAD = False
def __init__(self, model_name, test, device, jit=False, batch_size=None, weights=None, extra_args=[]):
super().__init__(test=test, device=device, jit=jit, batch_size=batch_size, extra_args=extra_args)
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True
if weights is None:
self.model = getattr(models, model_name)(pretrained=True).to(self.device)
else:
self.model = getattr(models, model_name)(weights=weights).to(self.device)
self.example_inputs = (torch.randn((self.batch_size, 3, 224, 224)).to(self.device), )
if test == "train":
# compute loss
with torch.no_grad():
self.example_outputs = (torch.rand_like(self.model(*self.example_inputs)), )
self.model.train()
# setup optimizer and loss_fn
# if backend is cudagraph, must set optimizer to be capturable
            if hasattr(self.opt_args, 'backend') and self.opt_args.backend == "cudagraph":
                capturable = True
            else:
                capturable = bool(int(os.getenv("ADAM_CAPTURABLE", 0)))
self.opt = optim.Adam(self.model.parameters(), capturable=capturable)
self.loss_fn = torch.nn.CrossEntropyLoss()
elif test == "eval":
self.model.eval()
self.amp_context = nullcontext
if hasattr(self.opt_args, 'backend') and self.opt_args.backend == "cudagraph":
self.real_input = ( torch.rand_like(self.example_inputs[0]), )
            self.real_output = ( torch.rand_like(self.example_outputs[0]), )  # example_outputs is a one-element tuple (see above)
def get_flops(self):
return self.flops, self.batch_size
def gen_inputs(self, num_batches:int=1) -> Tuple[Generator, Optional[int]]:
def _gen_inputs():
while True:
result = []
for _i in range(num_batches):
result.append((torch.randn((self.batch_size, 3, 224, 224)).to(self.device),))
if self.dargs.precision == "fp16":
result = list(map(lambda x: (x[0].half(), ), result))
yield result
return (_gen_inputs(), None)
def enable_fp16_half(self):
self.model = self.model.half()
self.example_inputs = (self.example_inputs[0].half(), )
def get_module(self):
return self.model, self.example_inputs
def train(self):
if self.opt and not self.SKIP_ZERO_GRAD:
self.opt.zero_grad()
for data, target in zip(self.example_inputs, self.example_outputs):
with self.amp_context():
pred = self.model(data)
self.loss_fn(pred, target).backward()
if self.opt:
self.opt.step()
def cudagraph_train(self):
for data, target in zip(self.real_input, self.real_output):
self.example_inputs[0].copy_(data)
            self.example_outputs[0].copy_(target)
self.g.replay()
def eval(self) -> typing.Tuple[torch.Tensor]:
with torch.no_grad():
with self.amp_context():
return self.model(*self.example_inputs)
def cudagraph_eval(self):
for data, target in zip(self.real_input, self.real_output):
self.example_inputs[0].copy_(data)
            self.example_outputs[0].copy_(target)
self.g.replay()
break
return (self.example_outputs, )
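# Hedged sketch (not part of the original file): a concrete benchmark would
# subclass TorchVisionModel, pin default batch sizes, and forward the
# torchvision model name.
#
# class ResNet50(TorchVisionModel):
#     DEFAULT_TRAIN_BSIZE = 32
#     DEFAULT_EVAL_BSIZE = 32
#     def __init__(self, test, device, jit=False, batch_size=None, extra_args=[]):
#         super().__init__(model_name="resnet50", test=test, device=device,
#                          jit=jit, batch_size=batch_size, extra_args=extra_args)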
|
import argparse
def parse_tb_args(args):
parser = argparse.ArgumentParser()
parser.add_argument("--graph_type", choices=["dense", "sparse"], default="dense", help="Determine dense graph or sparse graph")
args, unknown_args = parser.parse_known_args(args)
return args, unknown_args
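# Hedged, self-contained demo of the parser above; the extra "--foo" flag is
# made up to show that unknown arguments are passed through untouched.
if __name__ == "__main__":
    tb_args, unknown = parse_tb_args(["--graph_type", "sparse", "--foo", "1"])
    print(tb_args.graph_type)  # -> sparse
    print(unknown)             # -> ['--foo', '1']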
|