## @package generator
# Module caffe2.python.docs.generator
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from caffe2.python import core, workspace
from caffe2.python.docs.formatter import Markdown

OpSchema = workspace.C.OpSchema


class DocUploader(object):
    def __init__(self):
        pass

    def upload(self, text):
        pass


class DocGenerator(object):
    def __init__(self, formatter, uploader):
        self.formatter = formatter
        self.uploader = uploader
        self.content_body = ""

    def create_body(self):
        pass

    def update(self):
        self.uploader.upload(self.content_body)


class OpDocGenerator(DocGenerator):
    def getOperatorDoc(self, name, schema, priority):
        return OperatorDoc(name, schema, priority)

    def getOperatorEngine(self, name):
        return OperatorEngine(name)

    def getOperators(self):
        # map: op_name -> operator
        self.operators = {}
        # map: op_name -> [engine, engine]
        self.engines = {}

        def filePriority(x):
            if x == "caffe2/caffe2/operators":
                return 0
            if 'contrib' in x.split('/'):
                return 2
            if 'experiments' in x.split('/'):
                return 3
            return 1

        for name in core._GetRegisteredOperators():
            schema = OpSchema.get(name)
            if schema:
                priority = filePriority(os.path.dirname(schema.file))
                operator = self.getOperatorDoc(name, schema, priority)
                self.operators[name] = operator
            # Engine
            elif name.find("_ENGINE_") != -1:
                engine = self.getOperatorEngine(name)
                if engine.base_op_name in self.engines:
                    self.engines[engine.base_op_name].append(engine)
                else:
                    self.engines[engine.base_op_name] = [engine]
            # No schema
            else:
                priority = 4
                self.operators[name] = self.getOperatorDoc(name, schema, priority)

        for name, engines in self.engines.items():
            if name in self.operators:
                self.operators[name].addEngines(engines)

        # Generate a sorted list of operators
        operators = [v for k, v in self.operators.items()]

        def compare(op1, op2):
            if op1.priority == op2.priority:
                if op1.name < op2.name:
                    return -1
                else:
                    return 1
            return op1.priority - op2.priority

        return sorted(operators, cmp=compare)

    def createBody(self):
        operators = self.getOperators()
        for operator in operators:
            operator.generateSchema(self.formatter)
        self.content_body += self.formatter.dump()


class OperatorEngine(object):
    def __init__(self, name):
        self.op_name = name
        self.base_op_name, self.engine = name.split("_ENGINE_", 1)

    def getDeviceImpl(self):
        deviceImplList = []
        for device, impl in {'CPU': OpSchema.get_cpu_impl(self.op_name),
                             'CUDA': OpSchema.get_cuda_impl(self.op_name)}.items():
            if not impl:
                continue
            deviceImplList.append((device, impl))
        return deviceImplList

    def generateDoc(self, formatter):
        for device, impl in self.getDeviceImpl():
            formatter.addLine(
                '{engine} on {device}: {impl}'.format(engine=self.engine,
                                                      device=device,
                                                      impl=impl))


class OperatorDoc(object):
    def __init__(self, name, schema, priority):
        self.name = name
        self.schema = schema
        self.priority = priority
        self.engines = []

    def addEngines(self, engines):
        self.engines = engines

    def generateDoc(self, formatter):
        if self.schema.doc:
            formatter.parseAndAdd(self.schema.doc)
            formatter.addLinebreak()
        else:
            formatter.addLine("No documentation yet.")

    def generateTable(self, formatter, tuples, title_row, title):
        if tuples:
            if title:
                formatter.addHeader(title, 3)
            table = []
            if title_row:
                table = [title_row]
            for name, doc in tuples:
                table.append([name, doc or ''])
            formatter.addTable(table, (table == []))

    def generateInterface(self, formatter):
        def makeDesc(title, desc):
            f = formatter.clone()
            f.addEmphasis(title, 1)
            out = [(f.dump(), '')]
            for name, doc in desc:
                f = formatter.clone()
                f.addCode(name, inline=True)
                out.append((f.dump(), doc or ''))
            return out

        tuples = []
        if self.schema.arg_desc:
            tuples += makeDesc('Arguments', self.schema.arg_desc)
        if self.schema.input_desc:
            tuples += makeDesc('Inputs', self.schema.input_desc)
        if self.schema.output_desc:
            tuples += makeDesc('Outputs', self.schema.output_desc)
        self.generateTable(formatter, tuples, None, 'Interface')

    def generateCodeLink(self, formatter):
        formatter.addHeader("Code", 3)
        formatter.addLinebreak()
        formatter.addCodeLink(self.schema.file)

    def getInfo(self, formatter, name, impl):
        pass

    def generateDevices(self, formatter):
        formatter.addHeader("Devices", 3)
        devices = [
            self.getInfo(formatter, 'CPU', OpSchema.get_cpu_impl(self.name)),
            self.getInfo(formatter, 'GPU', OpSchema.get_cuda_impl(self.name)),
        ]
        formatter.addList([i for i in devices if i])

    def generateEngines(self, formatter):
        if not len(self.engines):
            return
        formatter.addHeader("Engines", 3)
        for engine in self.engines:
            engine.generateDoc(formatter)

    def generateSchema(self, formatter):
        formatter.addHeader(self.name, 2)
        if self.schema:
            self.generateDoc(formatter)
            self.generateInterface(formatter)
            self.generateCodeLink(formatter)
            self.generateDevices(formatter)
            self.generateEngines(formatter)
            formatter.addBreak()
        else:
            formatter.addLine("No schema documented yet.")
            self.generateDevices(formatter)


if __name__ == "__main__":
    ops = OpDocGenerator(Markdown(), DocUploader())
    ops.createBody()
    print(ops.content_body)
## @package github
# Module caffe2.python.docs.github
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.docs.formatter import Markdown
from caffe2.python.docs.generator import OpDocGenerator, DocUploader
from caffe2.python.docs.generator import OperatorDoc, OperatorEngine
import os


class GHOpDocUploader(DocUploader):
    def __init__(self):
        pass

    def upload(self, content_body):
        print(content_body)


class GHMarkdown(Markdown):
    def addHeader(self, text, h=1):
        self.addLine("\n{header} {text}\n".format(header=h * '#', text=text), True)

    def addDocHeader(self):
        self.addLine("---")
        self.addLine("docid: operators-catalogue")
        self.addLine("title: Operators Catalogue")
        self.addLine("layout: docs")
        self.addLine("permalink: /docs/operators-catalogue.html")
        self.addLine("---")

    def addTable(self, table, noTitle=False):
        self.addRaw("<table>")
        for row in table:
            self.addRaw("<tr>")
            for cell in row:
                self.addRaw("<td>")
                self.addLine("{cell}".format(cell=cell))
                self.addRaw("</td>")
            self.addRaw("</tr>")
        self.addRaw("</table>")


def getCodeLink(formatter, schema):
    formatter = formatter.clone()
    path = os.path.relpath(schema.file, "caffe2")
    schemaLink = ('https://github.com/caffe2/caffe2/blob/master/{path}'
                  .format(path=path))
    formatter.addLink('{path}'.format(path=path), schemaLink)
    return formatter.dump()


class GHOperatorEngine(OperatorEngine):
    def generateDoc(self, formatter):
        for device, _ in self.getDeviceImpl():
            formatter.addCode('{engine}'.format(engine=self.engine), True)
            if device:
                formatter.addRaw(' on ')
                formatter.addEmphasis("{device}".format(device=device), 1)


class GHOperatorDoc(OperatorDoc):
    def generateCodeLink(self, formatter):
        formatter.addHeader("Code", 3)
        formatter.addLinebreak()
        formatter.addRaw(getCodeLink(formatter, self.schema))

    def getInfo(self, formatter, name, impl):
        formatter = formatter.clone()
        if impl:
            formatter.addEmphasis('{name}'.format(name=name), 1)
            formatter.addRaw(' ')
            formatter.addCode('{impl}'.format(impl=impl), True)
        return formatter.dump()


class GHOpDocGenerator(OpDocGenerator):
    def getOperatorDoc(self, name, schema, priority):
        return GHOperatorDoc(name, schema, priority)

    def getOperatorEngine(self, name):
        return GHOperatorEngine(name)

    def createBody(self):
        self.formatter.addDocHeader()
        operators = self.getOperators()
        for operator in operators:
            operator.generateSchema(self.formatter)
        self.content_body += self.formatter.dump()


if __name__ == "__main__":
    ops = GHOpDocGenerator(GHMarkdown(), GHOpDocUploader())
    ops.createBody()
    print(ops.content_body)
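The snippet below is a hedged usage sketch, not part of the original modules: it shows how the GHOpDocGenerator above could be driven to write the generated operator catalogue to a file instead of printing it; the output path is an assumption.

# Hedged usage sketch (not in the original module): write the GitHub-flavored
# operator catalogue to a file. The output path is illustrative.
from caffe2.python.docs.github import GHMarkdown, GHOpDocGenerator, GHOpDocUploader

ops = GHOpDocGenerator(GHMarkdown(), GHOpDocUploader())
ops.createBody()
with open("/tmp/operators-catalogue.md", "w") as f:  # assumed output path
    f.write(ops.content_body)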
## @package convnet_benchmarks
# Module caffe2.python.convnet_benchmarks
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Benchmark for common convnets.

Speed on Titan X, with 10 warmup steps and 10 main steps and with different
versions of cudnn, is as follows (time reported below is per-batch time,
forward / forward+backward):

                    CuDNN V3        CuDNN v4
AlexNet          32.5 / 108.0     27.4 / 90.1
OverFeat        113.0 / 342.3     91.7 / 276.5
Inception       134.5 / 485.8    125.7 / 450.6
VGG (batch 64)  200.8 / 650.0    164.1 / 551.7

Speed on Inception with varied batch sizes and CuDNN v4 is as follows:

Batch Size   Speed per batch   Speed per image
 16           22.8 / 72.7       1.43 / 4.54
 32           38.0 / 127.5      1.19 / 3.98
 64           67.2 / 233.6      1.05 / 3.65
128          125.7 / 450.6      0.98 / 3.52

Speed on Tesla M40, with 10 warmup steps and 10 main steps and with cudnn v4,
is as follows:

AlexNet          68.4 / 218.1
OverFeat        210.5 / 630.3
Inception       300.2 / 1122.2
VGG (batch 64)  405.8 / 1327.7

(Note that these numbers involve a "full" backprop, i.e. the gradient with
respect to the input image is also computed.)

To get the numbers, simply run:

for MODEL in AlexNet OverFeat Inception; do
  PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
    --batch_size 128 --model $MODEL --forward_only True
done
for MODEL in AlexNet OverFeat Inception; do
  PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
    --batch_size 128 --model $MODEL
done
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
  --batch_size 64 --model VGGA --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
  --batch_size 64 --model VGGA

for BS in 16 32 64 128; do
  PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
    --batch_size $BS --model Inception --forward_only True
  PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
    --batch_size $BS --model Inception
done

Note that VGG needs to be run at batch 64 due to memory limit on the backward
pass.
""" import argparse from caffe2.python import cnn, workspace import numpy as np def MLP(order, cudnn_ws, mkl): model = cnn.CNNModelHelper() d = 256 depth = 20 width = 3 for i in range(depth): for j in range(width): current = "fc_{}_{}".format(i, j) if i > 0 else "data" next_ = "fc_{}_{}".format(i + 1, j) model.FC( current, next_, dim_in=d, dim_out=d, weight_init=model.XavierInit, bias_init=model.XavierInit) model.Sum(["fc_{}_{}".format(depth, j) for j in range(width)], ["sum"]) model.FC("sum", "last", dim_in=d, dim_out=1000, weight_init=model.XavierInit, bias_init=model.XavierInit) xent = model.LabelCrossEntropy(["last", "label"], "xent") if not mkl: model.AveragedLoss(xent, "loss") return model, d def AlexNet(order, cudnn_ws, mkl): model = cnn.CNNModelHelper( order, name="alexnet", use_cudnn=True, cudnn_exhaustive_search=True, ws_nbytes_limit=cudnn_ws) conv1 = model.Conv( "data", "conv1", 3, 64, 11, ('XavierFill', {}), ('ConstantFill', {}), stride=4, pad=2 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2) conv2 = model.Conv( pool1, "conv2", 64, 192, 5, ('XavierFill', {}), ('ConstantFill', {}), pad=2 ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2) conv3 = model.Conv( pool2, "conv3", 192, 384, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu3 = model.Relu(conv3, "conv3") conv4 = model.Conv( relu3, "conv4", 384, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu4 = model.Relu(conv4, "conv4") conv5 = model.Conv( relu4, "conv5", 256, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu5 = model.Relu(conv5, "conv5") pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2) fc6 = model.FC( pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relu6 = model.Relu(fc6, "fc6") fc7 = model.FC( relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relu7 = model.Relu(fc7, "fc7") fc8 = model.FC( relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) pred = model.Softmax(fc8, "pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") if not mkl: loss = model.AveragedLoss(xent, "loss") return model, 224 def OverFeat(order, cudnn_ws, mkl): model = cnn.CNNModelHelper( order, name="overfeat", use_cudnn=True, cudnn_exhaustive_search=True, ws_nbytes_limit=cudnn_ws) conv1 = model.Conv( "data", "conv1", 3, 96, 11, ('XavierFill', {}), ('ConstantFill', {}), stride=4 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2) conv2 = model.Conv( pool1, "conv2", 96, 256, 5, ('XavierFill', {}), ('ConstantFill', {}) ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2) conv3 = model.Conv( pool2, "conv3", 256, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu3 = model.Relu(conv3, "conv3") conv4 = model.Conv( relu3, "conv4", 512, 1024, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu4 = model.Relu(conv4, "conv4") conv5 = model.Conv( relu4, "conv5", 1024, 1024, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu5 = model.Relu(conv5, "conv5") pool5 = model.MaxPool(relu5, "pool5", kernel=2, stride=2) fc6 = model.FC( pool5, "fc6", 1024 * 6 * 6, 3072, ('XavierFill', {}), ('ConstantFill', {}) ) relu6 = model.Relu(fc6, "fc6") fc7 = model.FC( relu6, "fc7", 3072, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relu7 = model.Relu(fc7, "fc7") fc8 = model.FC( relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) pred = model.Softmax(fc8, 
"pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") if not mkl: loss = model.AveragedLoss(xent, "loss") return model, 231 def VGGA(order, cudnn_ws, mkl): model = cnn.CNNModelHelper( order, name='vgg-a', use_cudnn=True, cudnn_exhaustive_search=True, ws_nbytes_limit=cudnn_ws) conv1 = model.Conv( "data", "conv1", 3, 64, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2) conv2 = model.Conv( pool1, "conv2", 64, 128, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2) conv3 = model.Conv( pool2, "conv3", 128, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu3 = model.Relu(conv3, "conv3") conv4 = model.Conv( relu3, "conv4", 256, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu4 = model.Relu(conv4, "conv4") pool4 = model.MaxPool(relu4, "pool4", kernel=2, stride=2) conv5 = model.Conv( pool4, "conv5", 256, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu5 = model.Relu(conv5, "conv5") conv6 = model.Conv( relu5, "conv6", 512, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu6 = model.Relu(conv6, "conv6") pool6 = model.MaxPool(relu6, "pool6", kernel=2, stride=2) conv7 = model.Conv( pool6, "conv7", 512, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu7 = model.Relu(conv7, "conv7") conv8 = model.Conv( relu7, "conv8", 512, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu8 = model.Relu(conv8, "conv8") pool8 = model.MaxPool(relu8, "pool8", kernel=2, stride=2) fcix = model.FC( pool8, "fcix", 512 * 7 * 7, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) reluix = model.Relu(fcix, "fcix") fcx = model.FC( reluix, "fcx", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relux = model.Relu(fcx, "fcx") fcxi = model.FC( relux, "fcxi", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) pred = model.Softmax(fcxi, "pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") if not mkl: loss = model.AveragedLoss(xent, "loss") return model, 231 def _InceptionModule( model, input_blob, input_depth, output_name, conv1_depth, conv3_depths, conv5_depths, pool_depth ): # path 1: 1x1 conv conv1 = model.Conv( input_blob, output_name + ":conv1", input_depth, conv1_depth, 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv1 = model.Relu(conv1, conv1) # path 2: 1x1 conv + 3x3 conv conv3_reduce = model.Conv( input_blob, output_name + ":conv3_reduce", input_depth, conv3_depths[0], 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv3_reduce = model.Relu(conv3_reduce, conv3_reduce) conv3 = model.Conv( conv3_reduce, output_name + ":conv3", conv3_depths[0], conv3_depths[1], 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) conv3 = model.Relu(conv3, conv3) # path 3: 1x1 conv + 5x5 conv conv5_reduce = model.Conv( input_blob, output_name + ":conv5_reduce", input_depth, conv5_depths[0], 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv5_reduce = model.Relu(conv5_reduce, conv5_reduce) conv5 = model.Conv( conv5_reduce, output_name + ":conv5", conv5_depths[0], conv5_depths[1], 5, ('XavierFill', {}), ('ConstantFill', {}), pad=2 ) conv5 = model.Relu(conv5, conv5) # path 4: pool + 1x1 conv pool = model.MaxPool( input_blob, output_name + ":pool", kernel=3, stride=1, pad=1 ) pool_proj = model.Conv( pool, output_name + ":pool_proj", input_depth, pool_depth, 1, ('XavierFill', {}), ('ConstantFill', {}) ) pool_proj = model.Relu(pool_proj, pool_proj) output = 
model.Concat([conv1, conv3, conv5, pool_proj], output_name) return output def Inception(order, cudnn_ws, mkl): model = cnn.CNNModelHelper( order, name="inception", use_cudnn=True, cudnn_exhaustive_search=True, ws_nbytes_limit=cudnn_ws) conv1 = model.Conv( "data", "conv1", 3, 64, 7, ('XavierFill', {}), ('ConstantFill', {}), stride=2, pad=3 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2, pad=1) conv2a = model.Conv( pool1, "conv2a", 64, 64, 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv2a = model.Relu(conv2a, conv2a) conv2 = model.Conv( conv2a, "conv2", 64, 192, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2, pad=1) # Inception modules inc3 = _InceptionModule( model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32 ) inc4 = _InceptionModule( model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64 ) pool5 = model.MaxPool(inc4, "pool5", kernel=3, stride=2, pad=1) inc5 = _InceptionModule( model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64 ) inc6 = _InceptionModule( model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64 ) inc7 = _InceptionModule( model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64 ) inc8 = _InceptionModule( model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64 ) inc9 = _InceptionModule( model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128 ) pool9 = model.MaxPool(inc9, "pool9", kernel=3, stride=2, pad=1) inc10 = _InceptionModule( model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128 ) inc11 = _InceptionModule( model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128 ) pool11 = model.AveragePool(inc11, "pool11", kernel=7, stride=1) fc = model.FC( pool11, "fc", 1024, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) # It seems that Soumith's benchmark does not have softmax on top # for Inception. We will add it anyway so we can have a proper # backward pass. pred = model.Softmax(fc, "pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") if not mkl: loss = model.AveragedLoss(xent, "loss") return model, 224 def AddParameterUpdate(model): """ Simple plain SGD update -- not tuned to actually train the models """ ITER = model.Iter("iter") LR = model.LearningRate( ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999) ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) for param in model.params: param_grad = model.param_to_grad[param] model.WeightedSum([param, ONE, param_grad, LR], param) def Benchmark(model_gen, arg): model, input_size = model_gen(arg.order, arg.cudnn_ws, arg.mkl) model.Proto().type = arg.net_type model.Proto().num_workers = arg.num_workers # In order to be able to run everything without feeding more stuff, let's # add the data and label blobs to the parameter initialization net as well. 
if arg.order == "NCHW": input_shape = [arg.batch_size, 3, input_size, input_size] else: input_shape = [arg.batch_size, input_size, input_size, 3] if arg.model == "MLP": input_shape = [arg.batch_size, input_size] model.param_init_net.GaussianFill( [], "data", shape=input_shape, mean=0.0, std=1.0 ) #MKL doesn't support int, so have to use numpy if arg.mkl: label = np.random.randint(low=0, high=1000, size=(arg.batch_size,)).astype(np.int32) workspace.FeedBlob("label", label) else: model.param_init_net.UniformIntFill( [], "label", shape=[arg.batch_size, ], min=0, max=999 ) if arg.forward_only: print('{}: running forward only.'.format(arg.model)) else: if arg.mkl: print( '==WARNING==\n' 'forward-backward not supported yet in MKL, so exiting' ) print('{}: running forward-backward.'.format(arg.model)) model.AddGradientOperators(["loss"]) AddParameterUpdate(model) if arg.order == 'NHWC': print( '==WARNING==\n' 'NHWC order with CuDNN may not be supported yet, so I might\n' 'exit suddenly.' ) if not arg.cpu: if arg.mkl: model.param_init_net.RunAllOnMKL() model.net.RunAllOnMKL() else: model.param_init_net.RunAllOnGPU() model.net.RunAllOnGPU() if arg.engine: for op in model.net.Proto().op: op.engine = arg.engine if arg.dump_model: # Writes out the pbtxt for benchmarks on e.g. Android with open( "{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w" ) as fid: fid.write(str(model.param_init_net.Proto())) with open("{0}.pbtxt".format(arg.model, arg.batch_size), "w") as fid: fid.write(str(model.net.Proto())) workspace.RunNetOnce(model.param_init_net) workspace.CreateNet(model.net) workspace.BenchmarkNet( model.net.Proto().name, arg.warmup_iterations, arg.iterations, arg.layer_wise_benchmark) def GetArgumentParser(): parser = argparse.ArgumentParser(description="Caffe2 benchmark.") parser.add_argument( "--batch_size", type=int, default=128, help="The batch size." ) parser.add_argument("--model", type=str, help="The model to benchmark.") parser.add_argument( "--order", type=str, default="NCHW", help="The order to evaluate." ) parser.add_argument( "--cudnn_ws", type=int, help="The cudnn workspace size." ) parser.add_argument( "--iterations", type=int, default=10, help="Number of iterations to run the network." ) parser.add_argument( "--warmup_iterations", type=int, default=10, help="Number of warm-up iterations before benchmarking." ) parser.add_argument( "--forward_only", action='store_true', help="If set, only run the forward pass." ) parser.add_argument( "--layer_wise_benchmark", action='store_true', help="If True, run the layer-wise benchmark as well." ) parser.add_argument( "--cpu", action='store_true', help="If True, run testing on CPU instead of GPU." ) parser.add_argument( "--mkl", action='store_true', help="If True, run testing on CPU-MKL instead of GPU." ) parser.add_argument( "--engine", type=str, default="", help="If set, blindly prefer the given engine(s) for every op.") parser.add_argument( "--dump_model", action='store_true', help="If True, dump the model prototxts to disk." 
) parser.add_argument("--net_type", type=str, default="simple") parser.add_argument("--num_workers", type=int, default=2) parser.add_argument("--use-nvtx", default=False, action='store_true') parser.add_argument("--htrace_span_log_path", type=str) return parser if __name__ == '__main__': args = GetArgumentParser().parse_args() if ( not args.batch_size or not args.model or not args.order ): GetArgumentParser().print_help() else: workspace.GlobalInit( ['caffe2', '--caffe2_log_level=0'] + (['--caffe2_use_nvtx'] if args.use_nvtx else []) + (['--caffe2_htrace_span_log_path=' + args.htrace_span_log_path] if args.htrace_span_log_path else [])) model_map = { 'AlexNet': AlexNet, 'OverFeat': OverFeat, 'VGGA': VGGA, 'Inception': Inception, 'MLP': MLP, } Benchmark(model_map[args.model], args)
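As a hedged illustration (not part of the original script), the benchmark entry points above can also be driven programmatically; the flag values below are assumptions chosen for a quick CPU-only, forward-only run.

# Hedged sketch: programmatic use of the benchmark entry points defined above.
# The chosen flags (AlexNet, batch 16, CPU, forward only) are illustrative.
from caffe2.python import convnet_benchmarks as cb
from caffe2.python import workspace

args = cb.GetArgumentParser().parse_args(
    ["--model", "AlexNet", "--batch_size", "16", "--forward_only", "--cpu"]
)
workspace.GlobalInit(["caffe2", "--caffe2_log_level=0"])
cb.Benchmark(cb.AlexNet, args)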
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu


@unittest.skipIf(not workspace.C.has_mkldnn,
                 "Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
    @given(stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(3, 5),
           size=st.integers(8, 8),
           input_channels=st.integers(1, 3),
           output_channels=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           **mu.gcs)
    @settings(max_examples=2, timeout=100)
    def test_mkl_convolution(self, stride, pad, kernel, size,
                             input_channels, output_channels,
                             batch_size, gc, dc):
        op = core.CreateOperator(
            "Conv",
            ["X", "w", "b"],
            ["Y"],
            stride=stride,
            pad=pad,
            kernel=kernel,
        )
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5
        w = np.random.rand(
            output_channels, input_channels, kernel, kernel) \
            .astype(np.float32) - 0.5
        b = np.random.rand(output_channels).astype(np.float32) - 0.5
        inputs = [X, w, b]
        self.assertDeviceChecks(dc, op, inputs, [0])


if __name__ == "__main__":
    import unittest
    unittest.main()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.") class TestMKLBasic(test_util.TestCase): def testReLUSpeed(self): X = np.random.randn(128, 4096).astype(np.float32) mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.Relu("X", "Y") net.Relu("X_mkl", "Y_mkl", device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-10, rtol=1e-10) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) # The returned runtime is the time of # [whole_net, cpu_op, mkl_op] # so we will assume that the MKL one runs faster than the CPU one. # Note(Yangqing): in fact, it seems that in optimized mode, this is # not always guaranteed - MKL runs slower than the Eigen vectorized # version, so I am turning this assertion off. #self.assertTrue(runtime[1] >= runtime[2]) print("Relu CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) def testConvSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 256, 27, 27).astype(np.float32) - 0.5 W = np.random.rand(192, 256, 3, 3).astype(np.float32) - 0.5 b = np.random.rand(192).astype(np.float32) - 0.5 mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("W", W) workspace.FeedBlob("b", b) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) workspace.FeedBlob("W_mkl", W, device_option=mkl_do) workspace.FeedBlob("b_mkl", b, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.Conv(["X", "W", "b"], "Y", pad=1, stride=1, kernel=3) net.Conv(["X_mkl", "W_mkl", "b_mkl"], "Y_mkl", pad=1, stride=1, kernel=3, device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) print("Conv CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) if __name__ == '__main__': unittest.main()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.") class TestMKLBasic(test_util.TestCase): def testSpatialBNTestingSpeed(self): input_channel = 10 X = np.random.rand(1, input_channel, 100, 100).astype(np.float32) - 0.5 scale = np.random.rand(input_channel).astype(np.float32) + 0.5 bias = np.random.rand(input_channel).astype(np.float32) - 0.5 mean = np.random.randn(input_channel).astype(np.float32) var = np.random.rand(input_channel).astype(np.float32) + 0.5 mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("scale", scale) workspace.FeedBlob("bias", bias) workspace.FeedBlob("mean", mean) workspace.FeedBlob("var", var) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) workspace.FeedBlob("scale_mkl", scale, device_option=mkl_do) workspace.FeedBlob("bias_mkl", bias, device_option=mkl_do) workspace.FeedBlob("mean_mkl", mean, device_option=mkl_do) workspace.FeedBlob("var_mkl", var, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.SpatialBN(["X", "scale", "bias","mean","var"], "Y", order="NCHW", is_test=True, epsilon=1e-5) net.SpatialBN(["X_mkl", "scale_mkl", "bias_mkl","mean_mkl","var_mkl"], "Y_mkl", order="NCHW", is_test=True, epsilon=1e-5, device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) def testSpatialBNTrainingSpeed(self): input_channel = 10 X = np.random.rand(1, input_channel, 100, 100).astype(np.float32) - 0.5 scale = np.random.rand(input_channel).astype(np.float32) + 0.5 bias = np.random.rand(input_channel).astype(np.float32) - 0.5 mean = np.random.randn(input_channel).astype(np.float32) var = np.random.rand(input_channel).astype(np.float32) + 0.5 #mean = np.zeros(input_channel) #var = np.zeros(input_channel) mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("scale", scale) workspace.FeedBlob("bias", bias) workspace.FeedBlob("mean", mean) workspace.FeedBlob("var", var) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) workspace.FeedBlob("scale_mkl", scale, device_option=mkl_do) workspace.FeedBlob("bias_mkl", bias, device_option=mkl_do) workspace.FeedBlob("mean_mkl", mean, device_option=mkl_do) workspace.FeedBlob("var_mkl", var, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.SpatialBN(["X", "scale", "bias","mean", "var"], ["Y", "mean", "var", "saved_mean", "saved_var"], order="NCHW", is_test=False, epsilon=1e-5) net.SpatialBN(["X_mkl", "scale_mkl", "bias_mkl","mean_mkl","var_mkl"], ["Y_mkl", "mean_mkl", "var_mkl", "saved_mean_mkl", "saved_var_mkl"], order="NCHW", is_test=False, epsilon=1e-5, device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. 
np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) np.testing.assert_allclose( workspace.FetchBlob("mean"), workspace.FetchBlob("mean_mkl"), atol=1e-2, rtol=1e-2) np.testing.assert_allclose( workspace.FetchBlob("var"), workspace.FetchBlob("var_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) if __name__ == '__main__': unittest.main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu


@unittest.skipIf(not workspace.C.has_mkldnn,
                 "Skipping as we do not have mkldnn.")
class MKLLRNTest(hu.HypothesisTestCase):
    @given(input_channels=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           im_size=st.integers(1, 10),
           order=st.sampled_from(["NCHW"]),
           **mu.gcs)
    def test_mkl_LRN(self, input_channels, batch_size, im_size, order,
                     gc, dc):
        op = core.CreateOperator(
            "LRN",
            ["X"],
            ["Y", "Y_scale"],
            size=5,
            alpha=0.001,
            beta=0.75,
            bias=2.0,
            order=order,
        )
        X = np.random.rand(
            batch_size, input_channels, im_size, im_size).astype(np.float32)
        self.assertDeviceChecks(dc, op, [X], [0])


if __name__ == "__main__":
    import unittest
    unittest.main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu


@unittest.skipIf(not workspace.C.has_mkldnn,
                 "Skipping as we do not have mkldnn.")
class MKLPoolTest(hu.HypothesisTestCase):
    @given(stride=st.integers(1, 3),
           pad=st.integers(0, 3),
           kernel=st.integers(3, 5),
           size=st.integers(7, 9),
           input_channels=st.integers(1, 3),
           batch_size=st.integers(1, 3),
           method=st.sampled_from(["MaxPool", "AveragePool"]),
           **mu.gcs)
    @settings(max_examples=2, timeout=100)
    def test_mkl_pooling(self, stride, pad, kernel, size,
                         input_channels, batch_size,
                         method, gc, dc):
        op = core.CreateOperator(
            method,
            ["X"],
            ["Y"],
            stride=stride,
            pad=pad,
            kernel=kernel,
        )
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32)
        self.assertDeviceChecks(dc, op, [X], [0])


if __name__ == "__main__":
    import unittest
    unittest.main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu


@unittest.skipIf(not workspace.C.has_mkldnn,
                 "Skipping as we do not have mkldnn.")
class MKLFcTest(hu.HypothesisTestCase):
    @given(n=st.integers(1, 5),
           m=st.integers(1, 5),
           k=st.integers(1, 5),
           **mu.gcs)
    def test_mkl_fc(self, n, m, k, gc, dc):
        X = np.random.rand(m, k).astype(np.float32) - 0.5
        W = np.random.rand(n, k).astype(np.float32) - 0.5
        b = np.random.rand(n).astype(np.float32) - 0.5

        op = core.CreateOperator(
            'FC',
            ['X', 'W', 'b'],
            ["Y"]
        )
        self.assertDeviceChecks(dc, op, [X, W, b], [0])


if __name__ == "__main__":
    import unittest
    unittest.main()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.") class TestMKLBasic(test_util.TestCase): def testLRNSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 2, 224, 224).astype(np.float32) mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.LRN("X", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW") net.LRN("X_mkl", ["Y_mkl", "Y_Scale_mkl"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) print("LRN CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) def testConvReluLRNSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 3, 224, 224).astype(np.float32) - 0.5 W = np.random.rand(64, 3, 11, 11).astype(np.float32) - 0.5 b = np.random.rand(64).astype(np.float32) - 0.5 mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("W", W) workspace.FeedBlob("b", b) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) workspace.FeedBlob("W_mkl", W, device_option=mkl_do) workspace.FeedBlob("b_mkl", b, device_option=mkl_do) net = core.Net("test") net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=11) net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl", pad=1, stride=1, kernel=11, device_option=mkl_do) net.Relu("C", "R") net.Relu("C_mkl", "R_mkl", device_option=mkl_do) net.LRN("R", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW") net.LRN("R_mkl", ["Y_mkl", "Y_Scale_mkl"],size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) if __name__ == '__main__': unittest.main()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.") class TestMKLBasic(test_util.TestCase): def testMaxPoolingSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 64, 224, 224).astype(np.float32) mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.MaxPool("X", "Y", stride=2, kernel=3) net.MaxPool("X_mkl", "Y_mkl", stride=2, kernel=3, device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) print("Maxpooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) def testAveragePoolingSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 64, 224, 224).astype(np.float32) mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.AveragePool("X", "Y", stride=2, kernel=3) net.AveragePool("X_mkl", "Y_mkl", stride=2, kernel=3, device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) print("Averagepooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) def testConvReluMaxPoolSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 3, 224, 224).astype(np.float32) - 0.5 W = np.random.rand(64, 3, 11, 11).astype(np.float32) - 0.5 b = np.random.rand(64).astype(np.float32) - 0.5 mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("W", W) workspace.FeedBlob("b", b) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) workspace.FeedBlob("W_mkl", W, device_option=mkl_do) workspace.FeedBlob("b_mkl", b, device_option=mkl_do) net = core.Net("test") net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=11) net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl", pad=1, stride=1, kernel=11, device_option=mkl_do) net.Relu("C", "R") net.Relu("C_mkl", "R_mkl", device_option=mkl_do) net.AveragePool("R", "Y", stride=2, kernel=3) net.AveragePool("R_mkl", "Y_mkl", stride=2, kernel=3, device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. 
np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) if __name__ == '__main__': unittest.main()
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu


@unittest.skipIf(not workspace.C.has_mkldnn,
                 "Skipping as we do not have mkldnn.")
class MKLSpatialBNTest(hu.HypothesisTestCase):
    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           #order=st.sampled_from(["NCHW", "NHWC"]),
           order=st.sampled_from(["NCHW"]),
           epsilon=st.floats(1e-5, 1e-2),
           **mu.gcs)
    def test_mkl_BN(self, size, input_channels, batch_size, seed, order,
                    epsilon, gc, dc):
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5

        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "mean", "var"],
            ["Y"],
            order=order,
            is_test=True,
            epsilon=epsilon,
        )
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])

    @given(size=st.integers(7, 10),
           input_channels=st.integers(1, 10),
           batch_size=st.integers(1, 3),
           seed=st.integers(0, 65535),
           #order=st.sampled_from(["NCHW", "NHWC"]),
           order=st.sampled_from(["NCHW"]),
           epsilon=st.floats(1e-5, 1e-2),
           **mu.gcs)
    def test_spatialbn_train_mode(
            self, size, input_channels, batch_size, seed, order, epsilon,
            gc, dc):
        op = core.CreateOperator(
            "SpatialBN",
            ["X", "scale", "bias", "running_mean", "running_var"],
            ["Y", "running_mean", "running_var", "saved_mean", "saved_var"],
            order=order,
            is_test=False,
            epsilon=epsilon,
        )
        np.random.seed(1701)
        scale = np.random.rand(input_channels).astype(np.float32) + 0.5
        bias = np.random.rand(input_channels).astype(np.float32) - 0.5
        mean = np.random.randn(input_channels).astype(np.float32)
        var = np.random.rand(input_channels).astype(np.float32) + 0.5
        X = np.random.rand(
            batch_size, input_channels, size, size).astype(np.float32) - 0.5
        self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
                                [0, 1, 2, 3, 4])


if __name__ == "__main__":
    import unittest
    unittest.main()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import cnn, core, workspace, test_util @unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.") class TestMKLBasic(test_util.TestCase): def testFCSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5 #X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5 W = np.random.rand(4096, 9216).astype(np.float32) - 0.5 b = np.random.rand(4096).astype(np.float32) - 0.5 mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("W", W) workspace.FeedBlob("b", b) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) workspace.FeedBlob("W_mkl", W, device_option=mkl_do) workspace.FeedBlob("b_mkl", b, device_option=mkl_do) net = core.Net("test") # Makes sure that we can run relu. net.FC(["X", "W", "b"], "Y") net.FC(["X_mkl", "W_mkl", "b_mkl"], "Y_mkl", device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2])) def testConvReluMaxPoolFcSpeed(self): # We randomly select a shape to test the speed. Intentionally we # test a batch size of 1 since this may be the most frequent use # case for MKL during deployment time. X = np.random.rand(1, 256, 13, 13).astype(np.float32) - 0.5 W = np.random.rand(256, 256, 3, 3).astype(np.float32) - 0.5 b = np.random.rand(256).astype(np.float32) - 0.5 w_fc = np.random.rand(4096, 9216).astype(np.float32) - 0.5 b_fc = np.random.rand(4096).astype(np.float32) - 0.5 mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN) # Makes sure that feed works. workspace.FeedBlob("X", X) workspace.FeedBlob("W", W) workspace.FeedBlob("b", b) workspace.FeedBlob("w_fc", w_fc) workspace.FeedBlob("b_fc", b_fc) workspace.FeedBlob("X_mkl", X, device_option=mkl_do) workspace.FeedBlob("W_mkl", W, device_option=mkl_do) workspace.FeedBlob("b_mkl", b, device_option=mkl_do) workspace.FeedBlob("w_fc_mkl", w_fc, device_option=mkl_do) workspace.FeedBlob("b_fc_mkl", b_fc, device_option=mkl_do) net = core.Net("test") net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=3) net.Relu("C", "R") net.MaxPool("R", "P", stride=2, kernel=3) net.FC(["P","w_fc", "b_fc"], "Y") net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl", pad=1, stride=1, kernel=3, device_option=mkl_do) net.Relu("C_mkl", "R_mkl", device_option=mkl_do) net.MaxPool("R_mkl", "P_mkl", stride=2, kernel=3, device_option=mkl_do) net.FC(["P_mkl","w_fc_mkl", "b_fc_mkl"], "Y_mkl", device_option=mkl_do) workspace.CreateNet(net) workspace.RunNet(net) # makes sure that the results are good. np.testing.assert_allclose( workspace.FetchBlob("Y"), workspace.FetchBlob("Y_mkl"), atol=1e-2, rtol=1e-2) runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True) if __name__ == '__main__': unittest.main()
## @package lmdb_create_example
# Module caffe2.python.examples.lmdb_create_example
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import argparse
import numpy as np

import lmdb
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, cnn

'''
Simple example to create an lmdb database of random image data and labels.
This can be used as a skeleton to write your own data import.

It also runs a dummy model with Caffe2 that reads the data back and validates
that the checksum is the same.
'''


def create_db(output_file):
    print(">>> Write database...")
    LMDB_MAP_SIZE = 1 << 40   # MODIFY
    env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)

    checksum = 0
    with env.begin(write=True) as txn:
        for j in range(0, 128):
            # MODIFY: add your own data reader / creator
            label = j % 10
            width = 64
            height = 32

            img_data = np.random.rand(3, width, height)
            # ...

            # Create TensorProtos
            tensor_protos = caffe2_pb2.TensorProtos()
            img_tensor = tensor_protos.protos.add()
            img_tensor.dims.extend(img_data.shape)
            img_tensor.data_type = 1

            flatten_img = img_data.reshape(np.prod(img_data.shape))
            img_tensor.float_data.extend(flatten_img)

            label_tensor = tensor_protos.protos.add()
            label_tensor.data_type = 2
            label_tensor.int32_data.append(label)
            txn.put(
                '{}'.format(j).encode('ascii'),
                tensor_protos.SerializeToString()
            )

            checksum += np.sum(img_data) * label
            if (j % 16 == 0):
                print("Inserted {} rows".format(j))

    print("Checksum/write: {}".format(int(checksum)))
    return checksum


def read_db_with_caffe2(db_file, expected_checksum):
    print(">>> Read database...")
    model = cnn.CNNModelHelper(
        order="NCHW", name="lmdbtest")
    batch_size = 32
    data, label = model.TensorProtosDBInput(
        [], ["data", "label"], batch_size=batch_size,
        db=db_file, db_type="lmdb")

    checksum = 0

    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)

    for batch_idx in range(0, 4):
        workspace.RunNet(model.net.Proto().name)

        img_datas = workspace.FetchBlob("data")
        labels = workspace.FetchBlob("label")
        for j in range(batch_size):
            checksum += np.sum(img_datas[j, :]) * labels[j]

    print("Checksum/read: {}".format(int(checksum)))
    assert np.abs(expected_checksum - checksum) < 0.1, \
        "Read/write checksums don't match"


def main():
    parser = argparse.ArgumentParser(
        description="Example LMDB creation"
    )
    parser.add_argument("--output_file", type=str, default=None,
                        help="Path to write the database to",
                        required=True)
    args = parser.parse_args()
    checksum = create_db(args.output_file)

    # For testing reading:
    read_db_with_caffe2(args.output_file, checksum)


if __name__ == '__main__':
    main()
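For completeness, here is a hedged companion sketch (not part of the example above) that reads the created database back with plain lmdb and protobuf, without constructing a Caffe2 net; the database path is an assumption.

# Hedged companion sketch: inspect the lmdb contents directly. The path below
# is illustrative and should match the --output_file passed to create_db().
import lmdb
import numpy as np
from caffe2.proto import caffe2_pb2

env = lmdb.open("/tmp/example.lmdb", readonly=True)  # assumed path
with env.begin() as txn:
    for key, value in txn.cursor():
        protos = caffe2_pb2.TensorProtos()
        protos.ParseFromString(value)
        img_proto, label_proto = protos.protos[0], protos.protos[1]
        img = np.array(img_proto.float_data, dtype=np.float32).reshape(
            list(img_proto.dims))
        print(key, img.shape, label_proto.int32_data[0])
        break  # only inspect the first entry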
## @package resnet50_trainer # Module caffe2.python.examples.resnet50_trainer from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import logging import numpy as np import time import os from caffe2.python import core, workspace, experiment_util, data_parallel_model, dyndep from caffe2.python import timeout_guard, model_helper, brew import caffe2.python.models.resnet as resnet import caffe2.python.predictor.predictor_exporter as pred_exp import caffe2.python.predictor.predictor_py_utils as pred_utils from caffe2.python.predictor_constants import predictor_constants as predictor_constants ''' Parallelized multi-GPU distributed trainer for Resnet 50. Can be used to train on imagenet data, for example. To run the trainer in single-machine multi-gpu mode by setting num_shards = 1. To run the trainer in multi-machine multi-gpu mode with M machines, run the same program on all machines, specifying num_shards = M, and shard_id = a unique integer in the set [0, M-1]. For rendezvous (the trainer processes have to know about each other), you can either use a directory path that is visible to all processes (e.g. NFS directory), or use a Redis instance. Use the former by passing the `file_store_path` argument. Use the latter by passing the `redis_host` and `redis_port` arguments. ''' logging.basicConfig() log = logging.getLogger("resnet50_trainer") log.setLevel(logging.DEBUG) dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops') dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:redis_store_handler_ops') def AddImageInput(model, reader, batch_size, img_size): ''' Image input operator that loads data from reader and applies certain transformations to the images. ''' data, label = brew.image_input( model, reader, ["data", "label"], batch_size=batch_size, use_caffe_datum=True, mean=128., std=128., scale=256, crop=img_size, mirror=1 ) data = model.StopGradient(data, data) def AddMomentumParameterUpdate(train_model, LR): ''' Add the momentum-SGD update. ''' params = train_model.GetParams() assert (len(params) > 0) for param in params: param_grad = train_model.param_to_grad[param] param_momentum = train_model.param_init_net.ConstantFill( [param], param + '_momentum', value=0.0 ) # Update param_grad and param_momentum in place train_model.net.MomentumSGDUpdate( [param_grad, param_momentum, LR, param], [param_grad, param_momentum, param], momentum=0.9, nesterov=1, ) def SaveModel(args, train_model, epoch): prefix = "gpu_{}".format(train_model._devices[0]) predictor_export_meta = pred_exp.PredictorExportMeta( predict_net=train_model.net.Proto(), parameters=data_parallel_model.GetCheckpointParams(train_model), inputs=[prefix + "/data"], outputs=[prefix + "/softmax"], shapes={ prefix + "/softmax": (1, args.num_labels), prefix + "/data": (args.num_channels, args.image_size, args.image_size) } ) # save the train_model for the current epoch model_path = "%s/%s_%d.mdl" % ( args.file_store_path, args.save_model_name, epoch, ) # set db_type to be "minidb" instead of "log_file_db", which breaks # the serialization in save_to_db. 
Need to switch back to log_file_db # after migration pred_exp.save_to_db( db_type="minidb", db_destination=model_path, predictor_export_meta=predictor_export_meta, ) def LoadModel(path, model): ''' Load pretrained model from file ''' log.info("Loading path: {}".format(path)) meta_net_def = pred_exp.load_from_db(path, 'minidb') init_net = core.Net(pred_utils.GetNet( meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE)) predict_init_net = core.Net(pred_utils.GetNet( meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE)) predict_init_net.RunAllOnGPU() init_net.RunAllOnGPU() assert workspace.RunNetOnce(predict_init_net) assert workspace.RunNetOnce(init_net) def RunEpoch( args, epoch, train_model, test_model, total_batch_size, num_shards, expname, explog, ): ''' Run one epoch of the trainer. TODO: add checkpointing here. ''' # TODO: add loading from checkpoint log.info("Starting epoch {}/{}".format(epoch, args.num_epochs)) epoch_iters = int(args.epoch_size / total_batch_size / num_shards) for i in range(epoch_iters): # This timeout is required (temporarily) since CUDA-NCCL # operators might deadlock when synchronizing between GPUs. timeout = 600.0 if i == 0 else 60.0 with timeout_guard.CompleteInTimeOrDie(timeout): t1 = time.time() workspace.RunNet(train_model.net.Proto().name) t2 = time.time() dt = t2 - t1 fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)" log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt)) prefix = "gpu_{}".format(train_model._devices[0]) accuracy = workspace.FetchBlob(prefix + '/accuracy') loss = workspace.FetchBlob(prefix + '/loss') train_fmt = "Training loss: {}, accuracy: {}" log.info(train_fmt.format(loss, accuracy)) num_images = epoch * epoch_iters * total_batch_size prefix = "gpu_{}".format(train_model._devices[0]) accuracy = workspace.FetchBlob(prefix + '/accuracy') loss = workspace.FetchBlob(prefix + '/loss') learning_rate = workspace.FetchBlob(prefix + '/LR') test_accuracy = 0 if (test_model is not None): # Run 100 iters of testing ntests = 0 for _ in range(0, 100): workspace.RunNet(test_model.net.Proto().name) for g in test_model._devices: test_accuracy += np.asscalar(workspace.FetchBlob( "gpu_{}".format(g) + '/accuracy' )) ntests += 1 test_accuracy /= ntests else: test_accuracy = (-1) explog.log( input_count=num_images, batch_count=(i + epoch * epoch_iters), additional_values={ 'accuracy': accuracy, 'loss': loss, 'learning_rate': learning_rate, 'epoch': epoch, 'test_accuracy': test_accuracy, } ) assert loss < 40, "Exploded gradients :(" # TODO: add checkpointing return epoch + 1 def Train(args): # Either use specified device list or generate one if args.gpus is not None: gpus = [int(x) for x in args.gpus.split(',')] num_gpus = len(gpus) else: gpus = range(args.num_gpus) num_gpus = args.num_gpus log.info("Running on GPUs: {}".format(gpus)) # Verify valid batch size total_batch_size = args.batch_size batch_per_device = total_batch_size // num_gpus assert \ total_batch_size % num_gpus == 0, \ "Number of GPUs must divide batch size" # Round down epoch size to closest multiple of batch size across machines global_batch_size = total_batch_size * args.num_shards epoch_iters = int(args.epoch_size / global_batch_size) args.epoch_size = epoch_iters * global_batch_size log.info("Using epoch size: {}".format(args.epoch_size)) # Create ModelHelper object train_arg_scope = { 'order': 'NCHW', 'use_cudnn': True, 'cudnn_exhaustice_search': True, 'ws_nbytes_limit': (args.cudnn_workspace_limit_mb * 1024 * 1024), } train_model = 
model_helper.ModelHelper( name="resnet50", arg_scope=train_arg_scope ) num_shards = args.num_shards shard_id = args.shard_id if num_shards > 1: # Create rendezvous for distributed computation store_handler = "store_handler" if args.redis_host is not None: # Use Redis for rendezvous if Redis host is specified workspace.RunOperatorOnce( core.CreateOperator( "RedisStoreHandlerCreate", [], [store_handler], host=args.redis_host, port=args.redis_port, prefix=args.run_id, ) ) else: # Use filesystem for rendezvous otherwise workspace.RunOperatorOnce( core.CreateOperator( "FileStoreHandlerCreate", [], [store_handler], path=args.file_store_path, ) ) rendezvous = dict( kv_handler=store_handler, shard_id=shard_id, num_shards=num_shards, engine="GLOO", exit_nets=None) else: rendezvous = None # Model building functions def create_resnet50_model_ops(model, loss_scale): [softmax, loss] = resnet.create_resnet50( model, "data", num_input_channels=args.num_channels, num_labels=args.num_labels, label="label", no_bias=True, ) loss = model.Scale(loss, scale=loss_scale) brew.accuracy(model, [softmax, "label"], "accuracy") return [loss] # SGD def add_parameter_update_ops(model): brew.add_weight_decay(model, args.weight_decay) ITER = brew.iter(model, "ITER") stepsz = int(30 * args.epoch_size / total_batch_size / num_shards) LR = model.net.LearningRate( [ITER], "LR", base_lr=args.base_learning_rate, policy="step", stepsize=stepsz, gamma=0.1, ) AddMomentumParameterUpdate(model, LR) # Input. Note that the reader must be shared with all GPUS. reader = train_model.CreateDB( "reader", db=args.train_data, db_type=args.db_type, num_shards=num_shards, shard_id=shard_id, ) def add_image_input(model): AddImageInput( model, reader, batch_size=batch_per_device, img_size=args.image_size, ) # Create parallelized model data_parallel_model.Parallelize_GPU( train_model, input_builder_fun=add_image_input, forward_pass_builder_fun=create_resnet50_model_ops, param_update_builder_fun=add_parameter_update_ops, devices=gpus, rendezvous=rendezvous, optimize_gradient_memory=True, ) # Add test model, if specified test_model = None if (args.test_data is not None): log.info("----- Create test net ----") test_arg_scope = { 'order': "NCHW", 'use_cudnn': True, 'cudnn_exhaustive_search': True, } test_model = model_helper.ModelHelper( name="resnet50_test", arg_scope=test_arg_scope ) test_reader = test_model.CreateDB( "test_reader", db=args.test_data, db_type=args.db_type, ) def test_input_fn(model): AddImageInput( model, test_reader, batch_size=batch_per_device, img_size=args.image_size, ) data_parallel_model.Parallelize_GPU( test_model, input_builder_fun=test_input_fn, forward_pass_builder_fun=create_resnet50_model_ops, param_update_builder_fun=None, devices=gpus, ) workspace.RunNetOnce(test_model.param_init_net) workspace.CreateNet(test_model.net) workspace.RunNetOnce(train_model.param_init_net) workspace.CreateNet(train_model.net) epoch = 0 # load the pre-trained model and reset epoch if args.load_model_path is not None: LoadModel(args.load_model_path, train_model) # Sync the model params data_parallel_model.FinalizeAfterCheckpoint(train_model) # reset epoch. 
load_model_path should end with *_X.mdl, # where X is the epoch number last_str = args.load_model_path.split('_')[-1] if last_str.endswith('.mdl'): epoch = int(last_str[:-4]) log.info("Reset epoch to {}".format(epoch)) else: log.warning("The format of load_model_path doesn't match!") expname = "resnet50_gpu%d_b%d_L%d_lr%.2f_v2" % ( args.num_gpus, total_batch_size, args.num_labels, args.base_learning_rate, ) explog = experiment_util.ModelTrainerLog(expname, args) # Run the training one epoch a time while epoch < args.num_epochs: epoch = RunEpoch( args, epoch, train_model, test_model, total_batch_size, num_shards, expname, explog ) # Save the model for each epoch SaveModel(args, train_model, epoch) model_path = "%s/%s_" % ( args.file_store_path, args.save_model_name ) # remove the saved model from the previous epoch if it exists if os.path.isfile(model_path + str(epoch - 1) + ".mdl"): os.remove(model_path + str(epoch - 1) + ".mdl") def main(): # TODO: use argv parser = argparse.ArgumentParser( description="Caffe2: Resnet-50 training" ) parser.add_argument("--train_data", type=str, default=None, help="Path to training data or 'everstore_sampler'", required=True) parser.add_argument("--test_data", type=str, default=None, help="Path to test data") parser.add_argument("--db_type", type=str, default="lmdb", help="Database type (such as lmdb or leveldb)") parser.add_argument("--gpus", type=str, help="Comma separated list of GPU devices to use") parser.add_argument("--num_gpus", type=int, default=1, help="Number of GPU devices (instead of --gpus)") parser.add_argument("--num_channels", type=int, default=3, help="Number of color channels") parser.add_argument("--image_size", type=int, default=227, help="Input image size (to crop to)") parser.add_argument("--num_labels", type=int, default=1000, help="Number of labels") parser.add_argument("--batch_size", type=int, default=32, help="Batch size, total over all GPUs") parser.add_argument("--epoch_size", type=int, default=1500000, help="Number of images/epoch, total over all machines") parser.add_argument("--num_epochs", type=int, default=1000, help="Num epochs.") parser.add_argument("--base_learning_rate", type=float, default=0.1, help="Initial learning rate.") parser.add_argument("--weight_decay", type=float, default=1e-4, help="Weight decay (L2 regularization)") parser.add_argument("--cudnn_workspace_limit_mb", type=int, default=64, help="CuDNN workspace limit in MBs") parser.add_argument("--num_shards", type=int, default=1, help="Number of machines in distributed run") parser.add_argument("--shard_id", type=int, default=0, help="Shard id.") parser.add_argument("--run_id", type=str, help="Unique run identifier (e.g. uuid)") parser.add_argument("--redis_host", type=str, help="Host of Redis server (for rendezvous)") parser.add_argument("--redis_port", type=int, default=6379, help="Port of Redis server (for rendezvous)") parser.add_argument("--file_store_path", type=str, default="/tmp", help="Path to directory to use for rendezvous") parser.add_argument("--save_model_name", type=str, default="resnet50_model", help="Save the trained model to a given name") parser.add_argument("--load_model_path", type=str, default=None, help="Load previously saved model to continue training") args = parser.parse_args() Train(args) if __name__ == '__main__': workspace.GlobalInit(['caffe2', '--caffe2_log_level=2']) main()
## @package char_rnn # Module caffe2.python.examples.char_rnn from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, workspace, model_helper, utils, brew from caffe2.python.rnn_cell import LSTM from caffe2.proto import caffe2_pb2 import argparse import logging import numpy as np from datetime import datetime ''' This script takes a text file as input and uses a recurrent neural network to learn to predict next character in a sequence. ''' logging.basicConfig() log = logging.getLogger("char_rnn") log.setLevel(logging.DEBUG) # Default set() here is intentional as it would accumulate values like a global # variable def CreateNetOnce(net, created_names=set()): # noqa name = net.Name() if name not in created_names: created_names.add(name) workspace.CreateNet(net) class CharRNN(object): def __init__(self, args): self.seq_length = args.seq_length self.batch_size = args.batch_size self.iters_to_report = args.iters_to_report self.hidden_size = args.hidden_size with open(args.train_data) as f: self.text = f.read() self.vocab = list(set(self.text)) self.char_to_idx = {ch: idx for idx, ch in enumerate(self.vocab)} self.idx_to_char = {idx: ch for idx, ch in enumerate(self.vocab)} self.D = len(self.char_to_idx) print("Input has {} characters. Total input size: {}".format( len(self.vocab), len(self.text))) def CreateModel(self): log.debug("Start training") model = model_helper.ModelHelper(name="char_rnn") input_blob, seq_lengths, hidden_init, cell_init, target = \ model.net.AddExternalInputs( 'input_blob', 'seq_lengths', 'hidden_init', 'cell_init', 'target', ) hidden_output_all, self.hidden_output, _, self.cell_state = LSTM( model, input_blob, seq_lengths, (hidden_init, cell_init), self.D, self.hidden_size, scope="LSTM") output = brew.fc( model, hidden_output_all, None, dim_in=self.hidden_size, dim_out=self.D, axis=2 ) # axis is 2 as first two are T (time) and N (batch size). # We treat them as one big batch of size T * N softmax = model.net.Softmax(output, 'softmax', axis=2) softmax_reshaped, _ = model.net.Reshape( softmax, ['softmax_reshaped', '_'], shape=[-1, self.D]) # Create a copy of the current net. We will use it on the forward # pass where we don't need loss and backward operators self.forward_net = core.Net(model.net.Proto()) xent = model.net.LabelCrossEntropy([softmax_reshaped, target], 'xent') # Loss is average both across batch and through time # Thats why the learning rate below is multiplied by self.seq_length loss = model.net.AveragedLoss(xent, 'loss') model.AddGradientOperators([loss]) # Hand made SGD update. 
Normally one can use helper functions # to build an optimizer ITER = brew.iter(model, "iter") LR = model.LearningRate( ITER, "LR", base_lr=-0.1 * self.seq_length, policy="step", stepsize=1, gamma=0.9999) ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) # Update weights for each of the model parameters for param in model.params: param_grad = model.param_to_grad[param] model.net.WeightedSum([param, ONE, param_grad, LR], param) self.model = model self.predictions = softmax self.loss = loss self.prepare_state = core.Net("prepare_state") self.prepare_state.Copy(self.hidden_output, hidden_init) self.prepare_state.Copy(self.cell_state, cell_init) def _idx_at_pos(self, pos): return self.char_to_idx[self.text[pos]] def TrainModel(self): log.debug("Training model") workspace.RunNetOnce(self.model.param_init_net) # As though we predict the same probability for each character smooth_loss = -np.log(1.0 / self.D) * self.seq_length last_n_iter = 0 last_n_loss = 0.0 num_iter = 0 N = len(self.text) # We split text into batch_size peaces. Each peace will be used only # by a corresponding batch during the training process text_block_positions = np.zeros(self.batch_size, dtype=np.int32) text_block_size = N // self.batch_size text_block_starts = range(0, N, text_block_size) text_block_sizes = [text_block_size] * self.batch_size text_block_sizes[self.batch_size - 1] += N % self.batch_size assert sum(text_block_sizes) == N # Writing to output states which will be copied to input # states within the loop below workspace.FeedBlob(self.hidden_output, np.zeros( [1, self.batch_size, self.hidden_size], dtype=np.float32 )) workspace.FeedBlob(self.cell_state, np.zeros( [1, self.batch_size, self.hidden_size], dtype=np.float32 )) workspace.CreateNet(self.prepare_state) # We iterate over text in a loop many times. Each time we peak # seq_length segment and feed it to LSTM as a sequence last_time = datetime.now() progress = 0 while True: workspace.FeedBlob( "seq_lengths", np.array([self.seq_length] * self.batch_size, dtype=np.int32) ) workspace.RunNet(self.prepare_state.Name()) input = np.zeros( [self.seq_length, self.batch_size, self.D] ).astype(np.float32) target = np.zeros( [self.seq_length * self.batch_size] ).astype(np.int32) for e in range(self.batch_size): for i in range(self.seq_length): pos = text_block_starts[e] + text_block_positions[e] input[i][e][self._idx_at_pos(pos)] = 1 target[i * self.batch_size + e] =\ self._idx_at_pos((pos + 1) % N) text_block_positions[e] = ( text_block_positions[e] + 1) % text_block_sizes[e] progress += 1 workspace.FeedBlob('input_blob', input) workspace.FeedBlob('target', target) CreateNetOnce(self.model.net) workspace.RunNet(self.model.net.Name()) num_iter += 1 last_n_iter += 1 if num_iter % self.iters_to_report == 0: new_time = datetime.now() print("Characters Per Second: {}". format( int(progress / (new_time - last_time).total_seconds()) )) print("Iterations Per Second: {}". format( int(self.iters_to_report / (new_time - last_time).total_seconds()) )) last_time = new_time progress = 0 print("{} Iteration {} {}". 
format('-' * 10, num_iter, '-' * 10)) loss = workspace.FetchBlob(self.loss) * self.seq_length smooth_loss = 0.999 * smooth_loss + 0.001 * loss last_n_loss += loss if num_iter % self.iters_to_report == 0: self.GenerateText(500, np.random.choice(self.vocab)) log.debug("Loss since last report: {}" .format(last_n_loss / last_n_iter)) log.debug("Smooth loss: {}".format(smooth_loss)) last_n_loss = 0.0 last_n_iter = 0 def GenerateText(self, num_characters, ch): # Given a starting symbol we feed a fake sequence of size 1 to # our RNN num_character times. After each time we use output # probabilities to pick a next character to feed to the network. # Same character becomes part of the output CreateNetOnce(self.forward_net) text = '' + ch for _i in range(num_characters): workspace.FeedBlob( "seq_lengths", np.array([1] * self.batch_size, dtype=np.int32)) workspace.RunNet(self.prepare_state.Name()) input = np.zeros([1, self.batch_size, self.D]).astype(np.float32) input[0][0][self.char_to_idx[ch]] = 1 workspace.FeedBlob("input_blob", input) workspace.RunNet(self.forward_net.Name()) p = workspace.FetchBlob(self.predictions) next = np.random.choice(self.D, p=p[0][0]) ch = self.idx_to_char[next] text += ch print(text) @utils.debug def main(): parser = argparse.ArgumentParser( description="Caffe2: Char RNN Training" ) parser.add_argument("--train_data", type=str, default=None, help="Path to training data in a text file format", required=True) parser.add_argument("--seq_length", type=int, default=25, help="One training example sequence length") parser.add_argument("--batch_size", type=int, default=1, help="Training batch size") parser.add_argument("--iters_to_report", type=int, default=500, help="How often to report loss and generate text") parser.add_argument("--hidden_size", type=int, default=100, help="Dimension of the hidden representation") parser.add_argument("--gpu", action="store_true", help="If set, training is going to use GPU 0") args = parser.parse_args() device = core.DeviceOption( caffe2_pb2.CUDA if args.gpu else caffe2_pb2.CPU, 0) with core.DeviceScope(device): model = CharRNN(args) model.CreateModel() model.TrainModel() if __name__ == '__main__': workspace.GlobalInit(['caffe2', '--caffe2_log_level=2']) main()
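# Illustrative sketch (not part of the original script), kept here to document
# the (T, N, D) time-major input convention the training loop above relies on:
# a one-hot input of shape (seq_length, batch, vocab), int32 per-item sequence
# lengths, and (1, batch, hidden) initial states. Blob names are made up.
def _lstm_shape_demo(T=5, N=2, D=26, H=32):
    demo = model_helper.ModelHelper(name="lstm_shape_demo")
    input_blob, seq_lengths, h0, c0 = demo.net.AddExternalInputs(
        "demo_input", "demo_seq_lengths", "demo_h0", "demo_c0")
    outputs_all, h_last, _, c_last = LSTM(
        demo, input_blob, seq_lengths, (h0, c0), D, H, scope="demo_LSTM")

    workspace.FeedBlob("demo_input", np.zeros((T, N, D), dtype=np.float32))
    workspace.FeedBlob("demo_seq_lengths", np.array([T] * N, dtype=np.int32))
    workspace.FeedBlob("demo_h0", np.zeros((1, N, H), dtype=np.float32))
    workspace.FeedBlob("demo_c0", np.zeros((1, N, H), dtype=np.float32))
    workspace.RunNetOnce(demo.param_init_net)
    workspace.RunNetOnce(demo.net)
    return workspace.FetchBlob(str(outputs_all)).shape  # expected (T, N, H)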
## @package helpers
# Module caffe2.python.tutorials.helpers
from __future__ import print_function
import numpy as np
import skimage.io
import skimage.transform
import urllib2


def crop_center(img, cropx, cropy):
    y, x, c = img.shape
    startx = x // 2 - (cropx // 2)
    starty = y // 2 - (cropy // 2)
    return img[starty:starty + cropy, startx:startx + cropx]


def rescale(img, input_height, input_width):
    # print("Original image shape:" + str(img.shape) + " --> it should be in H, W, C!")
    # print("Model's input shape is %dx%d" % (input_height, input_width))
    aspect = img.shape[1] / float(img.shape[0])
    # print("Original aspect ratio: " + str(aspect))
    if aspect > 1:
        # landscape orientation - wide image
        res = int(aspect * input_height)
        imgScaled = skimage.transform.resize(img, (input_width, res))
    if aspect < 1:
        # portrait orientation - tall image
        res = int(input_width / aspect)
        imgScaled = skimage.transform.resize(img, (res, input_height))
    if aspect == 1:
        imgScaled = skimage.transform.resize(img, (input_width, input_height))
    return imgScaled


def load(img):
    # load and transform image
    img = skimage.img_as_float(skimage.io.imread(img)).astype(np.float32)
    return img


def chw(img):
    # switch to CHW
    img = img.swapaxes(1, 2).swapaxes(0, 1)
    return img


def bgr(img):
    # switch to BGR
    img = img[(2, 1, 0), :, :]
    return img


def removeMean(img, mean):
    # remove mean for better results
    img = img * 255 - mean
    return img


def batch(img):
    # add batch size
    img = img[np.newaxis, :, :, :].astype(np.float32)
    return img


def parseResults(results):
    results = np.asarray(results)
    results = np.delete(results, 1)
    index = 0
    highest = 0
    arr = np.empty((0, 2), dtype=object)
    for i, r in enumerate(results):
        # imagenet index begins with 1!
        i = i + 1
        arr = np.append(arr, np.array([[i, r]]), axis=0)
        if r > highest:
            highest = r
            index = i
    # top 3 results
    print("Raw top 3 results:",
          sorted(arr, key=lambda x: x[1], reverse=True)[:3])
    # now we can grab the code list
    answer = None  # default if no matching code is found
    with open('inference_codes.txt', 'r') as f:
        for line in f:
            code, result = line.partition(":")[::2]
            if code.strip() == str(index):
                answer = "The image contains a %s with a %s percent probability." % \
                    (result.strip()[1:-2], highest * 100)
    return answer


def loadToNCHW(img, mean, input_size):
    img = load(img)
    img = rescale(img, input_size, input_size)
    img = crop_center(img, input_size, input_size)
    img = chw(img)
    img = bgr(img)
    img = removeMean(img, mean)
    img = batch(img)
    return img
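# Hedged usage sketch for the helpers above. The image path and mean value are
# placeholders; a real model supplies its own mean and crop size (227 or 224
# are common).
def _example_loadToNCHW():
    example_img = "flower.jpg"   # hypothetical local image file
    example_mean = 128           # hypothetical scalar mean
    nchw_batch = loadToNCHW(example_img, example_mean, 227)
    print("Prepared batch shape:", nchw_batch.shape)  # expected (1, 3, 227, 227)
    return nchw_batch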
## @package predictor_py_utils # Module caffe2.python.predictor.predictor_py_utils from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def create_predict_net(predictor_export_meta): """ Return the input prediction net. """ # Construct a new net to clear the existing settings. net = core.Net(predictor_export_meta.predict_net.name or "predict") net.Proto().op.extend(predictor_export_meta.predict_net.op) net.Proto().external_input.extend( predictor_export_meta.inputs + predictor_export_meta.parameters) net.Proto().external_output.extend(predictor_export_meta.outputs) if predictor_export_meta.net_type is not None: net.Proto().type = predictor_export_meta.net_type return net.Proto() def create_predict_init_net(ws, predictor_export_meta): """ Return an initialization net that zero-fill all the input and output blobs, using the shapes from the provided workspace. This is necessary as there is no shape inference functionality in Caffe2. """ net = core.Net("predict-init") def zero_fill(blob): shape = predictor_export_meta.shapes.get(blob) if shape is None: if blob not in ws.blobs: raise Exception( "{} not in workspace but needed for shape: {}".format( blob, ws.blobs)) shape = ws.blobs[blob].fetch().shape net.ConstantFill([], blob, shape=shape, value=0.0) external_blobs = predictor_export_meta.inputs + \ predictor_export_meta.outputs for blob in external_blobs: zero_fill(blob) net.Proto().external_input.extend(external_blobs) if predictor_export_meta.extra_init_net: net.AppendNet(predictor_export_meta.extra_init_net) return net.Proto() def get_comp_name(string, name): if name: return string + '_' + name return string def _ProtoMapGet(field, key): ''' Given the key, get the value of the repeated field. Helper function used by protobuf since it doesn't have map construct ''' for v in field: if (v.key == key): return v.value return None def GetPlan(meta_net_def, key): return _ProtoMapGet(meta_net_def.plans, key) def GetPlanOriginal(meta_net_def, key): return _ProtoMapGet(meta_net_def.plans, key) def GetBlobs(meta_net_def, key): blobs = _ProtoMapGet(meta_net_def.blobs, key) if blobs is None: return [] return blobs def GetNet(meta_net_def, key): return _ProtoMapGet(meta_net_def.nets, key) def GetNetOriginal(meta_net_def, key): return _ProtoMapGet(meta_net_def.nets, key) def GetApplicationSpecificInfo(meta_net_def, key): return _ProtoMapGet(meta_net_def.applicationSpecificInfo, key) def AddBlobs(meta_net_def, blob_name, blob_def): blobs = _ProtoMapGet(meta_net_def.blobs, blob_name) if blobs is None: blobs = meta_net_def.blobs.add() blobs.key = blob_name blobs = blobs.value for blob in blob_def: blobs.append(blob) def AddPlan(meta_net_def, plan_name, plan_def): meta_net_def.plans.add(key=plan_name, value=plan_def) def AddNet(meta_net_def, net_name, net_def): meta_net_def.nets.add(key=net_name, value=net_def)
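# Hedged round-trip sketch for the map-style helpers above, using a fresh
# MetaNetDef. Names ("demo_key", "demo_blobs") are illustrative only.
def _example_meta_net_def_roundtrip():
    from caffe2.proto import caffe2_pb2, metanet_pb2

    meta = metanet_pb2.MetaNetDef()
    net = caffe2_pb2.NetDef()
    net.name = "demo_net"

    AddNet(meta, "demo_key", net)
    AddBlobs(meta, "demo_blobs", ["w", "b"])

    assert GetNet(meta, "demo_key").name == "demo_net"
    assert list(GetBlobs(meta, "demo_blobs")) == ["w", "b"]
    return meta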
## @package predictor_exporter # Module caffe2.python.predictor.predictor_exporter from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.proto import caffe2_pb2 from caffe2.proto import metanet_pb2 from caffe2.python import workspace, core from caffe2.python.predictor_constants import predictor_constants import caffe2.python.predictor.serde as serde import caffe2.python.predictor.predictor_py_utils as utils import collections class PredictorExportMeta(collections.namedtuple( 'PredictorExportMeta', 'predict_net, parameters, inputs, outputs, shapes, name, \ extra_init_net, net_type')): """ Metadata to be used for serializaing a net. parameters, inputs, outputs could be either BlobReference or blob's names predict_net can be either core.Net, NetDef, PlanDef or object Override the named tuple to provide optional name parameter. name will be used to identify multiple prediction nets. net_type is the type field in caffe2 NetDef - can be 'simple', 'dag', etc. """ def __new__( cls, predict_net, parameters, inputs, outputs, shapes=None, name="", extra_init_net=None, net_type=None, ): inputs = map(str, inputs) outputs = map(str, outputs) parameters = map(str, parameters) shapes = shapes or {} if isinstance(predict_net, (core.Net, core.Plan)): predict_net = predict_net.Proto() assert isinstance(predict_net, (caffe2_pb2.NetDef, caffe2_pb2.PlanDef)) return super(PredictorExportMeta, cls).__new__( cls, predict_net, parameters, inputs, outputs, shapes, name, extra_init_net, net_type) def inputs_name(self): return utils.get_comp_name(predictor_constants.INPUTS_BLOB_TYPE, self.name) def outputs_name(self): return utils.get_comp_name(predictor_constants.OUTPUTS_BLOB_TYPE, self.name) def parameters_name(self): return utils.get_comp_name(predictor_constants.PARAMETERS_BLOB_TYPE, self.name) def global_init_name(self): return utils.get_comp_name(predictor_constants.GLOBAL_INIT_NET_TYPE, self.name) def predict_init_name(self): return utils.get_comp_name(predictor_constants.PREDICT_INIT_NET_TYPE, self.name) def predict_net_name(self): return utils.get_comp_name(predictor_constants.PREDICT_NET_TYPE, self.name) def train_init_plan_name(self): return utils.get_comp_name(predictor_constants.TRAIN_INIT_PLAN_TYPE, self.name) def train_plan_name(self): return utils.get_comp_name(predictor_constants.TRAIN_PLAN_TYPE, self.name) def prepare_prediction_net(filename, db_type): ''' Helper function which loads all required blobs from the db and returns prediction net ready to be used ''' metanet_def = load_from_db(filename, db_type) global_init_net = utils.GetNet( metanet_def, predictor_constants.GLOBAL_INIT_NET_TYPE) workspace.RunNetOnce(global_init_net) predict_init_net = utils.GetNet( metanet_def, predictor_constants.PREDICT_INIT_NET_TYPE) workspace.RunNetOnce(predict_init_net) predict_net = core.Net( utils.GetNet(metanet_def, predictor_constants.PREDICT_NET_TYPE)) workspace.CreateNet(predict_net) return predict_net def _global_init_net(predictor_export_meta): net = core.Net("global-init") net.Load( [predictor_constants.PREDICTOR_DBREADER], predictor_export_meta.parameters) net.Proto().external_input.extend([predictor_constants.PREDICTOR_DBREADER]) net.Proto().external_output.extend(predictor_export_meta.parameters) return net.Proto() def get_meta_net_def(predictor_export_meta, ws=None): """ """ ws = ws or workspace.C.Workspace.current # Predict net is the core network that we use. 
meta_net_def = metanet_pb2.MetaNetDef() utils.AddNet(meta_net_def, predictor_export_meta.predict_init_name(), utils.create_predict_init_net(ws, predictor_export_meta)) utils.AddNet(meta_net_def, predictor_export_meta.global_init_name(), _global_init_net(predictor_export_meta)) utils.AddNet(meta_net_def, predictor_export_meta.predict_net_name(), utils.create_predict_net(predictor_export_meta)) utils.AddBlobs(meta_net_def, predictor_export_meta.parameters_name(), predictor_export_meta.parameters) utils.AddBlobs(meta_net_def, predictor_export_meta.inputs_name(), predictor_export_meta.inputs) utils.AddBlobs(meta_net_def, predictor_export_meta.outputs_name(), predictor_export_meta.outputs) return meta_net_def def set_model_info(meta_net_def, project_str, model_class_str, version): assert isinstance(meta_net_def, metanet_pb2.MetaNetDef) meta_net_def.modelInfo.project = project_str meta_net_def.modelInfo.modelClass = model_class_str meta_net_def.modelInfo.version = version def save_to_db(db_type, db_destination, predictor_export_meta): meta_net_def = get_meta_net_def(predictor_export_meta) with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)): workspace.FeedBlob( predictor_constants.META_NET_DEF, serde.serialize_protobuf_struct(meta_net_def) ) blobs_to_save = [predictor_constants.META_NET_DEF] + \ predictor_export_meta.parameters op = core.CreateOperator( "Save", blobs_to_save, [], absolute_path=True, db=db_destination, db_type=db_type) workspace.RunOperatorOnce(op) def load_from_db(filename, db_type): # global_init_net in meta_net_def will load parameters from # predictor_constants.PREDICTOR_DBREADER create_db = core.CreateOperator( 'CreateDB', [], [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)], db=filename, db_type=db_type) assert workspace.RunOperatorOnce(create_db), ( 'Failed to create db {}'.format(filename)) # predictor_constants.META_NET_DEF is always stored before the parameters load_meta_net_def = core.CreateOperator( 'Load', [core.BlobReference(predictor_constants.PREDICTOR_DBREADER)], [core.BlobReference(predictor_constants.META_NET_DEF)]) assert workspace.RunOperatorOnce(load_meta_net_def) meta_net_def = serde.deserialize_protobuf_struct( str(workspace.FetchBlob(predictor_constants.META_NET_DEF)), metanet_pb2.MetaNetDef) return meta_net_def
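# Hedged end-to-end sketch (not part of the original module): export a tiny FC
# net with save_to_db and rebuild it with prepare_prediction_net. Blob names,
# shapes and the temporary minidb file are illustrative only.
def _example_export_roundtrip():
    import tempfile
    import numpy as np
    from caffe2.python import brew, model_helper

    m = model_helper.ModelHelper(name="export_demo")
    brew.fc(m, "x", "y", dim_in=4, dim_out=2)
    workspace.RunNetOnce(m.param_init_net)  # parameters must exist before Save

    pem = PredictorExportMeta(
        predict_net=m.net,
        parameters=[str(p) for p in m.params],
        inputs=["x"],
        outputs=["y"],
        shapes={"x": (1, 4), "y": (1, 2)},
    )
    db_file = tempfile.NamedTemporaryFile(delete=False, suffix=".minidb").name
    save_to_db("minidb", db_file, pem)

    workspace.ResetWorkspace()
    predict_net = prepare_prediction_net(db_file, "minidb")
    workspace.FeedBlob("x", np.random.randn(1, 4).astype(np.float32))
    workspace.RunNet(predict_net.Proto().name)
    return workspace.FetchBlob("y")  # expected shape (1, 2)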
## @package serde # Module caffe2.python.predictor.serde from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def serialize_protobuf_struct(protobuf_struct): return protobuf_struct.SerializeToString() def deserialize_protobuf_struct(serialized_protobuf, struct_type): deser = struct_type() deser.ParseFromString(serialized_protobuf) return deser
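# Tiny illustrative round trip for the two helpers above; any generated
# protobuf message with SerializeToString/ParseFromString behaves the same.
def _example_roundtrip():
    from caffe2.proto import caffe2_pb2

    net = caffe2_pb2.NetDef()
    net.name = "roundtrip_demo"
    payload = serialize_protobuf_struct(net)
    restored = deserialize_protobuf_struct(payload, caffe2_pb2.NetDef)
    assert restored.name == "roundtrip_demo"
    return restored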
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python.test_util import TestCase from caffe2.python import workspace, brew from caffe2.python.model_helper import ModelHelper from caffe2.python.predictor import mobile_exporter import numpy as np class TestMobileExporter(TestCase): def test_mobile_exporter(self): model = ModelHelper(name="mobile_exporter_test_model") # Test LeNet brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5) brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2) brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5) brew.max_pool(model, 'conv2', 'pool2', kernel=2, stride=2) brew.fc(model, 'pool2', 'fc3', dim_in=50 * 4 * 4, dim_out=500) brew.relu(model, 'fc3', 'fc3') brew.fc(model, 'fc3', 'pred', 500, 10) brew.softmax(model, 'pred', 'out') # Create our mobile exportable networks workspace.RunNetOnce(model.param_init_net) init_net, predict_net = mobile_exporter.Export( workspace, model.net, model.params ) # Populate the workspace with data np_data = np.random.rand(1, 1, 28, 28).astype(np.float32) workspace.FeedBlob("data", np_data) workspace.CreateNet(model.net) workspace.RunNet(model.net) ref_out = workspace.FetchBlob("out") # Clear the workspace workspace.ResetWorkspace() # Populate the workspace with data workspace.RunNetOnce(init_net) # Fake "data" is populated by init_net, we have to replace it workspace.FeedBlob("data", np_data) # Overwrite the old net workspace.CreateNet(predict_net, True) workspace.RunNet(predict_net.name) manual_run_out = workspace.FetchBlob("out") np.testing.assert_allclose( ref_out, manual_run_out, atol=1e-10, rtol=1e-10 ) # Clear the workspace workspace.ResetWorkspace() # Predictor interface test (simulates writing to disk) predictor = workspace.Predictor( init_net.SerializeToString(), predict_net.SerializeToString() ) # Output is a vector of outputs but we only care about the first and only result predictor_out = predictor.run([np_data]) assert len(predictor_out) == 1 predictor_out = predictor_out[0] np.testing.assert_allclose( ref_out, predictor_out, atol=1e-10, rtol=1e-10 )
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import tempfile import unittest import numpy as np from caffe2.python import cnn, workspace, core from caffe2.python.predictor_constants import predictor_constants as pc import caffe2.python.predictor.predictor_exporter as pe import caffe2.python.predictor.predictor_py_utils as pred_utils class PredictorExporterTest(unittest.TestCase): def _create_model(self): m = cnn.CNNModelHelper() m.FC("data", "y", dim_in=5, dim_out=10, weight_init=m.XavierInit, bias_init=m.XavierInit) return m def setUp(self): np.random.seed(1) m = self._create_model() self.predictor_export_meta = pe.PredictorExportMeta( predict_net=m.net.Proto(), parameters=[str(b) for b in m.params], inputs=["data"], outputs=["y"], shapes={"y": (1, 10), "data": (1, 5)}, ) workspace.RunNetOnce(m.param_init_net) self.params = { param: workspace.FetchBlob(param) for param in self.predictor_export_meta.parameters} # Reset the workspace, to ensure net creation proceeds as expected. workspace.ResetWorkspace() def test_meta_constructor(self): ''' Test that passing net itself instead of proto works ''' m = self._create_model() pe.PredictorExportMeta( predict_net=m.net, parameters=m.params, inputs=["data"], outputs=["y"], shapes={"y": (1, 10), "data": (1, 5)}, ) def test_meta_net_def_net_runs(self): for param, value in self.params.items(): workspace.FeedBlob(param, value) extra_init_net = core.Net('extra_init') extra_init_net.ConstantFill('data', 'data', value=1.0) pem = pe.PredictorExportMeta( predict_net=self.predictor_export_meta.predict_net, parameters=self.predictor_export_meta.parameters, inputs=self.predictor_export_meta.inputs, outputs=self.predictor_export_meta.outputs, shapes=self.predictor_export_meta.shapes, extra_init_net=extra_init_net, net_type='dag', ) db_type = 'minidb' db_file = tempfile.NamedTemporaryFile( delete=False, suffix=".{}".format(db_type)) pe.save_to_db( db_type=db_type, db_destination=db_file.name, predictor_export_meta=pem) workspace.ResetWorkspace() meta_net_def = pe.load_from_db( db_type=db_type, filename=db_file.name, ) self.assertTrue("data" not in workspace.Blobs()) self.assertTrue("y" not in workspace.Blobs()) init_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_INIT_NET_TYPE) # 0-fills externalblobs blobs and runs extra_init_net workspace.RunNetOnce(init_net) self.assertTrue("data" in workspace.Blobs()) self.assertTrue("y" in workspace.Blobs()) print(workspace.FetchBlob("data")) np.testing.assert_array_equal( workspace.FetchBlob("data"), np.ones(shape=(1, 5))) np.testing.assert_array_equal( workspace.FetchBlob("y"), np.zeros(shape=(1, 10))) # Load parameters from DB global_init_net = pred_utils.GetNet(meta_net_def, pc.GLOBAL_INIT_NET_TYPE) workspace.RunNetOnce(global_init_net) # Run the net with a reshaped input and verify we are # producing good numbers (with our custom implementation) workspace.FeedBlob("data", np.random.randn(2, 5).astype(np.float32)) predict_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_NET_TYPE) self.assertEqual(predict_net.type, 'dag') workspace.RunNetOnce(predict_net) np.testing.assert_array_almost_equal( workspace.FetchBlob("y"), workspace.FetchBlob("data").dot(self.params["y_w"].T) + self.params["y_b"]) def test_db_fails_without_params(self): with self.assertRaises(Exception): for db_type in ["minidb"]: db_file = tempfile.NamedTemporaryFile( delete=False, suffix=".{}".format(db_type)) pe.save_to_db( db_type=db_type, 
db_destination=db_file.name, predictor_export_meta=self.predictor_export_meta)
## @package mobile_exporter # Module caffe2.python.mobile_exporter from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, utils from caffe2.proto import caffe2_pb2 def Export(workspace, net, params): """Returns init_net and predict_net suitable for writing to disk and loading into a Predictor""" predict_net = caffe2_pb2.NetDef() predict_net.CopyFrom(net.Proto()) init_net = caffe2_pb2.NetDef() # Populate the init_net. ssa, blob_versions = core.get_ssa(net) inputs = [] for versioned_inputs, _ in ssa: inputs += [name for name, _ in versioned_inputs] input_blobs = [blob_name for blob_name, version in blob_versions.items() if version == 0 and blob_name not in params] # Blobs that are never used as an input to another layer, # i.e. strictly output blobs. output_blobs = [blob_name for blob_name, version in blob_versions.items() if version != 0 and blob_name not in inputs] for blob_ref in params: blob_name = str(blob_ref) blob = workspace.FetchBlob(blob_name) init_net.op.extend( [ core.CreateOperator( "GivenTensorFill", [], [blob_name], arg=[ utils.MakeArgument("shape", blob.shape), utils.MakeArgument("values", blob) ] ) ] ) # We have to make sure the blob exists in the namespace # and we can do so with fake data. (Which is immediately overwritten # by any typical usage) for blob_name in input_blobs: init_net.op.extend( [ core.CreateOperator( "GivenTensorFill", [], [blob_name], arg=[ utils.MakeArgument("shape", [1, 1]), utils.MakeArgument("values", [0.0]) ] ) ] ) # Now we make input/output_blobs line up with what Predictor expects. del predict_net.external_input[:] predict_net.external_input.extend(input_blobs) # For populating weights predict_net.external_input.extend(net.Proto().external_input) # Ensure the output is also consistent with what we want del predict_net.external_output[:] predict_net.external_output.extend(output_blobs) return init_net, predict_net
## @package fc # Module caffe2.python.helpers.fc from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def _FC_or_packed_FC( model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None, bias_init=None, **kwargs ): """FC""" weight_init = weight_init or ('XavierFill', {}) bias_init = bias_init or ('ConstantFill', {}) blob_out = blob_out or model.net.NextName() if model.init_params: weight = model.param_init_net.__getattr__(weight_init[0])( [], blob_out + '_w', shape=[dim_out, dim_in], **weight_init[1] ) bias = model.param_init_net.__getattr__(bias_init[0])( [], blob_out + '_b', shape=[dim_out, ], **bias_init[1] ) else: weight = core.ScopedBlobReference( blob_out + '_w', model.param_init_net) bias = core.ScopedBlobReference( blob_out + '_b', model.param_init_net) if 'freeze_bias' in kwargs: model.params.extend([weight]) else: model.params.extend([weight, bias]) model.weights.append(weight) model.biases.append(bias) return op_call([blob_in, weight, bias], blob_out, **kwargs) def fc(model, *args, **kwargs): return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs) def packed_fc(model, *args, **kwargs): return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs) def fc_decomp( model, blob_in, blob_out, dim_in, dim_out, rank_approx=5, weight_init=None, bias_init=None, **kwargs ): """FC_Decomp version Here we assume that the rank of original input is bigger than 5. """ weight_init = weight_init if weight_init else ('XavierFill', {}) bias_init = bias_init if bias_init else ('ConstantFill', {}) blob_out = blob_out or model.net.NextName() u = model.param_init_net.__getattr__(weight_init[0])( [], blob_out + '_u', shape=[dim_out, rank_approx], **weight_init[1] ) v = model.param_init_net.__getattr__(weight_init[0])( [], blob_out + '_v', shape=[dim_in, rank_approx], **weight_init[1] ) bias = model.param_init_net.__getattr__(bias_init[0])( [], blob_out + '_b', shape=[dim_out, ], **bias_init[1] ) model.params.extend([u, v, bias]) return model.net.FC_Decomp([blob_in, u, v, bias], blob_out, **kwargs) def fc_prune( model, blob_in, blob_out, dim_in, dim_out, weight_init=None, bias_init=None, mask_init=None, threshold=0.00001, need_compress_rate=False, comp_lb=0.05, **kwargs ): """FC_Prune version Runnable so far. 
Great!:) """ weight_init = weight_init if weight_init else ('XavierFill', {}) bias_init = bias_init if bias_init else ('ConstantFill', {}) mask_init = mask_init if mask_init else ('ConstantFill', {}) blob_out = blob_out or model.net.NextName() compress_rate = blob_out + '_compress_rate' if model.init_params: compress_lb = model.param_init_net.ConstantFill( [], blob_out + '_lb', shape=[1], value=comp_lb ) weight = model.param_init_net.__getattr__(weight_init[0])( [], blob_out + '_w', shape=[dim_out, dim_in], **weight_init[1] ) mask = model.param_init_net.ConstantFill( [], blob_out + '_m', shape=[dim_out, dim_in], value=1.0 ) ag_dw = model.param_init_net.__getattr__(mask_init[0])( [], blob_out + '_ag_dw', shape=[dim_out, dim_in], **mask_init[1] ) bias = model.param_init_net.__getattr__(bias_init[0])( [], blob_out + '_b', shape=[dim_out, ], **bias_init[1] ) mask_seq = model.param_init_net.__getattr__(mask_init[0])( [], blob_out + '_mask_seq', shape=[dim_out, dim_in], **mask_init[1] ) thres = model.param_init_net.ConstantFill( [], blob_out + '_thres', shape=[1], value=threshold ) else: compress_lb = core.ScopedBlobReference( blob_out + '_lb', model.param_init_net) weight = core.ScopedBlobReference( blob_out + '_w', model.param_init_net) bias = core.ScopedBlobReference( blob_out + '_b', model.param_init_net) mask = core.ScopedBlobReference( blob_out + '_m', model.param_init_net) ag_dw = core.ScopedBlobReference( blob_out + '_ag_dw', model.param_init_net) mask_seq = core.ScopedBlobReference( blob_out + '_mask_seq', model.param_init_net) thres = core.ScopedBlobReference( blob_out + '_thres', model.param_init_net) model.params.extend([weight, bias]) if need_compress_rate: return model.net.FC_Prune([blob_in, weight, mask, bias, ag_dw, mask_seq, thres, compress_lb], [blob_out, compress_rate], **kwargs) else: return model.net.FC_Prune([blob_in, weight, mask, bias, ag_dw, mask_seq, thres, compress_lb], blob_out, **kwargs) def fc_sparse( model, blob_in, blob_out, w_csr, iw, jw, bias, **kwargs ): """FC_Sparse: Only takes in alocated weights""" if not (w_csr and iw and jw and bias): print("Warning...") model.params.extend([w_csr, iw, jw, bias]) return model.net.FC_Sparse([blob_in, w_csr, iw, jw, bias], blob_out, **kwargs)
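# Hedged usage sketch: these helpers are normally reached through brew, which
# forwards to fc/packed_fc above with the model as the first argument. Blob
# names and dimensions are illustrative only.
def _example_fc_usage():
    import numpy as np
    from caffe2.python import brew, model_helper, workspace

    model = model_helper.ModelHelper(name="fc_demo")
    brew.fc(model, "data", "fc1", dim_in=16, dim_out=8)
    workspace.FeedBlob("data", np.random.randn(4, 16).astype(np.float32))
    workspace.RunNetOnce(model.param_init_net)
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("fc1").shape  # expected (4, 8)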
## @package algebra # Module caffe2.python.helpers.algebra from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def transpose(model, blob_in, blob_out, use_cudnn=False, **kwargs): """Transpose.""" if use_cudnn: kwargs['engine'] = 'CUDNN' return model.net.Transpose(blob_in, blob_out, **kwargs) def sum(model, blob_in, blob_out, **kwargs): """Sum""" return model.net.Sum(blob_in, blob_out, **kwargs)
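# Hedged usage sketch, calling the helpers above directly with a ModelHelper
# (through brew they behave the same). Blob names are illustrative only.
def _example_algebra_usage():
    import numpy as np
    from caffe2.python import model_helper, workspace

    model = model_helper.ModelHelper(name="algebra_demo")
    transpose(model, "x", "x_t")          # reverses dimensions by default
    sum(model, ["x", "x"], "x_plus_x")    # elementwise sum of the inputs
    workspace.FeedBlob("x", np.random.randn(2, 3).astype(np.float32))
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("x_t").shape  # expected (3, 2)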
## @package tools
# Module caffe2.python.helpers.tools
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals


def image_input(
    model, blob_in, blob_out, order="NCHW", use_gpu_transform=False, **kwargs
):
    if order == "NCHW":
        if use_gpu_transform:
            kwargs['use_gpu_transform'] = 1
            # GPU transform will handle NHWC -> NCHW
            data, label = model.net.ImageInput(
                blob_in, [blob_out[0], blob_out[1]], **kwargs
            )
        else:
            data, label = model.net.ImageInput(
                blob_in, [blob_out[0] + '_nhwc', blob_out[1]], **kwargs
            )
            data = model.net.NHWC2NCHW(data, blob_out[0])
    else:
        data, label = model.net.ImageInput(blob_in, blob_out, **kwargs)
    return data, label


def video_input(model, blob_in, blob_out, **kwargs):
    data, label = model.net.VideoInput(blob_in, blob_out, **kwargs)
    return data, label
## @package pooling
# Module caffe2.python.helpers.pooling
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals


def max_pool(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs):
    """Max pooling"""
    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
    return model.net.MaxPool(blob_in, blob_out, order=order, **kwargs)


def average_pool(model, blob_in, blob_out, use_cudnn=False, order="NCHW",
                 **kwargs):
    """Average pooling"""
    if use_cudnn:
        kwargs['engine'] = 'CUDNN'
    return model.net.AveragePool(
        blob_in,
        blob_out,
        order=order,
        **kwargs
    )


def max_pool_with_index(model, blob_in, blob_out, order="NCHW", **kwargs):
    """Max pooling with an explicit index of max position"""
    return model.net.MaxPoolWithIndex(
        blob_in,
        [blob_out, blob_out + "_index"],
        order=order,
        **kwargs
    )[0]
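# Hedged usage sketch (illustrative blob names and sizes): a small conv layer
# followed by max pooling, built through brew as elsewhere in the codebase.
def _example_pooling_usage():
    import numpy as np
    from caffe2.python import brew, model_helper, workspace

    model = model_helper.ModelHelper(name="pool_demo")
    brew.conv(model, "data", "conv1", dim_in=1, dim_out=4, kernel=3)
    brew.max_pool(model, "conv1", "pool1", kernel=2, stride=2)
    workspace.FeedBlob("data", np.random.randn(1, 1, 8, 8).astype(np.float32))
    workspace.RunNetOnce(model.param_init_net)
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("pool1").shape  # expected (1, 4, 3, 3)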
## @package array_helpers
# Module caffe2.python.helpers.array_helpers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals


def concat(model, blobs_in, blob_out, order="NCHW", **kwargs):
    """Depth Concat."""
    return model.net.Concat(
        blobs_in,
        [blob_out, "_" + blob_out + "_concat_dims"],
        order=order,
        **kwargs
    )[0]


def depth_concat(model, blobs_in, blob_out, **kwargs):
    """The old depth concat function - we should move to use concat."""
    print("DepthConcat is deprecated. use Concat instead.")
    # Pass the model through; the original call dropped it and would fail.
    return concat(model, blobs_in, blob_out, **kwargs)
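# Hedged usage sketch (illustrative shapes): concat joins blobs along the
# channel axis when order="NCHW".
def _example_concat_usage():
    import numpy as np
    from caffe2.python import model_helper, workspace

    model = model_helper.ModelHelper(name="concat_demo")
    concat(model, ["a", "b"], "ab")
    workspace.FeedBlob("a", np.random.randn(1, 2, 4, 4).astype(np.float32))
    workspace.FeedBlob("b", np.random.randn(1, 3, 4, 4).astype(np.float32))
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("ab").shape  # expected (1, 5, 4, 4)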
## @package nonlinearity # Module caffe2.python.helpers.nonlinearity from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def prelu(model, blob_in, blob_out, num_channels=1, slope_init=None, **kwargs): """PRelu""" slope_init = ( slope_init if slope_init else ('ConstantFill', {'value': 0.25})) if model.init_params: slope = model.param_init_net.__getattr__(slope_init[0])( [], blob_out + '_slope', shape=[num_channels], **slope_init[1] ) else: slope = core.ScopedBlobReference( blob_out + '_slope', model.param_init_net) model.params.extend([slope]) return model.net.PRelu([blob_in, slope], [blob_out]) def relu(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs): """Relu.""" if use_cudnn: kwargs['engine'] = 'CUDNN' return model.net.Relu(blob_in, blob_out, order=order, **kwargs)
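# Hedged usage sketch (illustrative names): PRelu learns one slope per channel
# through the model's param_init_net; Relu is stateless.
def _example_nonlinearity_usage():
    import numpy as np
    from caffe2.python import model_helper, workspace

    model = model_helper.ModelHelper(name="nonlin_demo")
    prelu(model, "x", "x_prelu", num_channels=3)
    relu(model, "x", "x_relu")
    workspace.FeedBlob("x", np.random.randn(1, 3, 4, 4).astype(np.float32))
    workspace.RunNetOnce(model.param_init_net)
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("x_prelu").shape  # expected (1, 3, 4, 4)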
## @package train # Module caffe2.python.helpers.train from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, scope from caffe2.proto import caffe2_pb2 def _get_weights(model, namescope=None): if namescope is None: namescope = scope.CurrentNameScope() if namescope == '': return model.weights[:] else: return [w for w in model.weights if w.GetNameScope() == namescope] def iter(model, blob_out, **kwargs): if 'device_option' in kwargs: del kwargs['device_option'] model.param_init_net.ConstantFill( [], blob_out, shape=[1], value=0, dtype=core.DataType.INT64, device_option=core.DeviceOption(caffe2_pb2.CPU, 0), **kwargs ) return model.net.Iter(blob_out, blob_out, **kwargs) def accuracy(model, blob_in, blob_out, **kwargs): dev = kwargs['device_option'] if 'device_option' in kwargs \ else scope.CurrentDeviceScope() is_cpu = dev is None or dev.device_type == caffe2_pb2.CPU # We support top_k > 1 only on CPU if not is_cpu and 'top_k' in kwargs and kwargs['top_k'] > 1: pred_host = model.net.CopyGPUToCPU(blob_in[0], blob_in[0] + "_host") label_host = model.net.CopyGPUToCPU(blob_in[1], blob_in[1] + "_host") # Now use the Host version of the accuracy op model.net.Accuracy( [pred_host, label_host], blob_out, device_option=core.DeviceOption(caffe2_pb2.CPU, 0), **kwargs ) else: model.net.Accuracy(blob_in, blob_out) def add_weight_decay(model, weight_decay): """Adds a decay to weights in the model. This is a form of L2 regularization. Args: weight_decay: strength of the regularization """ if weight_decay <= 0.0: return wd = model.param_init_net.ConstantFill( [], 'wd', shape=[1], value=weight_decay ) ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) for param in _get_weights(model): # Equivalent to: grad += wd * param grad = model.param_to_grad[param] model.net.WeightedSum( [grad, ONE, param, wd], grad, )
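# Hedged graph-construction sketch of how iter/accuracy/add_weight_decay are
# typically combined (see the resnet50 trainer earlier in this collection for
# a full training loop). add_weight_decay only touches blobs registered in
# model.weights and needs gradients, so it runs after AddGradientOperators.
# Blob names are illustrative; no data is fed or executed here.
def _example_train_helpers_usage():
    from caffe2.python import brew, model_helper

    model = model_helper.ModelHelper(name="train_demo")
    brew.fc(model, "data", "pred", dim_in=8, dim_out=2)
    softmax, loss = model.net.SoftmaxWithLoss(
        ["pred", "label"], ["softmax", "loss"])
    model.AddGradientOperators([loss])
    brew.iter(model, "iter_counter")
    brew.accuracy(model, ["softmax", "label"], "accuracy")
    brew.add_weight_decay(model, 1e-4)
    return model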
## @package dropout # Module caffe2.python.helpers.dropout from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals def dropout(model, blob_in, blob_out, use_cudnn=False, **kwargs): """dropout""" if use_cudnn: kwargs['engine'] = 'CUDNN' return model.net.Dropout( blob_in, [blob_out, "_" + blob_out + "_mask"], **kwargs)[0]
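# Hedged usage sketch (illustrative names): is_test=False keeps the dropout
# mask active, as in a training net.
def _example_dropout_usage():
    import numpy as np
    from caffe2.python import model_helper, workspace

    model = model_helper.ModelHelper(name="dropout_demo")
    dropout(model, "x", "x_drop", ratio=0.5, is_test=False)
    workspace.FeedBlob("x", np.ones((2, 4), dtype=np.float32))
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("x_drop")  # roughly half the entries zeroed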
## @package conv # Module caffe2.python.helpers.conv from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core def _ConvBase( model, is_nd, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, transform_inputs=None, use_cudnn=False, order="NCHW", cudnn_exhaustive_search=False, ws_nbytes_limit=None, **kwargs ): kernels = [] if is_nd: if not isinstance(kernel, list): kernels = [kernel] else: kernels = kernel else: kernels = [kernel] * 2 if use_cudnn: kwargs['engine'] = 'CUDNN' kwargs['exhaustive_search'] = cudnn_exhaustive_search if ws_nbytes_limit: kwargs['ws_nbytes_limit'] = ws_nbytes_limit use_bias =\ False if ("no_bias" in kwargs and kwargs["no_bias"]) else True weight_init = weight_init if weight_init else ('XavierFill', {}) bias_init = bias_init if bias_init else ('ConstantFill', {}) blob_out = blob_out or model.net.NextName() weight_shape = [dim_out] if order == "NCHW": weight_shape.append(int(dim_in / group)) weight_shape.extend(kernels) else: weight_shape.extend(kernels) weight_shape.append(int(dim_in / group)) if model.init_params: weight = model.param_init_net.__getattr__(weight_init[0])( [], blob_out + '_w', shape=weight_shape, **weight_init[1] ) if use_bias: bias = model.param_init_net.__getattr__(bias_init[0])( [], blob_out + '_b', shape=[dim_out, ], **bias_init[1] ) else: weight = core.ScopedBlobReference( blob_out + '_w', model.param_init_net) if use_bias: bias = core.ScopedBlobReference( blob_out + '_b', model.param_init_net) if use_bias: model.params.extend([weight, bias]) else: model.params.extend([weight]) model.weights.append(weight) if use_bias: model.biases.append(bias) if use_bias: inputs = [blob_in, weight, bias] else: inputs = [blob_in, weight] if transform_inputs is not None: transform_inputs(model, blob_out, inputs) # For the operator, we no longer need to provide the no_bias field # because it can automatically figure this out from the number of # inputs. if 'no_bias' in kwargs: del kwargs['no_bias'] if group != 1: kwargs['group'] = group if is_nd: return model.net.Conv( inputs, blob_out, kernels=kernels, order=order, **kwargs) else: return model.net.Conv( inputs, blob_out, kernel=kernel, order=order, **kwargs) def conv_nd( model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, transform_inputs=None, order="NCHW", **kwargs ): """N-dimensional convolution for inputs with NCHW storage order. """ assert order == "NCHW", "ConvNd only supported for NCHW storage." return _ConvBase(model, True, blob_in, blob_out, dim_in, dim_out, kernel, weight_init, bias_init, group, transform_inputs, order=order, **kwargs) def conv( model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, transform_inputs=None, **kwargs ): """2-dimensional convolution. """ return _ConvBase(model, False, blob_in, blob_out, dim_in, dim_out, kernel, weight_init, bias_init, group, transform_inputs, **kwargs) def conv_transpose( model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, use_cudnn=False, order="NCHW", cudnn_exhaustive_search=False, ws_nbytes_limit=None, **kwargs ): """ConvTranspose. 
""" weight_init = weight_init if weight_init else ('XavierFill', {}) bias_init = bias_init if bias_init else ('ConstantFill', {}) blob_out = blob_out or model.net.NextName() weight_shape = ( [dim_in, dim_out, kernel, kernel] if order == "NCHW" else [dim_in, kernel, kernel, dim_out] ) if model.init_params: weight = model.param_init_net.__getattr__(weight_init[0])( [], blob_out + '_w', shape=weight_shape, **weight_init[1] ) bias = model.param_init_net.__getattr__(bias_init[0])( [], blob_out + '_b', shape=[dim_out, ], **bias_init[1] ) else: weight = core.ScopedBlobReference( blob_out + '_w', model.param_init_net) bias = core.ScopedBlobReference( blob_out + '_b', model.param_init_net) model.params.extend([weight, bias]) model.weights.append(weight) model.biases.append(bias) if use_cudnn: kwargs['engine'] = 'CUDNN' kwargs['exhaustive_search'] = cudnn_exhaustive_search if ws_nbytes_limit: kwargs['ws_nbytes_limit'] = ws_nbytes_limit return model.net.ConvTranspose( [blob_in, weight, bias], blob_out, kernel=kernel, order=order, **kwargs ) def group_conv( model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, **kwargs ): """Group Convolution. This is essentially the same as Conv with a group argument passed in. We specialize this for backward interface compatibility. """ return conv(model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=weight_init, bias_init=bias_init, group=group, **kwargs) def group_conv_deprecated( model, blob_in, blob_out, dim_in, dim_out, kernel, weight_init=None, bias_init=None, group=1, use_cudnn=False, order="NCHW", cudnn_exhaustive_search=False, ws_nbytes_limit=None, **kwargs ): """GroupConvolution's deprecated interface. This is used to simulate a group convolution via split and concat. You should always use the new group convolution in your new code. """ weight_init = weight_init if weight_init else ('XavierFill', {}) bias_init = bias_init if bias_init else ('ConstantFill', {}) use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True if use_cudnn: kwargs['engine'] = 'CUDNN' kwargs['exhaustive_search'] = cudnn_exhaustive_search if ws_nbytes_limit: kwargs['ws_nbytes_limit'] = ws_nbytes_limit if dim_in % group: raise ValueError("dim_in should be divisible by group.") if dim_out % group: raise ValueError("dim_out should be divisible by group.") splitted_blobs = model.net.DepthSplit( blob_in, ['_' + blob_out + '_gconv_split_' + str(i) for i in range(group)], dimensions=[int(dim_in / group) for i in range(group)], order=order ) weight_shape = ( [dim_out / group, dim_in / group, kernel, kernel] if order == "NCHW" else [dim_out / group, kernel, kernel, dim_in / group] ) # Make sure that the shapes are of int format. Especially for py3 where # int division gives float output. 
weight_shape = [int(v) for v in weight_shape] conv_blobs = [] for i in range(group): if model.init_params: weight = model.param_init_net.__getattr__(weight_init[0])( [], blob_out + '_gconv_%d_w' % i, shape=weight_shape, **weight_init[1] ) if use_bias: bias = model.param_init_net.__getattr__(bias_init[0])( [], blob_out + '_gconv_%d_b' % i, shape=[int(dim_out / group)], **bias_init[1] ) else: weight = core.ScopedBlobReference( blob_out + '_gconv_%d_w' % i, model.param_init_net) if use_bias: bias = core.ScopedBlobReference( blob_out + '_gconv_%d_b' % i, model.param_init_net) if use_bias: model.params.extend([weight, bias]) else: model.params.extend([weight]) model.weights.append(weight) if use_bias: model.biases.append(bias) if use_bias: inputs = [weight, bias] else: inputs = [weight] if 'no_bias' in kwargs: del kwargs['no_bias'] conv_blobs.append( splitted_blobs[i].Conv( inputs, blob_out + '_gconv_%d' % i, kernel=kernel, order=order, **kwargs ) ) concat, concat_dims = model.net.Concat( conv_blobs, [blob_out, "_" + blob_out + "_concat_dims"], order=order ) return concat
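# Hedged usage sketch (illustrative shapes): a plain 2-D convolution followed
# by a grouped one, both built through brew.conv (which forwards to conv
# above); pad=1 with kernel=3 preserves the spatial size.
def _example_conv_usage():
    import numpy as np
    from caffe2.python import brew, model_helper, workspace

    model = model_helper.ModelHelper(name="conv_demo")
    brew.conv(model, "data", "conv1", dim_in=4, dim_out=8, kernel=3, pad=1)
    brew.conv(model, "conv1", "conv2", dim_in=8, dim_out=8, kernel=3, pad=1,
              group=2)
    workspace.FeedBlob("data", np.random.randn(1, 4, 8, 8).astype(np.float32))
    workspace.RunNetOnce(model.param_init_net)
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("conv2").shape  # expected (1, 8, 8, 8)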
from __future__ import absolute_import from __future__ import division from __future__ import print_function import contextlib import copy import threading _threadlocal_scope = threading.local() @contextlib.contextmanager def arg_scope(single_helper_or_list, **kwargs): global _threadlocal_scope if not isinstance(single_helper_or_list, list): assert callable(single_helper_or_list), \ "arg_scope is only supporting single or a list of helper functions." single_helper_or_list = [single_helper_or_list] old_scope = copy.deepcopy(get_current_scope()) for helper in single_helper_or_list: assert callable(helper), \ "arg_scope is only supporting a list of callable helper functions." helper_key = helper.__name__ if helper_key not in old_scope: _threadlocal_scope.current_scope[helper_key] = {} _threadlocal_scope.current_scope[helper_key].update(kwargs) yield _threadlocal_scope.current_scope = old_scope def get_current_scope(): global _threadlocal_scope if not hasattr(_threadlocal_scope, "current_scope"): _threadlocal_scope.current_scope = {} return _threadlocal_scope.current_scope
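# Hedged usage sketch: arg_scope stores default kwargs per helper in a
# thread-local dict keyed by the helper's __name__; brew-style wrappers are
# expected to merge get_current_scope() into their own kwargs. The helper
# below is a stand-in that shows the mechanics without depending on brew.
def _example_arg_scope_usage():
    def my_helper(model, blob_in, blob_out, **kwargs):
        merged = dict(get_current_scope().get(my_helper.__name__, {}))
        merged.update(kwargs)
        return ("my_helper", blob_in, blob_out, merged)

    with arg_scope(my_helper, kernel=3, pad=1):
        call = my_helper(None, "x", "y", stride=2)
    # kwargs from the scope and the call site are merged:
    # ("my_helper", "x", "y", {"kernel": 3, "pad": 1, "stride": 2})
    return call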
## @package normalization # Module caffe2.python.helpers.normalization from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, scope from caffe2.proto import caffe2_pb2 def lrn(model, blob_in, blob_out, order="NCHW", use_cudnn=False, **kwargs): """LRN""" dev = kwargs['device_option'] if 'device_option' in kwargs \ else scope.CurrentDeviceScope() is_cpu = dev is None or dev.device_type == caffe2_pb2.CPU if use_cudnn and (not is_cpu): kwargs['engine'] = 'CUDNN' blobs_out = blob_out else: blobs_out = [blob_out, "_" + blob_out + "_scale"] lrn = model.net.LRN( blob_in, blobs_out, order=order, **kwargs ) if use_cudnn and (not is_cpu): return lrn else: return lrn[0] def softmax(model, blob_in, blob_out=None, use_cudnn=False, **kwargs): """Softmax.""" if use_cudnn: kwargs['engine'] = 'CUDNN' if blob_out is not None: return model.net.Softmax(blob_in, blob_out, **kwargs) else: return model.net.Softmax(blob_in, **kwargs) def instance_norm(model, blob_in, blob_out, dim_in, order="NCHW", **kwargs): blob_out = blob_out or model.net.NextName() # Input: input, scale, bias # Output: output, saved_mean, saved_inv_std # scale: initialize with ones # bias: initialize with zeros def init_blob(value, suffix): return model.param_init_net.ConstantFill( [], blob_out + "_" + suffix, shape=[dim_in], value=value) scale, bias = init_blob(1.0, "s"), init_blob(0.0, "b") model.params.extend([scale, bias]) model.weights.append(scale) model.biases.append(bias) blob_outs = [blob_out, blob_out + "_sm", blob_out + "_siv"] if 'is_test' in kwargs and kwargs['is_test']: blob_outputs = model.net.InstanceNorm( [blob_in, scale, bias], [blob_out], order=order, **kwargs) return blob_outputs else: blob_outputs = model.net.InstanceNorm( [blob_in, scale, bias], blob_outs, order=order, **kwargs) # Return the output return blob_outputs[0] def spatial_bn(model, blob_in, blob_out, dim_in, order="NCHW", **kwargs): blob_out = blob_out or model.net.NextName() # Input: input, scale, bias, est_mean, est_inv_var # Output: output, running_mean, running_inv_var, saved_mean, # saved_inv_var # scale: initialize with ones # bias: initialize with zeros # est mean: zero # est var: ones def init_blob(value, suffix): return model.param_init_net.ConstantFill( [], blob_out + "_" + suffix, shape=[dim_in], value=value) if model.init_params: scale, bias = init_blob(1.0, "s"), init_blob(0.0, "b") running_mean = init_blob(0.0, "rm") running_inv_var = init_blob(1.0, "riv") else: scale = core.ScopedBlobReference( blob_out + '_s', model.param_init_net) bias = core.ScopedBlobReference( blob_out + '_b', model.param_init_net) running_mean = core.ScopedBlobReference( blob_out + '_rm', model.param_init_net) running_inv_var = core.ScopedBlobReference( blob_out + '_riv', model.param_init_net) model.params.extend([scale, bias]) model.computed_params.extend([running_mean, running_inv_var]) model.weights.append(scale) model.biases.append(bias) blob_outs = [blob_out, running_mean, running_inv_var, blob_out + "_sm", blob_out + "_siv"] if 'is_test' in kwargs and kwargs['is_test']: blob_outputs = model.net.SpatialBN( [blob_in, scale, bias, blob_outs[1], blob_outs[2]], [blob_out], order=order, **kwargs) return blob_outputs else: blob_outputs = model.net.SpatialBN( [blob_in, scale, bias, blob_outs[1], blob_outs[2]], blob_outs, order=order, **kwargs) # Return the output return blob_outputs[0]
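# Hedged usage sketch for spatial_bn (illustrative shapes): with is_test=False
# the op also produces running statistics, which the helper wires up above;
# with is_test=True it consumes the stored estimates instead.
def _example_spatial_bn_usage():
    import numpy as np
    from caffe2.python import model_helper, workspace

    model = model_helper.ModelHelper(name="bn_demo")
    spatial_bn(model, "x", "x_bn", dim_in=3, epsilon=1e-3, is_test=False)
    workspace.FeedBlob("x", np.random.randn(2, 3, 4, 4).astype(np.float32))
    workspace.RunNetOnce(model.param_init_net)
    workspace.RunNetOnce(model.net)
    return workspace.FetchBlob("x_bn").shape  # expected (2, 3, 4, 4)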
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.proto import caffe2_pb2 from caffe2.python import workspace, core, lstm_benchmark, utils from copy import copy @utils.debug def Compare(args): results = [] num_iters = 1000 args.gpu = True with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)): for batch_size in [64, 128, 256]: for seq_length in [20, 100]: for hidden_dim in [40, 100, 400, 800]: args.batch_size = batch_size args.seq_length = seq_length args.hidden_dim = hidden_dim args.data_size = batch_size * seq_length * num_iters args.iters_to_report = num_iters // 3 args.implementation = 'own' t_own = lstm_benchmark.Benchmark(args) workspace.ResetWorkspace() args.implementation = 'cudnn' t_cudnn = lstm_benchmark.Benchmark(args) workspace.ResetWorkspace() results.append((copy(args), float(t_own), float(t_cudnn))) print(args) print("t_cudnn / t_own: {}".format(t_cudnn / t_own)) for args, t_own, t_cudnn in results: print("{}: cudnn time: {}, own time: {}, ratio: {}".format( str(args), t_cudnn, t_own, t_cudnn / t_own)) ratio_sum = 0 for args, t_own, t_cudnn in results: ratio = float(t_cudnn) / t_own ratio_sum += ratio print("hidden_dim: {}, seq_lengths: {}, batch_size: {}, num_layers: {}:" " cudnn time: {}, own time: {}, ratio: {}".format( args.hidden_dim, args.seq_length, args.batch_size, args.num_layers, t_cudnn, t_own, ratio)) print("Ratio average: {}".format(ratio_sum / len(results))) if __name__ == '__main__': args = lstm_benchmark.GetArgumentParser().parse_args() workspace.GlobalInit([ 'caffe2', '--caffe2_log_level=0', '--caffe2_print_blob_sizes_at_exit=0', '--caffe2_gpu_memory_tracking=1']) Compare(args)
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os import uuid from caffe2.distributed.store_ops_test_util import StoreOpsTests from caffe2.python import core, workspace, dyndep from caffe2.python.test_util import TestCase dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:redis_store_handler_ops") dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops") class TestRedisStoreHandlerOp(TestCase): def setUp(self): super(TestRedisStoreHandlerOp, self).setUp() self.uuid = str(uuid.uuid4()) + "/" def tearDown(self): super(TestRedisStoreHandlerOp, self).tearDown() def create_store_handler(self): store_handler = "store_handler" workspace.RunOperatorOnce( core.CreateOperator( "RedisStoreHandlerCreate", [], [store_handler], prefix=self.uuid, host=os.getenv("REDIS_HOST", "localhost"), port=int(os.getenv("REDIS_PORT", 6379)))) return store_handler def test_set_get(self): StoreOpsTests.test_set_get(self.create_store_handler)
## @package store_ops_test_util # Module caffe2.distributed.store_ops_test_util from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from multiprocessing import Process, Queue import numpy as np from caffe2.python import core, workspace class StoreOpsTests(object): @classmethod def _test_set_get(cls, queue, create_store_handler_fn, index, num_procs): store_handler = create_store_handler_fn() blob = "blob" value = np.full(1, 1, np.float32) # Use last process to set blob to make sure other processes # are waiting for the blob before it is set. if index == (num_procs - 1): workspace.FeedBlob(blob, value) workspace.RunOperatorOnce( core.CreateOperator( "StoreSet", [store_handler, blob], [], blob_name=blob)) output_blob = "output_blob" workspace.RunOperatorOnce( core.CreateOperator( "StoreGet", [store_handler], [output_blob], blob_name=blob)) try: np.testing.assert_array_equal(workspace.FetchBlob(output_blob), 1) except AssertionError as err: queue.put(err) workspace.ResetWorkspace() @classmethod def test_set_get(cls, create_store_handler_fn): # Queue for assertion errors on subprocesses queue = Queue() # Start N processes in the background num_procs = 4 procs = [] for index in range(num_procs): proc = Process( target=cls._test_set_get, args=(queue, create_store_handler_fn, index, num_procs, )) proc.start() procs.append(proc) # Test complete, join background processes for proc in procs: proc.join() # Raise first error we find, if any if not queue.empty(): raise queue.get()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import errno import os import tempfile import shutil from caffe2.distributed.store_ops_test_util import StoreOpsTests from caffe2.python import core, workspace, dyndep from caffe2.python.test_util import TestCase dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops") dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops") class TestFileStoreHandlerOp(TestCase): testCounter = 0 def setUp(self): super(TestFileStoreHandlerOp, self).setUp() self.tmpdir = tempfile.mkdtemp() # Use counter to tell test cases apart TestFileStoreHandlerOp.testCounter += 1 def tearDown(self): shutil.rmtree(self.tmpdir) super(TestFileStoreHandlerOp, self).tearDown() def create_store_handler(self): # Use new path for every test so they are isolated path = self.tmpdir + "/" + str(TestFileStoreHandlerOp.testCounter) # Ensure path exists (including counter) try: os.makedirs(path) except OSError as exc: if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise store_handler = "store_handler" workspace.RunOperatorOnce( core.CreateOperator( "FileStoreHandlerCreate", [], [store_handler], path=path)) return store_handler def test_set_get(self): StoreOpsTests.test_set_get(self.create_store_handler)
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from scipy.sparse import coo_matrix from hypothesis import given import hypothesis.strategies as st from caffe2.python import core import caffe2.python.hypothesis_test_util as hu class TestFunHash(hu.HypothesisTestCase): @given(n_out=st.integers(min_value=5, max_value=20), n_in=st.integers(min_value=10, max_value=20), n_data=st.integers(min_value=2, max_value=8), n_weight=st.integers(min_value=8, max_value=15), n_alpha=st.integers(min_value=3, max_value=8), sparsity=st.floats(min_value=0.1, max_value=1.0), **hu.gcs) def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity, gc, dc): A = np.random.rand(n_data, n_in) A[A > sparsity] = 0 A_coo = coo_matrix(A) val, key, seg = A_coo.data, A_coo.col, A_coo.row weight = np.random.rand(n_weight).astype(np.float32) alpha = np.random.rand(n_alpha).astype(np.float32) val = val.astype(np.float32) key = key.astype(np.int64) seg = seg.astype(np.int32) op = core.CreateOperator( 'SparseFunHash', ['val', 'key', 'seg', 'weight', 'alpha'], ['out'], num_outputs=n_out) # Gradient check wrt weight self.assertGradientChecks( gc, op, [val, key, seg, weight, alpha], 3, [0]) # Gradient check wrt alpha self.assertGradientChecks( gc, op, [val, key, seg, weight, alpha], 4, [0]) op2 = core.CreateOperator( 'SparseFunHash', ['val', 'key', 'seg', 'weight'], ['out'], num_outputs=n_out) # Gradient check wrt weight self.assertGradientChecks( gc, op2, [val, key, seg, weight], 3, [0])
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from hypothesis import given import hypothesis.strategies as st from caffe2.python import core, workspace import caffe2.python.hypothesis_test_util as hu class TestTTPad(hu.HypothesisTestCase): @given(K=st.integers(min_value=2, max_value=10), M=st.integers(min_value=10, max_value=20), N=st.integers(min_value=10, max_value=20), **hu.gcs) def test_tt_pad(self, K, M, N, gc, dc): op = core.CreateOperator( 'TTPad', ['A'], ['A', 'dim0'], scale=(K)) A = np.random.rand(M, N).astype(np.float32) workspace.FeedBlob('A', A) workspace.RunOperatorOnce(op) def tt_pad_ref(A_): M_ = A_.shape[0] if M_ % K == 0: new_dim0 = M_ else: new_dim0 = (M_ // K + 1) * K return (np.vstack((A_, np.zeros((new_dim0 - M_, A_.shape[1])))), np.array([A.shape[0]])) # Check against numpy reference self.assertReferenceChecks(gc, op, [A], tt_pad_ref) # Check over multiple devices self.assertDeviceChecks(dc, op, [A], [0]) # Gradient check wrt A self.assertGradientChecks(gc, op, [A], 0, [0])
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from scipy.sparse import coo_matrix from hypothesis import given import hypothesis.strategies as st from caffe2.python import core import caffe2.python.hypothesis_test_util as hu class TestFunHash(hu.HypothesisTestCase): @given(n_out=st.integers(min_value=5, max_value=20), n_in=st.integers(min_value=10, max_value=20), n_data=st.integers(min_value=2, max_value=8), n_weight=st.integers(min_value=8, max_value=15), n_alpha=st.integers(min_value=3, max_value=8), sparsity=st.floats(min_value=0.1, max_value=1.0), **hu.gcs) def test_funhash(self, n_out, n_in, n_data, n_weight, n_alpha, sparsity, gc, dc): A = np.random.rand(n_data, n_in) A[A > sparsity] = 0 A_coo = coo_matrix(A) val, key, seg = A_coo.data, A_coo.col, A_coo.row weight = np.random.rand(n_weight).astype(np.float32) alpha = np.random.rand(n_alpha).astype(np.float32) val = val.astype(np.float32) key = key.astype(np.int64) seg = seg.astype(np.int32) op = core.CreateOperator( 'FunHash', ['val', 'key', 'seg', 'weight', 'alpha'], ['out'], num_outputs=n_out) # Check over multiple devices self.assertDeviceChecks( dc, op, [val, key, seg, weight, alpha], [0]) # Gradient check wrt weight self.assertGradientChecks( gc, op, [val, key, seg, weight, alpha], 3, [0]) # Gradient check wrt alpha self.assertGradientChecks( gc, op, [val, key, seg, weight, alpha], 4, [0]) op2 = core.CreateOperator( 'FunHash', ['val', 'key', 'seg', 'weight'], ['out'], num_outputs=n_out) # Check over multiple devices self.assertDeviceChecks( dc, op2, [val, key, seg, weight], [0]) # Gradient check wrt weight self.assertGradientChecks( gc, op2, [val, key, seg, weight], 3, [0])
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from scipy.sparse import coo_matrix from caffe2.python import core, workspace from caffe2.python.test_util import TestCase def test_reshape(old_shape, new_shape, stride_only=False): blob_in0 = 'col' blob_out0 = 'col_out' blob_in1 = 'row' blob_out1 = 'row_out' old_shape_for_op = (-1, old_shape[1]) if stride_only else old_shape op = core.CreateOperator('SparseMatrixReshape', [blob_in0, blob_in1], [blob_out0, blob_out1], old_shape=old_shape_for_op, new_shape=new_shape) A = np.random.random_sample(old_shape) A[np.random.random_sample(old_shape) > .5] = 0 A_coo = coo_matrix(A) old_row, old_col = A_coo.row, A_coo.col workspace.FeedBlob(blob_in0, old_col.astype(np.int64)) workspace.FeedBlob(blob_in1, old_row.astype(np.int32)) workspace.RunOperatorOnce(op) A_new_coo = coo_matrix(A.reshape(new_shape)) new_row, new_col = A_new_coo.row, A_new_coo.col col_out = workspace.FetchBlob(blob_out0) row_out = workspace.FetchBlob(blob_out1) np.testing.assert_array_equal(col_out, new_col) np.testing.assert_array_equal(row_out, new_row) class TestSparseMatrixReshapeOp(TestCase): def test_basic_reshape(self): test_reshape(old_shape=(3, 4), new_shape=(4, 3)) def test_missing_dim(self): test_reshape(old_shape=(2, 8), new_shape=(-1, 4)) def test_stride_only(self): test_reshape(old_shape=(2, 8), new_shape=(-1, 4), stride_only=True) def test_sparse_reshape_mm(self): M, N, K = 300, 400, 500 A = np.random.rand(M, K).astype(np.float32) A_sparse = A * (np.random.rand(*A.shape) > .5) A_sparse = A_sparse.reshape((K, M)) A_coo = coo_matrix(A_sparse) idx0, idx1, a = A_coo.row, A_coo.col, A_coo.data B = np.random.rand(K, N).astype(np.float32) workspace.FeedBlob('col', idx1.astype(np.int64)) workspace.FeedBlob('row', idx0.astype(np.int32)) workspace.FeedBlob('B', B) workspace.FeedBlob('a', a) reshape_op = core.CreateOperator( 'SparseMatrixReshape', ['col', 'row'], ['new_col', 'new_row'], old_shape=(K, M), new_shape=(M, K)) mm_op = core.CreateOperator( 'SparseUnsortedSegmentWeightedSum', ['B', 'a', 'new_col', 'new_row'], ['Y']) workspace.RunOperatorOnce(reshape_op) workspace.RunOperatorOnce(mm_op) Y = workspace.FetchBlob('Y') np.testing.assert_allclose(A_sparse.reshape(M, K).dot(B), Y, rtol=1e-4)
## @package convnet_benchmarks # Module caffe2.experiments.python.convnet_benchmarks from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals """ Benchmark for common convnets. (NOTE: Numbers below prior with missing parameter=update step, TODO to update) Speed on Titan X, with 10 warmup steps and 10 main steps and with different versions of cudnn, are as follows (time reported below is per-batch time, forward / forward+backward): CuDNN V3 CuDNN v4 AlexNet 32.5 / 108.0 27.4 / 90.1 OverFeat 113.0 / 342.3 91.7 / 276.5 Inception 134.5 / 485.8 125.7 / 450.6 VGG (batch 64) 200.8 / 650.0 164.1 / 551.7 Speed on Inception with varied batch sizes and CuDNN v4 is as follows: Batch Size Speed per batch Speed per image 16 22.8 / 72.7 1.43 / 4.54 32 38.0 / 127.5 1.19 / 3.98 64 67.2 / 233.6 1.05 / 3.65 128 125.7 / 450.6 0.98 / 3.52 Speed on Tesla M40, which 10 warmup steps and 10 main steps and with cudnn v4, is as follows: AlexNet 68.4 / 218.1 OverFeat 210.5 / 630.3 Inception 300.2 / 1122.2 VGG (batch 64) 405.8 / 1327.7 (Note that these numbers involve a "full" backprop, i.e. the gradient with respect to the input image is also computed.) To get the numbers, simply run: for MODEL in AlexNet OverFeat Inception; do PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \ --batch_size 128 --model $MODEL --forward_only True done for MODEL in AlexNet OverFeat Inception; do PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \ --batch_size 128 --model $MODEL done PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \ --batch_size 64 --model VGGA --forward_only True PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \ --batch_size 64 --model VGGA for BS in 16 32 64 128; do PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \ --batch_size $BS --model Inception --forward_only True PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \ --batch_size $BS --model Inception done Note that VGG needs to be run at batch 64 due to memory limit on the backward pass. 
""" import argparse import time from caffe2.python import cnn, workspace, core import caffe2.python.SparseTransformer as SparseTransformer def MLP(order): model = cnn.CNNModelHelper() d = 256 depth = 20 width = 3 for i in range(depth): for j in range(width): current = "fc_{}_{}".format(i, j) if i > 0 else "data" next_ = "fc_{}_{}".format(i + 1, j) model.FC( current, next_, dim_in=d, dim_out=d, weight_init=model.XavierInit, bias_init=model.XavierInit) model.Sum(["fc_{}_{}".format(depth, j) for j in range(width)], ["sum"]) model.FC("sum", "last", dim_in=d, dim_out=1000, weight_init=model.XavierInit, bias_init=model.XavierInit) xent = model.LabelCrossEntropy(["last", "label"], "xent") model.AveragedLoss(xent, "loss") return model, d def AlexNet(order): model = cnn.CNNModelHelper(order, name="alexnet", use_cudnn=True, cudnn_exhaustive_search=True) conv1 = model.Conv( "data", "conv1", 3, 64, 11, ('XavierFill', {}), ('ConstantFill', {}), stride=4, pad=2 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2) conv2 = model.Conv( pool1, "conv2", 64, 192, 5, ('XavierFill', {}), ('ConstantFill', {}), pad=2 ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2) conv3 = model.Conv( pool2, "conv3", 192, 384, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu3 = model.Relu(conv3, "conv3") conv4 = model.Conv( relu3, "conv4", 384, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu4 = model.Relu(conv4, "conv4") conv5 = model.Conv( relu4, "conv5", 256, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu5 = model.Relu(conv5, "conv5") pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2) fc6 = model.FC( pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relu6 = model.Relu(fc6, "fc6") fc7 = model.FC( relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relu7 = model.Relu(fc7, "fc7") fc8 = model.FC( relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) pred = model.Softmax(fc8, "pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") model.AveragedLoss(xent, "loss") return model, 224 def OverFeat(order): model = cnn.CNNModelHelper(order, name="overfeat", use_cudnn=True, cudnn_exhaustive_search=True) conv1 = model.Conv( "data", "conv1", 3, 96, 11, ('XavierFill', {}), ('ConstantFill', {}), stride=4 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2) conv2 = model.Conv( pool1, "conv2", 96, 256, 5, ('XavierFill', {}), ('ConstantFill', {}) ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2) conv3 = model.Conv( pool2, "conv3", 256, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu3 = model.Relu(conv3, "conv3") conv4 = model.Conv( relu3, "conv4", 512, 1024, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu4 = model.Relu(conv4, "conv4") conv5 = model.Conv( relu4, "conv5", 1024, 1024, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu5 = model.Relu(conv5, "conv5") pool5 = model.MaxPool(relu5, "pool5", kernel=2, stride=2) fc6 = model.FC( pool5, "fc6", 1024 * 6 * 6, 3072, ('XavierFill', {}), ('ConstantFill', {}) ) relu6 = model.Relu(fc6, "fc6") fc7 = model.FC( relu6, "fc7", 3072, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relu7 = model.Relu(fc7, "fc7") fc8 = model.FC( relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) pred = model.Softmax(fc8, "pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") 
model.AveragedLoss(xent, "loss") return model, 231 def VGGA(order): model = cnn.CNNModelHelper(order, name='vgg-a', use_cudnn=True, cudnn_exhaustive_search=True) conv1 = model.Conv( "data", "conv1", 3, 64, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2) conv2 = model.Conv( pool1, "conv2", 64, 128, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2) conv3 = model.Conv( pool2, "conv3", 128, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu3 = model.Relu(conv3, "conv3") conv4 = model.Conv( relu3, "conv4", 256, 256, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu4 = model.Relu(conv4, "conv4") pool4 = model.MaxPool(relu4, "pool4", kernel=2, stride=2) conv5 = model.Conv( pool4, "conv5", 256, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu5 = model.Relu(conv5, "conv5") conv6 = model.Conv( relu5, "conv6", 512, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu6 = model.Relu(conv6, "conv6") pool6 = model.MaxPool(relu6, "pool6", kernel=2, stride=2) conv7 = model.Conv( pool6, "conv7", 512, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu7 = model.Relu(conv7, "conv7") conv8 = model.Conv( relu7, "conv8", 512, 512, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu8 = model.Relu(conv8, "conv8") pool8 = model.MaxPool(relu8, "pool8", kernel=2, stride=2) fcix = model.FC( pool8, "fcix", 512 * 7 * 7, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) reluix = model.Relu(fcix, "fcix") fcx = model.FC( reluix, "fcx", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {}) ) relux = model.Relu(fcx, "fcx") fcxi = model.FC( relux, "fcxi", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) pred = model.Softmax(fcxi, "pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") model.AveragedLoss(xent, "loss") return model, 231 def net_DAG_Builder(model): print("====================================================") print(" Start Building DAG ") print("====================================================") net_root = SparseTransformer.netbuilder(model) return net_root def _InceptionModule( model, input_blob, input_depth, output_name, conv1_depth, conv3_depths, conv5_depths, pool_depth ): # path 1: 1x1 conv conv1 = model.Conv( input_blob, output_name + ":conv1", input_depth, conv1_depth, 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv1 = model.Relu(conv1, conv1) # path 2: 1x1 conv + 3x3 conv conv3_reduce = model.Conv( input_blob, output_name + ":conv3_reduce", input_depth, conv3_depths[0], 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv3_reduce = model.Relu(conv3_reduce, conv3_reduce) conv3 = model.Conv( conv3_reduce, output_name + ":conv3", conv3_depths[0], conv3_depths[1], 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) conv3 = model.Relu(conv3, conv3) # path 3: 1x1 conv + 5x5 conv conv5_reduce = model.Conv( input_blob, output_name + ":conv5_reduce", input_depth, conv5_depths[0], 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv5_reduce = model.Relu(conv5_reduce, conv5_reduce) conv5 = model.Conv( conv5_reduce, output_name + ":conv5", conv5_depths[0], conv5_depths[1], 5, ('XavierFill', {}), ('ConstantFill', {}), pad=2 ) conv5 = model.Relu(conv5, conv5) # path 4: pool + 1x1 conv pool = model.MaxPool( input_blob, output_name + ":pool", kernel=3, stride=1, pad=1 ) pool_proj = model.Conv( pool, output_name + ":pool_proj", input_depth, pool_depth, 1, 
('XavierFill', {}), ('ConstantFill', {}) ) pool_proj = model.Relu(pool_proj, pool_proj) output = model.Concat([conv1, conv3, conv5, pool_proj], output_name) return output def Inception(order): model = cnn.CNNModelHelper(order, name="inception", use_cudnn=True, cudnn_exhaustive_search=True) conv1 = model.Conv( "data", "conv1", 3, 64, 7, ('XavierFill', {}), ('ConstantFill', {}), stride=2, pad=3 ) relu1 = model.Relu(conv1, "conv1") pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2, pad=1) conv2a = model.Conv( pool1, "conv2a", 64, 64, 1, ('XavierFill', {}), ('ConstantFill', {}) ) conv2a = model.Relu(conv2a, conv2a) conv2 = model.Conv( conv2a, "conv2", 64, 192, 3, ('XavierFill', {}), ('ConstantFill', {}), pad=1 ) relu2 = model.Relu(conv2, "conv2") pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2, pad=1) # Inception modules inc3 = _InceptionModule( model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32 ) inc4 = _InceptionModule( model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64 ) pool5 = model.MaxPool(inc4, "pool5", kernel=3, stride=2, pad=1) inc5 = _InceptionModule( model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64 ) inc6 = _InceptionModule( model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64 ) inc7 = _InceptionModule( model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64 ) inc8 = _InceptionModule( model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64 ) inc9 = _InceptionModule( model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128 ) pool9 = model.MaxPool(inc9, "pool9", kernel=3, stride=2, pad=1) inc10 = _InceptionModule( model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128 ) inc11 = _InceptionModule( model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128 ) pool11 = model.AveragePool(inc11, "pool11", kernel=7, stride=1) fc = model.FC( pool11, "fc", 1024, 1000, ('XavierFill', {}), ('ConstantFill', {}) ) # It seems that Soumith's benchmark does not have softmax on top # for Inception. We will add it anyway so we can have a proper # backward pass. pred = model.Softmax(fc, "pred") xent = model.LabelCrossEntropy([pred, "label"], "xent") model.AveragedLoss(xent, "loss") return model, 224 def AddInput(model, batch_size, db, db_type): """Adds the data input part.""" data_uint8, label = model.TensorProtosDBInput( [], ["data_uint8", "label"], batch_size=batch_size, db=db, db_type=db_type ) data = model.Cast(data_uint8, "data_nhwc", to=core.DataType.FLOAT) data = model.NHWC2NCHW(data, "data") data = model.Scale(data, data, scale=float(1. / 256)) data = model.StopGradient(data, data) return data, label def AddParameterUpdate(model): """ Simple plain SGD update -- not tuned to actually train the models """ ITER = model.Iter("iter") LR = model.LearningRate( ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999) ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0) for param in model.params: param_grad = model.param_to_grad[param] model.WeightedSum([param, ONE, param_grad, LR], param) def Benchmark(model_gen, arg): model, input_size = model_gen(arg.order) model.Proto().type = arg.net_type model.Proto().num_workers = arg.num_workers # In order to be able to run everything without feeding more stuff, let's # add the data and label blobs to the parameter initialization net as well. 
if arg.order == "NCHW": input_shape = [arg.batch_size, 3, input_size, input_size] else: input_shape = [arg.batch_size, input_size, input_size, 3] if arg.model == "MLP": input_shape = [arg.batch_size, input_size] model.param_init_net.GaussianFill( [], "data", shape=input_shape, mean=0.0, std=1.0 ) model.param_init_net.UniformIntFill( [], "label", shape=[arg.batch_size, ], min=0, max=999 ) if arg.forward_only: print('{}: running forward only.'.format(arg.model)) else: print('{}: running forward-backward.'.format(arg.model)) model.AddGradientOperators(["loss"]) AddParameterUpdate(model) if arg.order == 'NHWC': print( '==WARNING==\n' 'NHWC order with CuDNN may not be supported yet, so I might\n' 'exit suddenly.' ) if not arg.cpu: model.param_init_net.RunAllOnGPU() model.net.RunAllOnGPU() if arg.dump_model: # Writes out the pbtxt for benchmarks on e.g. Android with open( "{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w" ) as fid: fid.write(str(model.param_init_net.Proto())) with open("{0}.pbtxt".format(arg.model, arg.batch_size), "w") as fid: fid.write(str(model.net.Proto())) workspace.RunNetOnce(model.param_init_net) workspace.CreateNet(model.net) for i in range(arg.warmup_iterations): workspace.RunNet(model.net.Proto().name) plan = core.Plan("plan") plan.AddStep(core.ExecutionStep("run", model.net, arg.iterations)) start = time.time() workspace.RunPlan(plan) print('Spent: {}'.format((time.time() - start) / arg.iterations)) if arg.layer_wise_benchmark: print('Layer-wise benchmark.') workspace.BenchmarkNet(model.net.Proto().name, 1, arg.iterations, True) def GetArgumentParser(): parser = argparse.ArgumentParser(description="Caffe2 benchmark.") parser.add_argument( "--batch_size", type=int, default=128, help="The batch size." ) parser.add_argument("--model", type=str, help="The model to benchmark.") parser.add_argument( "--order", type=str, default="NCHW", help="The order to evaluate." ) parser.add_argument( "--cudnn_ws", type=int, default=-1, help="The cudnn workspace size." ) parser.add_argument( "--iterations", type=int, default=10, help="Number of iterations to run the network." ) parser.add_argument( "--warmup_iterations", type=int, default=10, help="Number of warm-up iterations before benchmarking." ) parser.add_argument( "--forward_only", action='store_true', help="If set, only run the forward pass." ) parser.add_argument( "--layer_wise_benchmark", action='store_true', help="If True, run the layer-wise benchmark as well." ) parser.add_argument( "--cpu", action='store_true', help="If True, run testing on CPU instead of GPU." ) parser.add_argument( "--dump_model", action='store_true', help="If True, dump the model prototxts to disk." ) parser.add_argument("--net_type", type=str, default="dag") parser.add_argument("--num_workers", type=int, default=2) return parser if __name__ == '__main__': args = GetArgumentParser().parse_args() if ( not args.batch_size or not args.model or not args.order or not args.cudnn_ws ): GetArgumentParser().print_help() workspace.GlobalInit(['caffe2', '--caffe2_log_level=0']) model_map = { 'AlexNet': AlexNet, 'OverFeat': OverFeat, 'VGGA': VGGA, 'Inception': Inception, 'MLP': MLP, } Benchmark(model_map[args.model], args)
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import numpy as np from hypothesis import given import hypothesis.strategies as st from caffe2.python import core, workspace import caffe2.python.hypothesis_test_util as hu class TestTTContraction(hu.HypothesisTestCase): @given(D=st.integers(min_value=5, max_value=20), K=st.integers(min_value=5, max_value=20), M=st.integers(min_value=5, max_value=20), N=st.integers(min_value=5, max_value=20), **hu.gcs) def test_tt_contraction(self, D, K, M, N, gc, dc): A = np.random.rand(K, M).astype(np.float32) B = np.random.rand(D, K, N).astype(np.float32) workspace.FeedBlob('A', A) workspace.FeedBlob('B', B) op = core.CreateOperator( 'TTContraction', ['A', 'B'], ['C'], K=K, M=M, N=N) workspace.RunOperatorOnce(op) def tt_contraction_ref(A_, B_): return ((A_[:, :, np.newaxis] * B_[:, :, np.newaxis, :]) .sum(axis=1).flatten()), # Check against numpy reference self.assertReferenceChecks(gc, op, [A, B], tt_contraction_ref) # Check over multiple devices self.assertDeviceChecks(dc, op, [A, B], [0]) # Gradient check wrt A self.assertGradientChecks(gc, op, [A, B], 0, [0]) # Gradient check wrt B self.assertGradientChecks(gc, op, [A, B], 1, [0])
## @package SparseTransformer # Module caffe2.experiments.python.SparseTransformer from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import workspace import scipy.sparse class NetDefNode(): def __init__(self, name, optype, p=None, op=None): self.name = name self.optype = optype self.ops = {} self.prev = {} self.insertInput(p) self.visited = False self.op = op def insertInput(self, p): """ Insert input of this op also maintain the output of previous op p: a node or a list of node """ if isinstance(p, list): for i in p: self.prev[i.name] = i i.ops[self.name] = self elif isinstance(p, NetDefNode): self.prev[p.name] = p p.ops[self.name] = self def deleteInput(self, p): if isinstance(p, NetDefNode): del self.prev[p.name] del p.ops[self.name] def maskNallocate(weight_name): """ Combine mask and weights create wcsr, iw, jw, return their names """ w = workspace.FetchBlob(weight_name) w_csr = scipy.sparse.csr_matrix(w) wcsr = w_csr.data iw = w_csr.indptr jw = w_csr.indices workspace.FeedBlob(weight_name + "wcsr", wcsr) workspace.FeedBlob(weight_name + "iw", iw) workspace.FeedBlob(weight_name + "jw", jw) return weight_name + "wcsr", weight_name + "iw", weight_name + "jw" def transFCRelu(cur, id2node, name2id, ops, model): """ Add trans before and after this FC_Prune->(Relu)->FC_Prune chain. """ # 1. add trans before the start of this chain # assuming that cur is an FC_Prune, and it has only one input pre = cur.prev.itervalues().next() # Create an node /op and insert it. # TODO(wyiming): check whether it is correct here current_blob = model.Transpose(cur.op.input[0], cur.op.input[0] + "_trans") # print model.net.Proto() trans_op = model.net.Proto().op[-1] trans_node = NetDefNode(trans_op.output[0], "Transpose", pre, trans_op) trans_node.visited = True pre_new = trans_node # 2. use while loop to visit the chain while True: # breakup with the parent cur.deleteInput(pre) if not (cur.optype == "FC_Prune" or cur.optype == "Relu"): print("Reaching the end of the chain") break if len(cur.ops) > 1: print("A FC/Relu giving more than 1 useful outputs") if cur.optype == "FC_Prune": op = cur.op wcsr, iw, jw = maskNallocate(op.input[1]) bias_name = op.input[3] # TODO(wyiming): create a new Op here current_blob = model.FC_Sparse(current_blob, cur.op.output[0] + "_Sparse", wcsr, iw, jw, bias_name) sps_op = model.net.Proto().op[-1] sps_node = NetDefNode(cur.op.output[0] + "_Sparse", "FC_Sparse", pre_new, sps_op) sps_node.visited = True pre_new = sps_node if cur.optype == "Relu": op = cur.op current_blob = model.Relu(current_blob, current_blob) rel_op = model.net.Proto().op[-1] rel_node = NetDefNode(str(current_blob), "Relu", pre_new, rel_op) rel_node.visited = True pre_new = rel_node cur.visited = True pre = cur flag = False for _, temp in cur.ops.iteritems(): if temp.optype == "Relu" or temp.optype == "FC_Prune": flag = True cur = temp if not flag: # assume that there is only 1 output that is not PrintOP cur = cur.ops.itervalues().next() cur.deleteInput(pre) print("No FC/RElu children") print(cur.op.type) break # 3. add trans after this chain like 1. 
current_blob = model.Transpose(current_blob, pre.op.output[0]) trans_op = model.net.Proto().op[-1] trans_node = NetDefNode(str(current_blob), "Transpose", pre_new, trans_op) trans_node.visited = True cur.insertInput(trans_node) print(cur.prev) print(trans_node.ops) def Prune2Sparse(cur, id2node, name2id, ops, model): # Assume that FC and Relu takes in only 1 input; # If not raise warning if not cur.visited and cur.optype == "FC_Prune": transFCRelu(cur, id2node, name2id, ops, model) cur.visited = True for name, n in cur.ops.iteritems(): Prune2Sparse(n, id2node, name2id, ops, model) def net2list(net_root): """ Use topological order(BFS) to print the op of a net in a list """ bfs_queue = [] op_list = [] cur = net_root for _, n in cur.ops.iteritems(): bfs_queue.append(n) while bfs_queue: node = bfs_queue[0] bfs_queue = bfs_queue[1:] op_list.append(node.op) for _, n in node.ops.iteritems(): bfs_queue.append(n) return op_list def netbuilder(model): print("Welcome to model checker") proto = model.net.Proto() net_name2id = {} net_id2node = {} net_root = NetDefNode("net_root", "root", None) for op_id, op in enumerate(proto.op): if op.type == "Print": continue op_name = '%s/%s (op#%d)' % (op.name, op.type, op_id) \ if op.name else '%s (op#%d)' % (op.type, op_id) # print(op_name) op_node = NetDefNode(op_name, op.type, op=op) net_id2node[op_id] = op_node if_has_layer_input = False for input_name in op.input: if input_name not in net_name2id: # assume that un_occured name are non_layers # TODO: write a non-layer checker and log it continue op_node.insertInput(net_id2node[net_name2id[input_name]]) if_has_layer_input = True if not if_has_layer_input: op_node.insertInput(net_root) for output_name in op.output: net_name2id[output_name] = op_id return net_root, net_name2id, net_id2node
## @package net_construct_bench # Module caffe2.experiments.python.net_construct_bench from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import logging import time from caffe2.python import workspace, data_parallel_model from caffe2.python import cnn import caffe2.python.models.resnet as resnet ''' Simple benchmark that creates a data-parallel resnet-50 model and measurs the time. ''' logging.basicConfig() log = logging.getLogger("net_construct_bench") log.setLevel(logging.DEBUG) def AddMomentumParameterUpdate(train_model, LR): ''' Add the momentum-SGD update. ''' params = train_model.GetParams() assert(len(params) > 0) ONE = train_model.param_init_net.ConstantFill( [], "ONE", shape=[1], value=1.0, ) NEGONE = train_model.param_init_net.ConstantFill( [], 'NEGONE', shape=[1], value=-1.0, ) for param in params: param_grad = train_model.param_to_grad[param] param_momentum = train_model.param_init_net.ConstantFill( [param], param + '_momentum', value=0.0 ) # Update param_grad and param_momentum in place train_model.net.MomentumSGD( [param_grad, param_momentum, LR], [param_grad, param_momentum], momentum=0.9, nesterov=1 ) # Update parameters by applying the moment-adjusted gradient train_model.WeightedSum( [param, ONE, param_grad, NEGONE], param ) def Create(args): gpus = range(args.num_gpus) log.info("Running on gpus: {}".format(gpus)) # Create CNNModeLhelper object train_model = cnn.CNNModelHelper( order="NCHW", name="resnet50", use_cudnn=True, cudnn_exhaustive_search=False ) # Model building functions def create_resnet50_model_ops(model): [softmax, loss] = resnet.create_resnet50( model, "data", num_input_channels=3, num_labels=1000, label="label", ) model.Accuracy([softmax, "label"], "accuracy") return [loss] # SGD def add_parameter_update_ops(model, lr_scale): model.AddWeightDecay(1e-4) ITER = model.Iter("ITER") stepsz = int(30) LR = model.net.LearningRate( [ITER], "LR", base_lr=0.1, policy="step", stepsize=stepsz, gamma=0.1, ) AddMomentumParameterUpdate(model, LR) def add_image_input(model): pass start_time = time.time() # Create parallelized model data_parallel_model.Parallelize_GPU( train_model, input_builder_fun=add_image_input, forward_pass_builder_fun=create_resnet50_model_ops, param_update_builder_fun=add_parameter_update_ops, devices=gpus, ) ct = time.time() - start_time train_model.net._CheckLookupTables() log.info("Model create for {} gpus took: {} secs".format(len(gpus), ct)) def main(): # TODO: use argv parser = argparse.ArgumentParser( description="Caffe2: Benchmark for net construction" ) parser.add_argument("--num_gpus", type=int, default=1, help="Number of GPUs.") args = parser.parse_args() Create(args) if __name__ == '__main__': workspace.GlobalInit(['caffe2', '--caffe2_log_level=2']) import cProfile cProfile.run('main()', sort="cumulative")
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, assume, settings import numpy as np import time import os from caffe2.python import core, dyndep import caffe2.python.hypothesis_test_util as hu dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nnpack:nnpack_ops") np.random.seed(1) def benchmark(ws, net, warmups=5, iters=100): for _ in range(warmups): ws.run(net) plan = core.Plan("plan") plan.AddStep(core.ExecutionStep("test-step", net, iters)) before = time.time() ws.run(plan) after = time.time() print("Timing network, time taken per-iteration: {:.6f}ms".format(( after - before) / float(iters) * 1000.0)) return after - before def has_avx2(): import subprocess try: subprocess.check_output(["grep", "avx2", "/proc/cpuinfo"]) return True except subprocess.CalledProcessError: # grep exits with rc 1 on no matches return False @unittest.skipIf(not has_avx2(), "NNPACK requires AVX2") class NNPackOpsTest(hu.HypothesisTestCase): @given(stride=st.integers(1, 3), pad=st.integers(0, 2), kernel=st.integers(3, 5), size=st.integers(5, 10), input_channels=st.integers(1, 8), output_channels=st.integers(1, 8), batch_size=st.integers(1, 5)) def test_convolution_correctness(self, stride, pad, kernel, size, input_channels, output_channels, batch_size): assume(stride <= kernel) if stride != 1: assume(batch_size == 1) X = np.random.rand( batch_size, input_channels, size, size).astype(np.float32) - 0.5 w = np.random.rand( output_channels, input_channels, kernel, kernel).astype(np.float32)\ - 0.5 b = np.random.rand(output_channels).astype(np.float32) - 0.5 order = "NCHW" outputs = {} for engine in ["", "NNPACK"]: op = core.CreateOperator( "Conv", ["X", "w", "b"], ["Y"], stride=stride, kernel=kernel, pad=pad, order=order, kts="TUPLE", engine=engine, ) self.ws.create_blob("X").feed(X) self.ws.create_blob("w").feed(w) self.ws.create_blob("b").feed(b) self.ws.run(op) outputs[engine] = self.ws.blobs["Y"].fetch() np.testing.assert_allclose( outputs[""], outputs["NNPACK"], atol=1e-4, rtol=1e-4) @given(size=st.sampled_from([6, 8]), input_channels=st.integers(1, 8), batch_size=st.integers(1, 5)) def test_max_pool_correctness(self, size, input_channels, batch_size): X = np.random.rand( batch_size, input_channels, size, size).astype(np.float32) - 0.5 order = "NCHW" outputs = {} # only 2 * 2 stride and 2 * 2 pool is supported in NNPack now stride = 2 kernel = 2 # The pooling strategy of NNPack is different from caffe2 pooling pad = 0 for engine in ["", "NNPACK"]: op = core.CreateOperator( "MaxPool", ["X"], ["Y"], stride=stride, kernel=kernel, pad=pad, order=order, engine=engine, ) self.ws.create_blob("X").feed(X) self.ws.run(op) outputs[engine] = self.ws.blobs["Y"].fetch() np.testing.assert_allclose( outputs[""], outputs["NNPACK"], atol=1e-4, rtol=1e-4) @settings(timeout=3600) @unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark") @given(stride=st.integers(1, 1), pad=st.integers(0, 2), kernel=st.sampled_from([3, 5, 7]), size=st.integers(30, 90), input_channels=st.sampled_from([3, 64, 256]), output_channels=st.sampled_from([32, 96, 256]), batch_size=st.sampled_from([32, 64, 96, 128])) def test_timings(self, stride, pad, kernel, size, input_channels, output_channels, batch_size): assume(stride <= kernel) X = np.random.rand( batch_size, input_channels, size, size).astype(np.float32) - 0.5 w = np.random.rand(output_channels, input_channels, 
kernel, kernel).astype(np.float32) - 0.5 b = np.random.rand(output_channels).astype(np.float32) - 0.5 order = "NCHW" times = {} for engine in ["", "NNPACK"]: net = core.Net(engine + "_test") net.Conv( ["X", "W", "b"], "Y", order=order, kernel=kernel, stride=stride, pad=pad, kts="TUPLE", engine=engine, ) self.ws.create_blob("X").feed(X) self.ws.create_blob("W").feed(w) self.ws.create_blob("b").feed(b) self.ws.run(net) times[engine] = benchmark(self.ws, net) print("Speedup for NNPACK: {:.2f}".format( times[""] / times["NNPACK"]))
from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import numpy as np from caffe2.proto import caffe2_pb2 from caffe2.python import core, workspace, dyndep, test_util dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/warpctc:ctc_ops') workspace.GlobalInit(["python"]) def softmax(w): maxes = np.amax(w, axis=-1, keepdims=True) e = np.exp(w - maxes) dist = e / np.sum(e, axis=-1, keepdims=True) return dist class CTCOpsTest(test_util.TestCase): def verify_cost(self, device_option): alphabet_size = 5 N = 1 T = 2 inputs = np.asarray( [ [[0.1, 0.6, 0.1, 0.1, 0.1]], [[0.1, 0.1, 0.6, 0.1, 0.1]], ] ).reshape(T, N, alphabet_size).astype(np.float32) labels = np.asarray([1, 2]).astype(np.int32).reshape(T) label_lengths = np.asarray([2]).astype(np.int32).reshape(N) input_lengths = np.asarray([T]).astype(np.int32) net = core.Net("test-net") net.CTC(["inputs", "labels", "label_lengths", "input_lengths"], ["inputs_grad", "costs", "workspace"], device_option=device_option) self.ws.create_blob("inputs").feed(inputs, device_option=device_option) self.ws.create_blob("labels").feed(labels) self.ws.create_blob("label_lengths").feed(label_lengths) self.ws.create_blob("input_lengths").feed(input_lengths) self.ws.run(net) probs = softmax(inputs) expected = probs[0, 0, 1] * probs[1, 0, 2] self.assertEqual(self.ws.blobs["costs"].fetch().shape, (N,)) self.assertEqual(self.ws.blobs["costs"].fetch().dtype, np.float32) cost = self.ws.blobs["costs"].fetch()[0] print(cost) self.assertAlmostEqual(np.exp(-cost), expected) def test_ctc_cost_cpu(self): self.verify_cost( caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)) def test_ctc_cost_gpu(self): self.verify_cost( caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA, cuda_gpu_id=0))
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import unittest import hypothesis.strategies as st from hypothesis import given, assume import numpy as np import time import os from caffe2.proto import caffe2_pb2 from caffe2.python import core, workspace, muji, dyndep import caffe2.python.hypothesis_test_util as hu np.random.seed(1) dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/nccl:nccl_ops') def gpu_device(i): device_option = caffe2_pb2.DeviceOption() device_option.device_type = caffe2_pb2.CUDA device_option.cuda_gpu_id = i return device_option def benchmark(ws, net, warmups=5, iters=100): for _ in range(warmups): ws.run(net) plan = core.Plan("plan") plan.AddStep(core.ExecutionStep("test-step", net, iters)) before = time.time() ws.run(plan) after = time.time() print("Timing network, time taken per-iteration: {:.6f}ms".format(( after - before) / float(iters) * 1000.0)) return after - before @unittest.skipIf(not workspace.has_gpu_support, "NCCL only on GPU") class NCCLOpsTest(hu.HypothesisTestCase): @given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()), m=st.integers(min_value=1, max_value=1000), in_place=st.booleans()) def test_nccl_allreduce(self, n, m, in_place): xs = [np.random.randn(m).astype(np.float32) for i in range(n)] inputs = [str("x_{}".format(i)) for i in range(n)] prefix = "" if in_place else "o" outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)] op = core.CreateOperator("NCCLAllreduce", inputs, outputs) input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)} def allreduce(*args): assert len(args) == n output = np.sum(args, axis=0) return [output for _ in range(n)] outputs = self.assertReferenceChecks( hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)], allreduce, input_device_options) for output in outputs: np.testing.assert_array_equal(outputs[0], output) self.assertEqual(outputs[0].tobytes(), output.tobytes()) @given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()), m=st.integers(min_value=1, max_value=1000), root=st.integers(min_value=0, max_value=workspace.NumCudaDevices() - 1)) def test_nccl_broadcast(self, n, m, root): assume(root < n) xs = [np.random.randn(m).astype(np.float32) for i in range(n)] inputs = [str("x_{}".format(i)) for i in range(n)] op = core.CreateOperator("NCCLBroadcast", inputs, inputs, root=root) input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)} def broadcast(*args): assert len(args) == n return [args[root] for _ in range(n)] self.assertReferenceChecks( hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)], broadcast, input_device_options) @given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()), m=st.integers(min_value=1, max_value=1000), # NCCL Reduce seems to deadlock for non-zero roots. 
root=st.integers(min_value=0, max_value=0), in_place=st.booleans()) def test_nccl_reduce(self, n, m, root, in_place): assume(in_place is False or root == 0) xs = [np.random.randn(m).astype(np.float32) for i in range(n)] inputs = [str("x_{}".format(i)) for i in range(n)] op = core.CreateOperator( "NCCLReduce", inputs, inputs[root] if in_place else b"o", root=root) input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)} def reduce(*args): assert len(args) == n return [np.sum(args, axis=0)] self.assertReferenceChecks( hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)], reduce, input_device_options) @given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()), m=st.integers(min_value=1, max_value=1000)) def test_nccl_allgather(self, n, m): xs = [np.random.randn(m).astype(np.float32) for i in range(n)] inputs = [str("x_{}".format(i)) for i in range(n)] outputs = [str("o_{}".format(i)) for i in range(n)] op = core.CreateOperator("NCCLAllGather", inputs, outputs) input_device_options = {n: gpu_device(i) for i, n in enumerate(inputs)} def allgather(*args): assert len(args) == n return [np.stack(args, axis=0) for _ in range(n)] outputs = self.assertReferenceChecks( hu.gpu_do, op, [xs[i] for i, _ in enumerate(inputs)], allgather, input_device_options) for output in outputs: np.testing.assert_array_equal(outputs[0], output) self.assertEqual(outputs[0].tobytes(), output.tobytes()) @given(n=st.integers(min_value=2, max_value=workspace.NumCudaDevices()), m=st.integers(min_value=100000, max_value=100000), iters=st.integers(min_value=1, max_value=100), net_type=st.sampled_from(["dag", "async_dag", "simple"])) def test_nccl_sync(self, n, m, iters, net_type): inputs = [str("x_{}".format(i)) for i in range(n)] extra_inputs = [str("xe_{}".format(i)) for i in range(n)] net = core.Net("asdf") net.Proto().type = net_type net.Proto().num_workers = n for i in range(n): net.ConstantFill([], inputs[i], shape=[m], value=0.0, device_option=gpu_device(i)) net.ConstantFill([], extra_inputs[i], shape=[m], value=1.0, device_option=gpu_device(i)) for _ in range(iters): net.Sum([inputs[i], extra_inputs[i]], [inputs[i]], device_option=gpu_device(i)) net.NCCLReduce(inputs, [inputs[0]], device_option=gpu_device(0)) self.ws.run(net) np.testing.assert_array_equal( self.ws.blobs[inputs[0]].fetch(), np.full(shape=(m,), fill_value=iters * n, dtype=np.float32)) @unittest.skipIf(not os.environ.get("CAFFE2_BENCHMARK"), "Benchmark") def test_timings(self): for n in range(2, workspace.NumCudaDevices()): for in_place in [False, True]: xs = [np.random.randn(1e7).astype(np.float32) for i in range(n)] inputs = [str("x_{}".format(i)) for i in range(n)] prefix = "" if in_place else "o" outputs = [str("{}x_{}".format(prefix, i)) for i in range(n)] net = core.Net("test") net.NCCLAllreduce(inputs, outputs) net.RunAllOnGPU() for i in range(n): self.ws.create_blob(inputs[i]).feed(xs[i], gpu_device(i)) self.ws.run(net) net_time = benchmark(self.ws, net) vanilla = core.Net("vanilla") muji.Allreduce(vanilla, inputs) vanilla_time = benchmark(self.ws, vanilla) print("Speedup for NCCL: {:.2f}".format( vanilla_time / net_time))
#!/usr/bin/env python from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from hypothesis import given import hypothesis.strategies as st from multiprocessing import Process, Queue import numpy as np import os import pickle import tempfile import shutil from caffe2.python import core, workspace, dyndep import caffe2.python.hypothesis_test_util as hu dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:file_store_handler_ops") dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:redis_store_handler_ops") dyndep.InitOpsLibrary("@/caffe2/caffe2/distributed:store_ops") dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops") dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops_gpu") op_engine = 'GLOO' class TemporaryDirectory: def __enter__(self): self.tmpdir = tempfile.mkdtemp() return self.tmpdir def __exit__(self, type, value, traceback): shutil.rmtree(self.tmpdir) class TestCase(hu.HypothesisTestCase): test_counter = 0 sync_counter = 0 def run_test_locally(self, fn, device_option=None, **kwargs): # Queue for assertion errors on subprocesses queue = Queue() # Capture any exception thrown by the subprocess def run_fn(*args, **kwargs): try: with core.DeviceScope(device_option): fn(*args, **kwargs) workspace.ResetWorkspace() except Exception as ex: queue.put(ex) # Start N processes in the background procs = [] for i in range(kwargs['comm_size']): kwargs['comm_rank'] = i proc = Process( target=run_fn, kwargs=kwargs) proc.start() procs.append(proc) # Test complete, join background processes while len(procs) > 0: proc = procs.pop(0) while proc.is_alive(): proc.join(1) # Raise exception if we find any. # Note that the following is executed ALSO after # the last process was joined, so if ANY exception # was raised, it will be re-raised here. if not queue.empty(): raise queue.get() def run_test_distributed(self, fn, device_option=None, **kwargs): comm_rank = os.getenv('COMM_RANK') self.assertIsNotNone(comm_rank) comm_size = os.getenv('COMM_SIZE') self.assertIsNotNone(comm_size) kwargs['comm_rank'] = int(comm_rank) kwargs['comm_size'] = int(comm_size) with core.DeviceScope(device_option): fn(**kwargs) workspace.ResetWorkspace() def create_common_world(self, comm_rank, comm_size, tmpdir=None): store_handler = "store_handler" # If REDIS_HOST is set, use RedisStoreHandler for rendezvous. 
redis_host = os.getenv("REDIS_HOST") redis_port = int(os.getenv("REDIS_PORT", 6379)) if redis_host is not None: workspace.RunOperatorOnce( core.CreateOperator( "RedisStoreHandlerCreate", [], [store_handler], prefix=str(TestCase.test_counter) + "/", host=redis_host, port=redis_port)) else: workspace.RunOperatorOnce( core.CreateOperator( "FileStoreHandlerCreate", [], [store_handler], path=tmpdir)) common_world = "common_world" workspace.RunOperatorOnce( core.CreateOperator( "CreateCommonWorld", [store_handler], [common_world], size=comm_size, rank=comm_rank, sync=True, engine=op_engine)) return (store_handler, common_world) def synchronize(self, store_handler, value, comm_rank=None): TestCase.sync_counter += 1 blob = "sync_{}".format(TestCase.sync_counter) if comm_rank == 0: workspace.FeedBlob(blob, pickle.dumps(value)) workspace.RunOperatorOnce( core.CreateOperator( "StoreSet", [store_handler, blob], [])) else: workspace.RunOperatorOnce( core.CreateOperator( "StoreGet", [store_handler], [blob])) return pickle.loads(workspace.FetchBlob(blob)) def _test_broadcast(self, comm_rank=None, comm_size=None, blob_size=None, num_blobs=None, tmpdir=None, use_float16=False, ): store_handler, common_world = self.create_common_world( comm_rank=comm_rank, comm_size=comm_size, tmpdir=tmpdir) blob_size = self.synchronize( store_handler, blob_size, comm_rank=comm_rank) num_blobs = self.synchronize( store_handler, num_blobs, comm_rank=comm_rank) for i in range(comm_size): blobs = [] for j in range(num_blobs): blob = "blob_{}".format(j) offset = (comm_rank * num_blobs) + j value = np.full(blob_size, offset, np.float16 if use_float16 else np.float32) workspace.FeedBlob(blob, value) blobs.append(blob) net = core.Net("broadcast") net.Broadcast( [common_world] + blobs, blobs, root=i, engine=op_engine) workspace.CreateNet(net) workspace.RunNet(net.Name()) for j in range(num_blobs): np.testing.assert_array_equal( workspace.FetchBlob(blobs[j]), i * num_blobs) # Run the net a few more times to check the operator # works not just the first time it's called for _tmp in range(4): workspace.RunNet(net.Name()) @given(comm_size=st.integers(min_value=2, max_value=8), blob_size=st.integers(min_value=1e3, max_value=1e6), num_blobs=st.integers(min_value=1, max_value=4), device_option=st.sampled_from([hu.cpu_do]), use_float16=st.booleans()) def test_broadcast(self, comm_size, blob_size, num_blobs, device_option, use_float16): TestCase.test_counter += 1 if os.getenv('COMM_RANK') is not None: self.run_test_distributed( self._test_broadcast, blob_size=blob_size, num_blobs=num_blobs, use_float16=use_float16, device_option=device_option) else: with TemporaryDirectory() as tmpdir: self.run_test_locally( self._test_broadcast, comm_size=comm_size, blob_size=blob_size, num_blobs=num_blobs, device_option=device_option, tmpdir=tmpdir, use_float16=use_float16) def _test_allreduce(self, comm_rank=None, comm_size=None, blob_size=None, num_blobs=None, tmpdir=None, use_float16=False ): store_handler, common_world = self.create_common_world( comm_rank=comm_rank, comm_size=comm_size, tmpdir=tmpdir) blob_size = self.synchronize( store_handler, blob_size, comm_rank=comm_rank) num_blobs = self.synchronize( store_handler, num_blobs, comm_rank=comm_rank) blobs = [] for i in range(num_blobs): blob = "blob_{}".format(i) value = np.full(blob_size, (comm_rank * num_blobs) + i, np.float16 if use_float16 else np.float32) workspace.FeedBlob(blob, value) blobs.append(blob) net = core.Net("allreduce") net.Allreduce( [common_world] + blobs, blobs, 
engine=op_engine) workspace.CreateNet(net) workspace.RunNet(net.Name()) for i in range(num_blobs): np.testing.assert_array_equal( workspace.FetchBlob(blobs[i]), (num_blobs * comm_size) * (num_blobs * comm_size - 1) / 2) # Run the net a few more times to check the operator # works not just the first time it's called for _tmp in range(4): workspace.RunNet(net.Name()) @given(comm_size=st.integers(min_value=2, max_value=8), blob_size=st.integers(min_value=1e3, max_value=1e6), num_blobs=st.integers(min_value=1, max_value=4), device_option=st.sampled_from([hu.cpu_do]), use_float16=st.booleans()) def test_allreduce(self, comm_size, blob_size, num_blobs, device_option, use_float16): TestCase.test_counter += 1 if os.getenv('COMM_RANK') is not None: self.run_test_distributed( self._test_allreduce, blob_size=blob_size, num_blobs=num_blobs, use_float16=use_float16, device_option=device_option) else: with TemporaryDirectory() as tmpdir: self.run_test_locally( self._test_allreduce, comm_size=comm_size, blob_size=blob_size, num_blobs=num_blobs, device_option=device_option, tmpdir=tmpdir, use_float16=use_float16) if __name__ == "__main__": import unittest unittest.main()
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, dyndep import caffe2.python.hypothesis_test_util as hu from hypothesis import given import hypothesis.strategies as st import numpy as np import os import unittest try: from libfb import parutil except ImportError as e: # If libfb not found, skip all tests in this file raise unittest.SkipTest(str(e)) core.GlobalInit(["python", "--caffe2_log_level=0"]) dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/torch:torch_ops') RUNTIME = parutil.get_runtime_path() if 'LUA_PATH' not in os.environ: os.environ['LUA_PATH'] = ";".join([ os.path.join(RUNTIME, '_lua', '?.lua'), os.path.join(RUNTIME, '_lua', '?', 'init.lua'), ]) os.environ['LUA_CPATH'] = os.path.join(RUNTIME, '_lua', '?.so') class TorchOpTest(hu.HypothesisTestCase): @given(n=st.integers(min_value=1, max_value=10), i=st.integers(min_value=1, max_value=10), h=st.integers(min_value=2, max_value=10)) def test_feed(self, n, i, h): op = core.CreateOperator( "Torch", ["x", "W", "b"], ["y"], init=b"nn.Linear({i}, {h})".format(h=h, i=i), num_inputs=1, num_params=2, num_outputs=1 ) x = np.random.randn(n, i).astype(np.float32) W = np.random.randn(h, i).astype(np.float32) b = np.random.randn(h).astype(np.float32) self.ws.create_blob("x").feed(x) self.ws.create_blob("W").feed(W) self.ws.create_blob("b").feed(b) self.ws.run(op) y = self.ws.blobs["y"].fetch() print("y", y) y = y.reshape((n, h)) np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4) @given(n=st.integers(min_value=1, max_value=10), i=st.integers(min_value=1, max_value=10), h=st.integers(min_value=2, max_value=10), **hu.gcs) def test_gradient(self, n, i, h, gc, dc): op = core.CreateOperator( "Torch", ["x", "W", "b"], ["y"], init=b"nn.Linear({i}, {h})".format(h=h, i=i), num_inputs=1, num_params=2, num_outputs=1 ) x = np.random.randn(n, i).astype(np.float32) W = np.random.randn(h, i).astype(np.float32) b = np.random.randn(h).astype(np.float32) inputs = [x, W, b] self.assertDeviceChecks(dc, op, inputs, [0]) for i, _ in enumerate(inputs): self.assertGradientChecks(gc, op, inputs, i, [0]) @given(n=st.integers(min_value=1, max_value=10), i=st.integers(min_value=1, max_value=10), h=st.integers(min_value=2, max_value=10), iters=st.integers(min_value=1, max_value=100)) def test_iterated(self, n, i, h, iters): x = np.random.randn(n, i).astype(np.float32) W = np.random.randn(h, i).astype(np.float32) b = np.random.randn(h).astype(np.float32) self.ws.create_blob("x").feed(x) self.ws.create_blob("W").feed(W) self.ws.create_blob("b").feed(b) net = core.Net("op") net.Torch( ["x", "W", "b"], ["y"], init=b"nn.Linear({i}, {h})".format(h=h, i=i), num_inputs=1, num_params=2, num_outputs=1 ) print(net.Proto()) net_ = self.ws.create_net(net) for i in range(iters): if i % 1000 == 0: print(i) net_.run() y = self.ws.blobs["y"].fetch() y = y.reshape((n, h)) np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4) def test_leakage_torch(self): n = 1 i = 100 h = 1000 iters = 2000 x = np.random.randn(n, i).astype(np.float32) W = np.random.randn(h, i).astype(np.float32) b = np.random.randn(h).astype(np.float32) self.ws.create_blob("x").feed(x) self.ws.create_blob("W").feed(W) self.ws.create_blob("b").feed(b) net = core.Net("op") net.Torch( ["x", "W", "b"], ["y"], init=b"nn.Linear({i}, {h})".format(h=h, i=i), num_inputs=1, num_params=2, num_outputs=1 ) net_ = self.ws.create_net(net) for i in range(iters): if i % 1000 
== 0: print(i) net_.run() y = self.ws.blobs["y"].fetch() y = y.reshape((n, h)) np.testing.assert_allclose(y, np.dot(x, W.T) + b, atol=1e-4, rtol=1e-4)
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from caffe2.python import core, dyndep import caffe2.python.hypothesis_test_util as hu from hypothesis import given import hypothesis.strategies as st import numpy as np dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/torch:th_ops') try: dyndep.InitOpsLibrary('@/caffe2/caffe2/contrib/torch:th_ops_gpu') HAS_GPU = True except Exception as e: print("Exception loading Torch GPU library: ", e) # GPU import can fail, as Torch is not using cuda-lazy HAS_GPU = False pass class THOpsTest(hu.HypothesisTestCase): @given(X=hu.tensor(), alpha=st.floats(min_value=0.1, max_value=2.0), in_place=st.booleans(), **(hu.gcs if HAS_GPU else hu.gcs_cpu_only)) def test_elu(self, X, alpha, in_place, gc, dc): op = core.CreateOperator( "ELU", ["X"], ["X" if in_place else "Y"], engine="THNN", alpha=alpha) self.assertDeviceChecks(dc, op, [X], [0]) def elu(X): Y = np.copy(X) Y[Y <= 0] = (np.exp(Y[Y <= 0]) - 1) * alpha return (Y,) self.assertReferenceChecks(gc, op, [X], elu) # Avoid the nonlinearity at 0 for gradient checker. X[X == 0] += 0.2 X[np.abs(X) < 0.2] += np.sign(X[np.abs(X) < 0.2]) assert len(X[np.abs(X) < 0.2]) == 0 self.assertGradientChecks(gc, op, [X], 0, [0])
## @package utils # Module caffe2.contrib.perf_contbld.utils from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import getpass import time from collections import defaultdict import numpy as np from caffe2.proto import prof_dag_pb2 from scubadata import Sample, ScubaData from rfe import client as rfe_client from RockfortExpress import RockfortExpress as rfe from libfb.employee import unixname_to_uid class OperatorStatsContainer(): ''' This class works as a wrapper to log ProfDAGNet statistics to Scuba ''' def __init__(self, stats_proto): self.op_stats = prof_dag_pb2.ProfDAGProtos() self.op_stats.ParseFromString(stats_proto) def Print(self): print("Time per operator type:") for stat in self.op_stats.stats: print("{:12.6f} ms/iter [{:10.6f} ms/iter ]\t{}".format( stat.mean, stat.stddev, stat.name )) def _scuba_query(self, sql): user_name = getpass.getuser() user_id = unixname_to_uid(user_name) query = rfe.QueryCommon( user_name=user_name, user_id=user_id, ) return rfe_client.getClient().querySQL(query, sql) def _query(self, model, num_days): ''' Given a model, returns the op stats ''' cur_unix_epoch = int(time.time()) sql = """ SELECT operator, op_mean, op_stddev FROM caffe2_op_runs WHERE model_name = \'{}\' and time >= {} """.format(model, cur_unix_epoch - num_days * 86400) result = self._scuba_query(sql) if not result.headers: return headers = result.headers op_idx = headers.index('operator') mean_idx = headers.index('op_mean') # dict (key, (value1, value2,...)) # key: "operator" value: "op_mean" d = defaultdict(list) for row in result.value: d[row[op_idx]].append(float(row[mean_idx])) return d def ReadOpRuns(self, model, num_days): print("Reading op stats for model {}".format(model)) return self._query(model, num_days) def WriteOpRuns(self, model): print("Logging to scuba for model {}".format(model)) scuba = ScubaData("caffe2_op_runs") sample = Sample() sample.add_normal("model_name", model) for stat in self.op_stats.stats: sample.add_normal("operator", stat.name) sample.add_double("op_mean", stat.mean) sample.add_double("op_stddev", stat.stddev) scuba.add_sample(sample) def CheckRegression( self, model, num_days, min_exec_time, std_coefficient, mean_coefficient ): print("Regression check") op_runs = self.ReadOpRuns(model, num_days) or defaultdict(list) regression = False op_list = {} # Iterate over current run's operator timing for stat in self.op_stats.stats: times = op_runs[stat.name] print("{} execution times: {}".format(stat.name, times)) mean = np.mean(times) std = np.std(times) if not times or stat.mean < min_exec_time: continue if stat.mean > (std_coefficient * std + mean) and stat.mean > (mean_coefficient * mean): regression = True op_list[stat.name] = str((stat.mean - mean) * 100 / mean) + "%" print( "\tregression for {}: current runtime {} ms". format(stat.name, stat.mean) ) if not regression: # Write the operator execution times to caffe2_op_runs table self.WriteOpRuns(model) else: raise Exception(op_list)
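# A minimal standalone sketch (not part of the module above) of the regression
# criterion used by OperatorStatsContainer.CheckRegression: a current per-op
# runtime is flagged only when it exceeds both a standard-deviation band and a
# multiplicative threshold over the historical mean. The function name and the
# example numbers below are hypothetical illustrations, not Scuba APIs.
import numpy as np


def is_regression(current_mean_ms, history_ms,
                  min_exec_time=1.0, std_coefficient=3.0, mean_coefficient=1.2):
    if not history_ms or current_mean_ms < min_exec_time:
        return False  # no history, or the op is too fast to care about
    mean = np.mean(history_ms)
    std = np.std(history_ms)
    return (current_mean_ms > std_coefficient * std + mean and
            current_mean_ms > mean_coefficient * mean)


# Example: 10 ms against a stable ~5 ms history is flagged; 5.2 ms is not.
assert is_regression(10.0, [5.0, 5.1, 4.9])
assert not is_regression(5.2, [5.0, 5.1, 4.9])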
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import unittest

from caffe2.proto import caffe2_pb2
from caffe2.python import core, dyndep, workspace

dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/prof:cuda_profile_ops")


class CudaProfileOpsTest(unittest.TestCase):
    @unittest.skipIf(workspace.NumCudaDevices() < 1, "Need at least 1 GPU")
    def test_run(self):
        net = core.Net("net")
        net.CudaProfileInitialize([], [], output="/tmp/cuda_profile_test")
        net.CudaProfileStart([], [])
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            net.ConstantFill([], ["out"], shape=[1, 3, 244, 244])
        net.CudaProfileStop([], [])

        workspace.CreateNet(net)
        workspace.RunNet(net)
## @package htrace_to_chrome # Module caffe2.contrib.prof.htrace_to_chrome from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import argparse import json import re import sys display_levels = ["network", "worker", "operator", "kernel"] def stop_display(limit, curr): return display_levels.index(limit) <= display_levels.index(curr) def build_trace_dict(f, start_time, end_time): """Creates a python dictionary that has trace ids as keys and the corresponding trace objects as values. Input: python file object that points to a file with traces, written by htrace-c's local file span receiver. The exact format shouldn't concern you if you're using htrace-c correctly. https://github.com/apache/incubator-htrace/blob/master/htrace-c. Returns: a tuple (trace_dic, root_list), where trace_dic is a dictionary containing all traces parsed from the input file object, and root_list is a list of traces from trace_dic which have no parents. Each value in trace_dic is in the form of another dictionary with the folowing keys: "begin" : timestamp of trace start time, microseconds "end" : timestamp of trace end time, microseconds "desc" : description of trace "parent" : trace id of parent trace "children": dictionary of child traces, in the same format as trace_dic """ trace_dic = {} root_list = [] for line in f: h = json.loads(line) if h["e"] < start_time or h["b"] > end_time: continue entry = {"begin": h["b"], "end": h["e"], "desc": h["d"]} if "p" not in h or len(h["p"]) == 0: root_list.append(entry) else: entry["parent"] = h["p"][0] trace_dic[h["a"]] = entry for k, v in trace_dic.items(): if "parent" not in v: continue parent = trace_dic[v["parent"]] if "children" not in parent: parent["children"] = {} parent["children"][k] = v return trace_dic, root_list def generate_chrome_trace(root_list, display): """Takes trace objects created by build_trace_dict() and generates a list of python dictionaries that can be written to a file in json format, which in turn can be given to Chrome tracing (chrome://tracing). Input: refer to root_list in build_trace_dict()'s return value. Output: list of dictionaries that can be directly written to a json file by json.dumps(). The dictionary format follows the JSON array format of Chrome tracing. Complete events ("ph": "X") are used to express most traces; such events will appear as horizontal blocks with lengths equal to the trace duration. Instant events ("ph": "i") are used for traces with many occurrencs which may make the trace graph unreadable; such events are shown as thin lines. 
""" ct = [] for root_idx, root in enumerate(root_list): # network-level spans ct.append({ "name": root["desc"], "ph": "X", "ts": root["begin"], "dur": root["end"] - root["begin"], "pid": root_idx, "tid": root_idx, "args": { "Start timestamp": root["begin"], "End timestamp": root["end"] } }) for _, v in root["children"].items(): # run-scopes and worker-scopes c = { "name": v["desc"], "ph": "X", "ts": v["begin"], "dur": v["end"] - v["begin"], "pid": root_idx, "args": { "Start timestamp": v["begin"], "End timestamp": v["end"] } } if "run-scope" in v["desc"]: c["tid"] = root_idx ct.append(c) else: if stop_display(display, "network"): continue m = re.search("(?<=worker-scope-)\d+", v["desc"]) wid = m.group(0) c["tid"] = wid ct.append(c) if stop_display(display, "worker") or "children" not in v: continue for k_op, v_op in v["children"].items(): # operator scopes ct.append({ "name": v_op["desc"], "ph": "X", "ts": v_op["begin"], "dur": v_op["end"] - v_op["begin"], "pid": root_idx, "tid": wid, "args": { "Start timestamp": v_op["begin"], "End timestamp": v_op["end"] } }) if stop_display(display, "operator") or "children" not in v_op: continue for idx, (k_gpu_op, v_gpu_op) in \ enumerate(sorted(v_op["children"].items(), key=lambda e: e[1]["begin"])): # kernel scopes if idx == 0: ct.append({ "name": v_op["desc"] + "-GPU", "ph": "X", "ts": v_gpu_op["begin"], "dur": v_gpu_op["end"] - v_gpu_op["begin"], "pid": root_idx, "tid": wid, "args": { "desc": "NEW OPERATOR", "Start timestamp": v_gpu_op["begin"], "End timestamp": v_gpu_op["end"] } }) ct.append({ "name": v_op["desc"] + "-GPU", "ph": "i", "ts": v_gpu_op["begin"], "pid": root_idx, "tid": wid, "args": { "desc": v_gpu_op["desc"] } }) return ct def get_argument_parser(): parser = argparse.ArgumentParser( description="Format conversion from HTrace to Chrome tracing.") parser.add_argument("htrace_log", type=str, help="input htrace span log file") parser.add_argument("--display", type=str, choices=display_levels, default="operator", help="deepest level of spans to display (default: operator)") parser.add_argument("--start_time", type=int, default=-1, help="do not display spans occuring before this timestamp") parser.add_argument("--end_time", type=int, default=sys.maxsize, help="do not display spans occuring after this timestamp") return parser def main(): args = get_argument_parser().parse_args() with open(args.htrace_log, "r") as f: trace_dic, root_list = build_trace_dict(f, args.start_time, args.end_time) ct = generate_chrome_trace(root_list, args.display) print("Writing chrome json file to %s.json" % args.htrace_log) print("Now import %s.json in chrome://tracing" % args.htrace_log) with open(args.htrace_log + ".json", "w") as f: f.write(json.dumps(ct)) if __name__ == '__main__': main()
## @package process
# Module doxygen.process
# Script to insert preamble for doxygen and regen API docs
from __future__ import print_function

import os
import shutil


def insert(originalfile, first_line, description):
    with open(originalfile, 'r') as f:
        f1 = f.readline()
        if f1.find(first_line) < 0:
            docs = first_line + description + f1
            with open('newfile.txt', 'w') as f2:
                f2.write(docs)
                f2.write(f.read())
            os.rename('newfile.txt', originalfile)
        else:
            print('already inserted')


# move up from /caffe2_root/doxygen
os.chdir("..")
os.system("git checkout caffe2/contrib/.")
os.system("git checkout caffe2/distributed/.")
os.system("git checkout caffe2/experiments/.")
os.system("git checkout caffe2/python/.")

for root, dirs, files in os.walk("."):
    for file in files:
        if (file.endswith(".py") and
                not file.endswith("_test.py") and
                not file.endswith("__.py")):
            filepath = os.path.join(root, file)
            print("filepath: " + filepath)
            directory = os.path.dirname(filepath)[2:]
            directory = directory.replace("/", ".")
            print("directory: " + directory)
            name = os.path.splitext(file)[0]
            first_line = "## @package " + name
            description = "\n# Module " + directory + "." + name + "\n"
            print(first_line, description)
            insert(filepath, first_line, description)

if os.path.exists("doxygen/doxygen-python"):
    print("Looks like you ran this before, so we need to cleanup those old files...")
    shutil.rmtree("doxygen/doxygen-python")
else:
    os.makedirs("doxygen/doxygen-python")

if os.path.exists("doxygen/doxygen-c"):
    print("Looks like you ran this before, so we need to cleanup those old files...")
    shutil.rmtree("doxygen/doxygen-c")
else:
    os.makedirs("doxygen/doxygen-c")

os.system("doxygen .Doxyfile-python")
os.system("doxygen .Doxyfile-c")
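# A small sketch of what insert() above does to a source file: it prepends the
# doxygen "@package" preamble only if it is not already there. The temporary
# file, its contents, and the package/module names are made up for
# illustration; the temp file is created in the current directory because
# insert() stages its output in './newfile.txt'.
import os
import tempfile

tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.py', dir='.', delete=False)
tmp.write("print('hello')\n")
tmp.close()

first_line = "## @package example"
description = "\n# Module demo.example\n"
insert(tmp.name, first_line, description)   # prepends the preamble
insert(tmp.name, first_line, description)   # second call prints 'already inserted'

with open(tmp.name) as f:
    assert f.readline().startswith("## @package example")
os.remove(tmp.name)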
## @package diagnose_protobuf
# Module scripts.diagnose_protobuf
"""Diagnoses the current protobuf situation.

Protocol buffer needs to be properly installed for Caffe2 to work, and
sometimes it is rather tricky. Specifically, we will need to have a
consistent version between C++ and python simultaneously. This is a
convenience script for one to quickly check if this is so on one's local
machine.

Usage:
    [set your environmental variables like PATH and PYTHONPATH]
    python scripts/diagnose_protobuf.py
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import re
from subprocess import Popen, PIPE

# Get python protobuf version.
try:
    import google.protobuf
    python_version = google.protobuf.__version__
    python_protobuf_installed = True
except ImportError:
    print("DEBUG: cannot find python protobuf install.")
    python_protobuf_installed = False

if os.name == 'nt':
    protoc_name = 'protoc.exe'
else:
    protoc_name = 'protoc'

# Get the version of the protoc binary, if it is present.
try:
    p = Popen([protoc_name, '--version'], stdout=PIPE, stderr=PIPE,
              universal_newlines=True)
    out, err = p.communicate()
except OSError:
    print('DEBUG: did not find protoc binary.')
    native_protobuf_installed = False
else:
    if p.returncode:
        print('DEBUG: protoc returned a non-zero return code.')
        print('DEBUG: out: ' + out)
        print('DEBUG: err: ' + err)
        native_protobuf_installed = False
    else:
        tmp = re.search(r'\d\.\d\.\d', out)
        if tmp:
            native_version = tmp.group(0)
            native_protobuf_installed = True
        else:
            print('DEBUG: cannot parse protoc version string.')
            print('DEBUG: out: ' + out)
            native_protobuf_installed = False

PYTHON_PROTOBUF_NOT_INSTALLED = """
You have not installed python protobuf. Protobuf is needed to run caffe2. You
can install protobuf via pip or conda (if you are using anaconda python).
"""

NATIVE_PROTOBUF_NOT_INSTALLED = """
You have not installed the protoc binary. Protoc is needed to compile Caffe2
protobuf source files. Depending on the platform you are on, you can install
protobuf via:
    (1) Mac: using homebrew and do brew install protobuf.
    (2) Linux: use apt and do apt-get install libprotobuf-dev
    (3) Windows: install from source, or from the releases here:
        https://github.com/google/protobuf/releases/
"""

# Only filled in once we know both versions, so the script does not fail with
# a NameError when one of the two installs is missing.
VERSION_MISMATCH = """
Your python protobuf is of version {py_ver} but your native protoc version is
of version {native_ver}. This will cause the installation to produce
incompatible protobuf files. This is bad in general - consider installing the
same version.
"""

# Now, give actual recommendations
if not python_protobuf_installed:
    print(PYTHON_PROTOBUF_NOT_INSTALLED)

if not native_protobuf_installed:
    print(NATIVE_PROTOBUF_NOT_INSTALLED)

if python_protobuf_installed and native_protobuf_installed:
    if python_version != native_version:
        print(VERSION_MISMATCH.format(
            py_ver=python_version, native_ver=native_version))
    else:
        print('All looks good.')
## @package get_python_cmake_flags
# Module scripts.get_python_cmake_flags
##############################################################################
# Use this script to find your preferred python installation.
##############################################################################
#
# You can use the following to build with your preferred version of python
# if your installation is not being properly detected by CMake.
#
#   mkdir -p build && cd build
#   cmake $(python ../scripts/get_python_libs.py) ..
#   make
#

from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function

from distutils import sysconfig
import os
import sys
import platform

version = platform.python_version()
if version[:3] != '2.7':
    print('ERROR: Python {version} is not officially supported yet.'
          .format(version=version), file=sys.stderr)
    exit(1)

# Flags to print to stdout
flags = ''

inc = sysconfig.get_python_inc()
lib = sysconfig.get_config_var("LIBDIR")

# macOS specific
if sys.platform == "darwin":
    lib = os.path.dirname(lib) + '/Python'

if os.path.isfile(lib):
    flags += '-DPYTHON_LIBRARY={lib} '.format(lib=lib)

if os.path.isfile(inc + '/Python.h'):
    flags += '-DPYTHON_INCLUDE_DIR={inc} '.format(inc=inc)

print(flags, end='')
import torch from setuptools import setup, find_packages import subprocess import sys if not torch.cuda.is_available(): print("\nWarning: Torch did not find available GPUs on this system.\n", "If your intention is to cross-compile, this is not an error.\n") print("torch.__version__ = ", torch.__version__) TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) if TORCH_MAJOR == 0 and TORCH_MINOR < 4: raise RuntimeError("Apex requires Pytorch 0.4 or newer.\n" + "The latest stable release can be obtained from https://pytorch.org/") cmdclass = {} ext_modules = [] if "--cpp_ext" in sys.argv or "--cuda_ext" in sys.argv: if TORCH_MAJOR == 0: raise RuntimeError("--cpp_ext requires Pytorch 1.0 or later, " "found torch.__version__ = {}".format(torch.__version__)) from torch.utils.cpp_extension import BuildExtension cmdclass['build_ext'] = BuildExtension if "--cpp_ext" in sys.argv: from torch.utils.cpp_extension import CppExtension sys.argv.remove("--cpp_ext") ext_modules.append( CppExtension('apex_C', ['csrc/flatten_unflatten.cpp',])) def check_cuda_torch_binary_vs_bare_metal(cuda_dir): raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True) output = raw_output.split() release_idx = output.index("release") + 1 release = output[release_idx].split(".") bare_metal_major = release[0] bare_metal_minor = release[1][0] torch_binary_major = torch.version.cuda.split(".")[0] torch_binary_minor = torch.version.cuda.split(".")[1] print("\nCompiling cuda extensions with") print(raw_output + "from " + cuda_dir + "/bin\n") if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor): raise RuntimeError("Cuda extensions are being compiled with a version of Cuda that does " + "not match the version used to compile Pytorch binaries. " + "Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda) + "In some cases, a minor-version mismatch will not cause later errors: " + "https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. " "You can try commenting out this check (at your own risk).") if "--cuda_ext" in sys.argv: from torch.utils.cpp_extension import CUDAExtension sys.argv.remove("--cuda_ext") if torch.utils.cpp_extension.CUDA_HOME is None: raise RuntimeError("--cuda_ext was requested, but nvcc was not found. Are you sure your environment has nvcc available? 
If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, only images whose names contain 'devel' will provide nvcc.") else: check_cuda_torch_binary_vs_bare_metal(torch.utils.cpp_extension.CUDA_HOME) # Set up macros for forward/backward compatibility hack around # https://github.com/pytorch/pytorch/commit/4404762d7dd955383acee92e6f06b48144a0742e version_ge_1_1 = [] if (TORCH_MAJOR > 1) or (TORCH_MAJOR == 1 and TORCH_MINOR > 0): version_ge_1_1 = ['-DVERSION_GE_1_1'] ext_modules.append( CUDAExtension(name='amp_C', sources=['csrc/amp_C_frontend.cpp', 'csrc/multi_tensor_scale_kernel.cu', 'csrc/multi_tensor_axpby_kernel.cu', 'csrc/multi_tensor_l2norm_kernel.cu', 'csrc/multi_tensor_lamb_stage_1.cu', 'csrc/multi_tensor_lamb_stage_2.cu'], extra_compile_args={'cxx': ['-O3'], 'nvcc':['-lineinfo', '-O3', # '--resource-usage', '--use_fast_math']})) ext_modules.append( CUDAExtension(name='fused_adam_cuda', sources=['csrc/fused_adam_cuda.cpp', 'csrc/fused_adam_cuda_kernel.cu'], extra_compile_args={'cxx': ['-O3',], 'nvcc':['-O3', '--use_fast_math']})) ext_modules.append( CUDAExtension(name='syncbn', sources=['csrc/syncbn.cpp', 'csrc/welford.cu'])) ext_modules.append( CUDAExtension(name='fused_layer_norm_cuda', sources=['csrc/layer_norm_cuda.cpp', 'csrc/layer_norm_cuda_kernel.cu'], extra_compile_args={'cxx': ['-O3'] + version_ge_1_1, 'nvcc':['-maxrregcount=50', '-O3', '--use_fast_math'] + version_ge_1_1})) setup( name='apex', version='0.1', packages=find_packages(exclude=('build', 'csrc', 'include', 'tests', 'dist', 'docs', 'tests', 'examples', 'apex.egg-info',)), description='PyTorch Extensions written by NVIDIA', ext_modules=ext_modules, cmdclass=cmdclass, )
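# A small sketch of the version parsing done by
# check_cuda_torch_binary_vs_bare_metal() above on `nvcc -V` output. The
# sample output string is a typical format, included only for illustration;
# the function consumes just the token after "release".
sample_nvcc_output = (
    "nvcc: NVIDIA (R) Cuda compiler driver\n"
    "Cuda compilation tools, release 10.1, V10.1.105\n"
)
tokens = sample_nvcc_output.split()
release = tokens[tokens.index("release") + 1].split(".")   # ["10", "1,"]
bare_metal_major, bare_metal_minor = release[0], release[1][0]
assert (bare_metal_major, bare_metal_minor) == ("10", "1")
# These two digits are then compared against torch.version.cuda's major.minor.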
# May help avoid undefined symbol errors:
# https://pytorch.org/cppdocs/notes/faq.html#undefined-symbol-errors-from-pytorch-aten
import torch

from . import parallel
from . import amp
from . import fp16_utils

# For optimizers and normalization there is no Python fallback.
# Absence of the cuda backend is a hard error.
#
# We want the errors from importing fused_adam_cuda or fused_layer_norm_cuda
# to be triggered here at import time rather than lazily: if someone has
# installed with --cpp_ext and --cuda_ext, they expect those backends to be
# available, and if for some reason they actually aren't (for example because
# they built improperly in a way that isn't revealed until load time), the
# error message should be timely and visible.
from . import optimizers
from . import normalization
import torch from torch.nn.modules.batchnorm import _BatchNorm from torch.nn import functional as F import syncbn from .optimized_sync_batchnorm_kernel import SyncBatchnormFunction class SyncBatchNorm(_BatchNorm): """ synchronized batch normalization module extented from `torch.nn.BatchNormNd` with the added stats reduction across multiple processes. :class:`apex.parallel.SyncBatchNorm` is designed to work with `DistributedDataParallel`. When running in training mode, the layer reduces stats across all processes to increase the effective batchsize for normalization layer. This is useful in applications where batch size is small on a given process that would diminish converged accuracy of the model. The model uses collective communication package from `torch.distributed`. When running in evaluation mode, the layer falls back to `torch.nn.functional.batch_norm` Args: num_features: :math:`C` from an expected input of size :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Can be set to ``None`` for cumulative moving average (i.e. simple average). Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters. Default: ``True`` track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``True`` process_group: pass in a process group within which the stats of the mini-batch is being synchronized. ``None`` for using default process group channel_last: a boolean value that when set to ``True``, this module take the last dimension of the input tensor to be the channel dimension. 
Default: False Examples:: >>> # channel first tensor >>> sbn = apex.parallel.SyncBatchNorm(100).cuda() >>> inp = torch.randn(10, 100, 14, 14).cuda() >>> out = sbn(inp) >>> inp = torch.randn(3, 100, 20).cuda() >>> out = sbn(inp) >>> # channel last tensor >>> sbn = apex.parallel.SyncBatchNorm(100, channel_last=True).cuda() >>> inp = torch.randn(10, 14, 14, 100).cuda() """ def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False): super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) self.process_group = process_group self.channel_last = channel_last def _specify_process_group(self, process_group): self.process_group = process_group def _specify_channel_last(self, channel_last): self.channel_last = channel_last def forward(self, input): # if input.dim() == 2, we switch to channel_last for efficient memory accessing channel_last = self.channel_last if input.dim() != 2 else True if not self.training and self.track_running_stats and not channel_last: # fall back to pytorch implementation for inference return F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps) else: exponential_average_factor = 0.0 if self.training and self.track_running_stats: self.num_batches_tracked += 1 if self.momentum is None: exponential_average_factor = 1.0 / float(self.num_batches_tracked) else: exponential_average_factor = self.momentum return SyncBatchnormFunction.apply(input, self.weight, self.bias, self.running_mean, self.running_var, self.eps, self.training or not self.track_running_stats, exponential_average_factor, self.process_group, channel_last)
import torch from torch.autograd.function import Function from apex.parallel import ReduceOp class SyncBatchnormFunction(Function): @staticmethod def forward(ctx, input, weight, bias, running_mean, running_variance, eps, process_group, world_size): torch.cuda.nvtx.range_push("sync_BN_fw") # transpose it to channel last to support broadcasting for input with different rank c_last_input = input.transpose(1, -1).contiguous().clone() ctx.save_for_backward(c_last_input, weight, bias, running_mean, running_variance) ctx.eps = eps ctx.process_group = process_group ctx.world_size = world_size c_last_input = (c_last_input - running_mean) / \ torch.sqrt(running_variance + eps) if weight is not None: c_last_input = c_last_input * weight if bias is not None: c_last_input = c_last_input + bias torch.cuda.nvtx.range_pop() return c_last_input.transpose(1, -1).contiguous().clone() @staticmethod def backward(ctx, grad_output): torch.cuda.nvtx.range_push("sync_BN_bw") # mini batch mean & var are calculated by forward path. # mu = 1./N*np.sum(h, axis = 0) # var = 1./N*np.sum((h-mu)**2, axis = 0) c_last_input, weight, bias, running_mean, running_variance = ctx.saved_tensors eps = ctx.eps process_group = ctx.process_group world_size = ctx.world_size grad_input = grad_weight = grad_bias = None num_features = running_mean.size()[0] # transpose it to channel last to support broadcasting for input with different rank torch.cuda.nvtx.range_push("carilli field") c_last_grad = grad_output.transpose(1, -1).contiguous() # squash non-channel dimension so we can easily calculate mean c_grad = c_last_grad.view(-1, num_features).contiguous() torch.cuda.nvtx.range_pop() # calculate grad_input if ctx.needs_input_grad[0]: # dh = gamma * (var + eps)**(-1. / 2.) * (dy - np.mean(dy, axis=0) # - (h - mu) * (var + eps)**(-1.0) * np.mean(dy * (h - mu), axis=0)) mean_dy = c_grad.mean(0) mean_dy_xmu = (c_last_grad * (c_last_input - running_mean)).view(-1, num_features).mean(0) if torch.distributed.is_initialized(): torch.distributed.all_reduce( mean_dy, ReduceOp.SUM, process_group) mean_dy = mean_dy / world_size torch.distributed.all_reduce( mean_dy_xmu, ReduceOp.SUM, process_group) mean_dy_xmu = mean_dy_xmu / world_size c_last_grad_input = (c_last_grad - mean_dy - (c_last_input - running_mean) / ( running_variance + eps) * mean_dy_xmu) / torch.sqrt(running_variance + eps) if weight is not None: c_last_grad_input.mul_(weight) grad_input = c_last_grad_input.transpose(1, -1).contiguous() # calculate grad_weight grad_weight = None if weight is not None and ctx.needs_input_grad[1]: # dgamma = np.sum((h - mu) * (var + eps)**(-1. / 2.) * dy, axis=0) grad_weight = ((c_last_input - running_mean) / torch.sqrt( running_variance + eps) * c_last_grad).view(-1, num_features).sum(0) # calculate grad_bias grad_bias = None if bias is not None and ctx.needs_input_grad[2]: # dbeta = np.sum(dy, axis=0) grad_bias = c_grad.sum(0) torch.cuda.nvtx.range_pop() return grad_input, grad_weight, grad_bias, None, None, None, None, None
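# A standalone numpy sketch of the batch-norm gradient formulas quoted in the
# comments of SyncBatchnormFunction.backward above (single process, no
# all_reduce). h is the input, dy the incoming gradient, and mu/var the batch
# statistics. This only illustrates the math; it is not the apex kernel.
import numpy as np


def batchnorm_backward_reference(h, dy, gamma, eps=1e-5):
    mu = h.mean(axis=0)
    var = h.var(axis=0)
    dbeta = dy.sum(axis=0)                                       # sum(dy)
    dgamma = ((h - mu) * (var + eps) ** (-0.5) * dy).sum(axis=0)  # sum((h-mu)/std * dy)
    dh = gamma * (var + eps) ** (-0.5) * (
        dy - dy.mean(axis=0)
        - (h - mu) * (var + eps) ** (-1.0) * (dy * (h - mu)).mean(axis=0))
    return dh, dgamma, dbeta


h = np.random.randn(8, 4).astype(np.float32)
dy = np.random.randn(8, 4).astype(np.float32)
dh, dgamma, dbeta = batchnorm_backward_reference(
    h, dy, gamma=np.ones(4, dtype=np.float32))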
import torch if hasattr(torch.distributed, 'ReduceOp'): ReduceOp = torch.distributed.ReduceOp elif hasattr(torch.distributed, 'reduce_op'): ReduceOp = torch.distributed.reduce_op else: ReduceOp = torch.distributed.deprecated.reduce_op from .distributed import DistributedDataParallel, Reducer # This is tricky because I'd like SyncBatchNorm to be exposed the same way # for both the cuda-enabled and python-fallback versions, and I don't want # to suppress the error information. try: import syncbn from .optimized_sync_batchnorm import SyncBatchNorm except ImportError as err: from .sync_batchnorm import SyncBatchNorm SyncBatchNorm.syncbn_import_error = err def convert_syncbn_model(module, process_group=None, channel_last=False): ''' Recursively traverse module and its children to replace all instances of ``torch.nn.modules.batchnorm._BatchNorm`` with :class:`apex.parallel.SyncBatchNorm`. All ``torch.nn.BatchNorm*N*d`` wrap around ``torch.nn.modules.batchnorm._BatchNorm``, so this function lets you easily switch to use sync BN. Args: module (torch.nn.Module): input module Example:: >>> # model is an instance of torch.nn.Module >>> import apex >>> sync_bn_model = apex.parallel.convert_syncbn_model(model) ''' mod = module if isinstance(module, torch.nn.modules.batchnorm._BatchNorm): mod = SyncBatchNorm(module.num_features, module.eps, module.momentum, module.affine, module.track_running_stats, process_group, channel_last=channel_last) mod.running_mean = module.running_mean mod.running_var = module.running_var if module.affine: mod.weight.data = module.weight.data.clone().detach() mod.bias.data = module.bias.data.clone().detach() for name, child in module.named_children(): mod.add_module(name, convert_syncbn_model(child, process_group=process_group, channel_last=channel_last)) # TODO(jie) should I delete model explicitly? del module return mod def create_syncbn_process_group(group_size): ''' Creates process groups to be used for syncbn of a give ``group_size`` and returns process group that current GPU participates in. ``group_size`` must divide the total number of GPUs (world_size). ``group_size`` of 0 would be considered as =world_size. In this case ``None`` will be returned. ``group_size`` of 1 would be equivalent to using non-sync bn, but will still carry the overhead. Args: group_size (int): number of GPU's to collaborate for sync bn Example:: >>> # model is an instance of torch.nn.Module >>> import apex >>> group = apex.parallel.create_syncbn_process_group(group_size) ''' if group_size==0: return None world_size = torch.distributed.get_world_size() assert(world_size >= group_size) assert(world_size % group_size == 0) group=None for group_num in (range(world_size//group_size)): group_ids = range(group_num*group_size, (group_num+1)*group_size) cur_group = torch.distributed.new_group(ranks=group_ids) if (torch.distributed.get_rank()//group_size == group_num): group = cur_group #can not drop out and return here, every process must go through creation of all subgroups assert(group is not None) return group
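# A quick sketch of how create_syncbn_process_group() above partitions ranks:
# with world_size GPUs and a given group_size, rank r lands in group
# r // group_size, and every process must participate in creating every
# subgroup. The helper and the numbers are purely illustrative.
def syncbn_group_assignment(world_size, group_size):
    assert world_size % group_size == 0
    return {r: r // group_size for r in range(world_size)}


# world_size=8, group_size=2 -> four groups of two consecutive ranks each.
assert syncbn_group_assignment(8, 2) == {0: 0, 1: 0, 2: 1, 3: 1,
                                         4: 2, 5: 2, 6: 3, 7: 3}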
import torch from torch.nn.modules.batchnorm import _BatchNorm from torch.nn import functional as F from .sync_batchnorm_kernel import SyncBatchnormFunction from apex.parallel import ReduceOp class SyncBatchNorm(_BatchNorm): """ synchronized batch normalization module extented from ``torch.nn.BatchNormNd`` with the added stats reduction across multiple processes. :class:`apex.parallel.SyncBatchNorm` is designed to work with ``DistributedDataParallel``. When running in training mode, the layer reduces stats across all processes to increase the effective batchsize for normalization layer. This is useful in applications where batch size is small on a given process that would diminish converged accuracy of the model. The model uses collective communication package from ``torch.distributed``. When running in evaluation mode, the layer falls back to ``torch.nn.functional.batch_norm``. Args: num_features: :math:`C` from an expected input of size :math:`(N, C, L)` or :math:`L` from input of size :math:`(N, L)` eps: a value added to the denominator for numerical stability. Default: 1e-5 momentum: the value used for the running_mean and running_var computation. Can be set to ``None`` for cumulative moving average (i.e. simple average). Default: 0.1 affine: a boolean value that when set to ``True``, this module has learnable affine parameters. Default: ``True`` track_running_stats: a boolean value that when set to ``True``, this module tracks the running mean and variance, and when set to ``False``, this module does not track such statistics and always uses batch statistics in both training and eval modes. Default: ``True`` Example:: >>> sbn = apex.parallel.SyncBatchNorm(100).cuda() >>> inp = torch.randn(10, 100, 14, 14).cuda() >>> out = sbn(inp) >>> inp = torch.randn(3, 100, 20).cuda() >>> out = sbn(inp) """ warned = False def __init__(self, num_features, eps=1e-5, momentum=0.1, affine=True, track_running_stats=True, process_group=None, channel_last=False): if channel_last == True: raise AttributeError("channel_last is not supported by primitive SyncBatchNorm implementation. Try install apex with `--cuda_ext` if channel_last is desired.") if not SyncBatchNorm.warned: print("Warning: using Python fallback for SyncBatchNorm, possibly because apex was installed without --cuda_ext. 
The exception raised when attempting to import the cuda backend was: ", self.syncbn_import_error) SyncBatchNorm.warned = True super(SyncBatchNorm, self).__init__(num_features, eps=eps, momentum=momentum, affine=affine, track_running_stats=track_running_stats) self.process_group = process_group def _specify_process_group(self, process_group): self.process_group = process_group def forward(self, input): torch.cuda.nvtx.range_push("sync_bn_fw_with_mean_var") mean = None var = None cast = None out = None # casting to handle mismatch input type to layer type if self.running_mean is not None: if self.running_mean.dtype != input.dtype: input = input.to(self.running_mean.dtype) cast = input.dtype elif self.weight is not None: if self.weight.dtype != input.dtype: input = input.to(self.weight.dtype) cast = input.dtype if not self.training and self.track_running_stats: # fall back to pytorch implementation for inference torch.cuda.nvtx.range_pop() out = F.batch_norm(input, self.running_mean, self.running_var, self.weight, self.bias, False, 0.0, self.eps) else: process_group = self.process_group world_size = 1 if not self.process_group: process_group = torch.distributed.group.WORLD self.num_batches_tracked += 1 with torch.no_grad(): channel_first_input = input.transpose(0, 1).contiguous() squashed_input_tensor_view = channel_first_input.view( channel_first_input.size(0), -1) # total number of data points for each variance entry. Used to calculate unbiased variance estimate m = None local_m = float(squashed_input_tensor_view.size()[1]) local_mean = torch.mean(squashed_input_tensor_view, 1) local_sqr_mean = torch.pow( squashed_input_tensor_view, 2).mean(1) if torch.distributed.is_initialized(): world_size = torch.distributed.get_world_size(process_group) torch.distributed.all_reduce( local_mean, ReduceOp.SUM, process_group) mean = local_mean / world_size torch.distributed.all_reduce( local_sqr_mean, ReduceOp.SUM, process_group) sqr_mean = local_sqr_mean / world_size m = local_m * world_size else: m = local_m mean = local_mean sqr_mean = local_sqr_mean # var(x) = E (( x - mean_x ) ** 2) # = 1 / N * sum ( x - mean_x ) ** 2 # = 1 / N * sum (x**2) - mean_x**2 var = sqr_mean - mean.pow(2) if self.running_mean is not None: self.running_mean = self.momentum * mean + \ (1 - self.momentum) * self.running_mean if self.running_var is not None: # as noted by the paper, we used unbiased variance estimate of the mini-batch # Var[x] = m / (m-1) * Eb (sample_variance) self.running_var = m / \ (m-1) * self.momentum * var + \ (1 - self.momentum) * self.running_var torch.cuda.nvtx.range_pop() out = SyncBatchnormFunction.apply(input, self.weight, self.bias, mean, var, self.eps, process_group, world_size) out = out.to(cast)
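# A small numpy check of the two variance identities used in the forward pass
# above: the biased batch variance is E[x^2] - E[x]^2, and the running
# variance uses the unbiased estimate m / (m - 1) * var. Purely illustrative.
import numpy as np

x = np.random.randn(1000).astype(np.float64)
m = float(x.size)

mean = x.mean()
sqr_mean = (x ** 2).mean()
var_biased = sqr_mean - mean ** 2

assert np.allclose(var_biased, x.var())                        # E[x^2] - E[x]^2
assert np.allclose(m / (m - 1) * var_biased, x.var(ddof=1))    # unbiased estimate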
import torch import torch.distributed as dist from torch.nn.modules import Module from torch.autograd import Variable from collections import OrderedDict from itertools import chain import copy import importlib from ..multi_tensor_apply import multi_tensor_applier imported_flatten_impl = False def import_flatten_impl(): global flatten_impl, unflatten_impl, imported_flatten_impl try: import apex_C flatten_impl = apex_C.flatten unflatten_impl = apex_C.unflatten except ImportError: print("Warning: apex was installed without --cpp_ext. Falling back to Python flatten and unflatten.") flatten_impl = torch._utils._flatten_dense_tensors unflatten_impl = torch._utils._unflatten_dense_tensors imported_flatten_impl = True def flatten(bucket): if not imported_flatten_impl: import_flatten_impl() return flatten_impl(bucket) def unflatten(coalesced, bucket): if not imported_flatten_impl: import_flatten_impl() return unflatten_impl(coalesced, bucket) # apply_dist_call requires that tensors in 'bucket' are all the same type. def apply_flat_dist_call(bucket, call, extra_args=None): coalesced = flatten(bucket) if extra_args is not None: call(coalesced, *extra_args) else: call(coalesced) if call is dist.all_reduce: coalesced /= dist.get_world_size() for buf, synced in zip(bucket, unflatten(coalesced, bucket)): buf.copy_(synced) def split_half_float_double(tensors): dtypes = ["torch.cuda.HalfTensor", "torch.cuda.FloatTensor", "torch.cuda.DoubleTensor"] buckets = [] for i, dtype in enumerate(dtypes): bucket = [t for t in tensors if t.type() == dtype] if bucket: buckets.append(bucket) return buckets def split_by_type(tensors): buckets = OrderedDict() for tensor in tensors: tp = tensor.type() if tp not in buckets: buckets[tp] = [] buckets[tp].append(tensor) return buckets # flat_dist_call organizes 'tensors' by type. def flat_dist_call(tensors, call, extra_args=None): buckets = split_by_type(tensors) for tp in buckets: bucket = buckets[tp] apply_flat_dist_call(bucket, call, extra_args) def extract_tensors(maybe_tensor, tensor_list): if torch.is_tensor(maybe_tensor): tensor_list.append(maybe_tensor) else: try: for item in maybe_tensor: extract_tensors(item, tensor_list) except TypeError: return class Reducer(object): """ :class:`apex.parallel.Reducer` is a simple class that helps allreduce a module's parameters across processes. :class:`Reducer` is intended to give the user additional control: Unlike :class:`DistributedDataParallel`, :class:`Reducer` will not automatically allreduce parameters during ``backward()``. Instead, :class:`Reducer` waits for the user to call ``<reducer_instance>.reduce()`` manually. This enables, for example, delaying the allreduce to be carried out every several iterations instead of every single iteration. Like :class:`DistributedDataParallel`, :class:`Reducer` averages any tensors it allreduces over the number of participating processes. :class:`Reducer` is designed to work with the upstream launch utility script ``torch.distributed.launch`` with ``--nproc_per_node <= number of gpus per node``. When used with this launcher, :class:`Reducer` assumes 1:1 mapping of processes to GPUs. It also assumes that your script calls ``torch.cuda.set_device(args.rank)`` before creating the model. Args: module_or_grads_list: Either a network definition (module) being run in multi-gpu/distributed mode, or an iterable of gradients to be reduced. 
If a module is passed in, the Reducer constructor will sync the parameters across processes (broadcasting from rank 0) to make sure they're all initialized with the same values. If a list of gradients (that came from some module) is passed in, the user is responsible for manually syncing that module's parameters at the beginning of training. """ def __init__(self, module_or_grads_list): if isinstance(module_or_grads_list, Module): self.module = module_or_grads_list flat_dist_call([param.data for param in self.module.parameters()], dist.broadcast, (0,) ) else: self.module = None self.grads = [] extract_tensors(module_or_grads_list, self.grads) def reduce(self): if self.module: grads = [param.grad.data for param in self.module.parameters() if param.grad is not None] flat_dist_call(grads, dist.all_reduce) else: flat_dist_call(self.grads, dist.all_reduce) class DistributedDataParallel(Module): """ :class:`apex.parallel.DistributedDataParallel` is a module wrapper that enables easy multiprocess distributed data parallel training, similar to ``torch.nn.parallel.DistributedDataParallel``. Parameters are broadcast across participating processes on initialization, and gradients are allreduced and averaged over processes during ``backward()``. :class:`DistributedDataParallel` is optimized for use with NCCL. It achieves high performance by overlapping communication with computation during ``backward()`` and bucketing smaller gradient transfers to reduce the total number of transfers required. :class:`DistributedDataParallel` is designed to work with the upstream launch utility script ``torch.distributed.launch`` with ``--nproc_per_node <= number of gpus per node``. When used with this launcher, :class:`DistributedDataParallel` assumes 1:1 mapping of processes to GPUs. It also assumes that your script calls ``torch.cuda.set_device(args.rank)`` before creating the model. https://github.com/NVIDIA/apex/tree/master/examples/simple/distributed shows detailed usage. https://github.com/NVIDIA/apex/tree/master/examples/imagenet shows another example that combines :class:`DistributedDataParallel` with mixed precision training. Args: module: Network definition to be run in multi-gpu/distributed mode. message_size (int, default=1e7): Minimum number of elements in a communication bucket. delay_allreduce (bool, default=False): Delay all communication to the end of the backward pass. This disables overlapping communication with computation. allreduce_trigger_params (list, optional, default=None): If supplied, should contain a list of parameters drawn from the model. Allreduces will be kicked off whenever one of these parameters receives its gradient (as opposed to when a bucket of size message_size is full). At the end of backward(), a cleanup allreduce to catch any remaining gradients will also be performed automatically. If allreduce_trigger_params is supplied, the message_size argument will be ignored. allreduce_always_fp32 (bool, default=False): Convert any FP16 gradients to FP32 before allreducing. This can improve stability for widely scaled-out runs. gradient_average (bool, default=True): Option to toggle whether or not DDP averages the allreduced gradients over processes. For proper scaling, the default value of True is recommended. gradient_predivide_factor (float, default=1.0): Allows perfoming the average of gradients over processes partially before and partially after the allreduce. Before allreduce: ``grads.mul_(1.0/gradient_predivide_factor)``. 
After allreduce: ``grads.mul_(gradient_predivide_factor/world size)``. This can reduce the stress on the dynamic range of FP16 allreduces for widely scaled-out runs. .. warning:: If ``gradient_average=False``, the pre-allreduce division (``grads.mul_(1.0/gradient_predivide_factor)``) will still be applied, but the post-allreduce gradient averaging (``grads.mul_(gradient_predivide_factor/world size)``) will be omitted. """ def __init__(self, module, message_size=10000000, delay_allreduce=False, shared_param=None, allreduce_trigger_params=None, retain_allreduce_buffers=False, allreduce_always_fp32=False, gradient_average=True, gradient_predivide_factor=1.0): super(DistributedDataParallel, self).__init__() # Backward/forward compatibility around # https://github.com/pytorch/pytorch/commit/540ef9b1fc5506369a48491af8a285a686689b36 and # https://github.com/pytorch/pytorch/commit/044d00516ccd6572c0d6ab6d54587155b02a3b86 if hasattr(dist, "get_backend"): self._backend = dist.get_backend() if hasattr(dist, "DistBackend"): self.backend_enum_holder = dist.DistBackend else: self.backend_enum_holder = dist.Backend else: self._backend = dist._backend self.backend_enum_holder = dist.dist_backend self.warn_on_half = True if self._backend == self.backend_enum_holder.GLOO else False if shared_param is not None: raise ValueError("shared_param is no longer supported as an option. It was misleadingly named from the start. It turns out overlapping communication with computation should work fine with shared parameters. If you still wish to delay communication to the end of the backward pass, use delay_allreduce=True|False instead.") self.world_size = float(dist.get_world_size()) self.retain_allreduce_buffers = retain_allreduce_buffers self.allreduce_always_fp32 = allreduce_always_fp32 self.gradient_average = gradient_average self.gradient_predivide_factor = gradient_predivide_factor self.custom_allreduce_triggers = False if allreduce_trigger_params is not None: if delay_allreduce: raise ValueError("Setting allreduce_trigger_params is only valid if delay_allreduce=False.") self.custom_allreduce_triggers = True self.allreduce_trigger_params = set([id(param) for param in allreduce_trigger_params]) self.delay_allreduce = delay_allreduce self.message_size = message_size self.reduction_stream = torch.cuda.Stream() self.reduction_event = torch.cuda.Event(enable_timing=False, blocking=False) self.module = module self._disable_allreduce = False if self._backend == self.backend_enum_holder.NCCL: for param in self.module.parameters(): assert param.is_cuda, "NCCL backend only supports model parameters to be on GPU." 
self.active_params = [] self.param_type_to_tmp_i = {"torch.cuda.HalfTensor" : 0, "torch.cuda.FloatTensor" : 1, "torch.cuda.DoubleTensor" : 2} if multi_tensor_applier.available: # TODO: I really need to centralize the C++ backed imports import amp_C self.multi_tensor_scale = amp_C.multi_tensor_scale self._overflow_buf = torch.cuda.IntTensor([0]) self.create_hooks() flat_dist_call([param.data for param in self.module.parameters()], dist.broadcast, (0,) ) def __setstate__(self, state): super(DistributedDataParallel, self).__setstate__(state) self.reduction_stream = torch.cuda.Stream() self.reduction_event = torch.cuda.Event(enable_timing=False, blocking=False) def __getstate__(self): attrs = copy.copy(self.__dict__) if self._backend != self.backend_enum_holder.NCCL: del attrs['self.reduction_stream'] del attrs['self.reduction_event'] return attrs def enable_allreduce(self): self._disable_allreduce = False def disable_allreduce(self): self._disable_allreduce = True # Broadcast rank 0's bucket structure across all processes, and have all processes # regenerate their bucket structures to match. def sync_bucket_structure(self): # Append leftover buckets for tmp_bucket in self.tmp_buckets: if len(tmp_bucket) > 0: self.active_i_buckets.append(tmp_bucket) self.num_buckets = len(self.active_i_buckets) self.bucket_sizes = [len(bucket) for bucket in self.active_i_buckets] info_tensor = torch.cuda.IntTensor([self.num_buckets] + self.bucket_sizes + list(chain(*self.active_i_buckets))) dist.broadcast(info_tensor, 0) info = [int(entry) for entry in info_tensor] self.num_buckets = info[0] self.bucket_sizes = info[1:self.num_buckets + 1] self.buckets = [[None for _ in range(self.bucket_sizes[i])] for i in range(self.num_buckets)] # Technically, active_i_buckets' work is done. But the information is still useful to # keep around. Therefore, refresh active_i_buckets based on rank 0 as well. self.active_i_buckets = [[None for _ in range(self.bucket_sizes[i])] for i in range(self.num_buckets)] flattened_buckets = info[self.num_buckets + 1:] flat_i = 0 for bucket_idx in range(self.num_buckets): for bucket_loc in range(self.bucket_sizes[bucket_idx]): param_i = flattened_buckets[flat_i] self.active_i_buckets[bucket_idx][bucket_loc] = param_i self.param_id_to_bucket[id(self.active_params[param_i])] = (bucket_idx, bucket_loc) flat_i += 1 def create_hooks(self): # Fallback hook that's only called at the end of backward. # Used if you deliberately want to delay allreduces to the end, or to refresh the # bucket structure that will be used to overlap communication with computation in later # iterations. def allreduce_params(): # Bucket record refresh if not self.delay_allreduce: if self.needs_refresh: self.sync_bucket_structure() self.needs_refresh = False self.allreduce_fallback() def overlapping_backward_epilogue(): self.reduction_stream.record_event(self.reduction_event) torch.cuda.current_stream().wait_event(self.reduction_event) # Sanity checks that all the buckets were kicked off if self.next_bucket != self.num_buckets: raise RuntimeError("In epilogue, next_bucket ({}) != num_buckets ({}). 
".format( self.next_bucket, self.num_buckets), "This probably indicates some buckets were not allreduced.") for actual, expected in zip(self.buckets_ready_size, self.bucket_sizes): if actual != expected: raise RuntimeError("Some param buckets were not allreduced.") self.grad_accs = [] for param in self.module.parameters(): if param.requires_grad: def wrapper(param): param_tmp = param.expand_as(param) grad_acc = param_tmp.grad_fn.next_functions[0][0] def allreduce_hook(*unused): if not self._disable_allreduce: if self.delay_allreduce or self.needs_refresh: # TODO: How do we want to handle multiple backward passes between # each forward, e.g., backward passes with retain_graph=True? # needs_refresh and callback_queued are both vulnerable states. if not self.delay_allreduce and self.needs_refresh: # Use the backward pass to build the bucket structure on the fly. active_i = self.param_id_to_active_i[id(param)] # Float, half, and double tensors are grouped into buckets separately. current_type = self.param_type_to_tmp_i[param.type()] self.tmp_buckets[current_type].append(active_i) ship_tmp_bucket = False if self.custom_allreduce_triggers: if id(param) in self.allreduce_trigger_params: ship_tmp_bucket = True else: self.tmp_numels[current_type] += param.numel() if self.tmp_numels[current_type] >= self.message_size: ship_tmp_bucket = True # To consider: If custom_allreduce_triggers are in use, ship all # tmp_buckets, not just tmp_buckets[current_type]. if ship_tmp_bucket: self.active_i_buckets.append(self.tmp_buckets[current_type]) self.tmp_buckets[current_type] = [] self.tmp_numels[current_type] = 0 if not self.callback_queued: Variable._execution_engine.queue_callback(allreduce_params) self.callback_queued = True else: if not self.callback_queued: Variable._execution_engine.queue_callback(overlapping_backward_epilogue) self.callback_queued = True self.comm_ready_buckets(param) grad_acc.register_hook(allreduce_hook) self.grad_accs.append(grad_acc) wrapper(param) def allreduce_bucket(self, bucket): tensor = flatten(bucket) tensor_to_allreduce = tensor if self.allreduce_always_fp32: tensor_to_allreduce = tensor.float() if self.gradient_predivide_factor != 1.0: tensor_to_allreduce.mul_(1./self.gradient_predivide_factor) dist.all_reduce(tensor_to_allreduce) if self.gradient_average: if self.gradient_predivide_factor != self.world_size: tensor_to_allreduce.mul_(self.gradient_predivide_factor/self.world_size) if self.allreduce_always_fp32 and tensor is not tensor_to_allreduce: tensor.copy_(tensor_to_allreduce) return tensor def allreduce_maybe_retain(self, bucket, bucket_idx=-1): allreduced = self.allreduce_bucket(bucket) if self.retain_allreduce_buffers: if self.allreduce_buffers[bucket_idx] is not None: raise RuntimeError("The backward pass is attempting to replace an already-filled " "allreduce buffer. This is almost certainly an error.") self.allreduce_buffers[bucket_idx] = allreduced else: if multi_tensor_applier.available: multi_tensor_applier( self.multi_tensor_scale, self._overflow_buf, [unflatten(allreduced, bucket), bucket], 1.0) else: for buf, synced in zip(bucket, unflatten(allreduced, bucket)): buf.copy_(synced) def allreduce_fallback(self): grads = [param.grad.data for param in self.module.parameters() if param.grad is not None] split_buckets = split_half_float_double(grads) # If retain_allreduce_buffers is True and delay_allreduce is False, # this will only be done during the first backward pass, ignored by the # training script, and overwritten in the next forward pass. 
So it's harmless. if self.retain_allreduce_buffers: self.allreduce_buffers = [None for _ in range(len(split_buckets))] for i, bucket in enumerate(split_buckets): allreduced = self.allreduce_maybe_retain(bucket, i) def comm_ready_buckets(self, param): # Need to do this in every hook for compatibility with Ruberry's streaming backward PR. # self.reduction_stream.wait_stream(torch.cuda.current_stream()) bucket_idx, bucket_loc = self.param_id_to_bucket[id(param)] if self.buckets[bucket_idx][bucket_loc] is not None: raise RuntimeError("The backward pass is attempting to replace an already-filled " "bucket slot. This is almost certainly an error.") self.buckets[bucket_idx][bucket_loc] = param.grad.data self.buckets_ready_size[bucket_idx] += 1 if self.buckets_ready_size[bucket_idx] == self.bucket_sizes[bucket_idx]: if bucket_idx == self.next_bucket: torch.cuda.current_stream().record_event(self.reduction_event) self.reduction_stream.wait_event(self.reduction_event) with torch.cuda.stream(self.reduction_stream): self.allreduce_maybe_retain(self.buckets[bucket_idx], bucket_idx) self.next_bucket += 1 # Reversing upstream's logic here, because we constructed our buckets based on # the order things were received during backward. if len(self.ready_buckets_not_reduced) > 0: sorted_todo = sorted(self.ready_buckets_not_reduced) for i in sorted_todo: # Nothing can be reduced now if i > self.next_bucket: break elif i == self.next_bucket: self.allreduce_maybe_retain(self.buckets[i], i) self.ready_buckets_not_reduced.remove(i) self.next_bucket += 1 else: raise ValueError("i should always be >= next_bucket") else: self.ready_buckets_not_reduced.add(bucket_idx) def forward(self, *inputs, **kwargs): result = self.module(*inputs, **kwargs) if not self._disable_allreduce: if not self.delay_allreduce: param_list = [param for param in self.module.parameters() if param.requires_grad] # Conditions under which to refresh self.record # Forward has the authority to set needs_refresh to True, but only allreduce_params # in backward has the authority to set needs_refresh to False. # Parentheses are not necessary for correct order of operations, but make the intent clearer. if ((not self.active_params) or (len(param_list) != len(self.active_params)) or any([param1 is not param2 for param1, param2 in zip(param_list, self.active_params)])): self.needs_refresh = True if self.needs_refresh: self.active_i_buckets = [] self.buckets = [] self.tmp_buckets = [[], [], []] # [running half, float, double buckets] self.tmp_numels = [0, 0, 0] self.bucket_sizes = [] self.param_id_to_active_i = {id(param) : i for i, param in enumerate(param_list)} self.param_id_to_bucket = {} else: self.buckets = [[None for _ in range(self.bucket_sizes[i])] for i in range(self.num_buckets)] self.buckets_ready_size = [0 for i in range(self.num_buckets)] if(self.retain_allreduce_buffers): self.allreduce_buffers = [None for _ in range(self.num_buckets)] self.next_bucket = 0 self.ready_buckets_not_reduced = set() self.active_params = param_list self.callback_queued = False return result
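# A minimal usage sketch for the DistributedDataParallel wrapper defined
# above. It assumes the script is launched with the upstream launcher, e.g.
#   python -m torch.distributed.launch --nproc_per_node=NUM_GPUS this_script.py
# which provides --local_rank and the env:// rendezvous variables. The tiny
# model and random data are invented for illustration only.
import argparse

import torch
import torch.distributed as dist

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--local_rank", type=int, default=0)
    args = parser.parse_args()

    dist.init_process_group(backend="nccl", init_method="env://")
    torch.cuda.set_device(args.local_rank)

    model = torch.nn.Linear(16, 4).cuda()
    model = DistributedDataParallel(model)   # broadcasts parameters from rank 0
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

    for _ in range(10):
        inputs = torch.randn(32, 16, device="cuda")
        targets = torch.randn(32, 4, device="cuda")
        optimizer.zero_grad()
        loss = torch.nn.functional.mse_loss(model(inputs), targets)
        loss.backward()                      # allreduces overlap with backward here
        optimizer.step()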
import torch from torch.autograd.function import Function import syncbn from apex.parallel import ReduceOp class SyncBatchnormFunction(Function): @staticmethod def forward(ctx, input, weight, bias, running_mean, running_variance, eps, track_running_stats = True, momentum = 1.0, process_group = None, channel_last = False): torch.cuda.nvtx.range_push("sync_BN_fw") input = input.contiguous() world_size = 0 mean = None var_biased = None inv_std = None var = None out = None count = None if track_running_stats: if channel_last: count = int(input.numel()/input.size(-1)) mean, var_biased = syncbn.welford_mean_var_c_last(input) else: count = int(input.numel()/input.size(1)) mean, var_biased = syncbn.welford_mean_var(input) if torch.distributed.is_initialized(): if not process_group: process_group = torch.distributed.group.WORLD world_size = torch.distributed.get_world_size(process_group) mean_all = torch.empty(world_size, mean.size(0), dtype=mean.dtype, device=mean.device) var_all = torch.empty(world_size, var_biased.size(0), dtype=var_biased.dtype, device=var_biased.device) mean_l = [mean_all.narrow(0, i, 1) for i in range(world_size)] var_l = [var_all.narrow(0, i, 1) for i in range(world_size)] torch.distributed.all_gather(mean_l, mean, process_group) torch.distributed.all_gather(var_l, var_biased, process_group) mean, var, inv_std = syncbn.welford_parallel(mean_all, var_all, count, eps) # TODO(Jie): should do fp32 math instead! else: inv_std = 1.0 / torch.sqrt(var_biased + eps) var = var_biased * (count) / (count-1) if count == 1 and world_size < 2: raise ValueError('Expected more than 1 value per channel when training, got input size{}'.format(input.size())) r_m_inc = mean if running_mean.dtype != torch.float16 else mean.half() r_v_inc = var if running_variance.dtype != torch.float16 else var.half() running_mean.data = running_mean.data * (1-momentum) + momentum*r_m_inc running_variance.data = running_variance.data * (1-momentum) + momentum*r_v_inc else: mean = running_mean.data inv_std = 1.0 / torch.sqrt(running_variance.data + eps) ctx.save_for_backward(input, weight, mean, inv_std) ctx.process_group = process_group ctx.channel_last = channel_last ctx.world_size = world_size if channel_last: out = syncbn.batchnorm_forward_c_last(input, mean, inv_std, weight, bias) else: out = syncbn.batchnorm_forward(input, mean, inv_std, weight, bias) torch.cuda.nvtx.range_pop() return out @staticmethod def backward(ctx, grad_output): grad_output = grad_output.contiguous() torch.cuda.nvtx.range_push("sync_BN_bw") # mini batch mean & var are calculated by forward path. # mu = 1./N*np.sum(h, axis = 0) # var = 1./N*np.sum((h-mu)**2, axis = 0) saved_input, weight, mean, inv_std = ctx.saved_tensors process_group = ctx.process_group channel_last = ctx.channel_last world_size = ctx.world_size grad_input = grad_weight = grad_bias = None # TODO(jie): why do I have to clone here? life time of grad_output? 
if channel_last: mean_dy, mean_dy_xmu, grad_weight, grad_bias = syncbn.reduce_bn_c_last(grad_output, saved_input, mean, inv_std, weight) else: mean_dy, mean_dy_xmu, grad_weight, grad_bias = syncbn.reduce_bn(grad_output, saved_input, mean, inv_std, weight) # calculate grad_input if ctx.needs_input_grad[0]: if torch.distributed.is_initialized(): torch.distributed.all_reduce( mean_dy, ReduceOp.SUM, process_group) mean_dy = mean_dy / world_size torch.distributed.all_reduce( mean_dy_xmu, ReduceOp.SUM, process_group) mean_dy_xmu = mean_dy_xmu / world_size if channel_last: grad_input = syncbn.batchnorm_backward_c_last(grad_output, saved_input, mean, inv_std, weight, mean_dy, mean_dy_xmu) else: grad_input = syncbn.batchnorm_backward(grad_output, saved_input, mean, inv_std, weight, mean_dy, mean_dy_xmu) if weight is None or not ctx.needs_input_grad[1]: grad_weight = None if weight is None or not ctx.needs_input_grad[2]: grad_bias = None torch.cuda.nvtx.range_pop() return grad_input, grad_weight, grad_bias, None, None, None, None, None, None, None
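For reference, the reduction performed by the syncbn.welford_parallel call in forward() can be written in plain PyTorch. The sketch below is a rough equivalent rather than the fused kernel, and it assumes every process contributed the same per-channel element count.

import torch

def combine_welford(mean_all, var_biased_all, count, eps):
    # mean_all, var_biased_all: (world_size, C) per-process channel statistics;
    # count: elements per channel on each process (assumed equal everywhere).
    world_size = mean_all.size(0)
    mean = mean_all.mean(dim=0)
    # Law of total variance with equal weights: E[var_i] + Var[mean_i].
    var_biased = (var_biased_all + (mean_all - mean) ** 2).mean(dim=0)
    total_count = world_size * count
    var = var_biased * total_count / (total_count - 1)   # unbiased, as used for running stats
    inv_std = torch.rsqrt(var_biased + eps)
    return mean, var, inv_std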
import torch from torch import nn from torch.autograd import Variable from torch.nn.parameter import Parameter class LARC(object): """ :class:`LARC` is a pytorch implementation of both the scaling and clipping variants of LARC, in which the ratio between gradient and parameter magnitudes is used to calculate an adaptive local learning rate for each individual parameter. The algorithm is designed to improve convergence of large batch training. See https://arxiv.org/abs/1708.03888 for calculation of the local learning rate. In practice it modifies the gradients of parameters as a proxy for modifying the learning rate of the parameters. This design allows it to be used as a wrapper around any torch.optim Optimizer. ``` model = ... optim = torch.optim.Adam(model.parameters(), lr=...) optim = LARC(optim) ``` It can even be used in conjunction with apex.fp16_utils.FP16_optimizer. ``` model = ... optim = torch.optim.Adam(model.parameters(), lr=...) optim = LARC(optim) optim = apex.fp16_utils.FP16_Optimizer(optim) ``` Args: optimizer: Pytorch optimizer to wrap and modify learning rate for. trust_coefficient: Trust coefficient for calculating the lr. See https://arxiv.org/abs/1708.03888 clip: Decides between clipping or scaling mode of LARC. If `clip=True` the learning rate is set to `min(optimizer_lr, local_lr)` for each parameter. If `clip=False` the learning rate is set to `local_lr*optimizer_lr`. eps: epsilon kludge to help with numerical stability while calculating adaptive_lr """ def __init__(self, optimizer, trust_coefficient=0.02, clip=True, eps=1e-8): self.param_groups = optimizer.param_groups self.optim = optimizer self.trust_coefficient = trust_coefficient self.eps = eps self.clip = clip def __getstate__(self): return self.optim.__getstate__() def __setstate__(self, state): self.optim.__setstate__(state) def __repr__(self): return self.optim.__repr__() def state_dict(self): return self.optim.state_dict() def load_state_dict(self, state_dict): self.optim.load_state_dict(state_dict) def zero_grad(self): self.optim.zero_grad() def add_param_group(self, param_group): self.optim.add_param_group( param_group) def step(self): with torch.no_grad(): weight_decays = [] for group in self.optim.param_groups: # absorb weight decay control from optimizer weight_decay = group['weight_decay'] if 'weight_decay' in group else 0 weight_decays.append(weight_decay) group['weight_decay'] = 0 for p in group['params']: if p.grad is None: continue param_norm = torch.norm(p.data) grad_norm = torch.norm(p.grad.data) if param_norm != 0 and grad_norm != 0: # calculate adaptive lr + weight decay adaptive_lr = self.trust_coefficient * (param_norm) / (grad_norm + param_norm * weight_decay + self.eps) # clip learning rate for LARC if self.clip: # calculation of adaptive_lr so that when multiplied by lr it equals `min(adaptive_lr, lr)` adaptive_lr = min(adaptive_lr/group['lr'], 1) p.grad.data += weight_decay * p.data p.grad.data *= adaptive_lr self.optim.step() # return weight decay control to optimizer for i, group in enumerate(self.optim.param_groups): group['weight_decay'] = weight_decays[i]
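A small numeric illustration of the two LARC modes described in the docstring (the numbers are arbitrary), showing how the gradient multiplier computed in step() maps to an effective learning rate:

# adaptive_lr = trust_coefficient * ||w|| / (||g|| + ||w|| * wd + eps), with wd = 0 here.
param_norm, grad_norm, lr, trust, eps = 10.0, 0.5, 0.1, 0.02, 1e-8
adaptive_lr = trust * param_norm / (grad_norm + eps)        # ~0.4

# clip=False ("scale" mode): the gradient is multiplied by adaptive_lr,
# so the effective step size is adaptive_lr * lr = ~0.04.
scale_multiplier = adaptive_lr

# clip=True: the multiplier is capped so the effective step size
# becomes min(adaptive_lr, lr) = 0.1, i.e. the optimizer lr wins here.
clip_multiplier = min(adaptive_lr / lr, 1)                  # = 1.0
print(scale_multiplier * lr, clip_multiplier * lr)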
import torch import sys import subprocess def docstring_hack(): """ Multiproc file which will launch a set of processes locally for multi-gpu usage: python -m apex.parallel.multiproc main.py ... """ pass argslist = list(sys.argv)[1:] world_size = torch.cuda.device_count() if '--world-size' in argslist: world_size = int(argslist[argslist.index('--world-size')+1]) else: argslist.append('--world-size') argslist.append(str(world_size)) workers = [] for i in range(world_size): if '--rank' in argslist: argslist[argslist.index('--rank')+1] = str(i) else: argslist.append('--rank') argslist.append(str(i)) stdout = None if i == 0 else open("GPU_"+str(i)+".log", "w") print(argslist) p = subprocess.Popen([str(sys.executable)]+argslist, stdout=stdout) workers.append(p) for p in workers: p.wait()
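The launcher above only appends --world-size and --rank to each child command line; the launched script is expected to consume them. A minimal sketch of such a script follows (the TCP rendezvous address is a placeholder, and apex.parallel.DistributedDataParallel or torch's own wrapper would normally be applied afterwards):

# hypothetical main.py driven by `python -m apex.parallel.multiproc main.py ...`
import argparse
import torch

parser = argparse.ArgumentParser()
parser.add_argument('--rank', type=int, default=0)
parser.add_argument('--world-size', type=int, default=1)
args = parser.parse_args()

torch.cuda.set_device(args.rank)                  # one GPU per local process
torch.distributed.init_process_group(
    backend='nccl',
    init_method='tcp://127.0.0.1:23456',          # placeholder rendezvous address
    world_size=args.world_size,
    rank=args.rank)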
import math import torch import numbers from torch.nn.parameter import Parameter from torch.nn import init from torch.nn import functional as F import importlib class FusedLayerNormAffineFunction(torch.autograd.Function): def __init__(self, normalized_shape, eps=1e-6): global fused_layer_norm_cuda fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda") self.normalized_shape = normalized_shape self.eps = eps def forward(self, input, weight, bias): input_ = input.contiguous() weight_ = weight.contiguous() bias_ = bias.contiguous() output, mean, invvar = fused_layer_norm_cuda.forward_affine( input_, self.normalized_shape, weight_, bias_, self.eps) self.save_for_backward(input_, weight_, bias_, mean, invvar) return output def backward(self, grad_output): input_, weight_, bias_, mean, invvar = self.saved_tensors grad_input = grad_weight = grad_bias = None grad_input, grad_weight, grad_bias = fused_layer_norm_cuda.backward_affine( grad_output.contiguous(), mean, invvar, input_, self.normalized_shape, weight_, bias_, self.eps) return grad_input, grad_weight, grad_bias; class FusedLayerNormFunction(torch.autograd.Function): def __init__(self, normalized_shape, eps=1e-6): global fused_layer_norm_cuda fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda") self.normalized_shape = normalized_shape self.eps = eps def forward(self, input): input_ = input.contiguous() output, mean, invvar = fused_layer_norm_cuda.forward( input_, self.normalized_shape, self.eps) self.save_for_backward(input_, mean, invvar) return output def backward(self, grad_output): input_, mean, invvar = self.saved_tensors grad_input = None grad_input = fused_layer_norm_cuda.backward( grad_output.contiguous(), mean, invvar, input_, self.normalized_shape, self.eps) return grad_input def fused_layer_norm_affine(input, normalized_shape, weight, bias, eps=1e-6): return FusedLayerNormAffineFunction(normalized_shape,eps)(input, weight, bias) def fused_layer_norm(input, normalized_shape, eps=1e-6): return FusedLayerNormFunction(normalized_shape,eps)(input) class FusedLayerNorm(torch.nn.Module): r"""Applies Layer Normalization over a mini-batch of inputs as described in the paper `Layer Normalization`_ . Currently only runs on cuda() tensors. .. math:: y = \frac{x - \mathrm{E}[x]}{ \sqrt{\mathrm{Var}[x] + \epsilon}} * \gamma + \beta The mean and standard-deviation are calculated separately over the last certain number dimensions which have to be of the shape specified by :attr:`normalized_shape`. :math:`\gamma` and :math:`\beta` are learnable affine transform parameters of :attr:`normalized_shape` if :attr:`elementwise_affine` is ``True``. .. note:: Unlike Batch Normalization and Instance Normalization, which applies scalar scale and bias for each entire channel/plane with the :attr:`affine` option, Layer Normalization applies per-element scale and bias with :attr:`elementwise_affine`. This layer uses statistics computed from input data in both training and evaluation modes. Args: normalized_shape (int or list or torch.Size): input shape from an expected input of size .. math:: [* \times \text{normalized}\_\text{shape}[0] \times \text{normalized}\_\text{shape}[1] \times \ldots \times \text{normalized}\_\text{shape}[-1]] If a single integer is used, it is treated as a singleton list, and this module will normalize over the last dimension which is expected to be of that specific size. eps: a value added to the denominator for numerical stability. 
Default: 1e-5 elementwise_affine: a boolean value that when set to ``True``, this module has learnable per-element affine parameters initialized to ones (for weights) and zeros (for biases). Default: ``True``. Shape: - Input: :math:`(N, *)` - Output: :math:`(N, *)` (same shape as input) Examples:: >>> input = torch.randn(20, 5, 10, 10) >>> # With Learnable Parameters >>> m = apex.normalization.FusedLayerNorm(input.size()[1:]) >>> # Without Learnable Parameters >>> m = apex.normalization.FusedLayerNorm(input.size()[1:], elementwise_affine=False) >>> # Normalize over last two dimensions >>> m = apex.normalization.FusedLayerNorm([10, 10]) >>> # Normalize over last dimension of size 10 >>> m = apex.normalization.FusedLayerNorm(10) >>> # Activating the module >>> output = m(input) .. _`Layer Normalization`: https://arxiv.org/abs/1607.06450 """ def __init__(self, normalized_shape, eps=1e-5, elementwise_affine=True): super(FusedLayerNorm, self).__init__() global fused_layer_norm_cuda fused_layer_norm_cuda = importlib.import_module("fused_layer_norm_cuda") if isinstance(normalized_shape, numbers.Integral): normalized_shape = (normalized_shape,) self.normalized_shape = torch.Size(normalized_shape) self.eps = eps self.elementwise_affine = elementwise_affine if self.elementwise_affine: self.weight = Parameter(torch.Tensor(*normalized_shape)) self.bias = Parameter(torch.Tensor(*normalized_shape)) else: self.register_parameter('weight', None) self.register_parameter('bias', None) self.reset_parameters() def reset_parameters(self): if self.elementwise_affine: init.ones_(self.weight) init.zeros_(self.bias) def forward(self, input): if not input.is_cuda: return F.layer_norm( input, self.normalized_shape, self.weight, self.bias, self.eps) if self.elementwise_affine: return FusedLayerNormAffineFunction(self.normalized_shape,self.eps)( input, self.weight, self.bias) else: return FusedLayerNormFunction(self.normalized_shape,self.eps)( input) def extra_repr(self): return '{normalized_shape}, eps={eps}, ' \ 'elementwise_affine={elementwise_affine}'.format(**self.__dict__)
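Assuming the fused_layer_norm_cuda extension has been built (apex installed with --cuda_ext), the module can be sanity-checked against the stock functional implementation it falls back to on CPU; the tolerances below are illustrative:

import torch
import torch.nn.functional as F
from apex.normalization import FusedLayerNorm

x = torch.randn(16, 32, 64, device='cuda')
m = FusedLayerNorm(64).cuda()
out = m(x)
ref = F.layer_norm(x, m.normalized_shape, m.weight, m.bias, m.eps)
print(torch.allclose(out, ref, atol=1e-4, rtol=1e-4))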
from .fused_layer_norm import FusedLayerNorm
from .fp16util import ( BN_convert_float, network_to_half, prep_param_lists, model_grads_to_master_grads, master_params_to_model_params, tofp16, to_python_float, clip_grad_norm, convert_module, convert_network, FP16Model, ) from .fp16_optimizer import FP16_Optimizer from .loss_scaler import LossScaler, DynamicLossScaler
import torch import torch.nn as nn from torch.autograd import Variable from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors class tofp16(nn.Module): """ Utility module that implements:: def forward(self, input): return input.half() """ def __init__(self): super(tofp16, self).__init__() def forward(self, input): return input.half() def BN_convert_float(module): """ Utility function for network_to_half(). Retained for legacy purposes. """ if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True: module.float() for child in module.children(): BN_convert_float(child) return module def network_to_half(network): """ Convert model to half precision in a batchnorm-safe way. Retained for legacy purposes. It is recommended to use FP16Model. """ return nn.Sequential(tofp16(), BN_convert_float(network.half())) def convert_module(module, dtype): """ Converts a module's immediate parameters and buffers to dtype. """ for param in module.parameters(recurse=False): if param is not None: if param.data.dtype.is_floating_point: param.data = param.data.to(dtype=dtype) if param._grad is not None and param._grad.data.dtype.is_floating_point: param._grad.data = param._grad.data.to(dtype=dtype) for buf in module.buffers(recurse=False): if buf is not None and buf.data.dtype.is_floating_point: buf.data = buf.data.to(dtype=dtype) def convert_network(network, dtype): """ Converts a network's parameters and buffers to dtype. """ for module in network.modules(): if isinstance(module, torch.nn.modules.batchnorm._BatchNorm) and module.affine is True: continue convert_module(module, dtype) if isinstance(module, torch.nn.RNNBase) or isinstance(module, torch.nn.modules.rnn.RNNBase): module.flatten_parameters() return network class FP16Model(nn.Module): """ Convert model to half precision in a batchnorm-safe way. """ def __init__(self, network): super(FP16Model, self).__init__() self.network = convert_network(network, dtype=torch.half) def forward(self, *inputs): inputs = tuple(t.half() for t in inputs) return self.network(*inputs) def backwards_debug_hook(grad): raise RuntimeError("master_params recieved a gradient in the backward pass!") def prep_param_lists(model, flat_master=False): """ Creates a list of FP32 master parameters for a given model, as in `Training Neural Networks with Mixed Precision: Real Examples`_. Args: model (torch.nn.Module): Existing Pytorch model flat_master (bool, optional, default=False): Flatten the master parameters into a single tensor, as a performance optimization. Returns: A tuple (``model_params``, ``master_params``). ``model_params`` is a list of the model's parameters for later use with :func:`model_grads_to_master_grads` and :func:`master_params_to_model_params`. ``master_params`` is a list of FP32 master gradients. If ``flat_master=True``, ``master_params`` will be a list with one element. Example:: model_params, master_params = prep_param_lists(model) .. warning:: Currently, if ``flat_master=True``, all the model's parameters must be the same type. If the model has parameters of different types, use ``flat_master=False``, or use :class:`FP16_Optimizer`. .. _`Training Neural Networks with Mixed Precision: Real Examples`: http://on-demand.gputechconf.com/gtc/2018/video/S81012/ """ model_params = [param for param in model.parameters() if param.requires_grad] if flat_master: # Give the user some more useful error messages try: # flatten_dense_tensors returns a contiguous flat array. 
# http://pytorch.org/docs/master/_modules/torch/_utils.html master_params = _flatten_dense_tensors([param.data for param in model_params]).float() except: print("Error in prep_param_lists: model may contain a mixture of parameters " "of different types. Use flat_master=False, or use F16_Optimizer.") raise master_params = torch.nn.Parameter(master_params) master_params.requires_grad = True # master_params.register_hook(backwards_debug_hook) if master_params.grad is None: master_params.grad = master_params.new(*master_params.size()) return model_params, [master_params] else: master_params = [param.clone().float().detach() for param in model_params] for param in master_params: param.requires_grad = True return model_params, master_params def model_grads_to_master_grads(model_params, master_params, flat_master=False): """ Copy model gradients to master gradients. Args: model_params: List of model parameters created by :func:`prep_param_lists`. master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`model_grads_to_master_grads`. """ if flat_master: # The flattening may incur one more deep copy than is necessary. master_params[0].grad.data.copy_( _flatten_dense_tensors([p.grad.data for p in model_params])) else: for model, master in zip(model_params, master_params): if model.grad is not None: if master.grad is None: master.grad = Variable(master.data.new(*master.data.size())) master.grad.data.copy_(model.grad.data) else: master.grad = None def master_params_to_model_params(model_params, master_params, flat_master=False): """ Copy master parameters to model parameters. Args: model_params: List of model parameters created by :func:`prep_param_lists`. master_params: List of FP32 master parameters created by :func:`prep_param_lists`. If ``master_params`` was created with ``flat_master=True``, ``flat_master=True`` should also be supplied to :func:`master_params_to_model_params`. """ if flat_master: for model, master in zip(model_params, _unflatten_dense_tensors(master_params[0].data, model_params)): model.data.copy_(master) else: for model, master in zip(model_params, master_params): model.data.copy_(master.data) # Backward compatibility fixes def to_python_float(t): if hasattr(t, 'item'): return t.item() else: return t[0] TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) if TORCH_MAJOR == 0 and TORCH_MINOR <= 4: clip_grad_norm = torch.nn.utils.clip_grad_norm else: clip_grad_norm = torch.nn.utils.clip_grad_norm_
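A sketch of the manual master-weights loop these helpers are meant to support, assuming a CUDA-capable machine and a static loss scale; the model, data, and hyperparameters are placeholders (FP16_Optimizer below automates the same steps):

import torch
import torch.nn.functional as F
from apex.fp16_utils import (network_to_half, prep_param_lists,
                             model_grads_to_master_grads,
                             master_params_to_model_params)

model = network_to_half(torch.nn.Linear(128, 10).cuda())
model_params, master_params = prep_param_lists(model)
optimizer = torch.optim.SGD(master_params, lr=1e-2)
loss_scale = 128.0

def train_step(inputs, targets):
    model.zero_grad()
    optimizer.zero_grad()
    loss = F.cross_entropy(model(inputs), targets)
    (loss.float() * loss_scale).backward()                 # scaled backward through fp16 model
    model_grads_to_master_grads(model_params, master_params)
    for p in master_params:                                # unscale in fp32
        if p.grad is not None:
            p.grad.data.mul_(1.0 / loss_scale)
    optimizer.step()
    master_params_to_model_params(model_params, master_params)
    return loss.item()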
import torch from torch import nn from torch.autograd import Variable from torch.nn.parameter import Parameter from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors from ..amp._amp_state import _amp_state, maybe_print from ..amp.scaler import LossScaler from ..multi_tensor_apply import multi_tensor_applier from .fp16util import model_grads_to_master_grads, master_params_to_model_params, clip_grad_norm # TODO: Update overflow check + downscale to use Carl's fused kernel. class FP16_Optimizer(object): """ :class:`FP16_Optimizer` is designed to wrap an existing PyTorch optimizer, and manage static or dynamic loss scaling and master weights in a manner transparent to the user. For standard use, only two lines must be changed: creating the :class:`FP16_Optimizer` instance, and changing the call to ``backward``. Example:: model = torch.nn.Linear(D_in, D_out).cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) # Name the FP16_Optimizer instance to replace the existing optimizer # (recommended but not required): optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... # loss.backward() becomes: optimizer.backward(loss) ... Example with dynamic loss scaling:: ... optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) # optional arg to control dynamic loss scaling behavior # dynamic_loss_args={'scale_window' : 500}) # Usually, dynamic_loss_args is not necessary. Args: init_optimizer (torch.optim.optimizer): Existing optimizer created with the parameters to optimize. Internally, :class:`FP16_Optimizer` replaces the passed optimizer's fp16 parameters, if any, with fp32 master parameters copied from the original ones. :class:`FP16_Optimizer` also stores references to the original fp16 parameters, and updates these fp16 parameters from the master fp32 copy at the end of each :attr:`step`. static_loss_scale (float, optional, default=1.0): Loss scale used internally to scale gradients computed by the model. Any fp16 gradients will be copied to fp32, then downscaled before being applied to the fp32 master params, so ``static_loss_scale`` should not affect learning rate. dynamic_loss_scale (bool, optional, default=False): Use dynamic loss scaling. If True, this will override any ``static_loss_scale`` option. dynamic_loss_args (dict, optional, default=None): Dict of kwargs that will be forwarded to the internal :class:`LossScaler` instance's constructor. Keys of this dict must match kwargs accepted by :class:`LossScaler`'s constructor. If ``dynamic_loss_args`` is unspecified, :class:`LossScaler`'s defaults will be used. verbose (bool, optional, default=True): By default, FP16_Optimizer's constructor prints out the parameters and parameter groups it is ingesting, as a sanity check. If this becomes annoying (e.g. for large models), it can be disabled by passing ``verbose=False``. ``verbose=False`` will not disable printing when the loss scale is readjusted during dynamic loss scaling. ``init_optimizer`` is expected to have been constructed in the ordinary way. It is recommended (although not required) that the newly constructed :class:`FP16_Optimizer` instance be named to replace ``init_optimizer``, for two reasons: First, it means that references to the same name later in the file will not have to change. Second, :class:`FP16_Optimizer` reserves the right (as an implementation detail) to modify ``init_optimizer``. 
If you do choose a unique name for the new :class:`FP16_Optimizer` instance, you should only work with this new instance, because the preexisting optimizer might no longer behave as expected. ``init_optimizer`` may be any Pytorch optimizer. It may contain a mixture of fp16 and fp32 parameters organized into any number of ``param_groups`` with different hyperparameters. The :class:`FP16_Optimizer` constructor will ingest these ``param_groups`` and remember them. Calls to :: loss.backward() must be replaced with :: optimizer.backward(loss) because :class:`FP16_Optimizer` requires ownership of the backward pass to implement loss scaling and copies to master gradients. .. note:: Loss scaling, either static or dynamic, is orthogonal to learning rate, because gradients are downscaled before being applied. This means that adjusting the loss scale, or using dynamic loss scaling, should not require retuning the learning rate or any other hyperparameters. **Advanced options** **Closures**: :class:`FP16_Optimizer` can wrap a Pytorch optimizer that receives a closure. See docstring for :attr:`step`. **Gradient clipping**: Use :attr:`clip_master_grads`. **Multiple losses**: If your model accumulates gradients from multiple losses, this can be made more efficient by supplying ``update_master_grads=False`` to :attr:`backward`. See docstring for :attr:`backward`. **Manually adjusting loss scale**: The current loss scale can be retrieved or set via :: print(optimizer.loss_scale) optimizer.loss_scale = new_loss_scale For static loss scaling, manually adjusting the loss scale over time is a reasonable thing to do. During later epochs, gradients may become smaller, and a higher loss scale may be required, analogous to scheduling the learning rate. Dynamic loss scaling is more subtle (see :class:`DynamicLossScaler`) and in this case, manually adjusting the loss scale is not recommended. **Multi_GPU training**: If the wrapped ``init_optimizer`` was created from a model wrapped in Pytorch DistributedDataParallel or Apex DistributedDataParallel, :class:`FP16_Optimizer` should still work as intended. """ def __init__(self, init_optimizer, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True): if not torch.cuda.is_available: raise SystemError("Cannot use fp16 without CUDA.") self.verbose = verbose self.optimizer = init_optimizer # init_state_dict sets up an alternative way to cast per-param state tensors. # Stashing here in case https://github.com/pytorch/pytorch/issues/7733 makes it necessary. # init_state_dict = init_optimizer.state_dict() self.fp16_groups = [] self.fp32_from_fp16_groups = [] self.fp32_from_fp32_groups = [] for i, param_group in enumerate(self.optimizer.param_groups): self.maybe_print("FP16_Optimizer processing param group {}:".format(i)) fp16_params_this_group = [] fp32_params_this_group = [] fp32_from_fp16_params_this_group = [] for i, param in enumerate(param_group['params']): if param.requires_grad: if param.type() == 'torch.cuda.HalfTensor': self.maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}" .format(param.size())) fp16_params_this_group.append(param) master_param = param.detach().clone().float() master_param.requires_grad = True param_group['params'][i] = master_param fp32_from_fp16_params_this_group.append(master_param) # Reset existing state dict key to the new master param. # We still need to recast per-param state tensors, if any, to FP32. 
if param in self.optimizer.state: self.optimizer.state[master_param] = self.optimizer.state.pop(param) elif param.type() == 'torch.cuda.FloatTensor': self.maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}" .format(param.size())) fp32_params_this_group.append(param) param_group['params'][i] = param else: raise TypeError("Wrapped parameters must be either " "torch.cuda.FloatTensor or torch.cuda.HalfTensor. " "Received {}".format(param.type())) self.fp16_groups.append(fp16_params_this_group) self.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group) self.fp32_from_fp32_groups.append(fp32_params_this_group) self.all_fp16_params = [] for group in self.fp16_groups: self.all_fp16_params += group self.all_fp32_from_fp16_params = [] for group in self.fp32_from_fp16_groups: self.all_fp32_from_fp16_params += group self.all_fp32_from_fp32_params = [] for group in self.fp32_from_fp32_groups: self.all_fp32_from_fp32_params += group # Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors self.optimizer.load_state_dict(self.optimizer.state_dict()) # alternative way to cast per-param state tensors: # self.optimizer.load_state_dict(init_state_dict) if dynamic_loss_scale: self.dynamic_loss_scale = True if dynamic_loss_args is not None: self.loss_scaler = LossScaler("dynamic", **dynamic_loss_args) else: self.loss_scaler = LossScaler("dynamic") else: self.dynamic_loss_scale = False self.loss_scaler = LossScaler(static_loss_scale) self.overflow = False self.first_closure_call_this_step = True self.clip_grad_norm = clip_grad_norm # TODO: Centralize exposure and import error checking for the C backend. if multi_tensor_applier.available: import amp_C self.multi_tensor_scale = amp_C.multi_tensor_scale self._dummy_overflow_buf = torch.cuda.IntTensor([0]); # Having self.maybe_print distinct from _amp_state.maybe_print is another artifact # of having to support FP16_Optimizer separately, for the time being. def maybe_print(self, msg): if self.verbose: print(msg) def __getstate__(self): raise RuntimeError("FP16_Optimizer should be serialized using state_dict().") def __setstate__(self, state): raise RuntimeError("FP16_Optimizer should be deserialized using load_state_dict().") def zero_grad(self, set_grads_to_None=False): """ Zero fp32 and fp16 parameter grads. """ # In principle, only the .grad attributes of the model params need to be zeroed, # because gradients are copied into the FP32 master params. However, we zero # all gradients owned by the optimizer, just to be safe: for group in self.optimizer.param_groups: for p in group['params']: if set_grads_to_None: p.grad = None else: if p.grad is not None: p.grad.detach_() p.grad.zero_() # Zero fp16 gradients owned by the model: for fp16_group in self.fp16_groups: for param in fp16_group: if set_grads_to_None: param.grad = None else: if param.grad is not None: param.grad.detach_() # as in torch.optim.optimizer.zero_grad() param.grad.zero_() # Should not be used anymore. 
# def _check_overflow(self): # params = [] # for group in self.fp16_groups: # for param in group: # params.append(param) # for group in self.fp32_from_fp32_groups: # for param in group: # params.append(param) # self.overflow = self.loss_scaler.has_overflow(params) # def _update_scale(self, has_overflow=False): # self.loss_scaler.update_scale(has_overflow) def _master_params_to_model_params(self): if multi_tensor_applier.available: if len(self.all_fp16_params) > 0: multi_tensor_applier( self.multi_tensor_scale, self._dummy_overflow_buf, [self.all_fp32_from_fp16_params, self.all_fp16_params], 1.0) else: for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups): master_params_to_model_params(fp16_group, fp32_from_fp16_group) # To consider: Integrate distributed with this wrapper by registering a hook on each variable # that does the overflow check, gradient copy + downscale, and fp32 allreduce in a different stream. # def _model_grads_to_master_grads(self): # for fp16_group, fp32_from_fp16_group in zip(self.fp16_groups, self.fp32_from_fp16_groups): # model_grads_to_master_grads(fp16_group, fp32_from_fp16_group) # def _downscale_master(self): # if self.loss_scale != 1.0: # for group in self.optimizer.param_groups: # for param in group['params']: # if param.grad is not None: # param.grad.data.mul_(1./self.loss_scale) def clip_master_grads(self, max_norm, norm_type=2): """ Clips fp32 master gradients via ``torch.nn.utils.clip_grad_norm``. Args: max_norm (float or int): max norm of the gradients norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the current fp32 gradients (viewed as a single vector). .. warning:: Returns -1 if the most recently computed fp16 gradients overflowed (that is, if ``self.overflow`` is ``True``). """ if not self.overflow: fp32_params = [] for param_group in self.optimizer.param_groups: for param in param_group['params']: fp32_params.append(param) return self.clip_grad_norm(fp32_params, max_norm, norm_type) else: return -1 def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ state_dict = {} state_dict['loss_scaler'] = self.loss_scaler state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['overflow'] = self.overflow state_dict['first_closure_call_this_step'] = self.first_closure_call_this_step state_dict['optimizer_state_dict'] = self.optimizer.state_dict() state_dict['fp32_from_fp16'] = self.fp32_from_fp16_groups return state_dict def load_state_dict(self, state_dict): """ Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... 
checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ # I think it should actually be ok to reload the optimizer before the model. self.loss_scaler = state_dict['loss_scaler'] self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] self.overflow = state_dict['overflow'] self.first_closure_call_this_step = state_dict['first_closure_call_this_step'] self.optimizer.load_state_dict(state_dict['optimizer_state_dict']) # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 2. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. for current_group, saved_group in zip(self.fp32_from_fp16_groups, state_dict['fp32_from_fp16']): for current, saved in zip(current_group, saved_group): current.data.copy_(saved.data) def step(self, closure=None): # could add clip option. """ If no closure is supplied, :attr:`step` should be called after ``fp16_optimizer_obj.backward(loss)``. :attr:`step` updates the fp32 master copy of parameters using the optimizer supplied to :class:`FP16_Optimizer`'s constructor, then copies the updated fp32 params into the fp16 params originally referenced by :class:`FP16_Optimizer`'s constructor, so the user may immediately run another forward pass using their model. If a closure is supplied, :attr:`step` may be called without a prior call to :attr:`backward(loss)`. This control flow is identical to `ordinary Pytorch optimizer use`_ with closures. However, the user should take care that any ``loss.backward()`` call within the closure has been replaced by ``fp16_optimizer_obj.backward(loss)``. Args: closure (optional): Closure that will be supplied to the underlying optimizer originally passed to :class:`FP16_Optimizer`'s constructor. closure should call :attr:`zero_grad()` on the :class:`FP16_Optimizer` object, compute the loss, call :attr:`backward(loss)`, and return the loss. Example with closure:: # optimizer is assumed to be an FP16_Optimizer object, previously constructed from an # existing pytorch optimizer. for input, target in dataset: def closure(): optimizer.zero_grad() output = model(input) loss = loss_fn(output, target) # loss.backward() becomes: optimizer.backward(loss) return loss optimizer.step(closure) .. warning:: Currently, calling :attr:`step` with a closure is not compatible with dynamic loss scaling. .. _`ordinary Pytorch optimizer use`: http://pytorch.org/docs/master/optim.html#optimizer-step-closure """ scale = self.loss_scaler.loss_scale() # To consider: Should this be in step(), or update_master_grads? It works either way, # but I should make it consistent with the Amp control flow, which updates the scale # during backward context manager exit. 
# self._update_scale(self.overflow) if self.overflow: # Using _amp_state.maybe_print instead of self.print here is intentional. maybe_print("Gradient overflow. Skipping step, reducing " + "loss scale to {}".format(self.loss_scaler.loss_scale())) return if closure is not None: retval = self._step_with_closure(closure) else: # torch.cuda.nvtx.range_push("pytorch optimizer step") retval = self.optimizer.step() # torch.cuda.nvtx.range_pop() self._master_params_to_model_params() return retval def _step_with_closure(self, closure): def wrapped_closure(): # helpful for debugging # print("Calling wrapped_closure, first_closure_call_this_step = {}" # .format(self.first_closure_call_this_step)) if self.first_closure_call_this_step: # We expect that the fp16 params are initially fresh on entering self.step(), # so _master_params_to_model_params() is unnecessary the first time wrapped_closure() # is called within self.optimizer.step(). self.first_closure_call_this_step = False else: # If self.optimizer.step() internally calls wrapped_closure more than once, # it may update the fp32 params after each call. However, self.optimizer # doesn't know about the fp16 params at all. If the fp32 params get updated, # we can't rely on self.optimizer to refresh the fp16 params. We need # to handle that manually: self._master_params_to_model_params() # Our API expects the user to give us ownership of the backward() call by # replacing all calls to loss.backward() with optimizer.backward(loss). # This requirement holds whether or not the call to backward() is made within a closure. # If the user is properly calling optimizer.backward(loss) within "closure," # calling closure() here will give the fp32 master params fresh gradients # for the optimizer to play with, so all wrapped_closure needs to do is call # closure() and return the loss. temp_loss = closure() while(self.overflow): scale = self.loss_scaler.loss_scale() # self._update_scale(self.overflow) # now done at the end of backward print("OVERFLOW within closure! Skipping step, reducing loss scale to {}".format( self.loss_scaler.loss_scale())) temp_loss = closure() return temp_loss retval = self.optimizer.step(wrapped_closure) self.first_closure_call_this_step = True return retval def backward(self, loss, update_master_grads=True, retain_graph=False): """ :attr:`backward` performs the following conceptual steps: 1. fp32_loss = loss.float() (see first Note below) 2. scaled_loss = fp32_loss*loss_scale 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's leaves (which may be fp16, fp32, or a mixture, depending how your model was defined). 4. fp16 grads are then copied to the master params' ``.grad`` attributes (see second Note), which are guaranteed to be fp32. 5. Finally, master grads are divided by loss_scale. In this way, after :attr:`backward`, the master params have fresh gradients, and :attr:`step` may be called. .. note:: :attr:`backward` internally converts the loss to fp32 before applying the loss scale. This provides some additional safety against overflow if the user has supplied an fp16 loss value. However, for maximum overflow safety, the user should compute the loss criterion (MSE, cross entropy, etc) in fp32 before supplying it to :attr:`backward`. .. 
warning:: The gradients found in a model's leaves after the call to :attr:`backward` should not be regarded as valid in general, because it's possible they have been scaled (and in the case of dynamic loss scaling, the scale factor may change over time). If the user wants to inspect gradients after a call to :attr:`backward`, only the master gradients should be regarded as valid. These can be retrieved via :attr:`inspect_master_grad_data()`. Args: loss: The loss output by the user's model. loss may be either float or half (but see first Note above). update_master_grads (bool, optional, default=True): Option to copy fp16 grads to fp32 grads on this call. By setting this to False, the user can delay the copy, which is useful to eliminate redundant fp16->fp32 grad copies if :attr:`backward` is being called on multiple losses in one iteration. If set to False, the user becomes responsible for calling :attr:`update_master_grads` before calling :attr:`step`. retain_graph (bool, optional, default=False): Forwards the usual ``retain_graph=True`` option to the internal call to ``loss.backward``. If ``retain_graph`` is being used to accumulate gradient values from multiple backward passes before calling ``optimizer.step``, passing ``update_master_grads=False`` is also recommended (see Example below). Example:: # Ordinary operation: optimizer.backward(loss) # Naive operation with multiple losses (technically valid, but less efficient): # fp32 grads will be correct after the second call, but # the first call incurs an unnecessary fp16->fp32 grad copy. optimizer.backward(loss1) optimizer.backward(loss2) # More efficient way to handle multiple losses: # The fp16->fp32 grad copy is delayed until fp16 grads from all # losses have been accumulated. optimizer.backward(loss1, update_master_grads=False) optimizer.backward(loss2, update_master_grads=False) optimizer.update_master_grads() """ # To consider: try multiple backward passes using retain_grad=True to find # a loss scale that works. After you find a loss scale that works, do a final dummy # backward pass with retain_graph=False to tear down the graph. Doing this would avoid # discarding the iteration, but probably wouldn't improve overall efficiency. scaled_loss = loss.float()*self.loss_scaler.loss_scale() scaled_loss.backward(retain_graph=retain_graph) if update_master_grads: self.update_master_grads() def update_master_grads(self): # torch.cuda.nvtx.range_push("update_master_grads") """ Copy the ``.grad`` attribute from stored references to fp16 parameters to the ``.grad`` attribute of the fp32 master parameters that are directly updated by the optimizer. :attr:`update_master_grads` only needs to be called if ``fp16_optimizer_obj.backward`` was called with ``update_master_grads=False``. """ # if self.dynamic_loss_scale: # self._check_overflow() # if self.overflow: return # self._model_grads_to_master_grads() # self._downscale_master() # Use the one-shot multi-tensor apply kernel self.loss_scaler.clear_overflow_state() if len(self.all_fp16_params) > 0: # print("Model grads before") # print([param.grad.data for param in self.all_fp16_params]) # I'm ONLY writing this as an incremental way to make some tests pass until # I can refactor the tests as well. # FP16_Optimizer should not be used by anyone. 
model_grads = [] master_grads = [] for model_param, master_param in zip(self.all_fp16_params, self.all_fp32_from_fp16_params): if model_param.grad is not None: model_grads.append(model_param.grad) if master_param.grad is None: master_param.grad = torch.empty_like(master_param) master_grads.append(master_param.grad) self.loss_scaler.unscale( model_grads, master_grads, self.loss_scaler.loss_scale()) # print("Master grads after") # print([param.grad.data for param in self.all_fp32_from_fp16_params]) if len(self.all_fp32_from_fp32_params) > 0: model_grads = [] master_grads = [] for model_param, master_param in zip(self.all_fp32_from_fp32_params, self.all_fp32_from_fp32_params): if model_param.grad is not None: model_grads.append(model_param.grad) master_grads.append(master_param.grad) # print("Model grads before") # print([param.grad.data for param in self.all_fp32_from_fp32_params]) self.loss_scaler.unscale( model_grads, master_grads, self.loss_scaler.loss_scale()) # print("Master grads after") # print([param.grad.data for param in self.all_fp32_from_fp32_params]) # quit() self.overflow = self.loss_scaler.update_scale() # torch.cuda.nvtx.range_pop() def inspect_master_grad_data(self): """ When running with :class:`FP16_Optimizer`, ``.grad`` attributes of a model's fp16 leaves should not be regarded as truthful, because they might be scaled. After a call to :attr:`fp16_optimizer_obj.backward(loss)`, if no overflow was encountered, the fp32 master params' ``.grad`` attributes will contain valid gradients properly divided by the loss scale. However, because :class:`FP16_Optimizer` flattens some parameters, accessing them may be nonintuitive. :attr:`inspect_master_grad_data` allows those gradients to be viewed with shapes corresponding to their associated model leaves. Returns: List of lists (one list for each parameter group). The list for each parameter group is a list of the ``.grad.data`` attributes of the fp32 master params belonging to that group. """ if self.overflow: print("Warning: calling FP16_Optimizer.inspect_master_grad_data while in an overflow state. " "Gradients are currently invalid (may be inf, nan, or stale). Returning None.") return None else: # The optimizer owns only references to master params. master_grads_data = [] for param_group in self.optimizer.param_groups: master_grads_this_group = [] for param in param_group['params']: if param.grad is not None: master_grads_this_group.append(param.grad.data) else: master_grads_this_group.append(None) master_grads_data.append(master_grads_this_group) return master_grads_data # Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale" def _get_loss_scale(self): return self.loss_scaler.loss_scale() def _set_loss_scale(self, value): self.loss_scaler._loss_scale = value loss_scale = property(_get_loss_scale, _set_loss_scale) # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" def _get_state(self): return self.optimizer.state def _set_state(self, value): self.optimizer.state = value state = property(_get_state, _set_state) # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" # (for example, to adjust the learning rate) def _get_param_groups(self): return self.optimizer.param_groups def _set_param_groups(self, value): self.optimizer.param_groups = value param_groups = property(_get_param_groups, _set_param_groups)
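Putting the pieces documented above together, a sketch of a training iteration that also uses clip_master_grads; the model, data, and clipping threshold are placeholder values chosen for illustration:

import torch
from apex.fp16_utils import FP16_Optimizer

model = torch.nn.Linear(64, 4).cuda().half()
optimizer = FP16_Optimizer(torch.optim.SGD(model.parameters(), lr=1e-2),
                           dynamic_loss_scale=True)

for _ in range(5):
    inputs = torch.randn(32, 64, device='cuda', dtype=torch.half)
    targets = torch.randint(0, 4, (32,), device='cuda')
    optimizer.zero_grad()
    loss = torch.nn.functional.cross_entropy(model(inputs).float(), targets)
    optimizer.backward(loss)               # scales, backprops, copies/unscales master grads
    optimizer.clip_master_grads(1.0)       # returns -1 if this iteration overflowed
    optimizer.step()                       # skipped internally on overflow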
import torch # item() is a recent addition, so this helps with backward compatibility. def to_python_float(t): if hasattr(t, 'item'): return t.item() else: return t[0] class LossScaler: """ Class that manages a static loss scale. This class is intended to interact with :class:`FP16_Optimizer`, and should not be directly manipulated by the user. Use of :class:`LossScaler` is enabled via the ``static_loss_scale`` argument to :class:`FP16_Optimizer`'s constructor. Args: scale (float, optional, default=1.0): The loss scale. """ def __init__(self, scale=1): self.cur_scale = scale # `params` is a list / generator of torch.Variable def has_overflow(self, params): return False # `x` is a torch.Tensor def _has_inf_or_nan(x): return False def update_scale(self, overflow): pass @property def loss_scale(self): return self.cur_scale def scale_gradient(self, module, grad_in, grad_out): return tuple(self.loss_scale * g for g in grad_in) def backward(self, loss, retain_graph=False): scaled_loss = loss*self.loss_scale scaled_loss.backward(retain_graph=retain_graph) class DynamicLossScaler: """ Class that manages dynamic loss scaling. It is recommended to use :class:`DynamicLossScaler` indirectly, by supplying ``dynamic_loss_scale=True`` to the constructor of :class:`FP16_Optimizer`. However, it's important to understand how :class:`DynamicLossScaler` operates, because the default options can be changed using the the ``dynamic_loss_args`` argument to :class:`FP16_Optimizer`'s constructor. Loss scaling is designed to combat the problem of underflowing gradients encountered at long times when training fp16 networks. Dynamic loss scaling begins by attempting a very high loss scale. Ironically, this may result in OVERflowing gradients. If overflowing gradients are encountered, :class:`DynamicLossScaler` informs :class:`FP16_Optimizer` that an overflow has occurred. :class:`FP16_Optimizer` then skips the update step for this particular iteration/minibatch, and :class:`DynamicLossScaler` adjusts the loss scale to a lower value. If a certain number of iterations occur without overflowing gradients detected, :class:`DynamicLossScaler` increases the loss scale once more. In this way :class:`DynamicLossScaler` attempts to "ride the edge" of always using the highest loss scale possible without incurring overflow. Args: init_scale (float, optional, default=2**32): Initial loss scale attempted by :class:`DynamicLossScaler.` scale_factor (float, optional, default=2.0): Factor used when adjusting the loss scale. If an overflow is encountered, the loss scale is readjusted to loss scale/``scale_factor``. If ``scale_window`` consecutive iterations take place without an overflow, the loss scale is readjusted to loss_scale*``scale_factor``. scale_window (int, optional, default=1000): Number of consecutive iterations without an overflow to wait before increasing the loss scale. 
""" def __init__(self, init_scale=2**32, scale_factor=2., scale_window=1000): self.cur_scale = init_scale self.cur_iter = 0 self.last_overflow_iter = -1 self.scale_factor = scale_factor self.scale_window = scale_window # `params` is a list / generator of torch.Variable def has_overflow(self, params): for p in params: if p.grad is not None and DynamicLossScaler._has_inf_or_nan(p.grad.data): return True return False # `x` is a torch.Tensor def _has_inf_or_nan(x): try: # if x is half, the .float() incurs an additional deep copy, but it's necessary if # Pytorch's .sum() creates a one-element tensor of the same type as x # (which is true for some recent version of pytorch). cpu_sum = float(x.float().sum()) # More efficient version that can be used if .sum() returns a Python scalar # cpu_sum = float(x.sum()) except RuntimeError as instance: # We want to check if inst is actually an overflow exception. # RuntimeError could come from a different error. # If so, we still want the exception to propagate. if "value cannot be converted" not in instance.args[0]: raise return True else: if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True return False # `overflow` is boolean indicating whether the gradient overflowed def update_scale(self, overflow): if overflow: # self.cur_scale /= self.scale_factor self.cur_scale = max(self.cur_scale/self.scale_factor, 1) self.last_overflow_iter = self.cur_iter else: if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0: self.cur_scale *= self.scale_factor self.cur_iter += 1 @property def loss_scale(self): return self.cur_scale def scale_gradient(self, module, grad_in, grad_out): return tuple(self.loss_scale * g for g in grad_in) def backward(self, loss, retain_graph=False): scaled_loss = loss*self.loss_scale scaled_loss.backward(retain_graph=retain_graph) ############################################################## # Example usage below here -- assuming it's in a separate file ############################################################## """ TO-DO separate out into an example. if __name__ == "__main__": import torch from torch.autograd import Variable from dynamic_loss_scaler import DynamicLossScaler # N is batch size; D_in is input dimension; # H is hidden dimension; D_out is output dimension. N, D_in, H, D_out = 64, 1000, 100, 10 # Create random Tensors to hold inputs and outputs, and wrap them in Variables. x = Variable(torch.randn(N, D_in), requires_grad=False) y = Variable(torch.randn(N, D_out), requires_grad=False) w1 = Variable(torch.randn(D_in, H), requires_grad=True) w2 = Variable(torch.randn(H, D_out), requires_grad=True) parameters = [w1, w2] learning_rate = 1e-6 optimizer = torch.optim.SGD(parameters, lr=learning_rate) loss_scaler = DynamicLossScaler() for t in range(500): y_pred = x.mm(w1).clamp(min=0).mm(w2) loss = (y_pred - y).pow(2).sum() * loss_scaler.loss_scale print('Iter {} loss scale: {}'.format(t, loss_scaler.loss_scale)) print('Iter {} scaled loss: {}'.format(t, loss.data[0])) print('Iter {} unscaled loss: {}'.format(t, loss.data[0] / loss_scaler.loss_scale)) # Run backprop optimizer.zero_grad() loss.backward() # Check for overflow has_overflow = DynamicLossScaler.has_overflow(parameters) # If no overflow, unscale grad and update as usual if not has_overflow: for param in parameters: param.grad.data.mul_(1. 
/ loss_scaler.loss_scale) optimizer.step() # Otherwise, don't do anything -- ie, skip iteration else: print('OVERFLOW!') # Update loss scale for next iteration loss_scaler.update_scale(has_overflow) """
from .multi_tensor_apply import MultiTensorApply multi_tensor_applier = MultiTensorApply(2048*32)
import torch class MultiTensorApply(object): available = False warned = False def __init__(self, chunk_size): try: import amp_C MultiTensorApply.available = True self.chunk_size = chunk_size except ImportError as err: MultiTensorApply.available = False MultiTensorApply.import_err = err def check_avail(self): if MultiTensorApply.available == False: raise RuntimeError( "Attempted to call MultiTensorApply method, but MultiTensorApply " "is not available, possibly because Apex was installed without " "--cpp_ext --cuda_ext. Original import error message:", MultiTensorApply.import_err) def __call__(self, op, noop_flag_buffer, tensor_lists, *args): self.check_avail() return op(self.chunk_size, noop_flag_buffer, tensor_lists, *args)
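For reference, this is how the helper is driven elsewhere in apex (for example by FP16_Optimizer above): amp_C.multi_tensor_scale copies each source tensor into the corresponding destination scaled by a factor, chunk by chunk. The example assumes apex was built with --cpp_ext --cuda_ext:

import torch
import amp_C
from apex.multi_tensor_apply import multi_tensor_applier

if multi_tensor_applier.available:
    overflow_buf = torch.cuda.IntTensor([0])        # flag buffer required by the fused kernels
    src = [torch.randn(1024, device='cuda') for _ in range(4)]
    dst = [torch.empty_like(t, dtype=torch.half) for t in src]
    multi_tensor_applier(amp_C.multi_tensor_scale, overflow_buf,
                         [src, dst], 1.0)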
import types import torch import importlib class FusedAdam(torch.optim.Optimizer): """Implements Adam algorithm. Currently GPU-only. Requires Apex to be installed via ``python setup.py install --cuda_ext --cpp_ext``. It has been proposed in `Adam: A Method for Stochastic Optimization`_. Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups. lr (float, optional): learning rate. (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square. (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability. (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ (default: False) NOT SUPPORTED in FusedAdam! eps_inside_sqrt (boolean, optional): in the 'update parameters' step, adds eps to the bias-corrected second moment estimate before evaluating square root instead of adding it to the square root of second moment estimate as in the original paper. (default: False) .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, bias_correction = True, betas=(0.9, 0.999), eps=1e-8, eps_inside_sqrt = False, weight_decay=0., max_grad_norm=0., amsgrad=False): global fused_adam_cuda fused_adam_cuda = importlib.import_module("fused_adam_cuda") if amsgrad: raise RuntimeError('FusedAdam does not support the AMSGrad variant.') defaults = dict(lr=lr, bias_correction=bias_correction, betas=betas, eps=eps, weight_decay=weight_decay, max_grad_norm=max_grad_norm) super(FusedAdam, self).__init__(params, defaults) self.eps_mode = 0 if eps_inside_sqrt else 1 def step(self, closure=None, grads=None, output_params=None, scale=1., grad_norms=None): """Performs a single optimization step. Arguments: closure (callable, optional): A closure that reevaluates the model and returns the loss. grads (list of tensors, optional): weight gradient to use for the optimizer update. If gradients have type torch.half, parameters are expected to be in type torch.float. (default: None) output params (list of tensors, optional): A reduced precision copy of the updated weights written out in addition to the regular updated weights. Have to be of same type as gradients. (default: None) scale (float, optional): factor to divide gradient tensor values by before applying to weights. 
(default: 1) """ loss = None if closure is not None: loss = closure() if grads is None: grads_group = [None]*len(self.param_groups) # backward compatibility # assuming a list/generator of parameter means single group elif isinstance(grads, types.GeneratorType): grads_group = [grads] elif type(grads[0])!=list: grads_group = [grads] else: grads_group = grads if output_params is None: output_params_group = [None]*len(self.param_groups) elif isinstance(output_params, types.GeneratorType): output_params_group = [output_params] elif type(output_params[0])!=list: output_params_group = [output_params] else: output_params_group = output_params if grad_norms is None: grad_norms = [None]*len(self.param_groups) for group, grads_this_group, output_params_this_group, grad_norm in zip(self.param_groups, grads_group, output_params_group, grad_norms): if grads_this_group is None: grads_this_group = [None]*len(group['params']) if output_params_this_group is None: output_params_this_group = [None]*len(group['params']) # compute combined scale factor for this group combined_scale = scale if group['max_grad_norm'] > 0: # norm is in fact norm*scale clip = ((grad_norm / scale) + 1e-6) / group['max_grad_norm'] if clip > 1: combined_scale = clip * scale bias_correction = 1 if group['bias_correction'] else 0 for p, grad, output_param in zip(group['params'], grads_this_group, output_params_this_group): #note: p.grad should not ever be set for correct operation of mixed precision optimizer that sometimes sends None gradients if p.grad is None and grad is None: continue if grad is None: grad = p.grad.data if grad.is_sparse: raise RuntimeError('FusedAdam does not support sparse gradients, please consider SparseAdam instead') state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 out_p = torch.tensor([], dtype = torch.float) if output_param is None else output_param fused_adam_cuda.adam(p.data, out_p, exp_avg, exp_avg_sq, grad, group['lr'], beta1, beta2, group['eps'], combined_scale, state['step'], self.eps_mode, bias_correction, group['weight_decay']) return loss
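FusedAdam can also be used on its own as a replacement for torch.optim.Adam on CUDA tensors (it requires the fused_adam_cuda extension); in mixed precision it is normally driven through the FP16_Optimizer wrapper that follows. A minimal sketch with placeholder dimensions:

import torch
from apex.optimizers import FusedAdam

model = torch.nn.Linear(64, 64).cuda()
optimizer = FusedAdam(model.parameters(), lr=1e-3, weight_decay=1e-2)

for _ in range(3):
    optimizer.zero_grad()
    loss = model(torch.randn(8, 64, device='cuda')).pow(2).mean()
    loss.backward()
    optimizer.step()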
from .fused_adam import FusedAdam from .fp16_optimizer import FP16_Optimizer
import torch from torch._utils import _flatten_dense_tensors, _unflatten_dense_tensors class FP16_Optimizer(object): """ :class:`FP16_Optimizer` A cutdown version of apex.fp16_utils.FP16_Optimizer. Designed only to wrap apex.optimizers.FusedAdam. Refer to apex.fp16_utils documents for more information. Example:: model = torch.nn.Linear(D_in, D_out).cuda().half() optimizer = apex.optimizers.FusedAdam(model.parameters()) # Name the FP16_Optimizer instance to replace the existing optimizer # (recommended but not required): optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... # loss.backward() becomes: optimizer.backward(loss) ... Example with dynamic loss scaling:: ... optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True) # optional arg to control dynamic loss scaling behavior # dynamic_loss_args={'scale_window' : 500}) # Usually, dynamic_loss_args is not necessary. """ def __init__(self, init_optimizer, static_loss_scale=1.0, dynamic_loss_scale=False, dynamic_loss_args=None, verbose=True): # The fused optimizer does all the work. We need this layer for two reason: # 1. maintain same user API from apex.fp16_utils # 2. keep common stuff here in case we need to add new fused optimizer later # differences from apex.fp16_utils: # - assume all model params in fp16 # - assume all params requires grad # - flat by groups, not keeping state. TODO: remove state explicitly? # - master gard and unflat master weight never exist. TODO: a way to save out unflat master? if not torch.cuda.is_available: raise SystemError("Cannot use fp16 without CUDA.") self.optimizer = init_optimizer # param flattened by groups self.fp16_groups = [] self.fp16_groups_flat = [] self.fp32_groups_flat = [] # loop to deal with groups for i, param_group in enumerate(self.optimizer.param_groups): # push this group to list before modify self.fp16_groups.append(param_group['params']) # init fp16 weight buffer, flattened self.fp16_groups_flat.append(_flatten_dense_tensors([p.clone().detach() for p in self.fp16_groups[i]])) # set model fp16 weight to slices of flattened buffer updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i]) for p,q in zip(self.fp16_groups[i], updated_params): p.data = q.data # init master weight, flattened self.fp32_groups_flat.append(self.fp16_groups_flat[i].clone().float().detach()) # modify optimizer of have flat master weight self.fp32_groups_flat[i].requires_grad = True # keep this in case internal optimizer uses it param_group['params'] = [self.fp32_groups_flat[i]] # we may have a way of fusing dynamic scale. Do not support for now if dynamic_loss_scale: if dynamic_loss_args is not None: raise SystemError("Do not support dynamic loss scale args for now.") self.dynamic_loss_scale = True self.cur_scale = 2**16 self.cur_iter = 0 self.last_overflow_iter = -1 self.scale_factor = 2 self.scale_window = 1000 else: self.dynamic_loss_scale = False self.cur_iter = 0 self.cur_scale = static_loss_scale self.verbose = verbose def zero_grad(self, set_grads_to_None=True): """ Zero FP16 parameter grads. """ # FP32 grad should never exist. # For speed, set model fp16 grad to None by default for group in self.fp16_groups: for p in group: if set_grads_to_None: p.grad = None else: if p.grad is not None: p.grad.detach_() p.grad.zero_() def _compute_grad_norm(self, fp16_grads_flat, norm_type=2): """ Compute fp16 grad norm for later clipping(fused with update). Internal accumulated in fp32. Also fused in NaN check. Possibly other reduction needed for grad. 
Args: fp16_grads_flat (tensor): fp16 grad flattened norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for infinity norm. Returns: Total norm of the current fp16 gradients (viewed as a single vector). Returns -1 if the most recently computed fp16 gradients overflowed """ # TODO: Not most efficient with copy to cpu and sync # only support 2-norm now # for torch version <= 1.0.1, torch.norm with dtype will fail and fall back to cast try: norm = float(torch.norm(fp16_grads_flat, 2.0, dtype=torch.float32)) except TypeError as err: norm = float(torch.norm(fp16_grads_flat.float(), 2.0)) if norm == float('inf') or norm == -float('inf') or norm != norm: return -1 else: return norm def step(self, closure=None): """ Not supporting closure. """ # First compute norm for all group so we know if there is overflow grads_groups_flat = [] norm_groups = [] skip = False for i, group in enumerate(self.fp16_groups): grads_groups_flat.append(_flatten_dense_tensors([p.grad for p in group])) norm_groups.append(self._compute_grad_norm(grads_groups_flat[i])) if norm_groups[i] == -1: #TODO: early break skip = True if skip: self._update_scale(skip) return # norm is in fact norm*cur_scale self.optimizer.step(grads=[[g] for g in grads_groups_flat], output_params=[[p] for p in self.fp16_groups_flat], scale=self.cur_scale, grad_norms=norm_groups) # TODO: we probably don't need this? just to be safe for i in range(len(norm_groups)): updated_params = _unflatten_dense_tensors(self.fp16_groups_flat[i], self.fp16_groups[i]) for p,q in zip(self.fp16_groups[i], updated_params): p.data = q.data self._update_scale(False) return def backward(self, loss): """ :attr:`backward` performs the following steps: 1. fp32_loss = loss.float() 2. scaled_loss = fp32_loss*loss_scale 3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves """ scaled_loss = (loss.float()) * self.cur_scale scaled_loss.backward() def _update_scale(self, skip): if self.dynamic_loss_scale: if skip: if self.verbose: print("\nGrad overflow on iteration", self.cur_iter) print("Using dynamic loss scale of", self.cur_scale) self.cur_scale = max(self.cur_scale/self.scale_factor, 1) self.last_overflow_iter = self.cur_iter else: if (self.cur_iter - self.last_overflow_iter) % self.scale_window == 0: self.cur_scale *= self.scale_factor else: if skip: print("\nGrad overflow on iteration", self.cur_iter) print("Using static loss scale of", self.cur_scale) self.cur_iter +=1 return # Promote state so it can be retrieved or set via "fp16_optimizer_instance.state" def _get_state(self): return self.optimizer.state def _set_state(self, value): self.optimizer.state = value state = property(_get_state, _set_state) # Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups" # (for example, to adjust the learning rate) def _get_param_groups(self): return self.optimizer.param_groups def _set_param_groups(self, value): self.optimizer.param_groups = value param_groups = property(_get_param_groups, _set_param_groups) def state_dict(self): """ Returns a dict containing the current state of this :class:`FP16_Optimizer` instance. This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict of the contained Pytorch optimizer. 
Example:: checkpoint = {} checkpoint['model'] = model.state_dict() checkpoint['optimizer'] = optimizer.state_dict() torch.save(checkpoint, "saved.pth") """ state_dict = {} state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale state_dict['cur_scale'] = self.cur_scale state_dict['cur_iter'] = self.cur_iter if state_dict['dynamic_loss_scale']: state_dict['last_overflow_iter'] = self.last_overflow_iter state_dict['scale_factor'] = self.scale_factor state_dict['scale_window'] = self.scale_window state_dict['optimizer_state_dict'] = self.optimizer.state_dict() state_dict['fp32_groups_flat'] = self.fp32_groups_flat return state_dict def load_state_dict(self, state_dict): """ Loads a state_dict created by an earlier call to state_dict(). If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``, whose parameters in turn came from ``model``, it is expected that the user will call ``model.load_state_dict()`` before ``fp16_optimizer_instance.load_state_dict()`` is called. Example:: model = torch.nn.Linear(D_in, D_out).cuda().half() optimizer = torch.optim.SGD(model.parameters(), lr=1e-3) optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0) ... checkpoint = torch.load("saved.pth") model.load_state_dict(checkpoint['model']) optimizer.load_state_dict(checkpoint['optimizer']) """ # I think it should actually be ok to reload the optimizer before the model. self.dynamic_loss_scale = state_dict['dynamic_loss_scale'] self.cur_scale = state_dict['cur_scale'] self.cur_iter = state_dict['cur_iter'] if state_dict['dynamic_loss_scale']: self.last_overflow_iter = state_dict['last_overflow_iter'] self.scale_factor = state_dict['scale_factor'] self.scale_window = state_dict['scale_window'] self.optimizer.load_state_dict(state_dict['optimizer_state_dict']) # At this point, the optimizer's references to the model's fp32 parameters are up to date. # The optimizer's hyperparameters and internal buffers are also up to date. # However, the fp32 master copies of the model's fp16 params stored by the optimizer are still # out of date. There are two options. # 1: Refresh the master params from the model's fp16 params. # This requires less storage but incurs precision loss. # 2: Save and restore the fp32 master copies separately. # We choose option 2. # # Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device # of their associated parameters, because it's possible those buffers might not exist yet in # the current optimizer instance. In our case, as long as the current FP16_Optimizer has been # constructed in the same way as the one whose state_dict we are loading, the same master params # are guaranteed to exist, so we can just copy_() from the saved master params. for current, saved in zip(self.fp32_groups_flat, state_dict['fp32_groups_flat']): current.data.copy_(saved.data)
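
# A hedged end-to-end sketch of the training loop this wrapper is designed for,
# mirroring the class docstring above. Assumes apex is installed with the fused
# CUDA kernels and a CUDA device is available; model and data shapes are placeholders.
import torch
from apex.optimizers import FusedAdam, FP16_Optimizer

model = torch.nn.Linear(256, 10).cuda().half()          # all model params fp16, as assumed above
optimizer = FP16_Optimizer(FusedAdam(model.parameters(), lr=1e-3),
                           dynamic_loss_scale=True)     # or static_loss_scale=128.0

for _ in range(10):
    x = torch.randn(32, 256, device='cuda', dtype=torch.half)
    target = torch.randint(0, 10, (32,), device='cuda')
    loss = torch.nn.functional.cross_entropy(model(x).float(), target)
    optimizer.zero_grad()
    optimizer.backward(loss)   # replaces loss.backward(): scales the fp32 loss
    optimizer.step()           # checks overflow, adjusts the scale, runs the fused update

# Checkpointing goes through the wrapper, not the inner FusedAdam:
checkpoint = {'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint, "saved.pth")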
from .weight_norm import WeightNorm from .reparameterization import Reparameterization def apply_weight_norm(module, name='', dim=0, hook_child=True): """ Applies weight normalization to a parameter in the given module. If no parameter is provided, applies weight normalization to all parameters in model (except 1-d vectors and scalars). .. math:: \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} Weight normalization is a reparameterization that decouples the magnitude of a weight tensor from its direction. This replaces the parameter specified by `name` (e.g. "weight") with two parameters: one specifying the magnitude (e.g. "weight_g") and one specifying the direction (e.g. "weight_v"). Weight normalization is implemented via a hook that recomputes the weight tensor from the magnitude and direction before every :meth:`~Module.forward` call. By default, with `dim=0`, the norm is computed independently per output channel/plane. To compute a norm over the entire weight tensor, use `dim=None`. See https://arxiv.org/abs/1602.07868 Args: module (nn.Module): containing module name (str, optional): name of weight parameter dim (int, optional): dimension over which to compute the norm hook_child (boolean, optional): adds reparameterization hook to direct parent of the parameters. If False, it's added to `module` instead. Default: True Returns: The original module with the weight norm hook Example:: >>> m = apply_weight_norm(nn.Linear(20, 40), name='weight') Linear (20 -> 40) >>> m.weight_g.size() torch.Size([40, 1]) >>> m.weight_v.size() torch.Size([40, 20]) """ return apply_reparameterization(module, reparameterization=WeightNorm, hook_child=hook_child, name=name, dim=dim) def remove_weight_norm(module, name='', remove_all=False): """ Removes the weight normalization reparameterization of a parameter from a module. If no parameter is supplied then all weight norm parameterizations are removed. Args: module (nn.Module): containing module name (str, optional): name of weight parameter Example: >>> m = apply_weight_norm(nn.Linear(20, 40)) >>> remove_weight_norm(m) """ return remove_reparameterization(module, reparameterization=WeightNorm, name=name, remove_all=remove_all) def apply_reparameterization(module, reparameterization=None, name='', dim=0, hook_child=True): """ Applies a given weight reparameterization (such as weight normalization) to a parameter in the given module. If no parameter is given, applies the reparameterization to all parameters in model (except 1-d vectors and scalars). Args: module (nn.Module): containing module reparameterization (Reparameterization): reparamaterization class to apply name (str, optional): name of weight parameter dim (int, optional): dimension over which to perform reparameterization op hook_child (boolean, optional): adds reparameterization hook to direct parent of the parameters. If False, it's added to `module` instead. Default: True Returns: The original module with the reparameterization hook Example:: >>> m = apply_reparameterization(nn.Linear(20, 40), WeightNorm) Linear (20 -> 40) """ assert reparameterization is not None if name != '': Reparameterization.apply(module, name, dim, reparameterization, hook_child) else: names = list(module.state_dict().keys()) for name in names: apply_reparameterization(module, reparameterization, name, dim, hook_child) return module def remove_reparameterization(module, reparameterization=Reparameterization, name='', remove_all=False): """ Removes the given reparameterization of a parameter from a module. 
If no parameter is supplied then all reparameterizations are removed. Args: module (nn.Module): containing module reparameterization (Reparameterization): reparamaterization class to apply name (str, optional): name of weight parameter remove_all (bool, optional): if True, remove all reparamaterizations of given type. Default: False Example: >>> m = apply_reparameterization(nn.Linear(20, 40),WeightNorm) >>> remove_reparameterization(m) """ if name != '' or remove_all: to_remove = [] for k, hook in module._forward_pre_hooks.items(): if isinstance(hook, reparameterization) and (hook.name == name or remove_all): hook.remove(module) to_remove.append(k) if len(to_remove) > 0: for k in to_remove: del module._forward_pre_hooks[k] return module if not remove_all: raise ValueError("reparameterization of '{}' not found in {}" .format(name, module)) else: modules = [module]+[x for x in module.modules()] for m in modules: remove_reparameterization(m, reparameterization=reparameterization, remove_all=True) return module
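
# A brief sketch of the apply/remove round trip documented above. Assumes apex
# was built with its CUDA extensions, since the WeightNorm implementation below
# dispatches to a fused CUDA kernel whenever the weight is recomputed.
import torch
import torch.nn as nn

m = nn.Linear(20, 40).cuda()
m = apply_weight_norm(m, name='weight')      # 'weight' is replaced by 'weight_g' / 'weight_v'
print(m.weight_g.size(), m.weight_v.size())  # torch.Size([40, 1]) torch.Size([40, 20])

y = m(torch.randn(8, 20, device='cuda'))     # forward pre-hook recomputes m.weight

m = remove_weight_norm(m)                    # restores a single 'weight' Parameter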
import torch from torch.nn.parameter import Parameter from ..fp16_utils import Fused_Weight_Norm import time from .reparameterization import Reparameterization def _norm(p, dim): """Computes the norm over all dimensions except dim""" if dim is None: return p.norm() elif dim == 0: output_size = (p.size(0),) + (1,) * (p.dim() - 1) return p.contiguous().view(p.size(0), -1).norm(dim=1).view(*output_size) elif dim == p.dim() - 1: output_size = (1,) * (p.dim() - 1) + (p.size(-1),) return p.contiguous().view(-1, p.size(-1)).norm(dim=0).view(*output_size) return _norm(p.transpose(0, dim), 0).transpose(0, dim) HALF_TYPES = (torch.cuda.HalfTensor, torch.HalfTensor) class WeightNorm(Reparameterization): """ Weight normalization is a reparameterization that decouples the magnitude of a weight tensor from its direction. This replaces the parameter specified by `name` (e.g. "weight") with two parameters: one specifying the magnitude (e.g. "weight_g") and one specifying the direction (e.g. "weight_v"). Weight normalization is implemented via a hook that recomputes the weight tensor from the magnitude and direction before every :meth:`~Module.forward` call. .. math:: \mathbf{w} = g \dfrac{\mathbf{v}}{\|\mathbf{v}\|} By default, with `dim=0`, the norm is computed independently per output channel/plane. To compute a norm over the entire weight tensor, use `dim=None`. """ def compute_weight(self, module=None, name=None): """ Computes weight normalized weight value to assign value to module attribute with name `name`. Arguments: module (nn.Module): module with weight we'd like to reparameterize Returns: w (Tensor): Tensor object containing value of reparameterized weight """ if module is None: module = self.module if name is None: name = self.name module, name = Reparameterization.get_module_and_name(module, name) g = getattr(module, name + '_g') v = getattr(module, name + '_v') fused_weight_norm = Fused_Weight_Norm.apply v = v.contiguous() w = fused_weight_norm(v, g, self.dim) return w def reparameterize(self, name, weight, dim): """ Creates Parameters v and gto be used for weight normalization and creates names that for attributes for the module these Parameters will correspond to. The parameters will be registered according to the names provided. Arguments: module (nn.Module): module with weight we'd like to reparameterize name (str, optional): name of weight parameter dim (int, optional): dimension over which to compute parameterization Returns: names (list, str): names of Parameters to be used for reparameterization params (list, Parameter): Parameters to be used for reparameterization """ names = [name + '_g', name + '_v'] params = [Parameter(_norm(weight, dim).data), Parameter(weight.data)] return names, params
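
# For clarity, a plain-PyTorch reference of the value compute_weight() produces
# (a sketch for checking on CPU; the class itself uses the fused CUDA kernel).
import torch

def reference_weight_norm(v, g, dim=0):
    # _norm(v, 0) has shape (out_features, 1, ...), so broadcasting applies one
    # scale per output channel, matching the w = g * v / ||v|| formula above.
    return v * (g / _norm(v, dim))

v = torch.randn(40, 20)
g = _norm(v, 0)                      # the initialization used by reparameterize()
w = reference_weight_norm(v, g, 0)
assert torch.allclose(w, v)          # with g = ||v||, the reparameterization is the identity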
import torch from torch.nn.parameter import Parameter import sys class Reparameterization(object): """ Class interface for performing weight reparameterizations Arguments: name (str): name of weight parameter dim (int): dimension over which to compute the norm module (nn.Module): parent module to which param `name` is registered to retain_forward (bool, optional): if False deletes weight on call to module.backward. Used to avoid memory leaks with DataParallel Default: True Attributes: reparameterization_names (list, str): contains names of all parameters needed to compute reparameterization. backward_hook_key (int): torch.utils.hooks.RemovableHandle.id for hook used in module backward pass. """ def __init__(self, name, dim, module, retain_forward=True): self.name = name self.dim = dim self.evaluated = False self.retain_forward = retain_forward self.reparameterization_names = [] self.backward_hook_key = None self.module = module def compute_weight(self, module=None, name=None): """ Computes reparameterized weight value to assign value to module attribute with name `name`. See WeightNorm class for example. Arguments: module (nn.Module): module with weight we'd like to reparameterize Returns: w (Tensor): Tensor object containing value of reparameterized weight """ raise NotImplementedError def reparameterize(self, name, weight, dim): """ Creates Parameters to be used for reparameterization and creates names that for attributes for the module these Parameters will correspond to. The parameters will be registered according to the names provided. See WeightNorm class for example. Arguments: module (nn.Module): module with weight we'd like to reparameterize name (str, optional): name of weight parameter dim (int, optional): dimension over which to compute parameterization Returns: names (list, str): names of Parameters to be used for reparameterization params (list, Parameter): Parameters to be used for reparameterization """ raise NotImplementedError @staticmethod def apply(module, name, dim, reparameterization=None, hook_child=True): """ Applies reparametrization to module's `name` parameter and modifies instance attributes as appropriate. `hook_child` adds reparameterization hook to direct parent of the parameters. If False, it's added to `module` instead. 
""" if reparameterization is None: reparameterization = Reparameterization module2use, name2use = Reparameterization.get_module_and_name(module, name) # does not work on sparse if name2use is None or isinstance(module2use, (torch.nn.Embedding, torch.nn.EmbeddingBag)): return if hook_child: fn = reparameterization(name2use, dim, module2use) else: fn = reparameterization(name, dim, module) weight = getattr(module2use, name2use) if weight.dim() <= 1: return # remove weight from parameter list del module2use._parameters[name2use] # add parameters of reparameterization of parameter to module names, params = fn.reparameterize(name2use, weight, dim) for n, p in zip(names, params): module2use.register_parameter(n, p) # add parameters to reparameterization so they can be removed later fn.reparameterization_names = names setattr(module2use, name2use, None) hook_module = module2use if not hook_child: hook_module = module # recompute weight before every forward() hook_module.register_forward_pre_hook(fn) # remove weight during backward handle = hook_module.register_backward_hook(fn.backward_hook) # get hook key so we can delete it later fn.backward_hook_key = handle.id return fn @staticmethod def get_module_and_name(module, name): """ recursively fetches (possible) child module and name of weight to be reparameterized """ name2use = None module2use = None names = name.split('.') if len(names) == 1 and names[0] != '': name2use = names[0] module2use = module elif len(names) > 1: module2use = module name2use = names[0] for i in range(len(names)-1): module2use = getattr(module2use, name2use) name2use = names[i+1] return module2use, name2use def get_params(self, module): """gets params of reparameterization based on known attribute names""" return [getattr(module, n) for n in self.reparameterization_names] def remove(self, module): """removes reparameterization and backward hook (does not remove forward hook)""" module2use, name2use = Reparameterization.get_module_and_name(module, self.name) for p in self.get_params(module2use): p.requires_grad = False weight = self.compute_weight(module2use, name2use) delattr(module2use, name2use) for n in self.reparameterization_names: del module2use._parameters[n] module2use.register_parameter(name2use, Parameter(weight.data)) del module._backward_hooks[self.backward_hook_key] def __call__(self, module, inputs): """callable hook for forward pass""" module2use, name2use = Reparameterization.get_module_and_name(module, self.name) _w = getattr(module2use, name2use) if not self.evaluated or _w is None: setattr(module2use, name2use, self.compute_weight(module2use, name2use)) self.evaluated = True def backward_hook(self, module, grad_input, grad_output): """callable hook for backward pass""" module2use, name2use = Reparameterization.get_module_and_name(module, self.name) wn = getattr(module2use, name2use) self.evaluated = False
import types from ..fp16_utils import master_params_to_model_params from ..multi_tensor_apply import multi_tensor_applier from ._amp_state import maybe_print import torch class AmpOptimizerState(object): def __init__(self): pass def lazy_init_with_master_weights(self): stash = self._amp_stash stash.fp16_groups = [] stash.fp32_from_fp16_groups = [] stash.fp32_from_fp32_groups = [] for i, param_group in enumerate(self.param_groups): # maybe_print("FP16_Optimizer processing param group {}:".format(i)) fp16_params_this_group = [] fp32_params_this_group = [] fp32_from_fp16_params_this_group = [] for i, param in enumerate(param_group['params']): if param.requires_grad: if param.type() == 'torch.cuda.HalfTensor': # maybe_print("FP16_Optimizer received torch.cuda.HalfTensor with {}" # .format(param.size())) fp16_params_this_group.append(param) master_param = param.detach().clone().float() master_param.requires_grad = True param_group['params'][i] = master_param fp32_from_fp16_params_this_group.append(master_param) # Reset existing state dict key to the new master param. # We still need to recast per-param state tensors, if any, to FP32. if param in self.state: self.state[master_param] = self.state.pop(param) elif param.type() == 'torch.cuda.FloatTensor': # maybe_print("FP16_Optimizer received torch.cuda.FloatTensor with {}" # .format(param.size())) fp32_params_this_group.append(param) param_group['params'][i] = param else: raise TypeError("Optimizer's parameters must be either " "torch.cuda.FloatTensor or torch.cuda.HalfTensor. " "Received {}".format(param.type())) stash.fp16_groups.append(fp16_params_this_group) stash.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group) stash.fp32_from_fp32_groups.append(fp32_params_this_group) stash.all_fp16_params = [] for group in stash.fp16_groups: stash.all_fp16_params += group stash.all_fp32_from_fp16_params = [] for group in stash.fp32_from_fp16_groups: stash.all_fp32_from_fp16_params += group stash.all_fp32_from_fp32_params = [] for group in stash.fp32_from_fp32_groups: stash.all_fp32_from_fp32_params += group # stash.all_fp32_from_fp16_grad_stash = [None for _ in stash.all_fp32_from_fp16_params] stash.all_fp32_from_fp32_grad_stash = [None for _ in stash.all_fp32_from_fp32_params] for param in stash.all_fp32_from_fp16_params: param.grad = None for param in stash.all_fp32_from_fp32_params: param.grad = None # Leverage state_dict() and load_state_dict() to recast preexisting per-param state tensors self.load_state_dict(self.state_dict()) def prepare_backward_with_master_weights(self): stash = self._amp_stash if not stash.lazy_init_called: self._lazy_init_maybe_master_weights() stash.lazy_init_called = True for i, param in enumerate(stash.all_fp16_params): # Set up to leverage grad copy elision: param.grad = None # for i, param in enumerate(stash.all_fp32_from_fp16_params): # stash.all_fp32_from_fp16_grad_stash[i] = param.grad for i, param in enumerate(stash.all_fp32_from_fp32_params): stash.all_fp32_from_fp32_grad_stash[i] = param.grad # Set up to leverage grad copy elision: param.grad = None def post_backward_with_master_weights(self, scaler): stash = self._amp_stash # This is a lot of python overhead... 
fp16_grads_needing_unscale = [] new_fp32_grads = [] fp16_grads_needing_unscale_with_stash = [] preexisting_fp32_grads = [] for fp16_param, fp32_param in zip(stash.all_fp16_params, stash.all_fp32_from_fp16_params): if fp16_param.grad is None and fp32_param.grad is not None: continue elif fp16_param.grad is not None and fp32_param.grad is None: fp32_param.grad = torch.empty_like(fp32_param) fp16_grads_needing_unscale.append(fp16_param.grad) new_fp32_grads.append(fp32_param.grad) elif fp16_param.grad is not None and fp32_param.grad is not None: fp16_grads_needing_unscale_with_stash.append(fp16_param.grad) preexisting_fp32_grads.append(fp32_param.grad) else: # fp16_param.grad is None and fp32_param.grad is None: continue if len(fp16_grads_needing_unscale) > 0: scaler.unscale( fp16_grads_needing_unscale, new_fp32_grads, scaler.loss_scale(), models_are_masters=False) if len(fp16_grads_needing_unscale_with_stash) > 0: scaler.unscale_with_stashed( fp16_grads_needing_unscale_with_stash, preexisting_fp32_grads, preexisting_fp32_grads) # fp32 params can be treated as they would be in the "no_master_weights" case. grads_needing_unscale = [] grads_needing_unscale_with_stash = [] stashed = [] for param, stashed_grad in zip(stash.all_fp32_from_fp32_params, stash.all_fp32_from_fp32_grad_stash): if param.grad is None and stashed_grad is not None: param.grad = stashed_grad elif param.grad is not None and stashed_grad is None: grads_needing_unscale.append(param.grad) elif param.grad is not None and stashed_grad is not None: grads_needing_unscale_with_stash.append(param.grad) stashed.append(stashed_grad) else: # param.grad is None and stashed_grad is None: continue if len(grads_needing_unscale) > 0: scaler.unscale( grads_needing_unscale, grads_needing_unscale, scaler.loss_scale(), models_are_masters=True) if len(grads_needing_unscale_with_stash) > 0: scaler.unscale_with_stashed( grads_needing_unscale_with_stash, stashed, grads_needing_unscale_with_stash) # Clear the stash. for i in range(len(stash.all_fp32_from_fp32_grad_stash)): stash.all_fp32_from_fp32_grad_stash[i] = None def lazy_init_no_master_weights(self): stash = self._amp_stash stash.all_fp16_params = [] stash.all_fp32_params = [] for i, param_group in enumerate(self.param_groups): for i, param in enumerate(param_group['params']): if param.type() == 'torch.cuda.HalfTensor': stash.all_fp16_params.append(param) elif param.type() == 'torch.cuda.FloatTensor': stash.all_fp32_params.append(param) else: raise TypeError("Optimizer's parameters must be either " "torch.cuda.FloatTensor or torch.cuda.HalfTensor. " "Received {}".format(param.type())) stash.all_fp16_grad_stash = [None for _ in stash.all_fp16_params] stash.all_fp32_grad_stash = [None for _ in stash.all_fp32_params] def prepare_backward_no_master_weights(self): stash = self._amp_stash if not stash.lazy_init_called: self._lazy_init_maybe_master_weights() stash.lazy_init_called = True for i, param in enumerate(stash.all_fp16_params): stash.all_fp16_grad_stash[i] = param.grad # Set up to leverage grad copy elision: param.grad = None for i, param in enumerate(stash.all_fp32_params): stash.all_fp32_grad_stash[i] = param.grad # Set up to leverage grad copy elision: param.grad = None def post_backward_no_master_weights(self, scaler): stash = self._amp_stash split_types = ((stash.all_fp16_params, stash.all_fp16_grad_stash), (stash.all_fp32_params, stash.all_fp32_grad_stash)) for params, stashed_grads in split_types: # This is a lot of python overhead... 
grads_needing_unscale = [] grads_needing_unscale_with_stash = [] stashed = [] for param, stashed_grad in zip(params, stashed_grads): if param.grad is None and stashed_grad is not None: param.grad = stashed_grad elif param.grad is not None and stashed_grad is None: grads_needing_unscale.append(param.grad) elif param.grad is not None and stashed_grad is not None: grads_needing_unscale_with_stash.append(param.grad) stashed.append(stashed_grad) else: # param.grad is None and stashed_grad is None continue if len(grads_needing_unscale) > 0: scaler.unscale( grads_needing_unscale, grads_needing_unscale, scaler.loss_scale(), models_are_masters=True) if len(grads_needing_unscale_with_stash) > 0: scaler.unscale_with_stashed( grads_needing_unscale_with_stash, stashed, grads_needing_unscale_with_stash) # Clear the stash. for i in range(len(stashed_grads)): stashed_grads[i] = None def _master_params_to_model_params(self): stash = self._amp_stash if multi_tensor_applier.available: if len(stash.all_fp16_params) > 0: multi_tensor_applier( stash.multi_tensor_scale, stash.dummy_overflow_buf, [stash.all_fp32_from_fp16_params, stash.all_fp16_params], 1.0) else: for fp16_group, fp32_from_fp16_group in zip(stash.fp16_groups, stash.fp32_from_fp16_groups): master_params_to_model_params(fp16_group, fp32_from_fp16_group) def _process_optimizer(optimizer, properties): if hasattr(optimizer, "_amp_stash"): raise RuntimeError("A given optimizer should only be passed through amp.initialize once.") else: optimizer._amp_stash = AmpOptimizerState() optimizer._amp_stash.lazy_init_called = False optimizer._amp_stash.already_patched = False optimizer._amp_stash.params_have_scaled_gradients = False for name in ("_lazy_init_maybe_master_weights", "_master_params_to_model_params", "_prepare_amp_backward", "_post_amp_backward"): if hasattr(optimizer, name): raise RuntimeError("Incoming optimizer already has {} defined.".format(name)) # TODO: Centralize exposure and import error checking for the C backend. if multi_tensor_applier.available: import amp_C optimizer._amp_stash.multi_tensor_scale = amp_C.multi_tensor_scale optimizer._amp_stash.dummy_overflow_buf = torch.cuda.IntTensor([0]); if properties.master_weights: optimizer._lazy_init_maybe_master_weights = types.MethodType( lazy_init_with_master_weights, optimizer) optimizer._master_params_to_model_params = types.MethodType( _master_params_to_model_params, optimizer) old_step = optimizer.step def new_step(self, closure=None): if closure is not None: raise RuntimeError("Currently, Amp does not support closure use with optimizers.") retval = old_step() self._master_params_to_model_params() # Clear the master grads that wouldn't be zeroed by model.zero_grad() for param in self._amp_stash.all_fp32_from_fp16_params: param.grad = None return retval optimizer.step = types.MethodType(new_step, optimizer) old_zero_grad = optimizer.zero_grad def new_zero_grad(self): stash = self._amp_stash if not stash.lazy_init_called: self._lazy_init_maybe_master_weights() stash.lazy_init_called = True # Zero the model grads. 
for param in stash.all_fp16_params: if param.grad is not None: param.grad.detach_() param.grad.zero_() for param in stash.all_fp32_from_fp32_params: if param.grad is not None: param.grad.detach_() param.grad.zero_() # Clear the master grads that are independent of model grads for param in self._amp_stash.all_fp32_from_fp16_params: param.grad = None optimizer.zero_grad = types.MethodType(new_zero_grad, optimizer) optimizer._prepare_amp_backward = types.MethodType( prepare_backward_with_master_weights, optimizer) optimizer._post_amp_backward = types.MethodType( post_backward_with_master_weights, optimizer) else: optimizer._lazy_init_maybe_master_weights = types.MethodType( lazy_init_no_master_weights, optimizer) optimizer._prepare_amp_backward = types.MethodType( prepare_backward_no_master_weights, optimizer) optimizer._post_amp_backward = types.MethodType( post_backward_no_master_weights, optimizer) old_add_param_group = optimizer.add_param_group def new_add_param_group(self, new_group): stash = self._amp_stash if not stash.lazy_init_called: self._lazy_init_maybe_master_weights() stash.lazy_init_called = True assert isinstance(new_group, dict), "param group must be a dict" new_params = new_group['params'] if isinstance(new_params, torch.Tensor): new_group['params'] = [new_params] elif isinstance(new_params, set): raise TypeError('optimizer parameters need to be organized in ordered collections, but ' 'the ordering of tensors in sets will change between runs. Please use a list instead.') else: new_group['params'] = list(new_params) if properties.master_weights: # Mutate new_group in-place to use FP32 master params fp16_params_this_group = [] fp32_params_this_group = [] fp32_from_fp16_params_this_group = [] for i, param in enumerate(new_group['params']): if param.requires_grad: if param.type() == 'torch.cuda.HalfTensor': fp16_params_this_group.append(param) master_param = param.detach().clone().float() master_param.requires_grad = True new_group['params'][i] = master_param fp32_from_fp16_params_this_group.append(master_param) elif param.type() == 'torch.cuda.FloatTensor': fp32_params_this_group.append(param) new_group['params'][i] = param else: raise TypeError("Optimizer's parameters must be either " "torch.cuda.FloatTensor or torch.cuda.HalfTensor. " "Received {}".format(param.type())) stash.fp16_groups.append(fp16_params_this_group) stash.fp32_from_fp16_groups.append(fp32_from_fp16_params_this_group) stash.fp32_from_fp32_groups.append(fp32_params_this_group) stash.all_fp16_params += fp16_params_this_group stash.all_fp32_from_fp16_params += fp32_from_fp16_params_this_group stash.all_fp32_from_fp32_params += fp32_params_this_group # stash.all_fp32_from_fp16_grad_stash = [None for _ in stash.all_fp32_from_fp16_params] stash.all_fp32_from_fp32_grad_stash += [None for _ in fp32_params_this_group] # It should be ok to let params be added with existing .grad attributes. # for param in fp16_params_this_group: # param.grad = None # for param in fp32_from_fp16_params_this_group: # param.grad = None # for param in stash.fp32_params_this_group: # param.grad = None else: for param in new_group['params']: if param.type() == 'torch.cuda.HalfTensor': stash.all_fp16_params.append(param) stash.all_fp16_grad_stash.append(None) elif param.type() == 'torch.cuda.FloatTensor': stash.all_fp32_params.append(param) stash.all_fp32_grad_stash.append(None) else: raise TypeError("Optimizer's parameters must be either " "torch.cuda.FloatTensor or torch.cuda.HalfTensor. 
" "Received {}".format(param.type())) old_add_param_group(new_group) optimizer.add_param_group = types.MethodType(new_add_param_group, optimizer) return optimizer
import torch

# True for post-0.4, when Variables/Tensors merged.
def variable_is_tensor():
    v = torch.autograd.Variable()
    return isinstance(v, torch.Tensor)

def tensor_is_variable():
    x = torch.Tensor()
    return type(x) == torch.autograd.Variable

# False for post-0.4
def tensor_is_float_tensor():
    x = torch.Tensor()
    return type(x) == torch.FloatTensor

# Akin to `torch.is_tensor`, but returns True for Variable
# objects in pre-0.4.
def is_tensor_like(x):
    return torch.is_tensor(x) or isinstance(x, torch.autograd.Variable)

# Wraps `torch.is_floating_point` if present, otherwise checks
# the suffix of `x.type()`.
def is_floating_point(x):
    if hasattr(torch, 'is_floating_point'):
        return torch.is_floating_point(x)
    try:
        torch_type = x.type()
        return torch_type.endswith('FloatTensor') or \
            torch_type.endswith('HalfTensor') or \
            torch_type.endswith('DoubleTensor')
    except AttributeError:
        return False

def scalar_python_val(x):
    if hasattr(x, 'item'):
        return x.item()
    else:
        if isinstance(x, torch.autograd.Variable):
            return x.data[0]
        else:
            return x[0]
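
# A short sketch exercising the shims above on a post-0.4 PyTorch (expected
# results noted in comments; on pre-0.4 the fallback branches are taken instead).
import torch

x = torch.randn(3)
print(is_tensor_like(x))                                     # True
print(is_floating_point(x))                                  # True
print(is_floating_point(torch.zeros(3, dtype=torch.long)))   # False
print(scalar_python_val(x.sum()))                            # a plain Python float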
import contextlib import warnings import torch from . import utils from .opt import OptimWrapper from .scaler import LossScaler from ._amp_state import _amp_state, master_params, maybe_print from ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general from ..optimizers import FP16_Optimizer as FP16_Optimizer_for_fused from ..parallel.LARC import LARC # There's no reason to expose the notion of a "handle". Everything can happen through amp.* calls. @contextlib.contextmanager def scale_loss(loss, optimizers, loss_id=0, model=None, delay_unscale=False, delay_overflow_check=False): """ On context manager entrance, creates ``scaled_loss = (loss.float())*current loss scale``. ``scaled_loss`` is yielded so that the user can call ``scaled_loss.backward()``:: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() On context manager exit (if ``delay_unscale=False``), the gradients are checked for infs/NaNs and unscaled, so that ``optimizer.step()`` can be called. .. note:: If Amp is using explicit FP32 master params (which is the default for ``opt_level=O2``, and can also be manually enabled by supplying ``master_weights=True`` to ``amp.initialize``) any FP16 gradients are copied to FP32 master gradients before being unscaled. ``optimizer.step()`` will then apply the unscaled master gradients to the master params. .. warning:: If Amp is using explicit FP32 master params, only the FP32 master gradients will be unscaled. The direct ``.grad`` attributes of any FP16 model params will remain scaled after context manager exit. This subtlety affects gradient clipping. See "Gradient clipping" under `Advanced Amp Usage`_ for best practices. Args: loss(Tensor): Typically a scalar Tensor. The ``scaled_loss`` that the context manager yields is simply ``loss.float()*loss_scale``, so in principle ``loss`` could have more than one element, as long as you call ``backward()`` on ``scaled_loss`` appropriately within the context manager body. optimizers: All optimizer(s) for which the current backward pass is creating gradients. Must be an optimizer or list of optimizers returned from an earlier call to ``amp.initialize``. For example use with multiple optimizers, see "Multiple models/optimizers/losses" under `Advanced Amp Usage`_. loss_id(int, optional, default=0): When used in conjunction with the ``num_losses`` argument to ``amp.initialize``, enables Amp to use a different loss scale per loss. ``loss_id`` must be an integer between 0 and ``num_losses`` that tells Amp which loss is being used for the current backward pass. See "Multiple models/optimizers/losses" under `Advanced Amp Usage`_ for examples. If ``loss_id`` is left unspecified, Amp will use the default global loss scaler for this backward pass. model(torch.nn.Module, optional, default=None): Currently unused, reserved to enable future optimizations. delay_unscale(bool, optional, default=False): ``delay_unscale`` is never necessary, and the default value of ``False`` is strongly recommended. If ``True``, Amp will not unscale the gradients or perform model->master gradient copies on context manager exit. ``delay_unscale=True`` is a minor ninja performance optimization and can result in weird gotchas (especially with multiple models/optimizers/losses), so only use it if you know what you're doing. "Gradient accumulation across iterations" under `Advanced Amp Usage`_ illustrates a situation where this CAN (but does not need to) be used. .. 
warning:: If ``delay_unscale`` is ``True`` for a given backward pass, ``optimizer.step()`` cannot be called yet after context manager exit, and must wait for another, later backward context manager invocation with ``delay_unscale`` left to False. .. _`Advanced Amp Usage`: https://nvidia.github.io/apex/advanced.html """ if not hasattr(_amp_state, "opt_properties"): raise RuntimeError("Invoked 'with amp.scale_loss`, but internal Amp state has not been initialized. " "model, optimizer = amp.initialize(model, optimizer, opt_level=...) must be called " "before `with amp.scale_loss`.") if not _amp_state.opt_properties.enabled: yield loss return if isinstance(optimizers, torch.optim.Optimizer) or isinstance(optimizers, LARC): optimizers = [optimizers] # this is what happens when i have to support tools from different sources under the same API... # TODO: Rewrite FusedAdam to use multi-tensor apply and the same loss scaler. if isinstance(optimizers, FP16_Optimizer_for_fused): loss_scale = optimizers.cur_scale else: loss_scaler = _amp_state.loss_scalers[loss_id] loss_scale = loss_scaler.loss_scale() if ((not _amp_state.opt_properties.master_weights) and (not loss_scaler.dynamic) and loss_scale == 1.0): yield loss.float() # Needing to drop the cache here as well is an ugly gotcha. # But for now I think it's necessary to short-circuit. # Probably ok to skip this if not delay_unscale if _amp_state.opt_properties.patch_torch_functions: _amp_state.handle._clear_cache() return if not delay_unscale: if isinstance(optimizers, list): for optimizer in optimizers: if not optimizer._amp_stash.params_have_scaled_gradients: optimizer._prepare_amp_backward() yield (loss.float())*loss_scale if delay_unscale: for optimizer in optimizers: optimizer._amp_stash.params_have_scaled_gradients = True else: # FusedAdam and FusedSGD will take care of unscaling as part of their step() methods. if not isinstance(optimizers, FP16_Optimizer_for_fused): loss_scaler.clear_overflow_state() for optimizer in optimizers: optimizer._post_amp_backward(loss_scaler) optimizer._amp_stash.params_have_scaled_gradients = False # For future fused optimizers that enable sync-free dynamic loss scaling, # should_skip will always be False. should_skip = False if delay_overflow_check else loss_scaler.update_scale() if should_skip: for optimizer in optimizers: if not optimizer._amp_stash.already_patched: # Close on loss_scaler and loss_id as well, to be safe. Probably not # necessary because amp.scale_loss is already creating a temporary scope. def patch_step(opt, loss_scaler, loss_id): opt_step = opt.step def skip_step(closure=None): if closure is not None: raise RuntimeError("Currently, Amp does not support closure use with optimizers.") maybe_print(("Gradient overflow. 
Skipping step, loss scaler " + "{} reducing loss scale to {}").format(loss_id, loss_scaler.loss_scale())) if hasattr(opt._amp_stash, "all_fp32_from_fp16_params"): # Clear the master grads that wouldn't be zeroed by model.zero_grad() for param in opt._amp_stash.all_fp32_from_fp16_params: param.grad = None opt.step = opt_step opt._amp_stash.already_patched = False return skip_step optimizer.step = patch_step(optimizer, loss_scaler, loss_id) optimizer._amp_stash.already_patched = True # Probably ok to skip this if not delay_unscale if _amp_state.opt_properties.patch_torch_functions: _amp_state.handle._clear_cache() # Free function version of AmpHandle.disable_casts, another step on the # path to removing the concept of "AmpHandle" @contextlib.contextmanager def disable_casts(): _amp_state.handle._is_active = False yield _amp_state.handle._is_active = True class AmpHandle(object): def __init__(self, loss_scale="dynamic", enable_caching=True, verbose=False): self._enable_caching = enable_caching self._verbose = verbose self._cache = dict() self._default_scaler = LossScaler(loss_scale) self._is_active = True self._all_wrappers = [] def is_active(self): return self._is_active @contextlib.contextmanager def _disable_casts(self): self._is_active = False yield self._is_active = True def wrap_optimizer(self, optimizer, num_loss=1): self._default_scaler = None return OptimWrapper(optimizer, self, num_loss) @contextlib.contextmanager def scale_loss(self, loss, optimizer): raise RuntimeError("The old Amp API is no longer supported. Please move to the new API, " "documented here: https://nvidia.github.io/apex/amp.html. Transition guide: " "https://nvidia.github.io/apex/amp.html#transition-guide-for-old-api-users") if not self.is_active(): yield loss return if self._default_scaler is None: raise RuntimeError( 'After calling `handle.wrap_optimizer()`, you must explicitly ' + 'use `optimizer.scale_loss(loss)`.') # TODO: this code block is duplicated here and `opt.py`. Unify. loss_scale = self._default_scaler.loss_scale() yield loss * loss_scale self._default_scaler.clear_overflow_state() self._default_scaler.unscale( master_params(optimizer), master_params(optimizer), loss_scale) should_skip = self._default_scaler.update_scale() if should_skip: optimizer_step = optimizer.step def skip_step(): maybe_print('Gradient overflow, skipping update') optimizer.step = optimizer_step optimizer.step = skip_step self._clear_cache() def _clear_cache(self): self._cache.clear() # Experimental support for saving / restoring uncasted versions of functions def _save_func(self, mod, fn, func): self._all_wrappers.append((mod, fn, func)) def _deactivate(self): for mod, fn, func in self._all_wrappers: utils.set_func(mod, fn, func) self._all_wrappers = [] @property def has_cache(self): return self._enable_caching @property def cache(self): return self._cache def remove_cache(self, param): if self.has_cache and param in self.cache: del self.cache[param] @property def verbose(self): return self._verbose class NoOpHandle(object): def is_active(self): return False @contextlib.contextmanager def _disable_casts(self): yield def wrap_optimizer(self, optimizer, num_loss=1): return OptimWrapper(optimizer, self, num_loss) @contextlib.contextmanager def scale_loss(self, loss, optimizer): yield loss @property def has_cache(self): return False @property def verbose(self): return False def _clear_cache(self): pass def _deactivate(self): pass
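
# The call pattern scale_loss() is designed for, mirroring its docstring above.
# A hedged sketch assuming apex is installed and a CUDA device is available;
# the model, optimizer, and data are placeholders.
import torch
from apex import amp

model = torch.nn.Linear(64, 8).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

for _ in range(5):
    x = torch.randn(16, 64, device='cuda')
    target = torch.randint(0, 8, (16,), device='cuda')
    loss = torch.nn.functional.cross_entropy(model(x), target)
    optimizer.zero_grad()
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()   # gradients are unscaled (and overflow-checked) on exit
    optimizer.step()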
import torch from torch._six import string_classes import functools import numpy as np import warnings from ._amp_state import _amp_state, warn_or_err, container_abcs from .handle import disable_casts from .scaler import LossScaler from ._process_optimizer import _process_optimizer from apex.fp16_utils import convert_network from ..fp16_utils import FP16_Optimizer as FP16_Optimizer_general from ..optimizers import FP16_Optimizer as FP16_Optimizer_for_fused from ..optimizers import FusedAdam from ..parallel import DistributedDataParallel as apex_DDP from ..parallel.LARC import LARC def to_type(dtype, t): if isinstance(t, torch.Tensor): if not t.is_cuda: # This should not be a hard error, since it may be legitimate. warnings.warn("An input tensor was not cuda.") # GANs require this. # if t.requires_grad: # warn_or_err("input data requires grad. Since input data is not a model parameter,\n" # "its gradients will not be properly allreduced by DDP.") if t.is_floating_point(): return t.to(dtype) return t else: # Trust the user's custom batch type, that's all I can do here. return t.to(dtype) # Modified from torch.optim.optimizer.py. This is a bit more general than casted_args in utils.py. def applier(value, fn): if isinstance(value, torch.Tensor): return fn(value) elif isinstance(value, string_classes): return value elif isinstance(value, np.ndarray): return value elif hasattr(value, "to"): # Allow handling of custom batch classes return fn(value) elif isinstance(value, container_abcs.Mapping): return {applier(k, fn) : applier(v, fn) for k, v in value.items()} elif isinstance(value, container_abcs.Iterable): return type(value)(applier(v, fn) for v in value) else: # Do I want this to fire off even if someone chooses to pass something ordinary like # an int or float? May be more annoying than it's worth. # print("Warning: unrecognized type in applier. If your input data is a custom class, " # "provide it with a .to(dtype) method which converts its floating-point Tensors to dtype. " # "Amp will check for your custom to() and invoke it to cast the batch's " # "floating-point Tensors to the appropriate type. " # "Also, if your data is a custom class, it is your responsibility to ensure that " # "any Tensors you want to be cuda are already cuda." return value def check_models(models): for model in models: parallel_type = None if isinstance(model, torch.nn.parallel.DistributedDataParallel): parallel_type = "torch.nn.parallel.DistributedDataParallel" if isinstance(model, apex_DDP): parallel_type = "apex.parallel.DistributedDataParallel" if isinstance(model, torch.nn.parallel.DataParallel): parallel_type = "torch.nn.parallel.DataParallel" if parallel_type is not None: raise RuntimeError("Incoming model is an instance of {}. 
".format(parallel_type) + "Parallel wrappers should only be applied to the model(s) AFTER \n" "the model(s) have been returned from amp.initialize.") def check_params_fp32(models): for model in models: for name, param in model.named_parameters(): if param.is_floating_point(): if 'Half' in param.type(): warn_or_err("Found param {} with type {}, expected torch.cuda.FloatTensor.\n" "When using amp.initialize, you do not need to call .half() on your model\n" "before passing it, no matter what optimization level you choose.".format( name, param.type())) elif not param.is_cuda: warn_or_err("Found param {} with type {}, expected torch.cuda.FloatTensor.\n" "When using amp.initialize, you need to provide a model with parameters\n" "located on a CUDA device before passing it no matter what optimization level\n" "you chose. Use model.to('cuda') to use the default device.".format( name, param.type())) # Backward compatibility for PyTorch 0.4 if hasattr(model, 'named_buffers'): buf_iter = model.named_buffers() else: buf_iter = model._buffers for obj in buf_iter: if type(obj)==tuple: name, buf = obj else: name, buf = obj, buf_iter[obj] if buf.is_floating_point(): if 'Half' in buf.type(): warn_or_err("Found buffer {} with type {}, expected torch.cuda.FloatTensor.\n" "When using amp.initialize, you do not need to call .half() on your model\n" "before passing it, no matter what optimization level you choose.".format( name, buf.type())) elif not buf.is_cuda: warn_or_err("Found buffer {} with type {}, expected torch.cuda.FloatTensor.\n" "When using amp.initialize, you need to provide a model with buffers\n" "located on a CUDA device before passing it no matter what optimization level\n" "you chose. Use model.to('cuda') to use the default device.".format( name, buf.type())) def check_optimizers(optimizers): for optim in optimizers: bad_optim_type = None if isinstance(optim, FP16_Optimizer_general): bad_optim_type = "apex.fp16_utils.FP16_Optimizer" if isinstance(optim, FP16_Optimizer_for_fused): bad_optim_type = "apex.optimizers.FP16_Optimizer" if bad_optim_type is not None: raise RuntimeError("An incoming optimizer is an instance of {}. ".format(bad_optim_type) + "The optimizer(s) passed to amp.initialize() must be bare \n" "instances of either ordinary Pytorch optimizers, or Apex fused \n" "optimizers (currently just FusedAdam, but FusedSGD will be added \n" "soon). You should not manually wrap your optimizer in either \n" "apex.fp16_utils.FP16_Optimizer or apex.optimizers.FP16_Optimizer. \n" "amp.initialize will take care of that for you (if necessary) based \n" "on the specified opt_level (and optional overridden properties).") def wrap_fused_adam(optimizer, properties): msg = 'Currently, the usage of FusedAdam is restricted to '\ 'amp.initialize(..., opt_level="O2", keep_batchnorm_fp32=False, '\ 'loss_scale=float or "dynamic"). We are working on enabling more general usage.' 
assert properties.master_weights is True, msg assert properties.cast_model_type is torch.float16, msg assert (properties.keep_batchnorm_fp32 is False or properties.keep_batchnorm_fp32 is None), msg if properties.loss_scale == "dynamic": return FP16_Optimizer_for_fused(optimizer, dynamic_loss_scale=True) else: return FP16_Optimizer_for_fused(optimizer, static_loss_scale=properties.loss_scale) def _initialize(models, optimizers, properties, num_losses=1, cast_model_outputs=None): from apex.parallel import DistributedDataParallel as apex_DDP from .amp import init as amp_init optimizers_was_list = False if isinstance(optimizers, torch.optim.Optimizer) or isinstance(optimizers, LARC): optimizers = [optimizers] elif optimizers is None: optimizers = [] elif isinstance(optimizers, list): optimizers_was_list = True check_optimizers(optimizers) else: check_optimizers([optimizers]) raise TypeError("optimizers must be either a single optimizer or a list of optimizers.") if isinstance(models, torch.nn.Module): models_was_list = False models = [models] elif isinstance(models, list): models_was_list = True else: raise TypeError("models must be either a single model or a list of models.") check_models(models) if not _amp_state.allow_incoming_model_not_fp32: check_params_fp32(models) # In the future, when FP16_Optimizer can be deprecated and master weights can # become an attribute, remember to stash master weights before casting the model. if properties.cast_model_type: if properties.keep_batchnorm_fp32: for model in models: convert_network(model, properties.cast_model_type) else: for model in models: model.to(properties.cast_model_type) input_caster = functools.partial(to_type, properties.cast_model_type) if cast_model_outputs is not None: output_caster = functools.partial(to_type, cast_model_outputs) else: output_caster = functools.partial(to_type, torch.float32) for model in models: # Patch the forward method to cast incoming data to the correct type, and # outgoing data to float32, so "the user never needs to call .half()." # I like writing things explicitly more than decorators. def patch_forward(old_fwd): def new_fwd(*args, **kwargs): output = old_fwd(*applier(args, input_caster), **applier(kwargs, input_caster)) return applier(output, output_caster) return new_fwd model.forward = patch_forward(model.forward) # State dict trick to recast any preexisting per-param state tensors for optimizer in optimizers: optimizer.load_state_dict(optimizer.state_dict()) elif cast_model_outputs is not None: output_caster = functools.partial(to_type, cast_model_outputs) for model in models: def patch_forward(old_fwd): def new_fwd(*args, **kwargs): output = old_fwd(*args, **kwargs) return applier(output, output_caster) return new_fwd model.forward = patch_forward(model.forward) for i, optimizer in enumerate(optimizers): # Still need to special case this for the first pass if isinstance(optimizer, FusedAdam): optimizers[i] = wrap_fused_adam(optimizer, properties) else: optimizers[i] = _process_optimizer(optimizer, properties) _amp_state.loss_scalers = [] for _ in range(num_losses): _amp_state.loss_scalers.append(LossScaler(properties.loss_scale, min_loss_scale=_amp_state.min_loss_scale, max_loss_scale=_amp_state.max_loss_scale)) if properties.patch_torch_functions: # handle is unused here. It's accessible later through a global value anyway. 
        handle = amp_init(loss_scale=properties.loss_scale, verbose=(_amp_state.verbosity == 2))

        for optimizer in optimizers:
            # Disable Amp casting for the optimizer step, because it should only be
            # applied to FP32 master params anyway.
            def patch_step(old_step):
                def new_step(*args, **kwargs):
                    with disable_casts():
                        output = old_step(*args, **kwargs)
                    return output
                return new_step

            optimizer.step = patch_step(optimizer.step)

    if optimizers_was_list:
        if models_was_list:
            return models, optimizers
        else:
            return models[0], optimizers
    else:
        if models_was_list:
            if len(optimizers) == 0:
                return models
            else:
                return models, optimizers[0]
        else:
            if len(optimizers) == 0:
                return models[0]
            else:
                return models[0], optimizers[0]
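
# A small sketch of the container casting performed by the patched forward above:
# applier() walks tensors, mappings, and iterables and applies the caster (built
# from to_type) to every floating-point tensor it finds. Illustration only.
import functools
import torch

caster = functools.partial(to_type, torch.float16)
batch = {'image': torch.randn(2, 3).cuda(),
         'label': torch.tensor([1, 0]).cuda(),   # integer tensors pass through unchanged
         'meta': ['id0', 'id1']}                 # strings pass through unchanged
half_batch = applier(batch, caster)
print(half_batch['image'].dtype)                 # torch.float16
print(half_batch['label'].dtype)                 # torch.int64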
from . import compat, rnn_compat, utils, wrap from .handle import AmpHandle, NoOpHandle from .lists import functional_overrides, torch_overrides, tensor_overrides from ._amp_state import _amp_state from .frontend import * import functools import itertools import torch _DECORATOR_HANDLE = None _USER_CAST_REGISTRY = set() _USER_PROMOTE_REGISTRY = set() def _decorator_helper(orig_fn, cast_fn, wrap_fn): def wrapper(*args, **kwargs): handle = _DECORATOR_HANDLE if handle is None or not handle.is_active(): return orig_fn(*args, **kwargs) inner_cast_fn = utils.verbosify(cast_fn, orig_fn.__name__, handle.verbose) return wrap_fn(orig_fn, inner_cast_fn, handle)(*args, **kwargs) return wrapper # Decorator form def half_function(fn): wrap_fn = functools.partial(wrap.make_cast_wrapper, try_caching=True) return _decorator_helper(fn, utils.maybe_half, wrap_fn) def float_function(fn): wrap_fn = functools.partial(wrap.make_cast_wrapper, try_caching=False) return _decorator_helper(fn, utils.maybe_float, wrap_fn) def promote_function(fn): wrap_fn = functools.partial(wrap.make_promote_wrapper) return _decorator_helper(fn, utils.maybe_float, wrap_fn) # Registry form def register_half_function(module, name): if not hasattr(module, name): raise ValueError('No function named {} in module {}.'.format( name, module)) _USER_CAST_REGISTRY.add((module, name, utils.maybe_half)) def register_float_function(module, name): if not hasattr(module, name): raise ValueError('No function named {} in module {}.'.format( name, module)) _USER_CAST_REGISTRY.add((module, name, utils.maybe_float)) def register_promote_function(module, name): if not hasattr(module, name): raise ValueError('No function named {} in module {}.'.format( name, module)) _USER_PROMOTE_REGISTRY.add((module, name)) # Top-level function to insert _all_ the hooks. def init(enabled=True, loss_scale="dynamic", enable_caching=True, verbose=False, allow_banned=False): global _DECORATOR_HANDLE if not enabled: handle = NoOpHandle() _DECORATOR_HANDLE = handle return handle handle = AmpHandle(loss_scale, enable_caching, verbose) # 0) Force-{fp16, fp32} for user-annotated functions for mod, fn, cast_fn in _USER_CAST_REGISTRY: try_caching = (cast_fn == utils.maybe_half) wrap.cached_cast(mod, fn, cast_fn, handle, try_caching, verbose) _USER_CAST_REGISTRY.clear() # 0.5) Force-promote for user-annotated functions for mod, fn in _USER_PROMOTE_REGISTRY: wrap.promote(mod, fn, handle, verbose) _USER_PROMOTE_REGISTRY.clear() # 1) Force-{fp16, fp32} on white- / black-list functions override_modules = [functional_overrides, torch_overrides, tensor_overrides] cast_table = [('FP16_FUNCS', utils.maybe_half), ('FP32_FUNCS', utils.maybe_float)] for module, (list_name, cast_fn) in itertools.product(override_modules, cast_table): for fn in getattr(module, list_name): try_caching = (cast_fn == utils.maybe_half) wrap.cached_cast(module.MODULE, fn, cast_fn, handle, try_caching, verbose) # 1.5) Pre-0.4, put the blacklist methods on HalfTensor and whitelist # methods on FloatTensor, since they're distinct types. if compat.tensor_is_float_tensor(): for fn in tensor_overrides.FP16_FUNCS: wrap.cached_cast(torch.cuda.FloatTensor, fn, utils.maybe_half, handle, try_caching=True, verbose=verbose) for fn in tensor_overrides.FP32_FUNCS: wrap.cached_cast(torch.cuda.HalfTensor, fn, utils.maybe_float, handle, try_caching=False, verbose=verbose) # 2) Enable type-promotion on multi-arg functions and methods. # NB: special handling for sequence fns (e.g. `torch.cat`). 
promote_modules = [torch_overrides, tensor_overrides] promote_table = [('CASTS', wrap.promote), ('SEQUENCE_CASTS', wrap.sequence_promote)] for promote_mod, (list_name, promote_fn) in itertools.product(promote_modules, promote_table): for fn in getattr(promote_mod, list_name): promote_fn(promote_mod.MODULE, fn, handle, verbose) # 2.5) Pre-0.4, add blacklist methods directly to HalfTensor and FloatTensor types if compat.tensor_is_float_tensor(): for cls, (list_name, promote_fn) in itertools.product([torch.cuda.FloatTensor, torch.cuda.HalfTensor], promote_table): for fn in getattr(tensor_overrides, list_name): promote_fn(cls, fn, handle, verbose) # 3) For any in-place version of a blacklist function, error if any input is fp16. # NB: this is overly conservative. for fn in utils.as_inplace(torch_overrides.FP32_FUNCS): wrap.err_if_any_half(torch_overrides.MODULE, fn, handle) # 3.5) For any in-place blacklist method, error if called on fp16 tensor for fn in utils.as_inplace(tensor_overrides.FP32_FUNCS): wrap.err_if_arg0_half(tensor_overrides.MODULE, fn, handle, verbose) if compat.tensor_is_float_tensor(): wrap.err_if_arg0_half(torch.cuda.HalfTensor, fn, handle, verbose) # 4) For other in-place methods, match the type of self tensor for fn in utils.as_inplace(itertools.chain( tensor_overrides.FP16_FUNCS, tensor_overrides.CASTS)): wrap.promote_match_arg0(tensor_overrides.MODULE, fn, handle, verbose) if compat.tensor_is_float_tensor(): wrap.promote_match_arg0(torch.cuda.HalfTensor, fn, handle, verbose) wrap.promote_match_arg0(torch.cuda.FloatTensor, fn, handle, verbose) # 5) RNNs + RNN cells are whitelisted specially if rnn_compat.has_old_rnns(): wrap.rnn_cast(torch.nn.backends.thnn.backend, 'RNN', handle, verbose) if not rnn_compat.has_old_rnns(): # Patch in our own indirection of `_VF` in modules/rnn s.t. it is mutable. torch.nn.modules.rnn._VF = rnn_compat.VariableFunctionsShim() # Wrap all the rnns for x in rnn_compat.RNN_NAMES: wrap.new_rnn_cast(x.upper(), handle, verbose) # Wrap all the RNN cells rnn_compat.whitelist_rnn_cells(handle, verbose) # 6) Place error+print message on banned functions. # Or, if allow_banned, then cast to FP32. for fn, err_msg in functional_overrides.BANNED_FUNCS: if allow_banned: wrap.cached_cast(functional_overrides.MODULE, fn, utils.maybe_float, handle, try_caching=True, verbose=verbose) else: wrap.err_if_any_half(functional_overrides.MODULE, fn, handle, err_msg) _DECORATOR_HANDLE = handle _amp_state.handle = handle return handle
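
# A sketch of the two user-facing registration styles defined above; both must
# run before amp.initialize()/init() so the hooks get installed. The module and
# function names in the registry example are hypothetical, for illustration only.

# Registry form: force an existing third-party function to run in fp16.
# import my_extension                                     # hypothetical module
# register_half_function(my_extension, 'my_fused_kernel')

# Decorator form: annotate your own function at definition time.
@float_function
def numerically_sensitive_reduction(x):
    # runs in fp32 whenever amp is active, regardless of the input dtype
    return x.pow(2).sum().sqrt()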
import torch from ._initialize import _initialize from ._amp_state import _amp_state, warn_or_err, maybe_print class Properties(object): """ This class has two purposes: to establish a set of default properties, and to route setting of these attributes through __setattr__ so that (in theory) they can be checked for consistency with other existing args. """ def __init__(self): self.options = { "enabled" : False, "opt_level" : None, "cast_model_type" : None, "patch_torch_functions" : False, "keep_batchnorm_fp32" : None, "master_weights" : None, "loss_scale" : 1.0, # Reserved for future functionality # "fused_optimizer" : False, # "enable_ddp_interop" : False, } """ This function allows updating several options at a time without routing through __setattr__ checks, to avoid "you can't get there from here" scenarios. Currently not intended to be exposed; users are expected to select an opt_level and apply consistent modifications. """ def _update_options_dict(new_options): for k, v in new_options: if k in self.options: self.options[k] = v else: raise ValueError("Tried to set unexpected option {}".format(k)) """ The members of "options" are not direct attributes of self, so access attempts will roll down to __getattr__. This borrows from the logic in torch.nn.Module. """ def __getattr__(self, name): if "options" in self.__dict__: options = self.__dict__["options"] if name in options: return options[name] raise AttributeError("'{}' object has no attribute '{}'".format( type(self).__name__, name)) def __setattr__(self, name, value): if "options" in self.__dict__: if name in self.options: # print("setting {} {}".format(name, value)) if name == "cast_model_type": if self.opt_level == "O1" and value is not None: if value is not False: if value is not torch.float32: warn_or_err("O1 inserts casts around Torch functions rather than " "model weights, so with O1, the model weights themselves " "should remain FP32. If you wish to cast the model to a " "different type, use opt_level='O2' or 'O3'. " + "cast_model_type was {}".format(value)) self.options[name] = value elif name == "patch_torch_functions": if self.opt_level != "O1" and value: warn_or_err("Currently, patch_torch_functions=True should only be set by " "selecting opt_level='O1'.") self.options[name] = value elif name == "keep_batchnorm_fp32": if self.opt_level == "O1" and value is not None: warn_or_err("With opt_level O1, batchnorm functions are automatically patched " "to run in FP32, so keep_batchnorm_fp32 should be None." + " keep_batchnorm_fp32 was {}".format(value)) if value == "False": self.options[name] = False elif value == "True": self.options[name] = True else: assert (value is True or value is False or value is None),\ "keep_batchnorm_fp32 must be a boolean, the string 'True' or 'False', "\ "or None, found keep_batchnorm_fp32={}".format(value) self.options[name] = value elif name == "master_weights": if self.opt_level == "O1" and value is not None: warn_or_err("It doesn't make sense to use master_weights with O1. " "With O1, your model weights themselves should be FP32.") self.options[name] = value elif name == "loss_scale": if value == "dynamic": self.options[name] = value else: self.options[name] = float(value) else: self.options[name] = value else: super(Properties, self).__setattr__(name, value) """ O0-O3 are convenience wrappers to establish defaults for typically used mixed precision options. """ class O3: brief = "O3: Pure FP16 training." 
more = "Calls .half() on your model, converting the entire model to FP16.\n"\ "A casting operation is also inserted to cast incoming Tensors to FP16,\n"\ "so you don't need to change your data pipeline.\n"\ "This mode is useful for establishing a performance ceiling.\n"\ "It's also possible training may 'just work' in this mode.\n"\ "If not, try other optimization levels." def __call__(self, properties): properties.enabled = True properties.opt_level = "O3" properties.cast_model_type = torch.float16 properties.patch_torch_functions = False properties.keep_batchnorm_fp32 = False properties.master_weights = False properties.loss_scale = 1.0 # properties.fused_optimizer = False # properties.enable_ddp_interop = False return properties # modified in place so this isn't really necessary class O2: brief = "O2: FP16 training with FP32 batchnorm and FP32 master weights.\n" more = "Calls .half() on your model, converting the entire model (except for batchnorms)\n"\ "to FP16. Batchnorms are retained in FP32 for additional stability.\n"\ "The forward pass is patched to cast incoming Tensors to FP16, so you don't need to change\n"\ "your data pipeline.\n"\ "O2 creates FP32 master weights outside the model and patches any optimizers to update\n"\ "these master weights, then copy the master weights into the FP16 model weights.\n"\ "Master weights can also improve convergence and stability." def __call__(self, properties): properties.enabled = True properties.opt_level = "O2" properties.cast_model_type = torch.float16 properties.patch_torch_functions = False properties.keep_batchnorm_fp32 = True properties.master_weights = True properties.loss_scale = "dynamic" # properties.fused_optimizer = False # properties.enable_ddp_interop = False return properties # modified in place so this isn't really necessary class O1: brief = "O1: Insert automatic casts around Pytorch functions and Tensor methods.\n" more = "The type of your model's weights is not altered. However, internally,\n"\ "Pytorch functions are patched to cast any Tensor Core-friendly ops to FP16 for speed,\n"\ "while operations that might benefit from the additional stability of FP32 are patched\n"\ "to cast their inputs to fp32.\n"\ "O1 is the safest way to try mixed precision training, and is recommended when\n"\ "trying mixed precision training for the first time." def __call__(self, properties): properties.enabled = True properties.opt_level = "O1" properties.cast_model_type = None properties.patch_torch_functions = True properties.keep_batchnorm_fp32 = None properties.master_weights = None properties.loss_scale = "dynamic" # properties.fused_optimizer = False # properties.enable_ddp_interop = False return properties # modified in place so this isn't really necessary class O0: brief = "O0: Pure FP32 training.\n" more = "Your models are checked to make sure parameters are FP32, but otherwise the\n"\ "types of weights and internal Pytorch operations are not altered. 
This mode disables any\n"\ "FP16 arithmetic, although other optimizations like DDP interop may still be requested.\n" def __call__(self, properties): properties.enabled = True properties.opt_level = "O0" properties.cast_model_type = torch.float32 properties.patch_torch_functions = False properties.keep_batchnorm_fp32 = None properties.master_weights = False properties.loss_scale = 1.0 # properties.fused_optimizer = False # properties.enable_ddp_interop = False return properties # modified in place so this isn't really necessary opt_levels = {"O3": O3(), "O2": O2(), "O1": O1(), "O0": O0()} # allow user to directly pass Properties struct as well? def initialize( models, optimizers=None, enabled=True, opt_level="O1", cast_model_type=None, patch_torch_functions=None, keep_batchnorm_fp32=None, master_weights=None, loss_scale=None, cast_model_outputs=None, num_losses=1, verbosity=1, min_loss_scale=None, max_loss_scale=2.**24 ): """ Initialize your models, optimizers, and the Torch tensor and functional namespace according to the chosen ``opt_level`` and overridden properties, if any. ``amp.initialize`` should be called **after** you have finished constructing your model(s) and optimizer(s), but **before** you send your model through any DistributedDataParallel wrapper. See `Distributed training`_ in the Imagenet example. Currently, ``amp.initialize`` should only be called **once**, although it can process an arbitrary number of models and optimizers (see the corresponding `Advanced Amp Usage topic`_). If you think your use case requires ``amp.initialize`` to be called more than once, `let us know`_. Any property keyword argument that is not ``None`` will be interpreted as a manual override. To prevent having to rewrite anything else in your script, name the returned models/optimizers to replace the passed models/optimizers, as in the code sample below. Args: models (torch.nn.Module or list of torch.nn.Modules): Models to modify/cast. optimizers (optional, torch.optim.Optimizer or list of torch.optim.Optimizers): Optimizers to modify/cast. REQUIRED for training, optional for inference. enabled (bool, optional, default=True): If False, renders all Amp calls no-ops, so your script should run as if Amp were not present. opt_level (str, optional, default="O1"): Pure or mixed precision optimization level. Accepted values are "O0", "O1", "O2", and "O3", explained in detail above. cast_model_type (``torch.dtype``, optional, default=None): Optional property override, see above. patch_torch_functions (bool, optional, default=None): Optional property override. keep_batchnorm_fp32 (bool or str, optional, default=None): Optional property override. If passed as a string, must be the string "True" or "False". master_weights (bool, optional, default=None): Optional property override. loss_scale (float or str, optional, default=None): Optional property override. If passed as a string, must be a string representing a number, e.g., "128.0", or the string "dynamic". cast_model_outputs (torch.dtype, optional, default=None): Option to ensure that the outputs of your model(s) are always cast to a particular type regardless of ``opt_level``. num_losses (int, optional, default=1): Option to tell Amp in advance how many losses/backward passes you plan to use. When used in conjunction with the ``loss_id`` argument to ``amp.scale_loss``, enables Amp to use a different loss scale per loss/backward pass, which can improve stability. See "Multiple models/optimizers/losses" under `Advanced Amp Usage`_ for examples. 
If ``num_losses`` is left to 1, Amp will still support multiple losses/backward passes, but use a single global loss scale for all of them. verbosity (int, default=1): Set to 0 to suppress Amp-related output. min_loss_scale (float, default=None): Sets a floor for the loss scale values that can be chosen by dynamic loss scaling. The default value of None means that no floor is imposed. If dynamic loss scaling is not used, `min_loss_scale` is ignored. max_loss_scale (float, default=2.**24): Sets a ceiling for the loss scale values that can be chosen by dynamic loss scaling. If dynamic loss scaling is not used, `max_loss_scale` is ignored. Returns: Model(s) and optimizer(s) modified according to the ``opt_level``. If either the ``models`` or ``optimizers`` args were lists, the corresponding return value will also be a list. Permissible invocations:: model, optim = amp.initialize(model, optim,...) model, [optim1, optim2] = amp.initialize(model, [optim1, optim2],...) [model1, model2], optim = amp.initialize([model1, model2], optim,...) [model1, model2], [optim1, optim2] = amp.initialize([model1, model2], [optim1, optim2],...) # This is not an exhaustive list of the cross product of options that are possible, # just a set of examples. model, optim = amp.initialize(model, optim, opt_level="O0") model, optim = amp.initialize(model, optim, opt_level="O0", loss_scale="dynamic"|128.0|"128.0") model, optim = amp.initialize(model, optim, opt_level="O1") # uses "loss_scale="dynamic" default model, optim = amp.initialize(model, optim, opt_level="O1", loss_scale=128.0|"128.0") model, optim = amp.initialize(model, optim, opt_level="O2") # uses "loss_scale="dynamic" default model, optim = amp.initialize(model, optim, opt_level="O2", loss_scale=128.0|"128.0") model, optim = amp.initialize(model, optim, opt_level="O2", keep_batchnorm_fp32=True|False|"True"|"False") model, optim = amp.initialize(model, optim, opt_level="O3") # uses loss_scale=1.0 default model, optim = amp.initialize(model, optim, opt_level="O3", loss_scale="dynamic"|128.0|"128.0") model, optim = amp.initialize(model, optim, opt_level="O3", keep_batchnorm_fp32=True|False|"True"|"False") The `Imagenet example`_ demonstrates live use of various opt_levels and overrides. .. _`Distributed training`: https://github.com/NVIDIA/apex/tree/master/examples/imagenet#distributed-training .. _`Imagenet example`: https://github.com/NVIDIA/apex/tree/master/examples/imagenet .. _`Advanced Amp Usage`: https://nvidia.github.io/apex/advanced.html .. _`Advanced Amp Usage topic`: https://nvidia.github.io/apex/advanced.html#multiple-models-optimizers-losses .. _`let us know`: https://github.com/NVIDIA/apex/issues """ _amp_state.opt_properties = Properties() _amp_state.verbosity = verbosity if not enabled: if optimizers is None: return models else: return models, optimizers if not torch.backends.cudnn.enabled: raise RuntimeError( "Amp requires torch.backends.cudnn.enabled = True") if opt_level not in opt_levels: raise RuntimeError( "Unexpected optimization level {}. ".format(opt_level) + "Options are 'O0', 'O1', 'O2', 'O3'. 
Note that in `O0`, `O1`, etc., the prefix O is the letter O, " + "not the number zero.") else: _amp_state.opt_properties = opt_levels[opt_level](_amp_state.opt_properties) maybe_print("Selected optimization level {}".format(opt_levels[opt_level].brief), True) maybe_print("Defaults for this optimization level are:", True) for k, v in _amp_state.opt_properties.options.items(): maybe_print("{:22} : {}".format(k, v), True) _amp_state.min_loss_scale = min_loss_scale _amp_state.max_loss_scale = max_loss_scale maybe_print("Processing user overrides (additional kwargs that are not None)...", True) # I chose to have the keyword arguments listed directly in the argument list, # instead of **kwargs, so I can't use kwargs.items() here. if enabled is not None: _amp_state.opt_properties.enabled = enabled if opt_level is not None: _amp_state.opt_properties.opt_level = opt_level if cast_model_type is not None: _amp_state.opt_properties.cast_model_type = cast_model_type if patch_torch_functions is not None: _amp_state.opt_properties.patch_torch_functions = patch_torch_functions if keep_batchnorm_fp32 is not None: _amp_state.opt_properties.keep_batchnorm_fp32 = keep_batchnorm_fp32 if master_weights is not None: _amp_state.opt_properties.master_weights = master_weights if loss_scale is not None: _amp_state.opt_properties.loss_scale = loss_scale maybe_print("After processing overrides, optimization options are:", True) for k, v in _amp_state.opt_properties.options.items(): maybe_print("{:22} : {}".format(k, v), True) return _initialize(models, optimizers, _amp_state.opt_properties, num_losses, cast_model_outputs) # TODO: is this necessary/useful? # def check_option_consistency(enabled=True, # opt_level=None, # cast_model_type=None, # patch_torch_functions=None, # keep_batchnorm_fp32=None, # master_weights=None, # loss_scale=None, # enable_ddp_interop=None, # hard_override=False): # """ # Utility function that enables users to quickly check if the option combination they intend # to use is permitted. ``check_option_consistency`` does not require models or optimizers # to be constructed, and can be called at any point in the script. ``check_option_consistency`` # is totally self-contained; it does not set any amp global state or affect anything outside # of itself. # """ # # if not enabled: # return # # if opt_level not in opt_levels: # raise RuntimeError("Unexpected optimization level. Options are 'O0', 'O1', 'O2', 'O3'.") # else: # opt_properties = opt_levels[opt_level](Properties()) # print("Selected optimization level {}", opt_levels[opt_level].brief) # print("Defaults for this optimization level are:") # for k, v in opt_properties.options: # print("{:22} : {}".format(k, v)) # # print("Processing user overrides (additional kwargs that are not None)...") # for k, v in kwargs: # if k not in _amp_state.opt_properties.options: # raise RuntimeError("Unexpected kwarg {}".format(k)) # if v is not None: # setattr(opt_properties, k, v) # # print("After processing overrides, optimization options are:") # for k, v in opt_properties.options: # print("{:22} : {}".format(k, v))
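# --- Illustrative usage sketch (not part of the library source) ---
# A hedged example of the `amp.initialize` front end documented above,
# following the pattern in the docstring: initialize once after model and
# optimizer construction, then scale each loss before backward. The model,
# optimizer, and random inputs are placeholders.
import torch
from apex import amp

model = torch.nn.Linear(10, 10).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

# O1 patches Torch functions in place; loss_scale="dynamic" is the O1 default.
model, optimizer = amp.initialize(model, optimizer, opt_level="O1")

for _ in range(10):
    inp = torch.randn(4, 10, device="cuda")
    loss = model(inp).pow(2).mean()
    optimizer.zero_grad()
    with amp.scale_loss(loss, optimizer) as scaled_loss:
        scaled_loss.backward()
    optimizer.step()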
from .amp import init, half_function, float_function, promote_function,\
    register_half_function, register_float_function, register_promote_function
from .handle import scale_loss, disable_casts
from .frontend import initialize
from ._amp_state import master_params, _amp_state
import torch from ..multi_tensor_apply import multi_tensor_applier from ._amp_state import _amp_state, master_params, maybe_print from itertools import product def scale_check_overflow_python(model_grad, master_grad, scale, check_overflow=False): # Exception handling for 18.04 compatibility if check_overflow: cpu_sum = float(model_grad.float().sum()) if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True if master_grad is not model_grad: # copy_ probably internally short-circuits this master_grad.copy_(model_grad) if scale != 1.0: master_grad.mul_(scale) return False def axpby_check_overflow_python(model_grad, stashed_grad, master_grad, scale, check_overflow=False): # Exception handling for 18.04 compatibility if check_overflow: cpu_sum = float(model_grad.float().sum()) if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum: return True # if master_grad is not model_grad: # copy_ probably internally short-circuits this # master_grad.copy_(model_grad) assert stashed_grad.dtype == master_grad.dtype converted_model_grad = model_grad.to(master_grad.dtype) stashed_grad.add_(scale, converted_model_grad) master_grad.data = stashed_grad.data return False class LossScaler(object): warned_no_fused_kernel = False warned_unscaling_non_fp32_grad = False has_fused_kernel = False def __init__(self, loss_scale, init_scale=2.**16, scale_factor=2., scale_window=2000, min_loss_scale=None, max_loss_scale=2.**24): if loss_scale == "dynamic": self.dynamic = True self._loss_scale = min(max_loss_scale, init_scale) else: self.dynamic = False self._loss_scale = loss_scale self._max_loss_scale = max_loss_scale self._min_loss_scale = min_loss_scale self._scale_seq_len = scale_window self._unskipped = 0 self._has_overflow = False self._overflow_buf = torch.cuda.IntTensor([0]) if multi_tensor_applier.available: import amp_C LossScaler.has_fused_kernel = multi_tensor_applier.available LossScaler.multi_tensor_scale_cuda = amp_C.multi_tensor_scale LossScaler.multi_tensor_axpby_cuda = amp_C.multi_tensor_axpby else: if not LossScaler.warned_no_fused_kernel: maybe_print( "Warning: multi_tensor_applier fused unscale kernel is unavailable, " "possibly because apex was installed without --cuda_ext --cpp_ext. " "Using Python fallback. Original ImportError was: " + repr(multi_tensor_applier.import_err), True) LossScaler.has_fused_kernel = False LossScaler.warned_no_fused_kernel = True def loss_scale(self): return self._loss_scale def unscale_python(self, model_grads, master_grads, scale): for model, master in zip(model_grads, master_grads): if model is not None: if not LossScaler.warned_unscaling_non_fp32_grad: if master.dtype != torch.float32: maybe_print( "Attempting to unscale a grad with type {} ".format(master.type()) + "Unscaling non-fp32 grads may indicate an error. " "When using Amp, you don't need to call .half() on your model.") LossScaler.warned_unscaling_non_fp32_grad = True self._has_overflow = scale_check_overflow_python(model, master, 1./scale, self.dynamic) if self._has_overflow and self.dynamic: break # unused_scale keeps some of the old API alive for hopefully a short time. 
def unscale(self, model_grads, master_grads, unused_scale, models_are_masters=False): if self._has_overflow: return scale = self._loss_scale if scale == 1.0 and models_are_masters and not self.dynamic: return if LossScaler.has_fused_kernel: # if (not LossScaler.warned_unscaling_non_fp32_grad # and master_grads[0].dtype == torch.float16): # print("Warning: unscaling grads that are not FP32. " # "Unscaling non-fp32 grads may indicate an error. " # "When using Amp, you don't need to call .half() on your model.") # # Setting this to True unconditionally allows the possibility of an escape # # if never-before-seen non-fp32 grads are created in some later iteration. # LossScaler.warned_unscaling_non_fp32_grad = True multi_tensor_applier(LossScaler.multi_tensor_scale_cuda, self._overflow_buf, [model_grads, master_grads], 1./scale) else: self.unscale_python(model_grads, master_grads, scale) # Defer to update_scale # If the fused kernel is available, we only need one D2H memcopy and sync. # if LossScaler.has_fused_kernel and self.dynamic and not self._has_overflow: # self._has_overflow = self._overflow_buf.item() def unscale_with_stashed_python(self, model_grads, stashed_master_grads, master_grads, scale): for model, stashed, master in zip(model_grads, stashed_master_grads, master_grads): if model is None and stashed is None: continue else: if not LossScaler.warned_unscaling_non_fp32_grad: if master.dtype != torch.float32: maybe_print( "Attempting to unscale a grad with type {} ".format(master.type()) + "Unscaling non-fp32 grads may indicate an error. " "When using Amp, you don't need to call .half() on your model.") LossScaler.warned_unscaling_non_fp32_grad = True self._has_overflow = axpby_check_overflow_python(model, stashed, master, 1./scale, self.dynamic) if self._has_overflow and self.dynamic: break def unscale_with_stashed(self, model_grads, stashed_master_grads, master_grads): if self._has_overflow: return scale = self._loss_scale if LossScaler.has_fused_kernel: if (not LossScaler.warned_unscaling_non_fp32_grad and master_grads[0].dtype == torch.float16): print("Warning: unscaling grads that are not FP32. " "Unscaling non-fp32 grads may indicate an error. " "When using Amp, you don't need to call .half() on your model.") # Setting this to True unconditionally allows the possibility of an escape # if never-before-seen non-fp32 grads are created in some later iteration. LossScaler.warned_unscaling_non_fp32_grad = True multi_tensor_applier(LossScaler.multi_tensor_axpby_cuda, self._overflow_buf, [model_grads, stashed_master_grads, master_grads], 1./scale, 1.0, 0) # check only arg 0, aka the incoming model grads, for infs else: self.unscale_with_stashed_python(model_grads, stashed_master_grads, master_grads, scale) # Defer to update_scale # If the fused kernel is available, we only need one D2H memcopy and sync. # if LossScaler.has_fused_kernel and self.dynamic and not self._has_overflow: # self._has_overflow = self._overflow_buf.item() def clear_overflow_state(self): self._has_overflow = False if self.has_fused_kernel: self._overflow_buf.zero_() # Separate so unscale() can be called more that once before updating. def update_scale(self): # If the fused kernel is available, we only need one D2H memcopy and sync. if LossScaler.has_fused_kernel and self.dynamic and not self._has_overflow: self._has_overflow = self._overflow_buf.item() if self._has_overflow and self.dynamic: should_skip = True if(self._min_loss_scale): self._loss_scale = max(self._min_loss_scale, self._loss_scale/2.) 
else: self._loss_scale = self._loss_scale/2. self._unskipped = 0 else: should_skip = False self._unskipped += 1 if self._unskipped == self._scale_seq_len and self.dynamic: self._loss_scale = min(self._max_loss_scale, self._loss_scale*2.) self._unskipped = 0 return should_skip
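# --- Illustrative usage sketch (not part of the library source) ---
# A hedged sketch of how the LossScaler above is driven: clear the overflow
# state, unscale gradients, then let update_scale() decide whether the step
# should be skipped and how to adjust the dynamic scale. In apex this sequence
# is invoked by the amp internals, not by users, and the gradient lists below
# are toy placeholders.
import torch

scaler = LossScaler("dynamic")

model_grads = [torch.randn(3, 3, device="cuda").half()]
master_grads = [torch.zeros(3, 3, device="cuda")]

scaler.clear_overflow_state()
scaler.unscale(model_grads, master_grads, scaler.loss_scale())
should_skip = scaler.update_scale()   # True if an overflow was detected
if not should_skip:
    pass  # the optimizer step would run here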
VERSION = (0, 1, 0)
__version__ = '.'.join(map(str, VERSION))
import contextlib import warnings from .scaler import LossScaler, master_params from ._amp_state import maybe_print import numpy as np class OptimWrapper(object): def __init__(self, optimizer, amp_handle, num_loss): self._optimizer = optimizer self._amp_handle = amp_handle self._num_loss = num_loss self._loss_idx = 0 self._skip_next = [False] * num_loss self._loss_scaler = [LossScaler('dynamic') for _ in range(num_loss)] @contextlib.contextmanager def scale_loss(self, loss): if not self._amp_handle.is_active(): yield loss return # When there are multiple losses per-optimizer, we need # to save out current grad accumulation, since we won't be # able to unscale this particulare loss once the grads are # all mixed together. cached_grads = [] if self._loss_idx > 0: for p in master_params(self._optimizer): if p.grad is not None: cached_grads.append(p.grad.data.detach().clone()) else: cached_grads.append(None) self._optimizer.zero_grad() loss_scale = self._cur_loss_scaler().loss_scale() yield loss * loss_scale self._cur_loss_scaler().clear_overflow_state() self._cur_loss_scaler().unscale( master_params(self._optimizer), master_params(self._optimizer), loss_scale) self._skip_next[self._loss_idx] = self._cur_loss_scaler().update_scale() self._loss_idx += 1 if len(cached_grads) > 0: for p, cached_grad in zip(master_params(self._optimizer), cached_grads): if cached_grad is not None: p.grad.data.add_(cached_grad) cached_grads = [] def _cur_loss_scaler(self): assert 0 <= self._loss_idx < self._num_loss return self._loss_scaler[self._loss_idx] def step(self, closure=None): if not self._amp_handle.is_active(): return self._optimizer.step(closure=closure) self._loss_idx = 0 for group in self._optimizer.param_groups: for p in group['params']: self._amp_handle.remove_cache(p) if closure is not None: raise NotImplementedError( 'The `closure` argument is unsupported by the amp ' + 'optimizer wrapper.') if any(self._skip_next): maybe_print('Gradient overflow, skipping update') self._skip_next = [False] * self._num_loss else: return self._optimizer.step(closure=closure) # Forward any attribute lookups def __getattr__(self, attr): return getattr(self._optimizer, attr) # Forward all torch.optim.Optimizer methods def __getstate__(self): return self._optimizer.__getstate__() def __setstate__(self): return self._optimizer.__setstate__() def __repr__(self): return self._optimizer.__repr__() def state_dict(self): return self._optimizer.state_dict() def load_state_dict(self, state_dict): return self._optimizer.load_state_dict(state_dict) def zero_grad(self): return self._optimizer.zero_grad() def add_param_group(self, param_group): return self._optimizer.add_param_group(param_group)
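# --- Illustrative usage sketch (not part of the library source) ---
# A hedged sketch of the legacy OptimWrapper above. The wrapper is normally
# produced for you by the old amp handle; constructing it directly here is
# only for illustration, and the model/optimizer/input are placeholders.
import torch
from apex import amp

model = torch.nn.Linear(8, 8).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)

amp_handle = amp.init(enabled=True)
wrapped = OptimWrapper(optimizer, amp_handle, num_loss=1)

loss = model(torch.randn(2, 8, device="cuda")).sum()
wrapped.zero_grad()
with wrapped.scale_loss(loss) as scaled_loss:
    scaled_loss.backward()
wrapped.step()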
# This is a "header object" that allows different amp modules to communicate. # I'm a C++ guy, not a python guy. I decided this approach because it seemed most C++-like. # But apparently it's ok: # http://effbot.org/pyfaq/how-do-i-share-global-variables-across-modules.htm import os import torch TORCH_MAJOR = int(torch.__version__.split('.')[0]) TORCH_MINOR = int(torch.__version__.split('.')[1]) if TORCH_MAJOR == 0: import collections.abc as container_abcs else: from torch._six import container_abcs class AmpState(object): def __init__(self): self.hard_override=False self.allow_incoming_model_not_fp32 = False self.verbosity=1 # Attribute stash. Could also just stash things as global module attributes. _amp_state = AmpState() def warn_or_err(msg): if _amp_state.hard_override: print("Warning: " + msg) else: raise RuntimeError(msg) # I'm not sure if allowing hard_override is a good idea. # + " If you're sure you know what you're doing, supply " + # "hard_override=True to amp.initialize.") distributed = False if 'WORLD_SIZE' in os.environ: distributed = int(os.environ['WORLD_SIZE']) > 1 def maybe_print(msg, rank0=False): if _amp_state.verbosity > 0: if rank0: if distributed: if torch.distributed.get_rank() == 0: print(msg) else: print(msg) else: print(msg) # def iter_params(param_groups): # for group in param_groups: # for p in group['params']: # yield p def master_params(optimizer): """ Generator expression that iterates over the params owned by ``optimizer``. Args: optimizer: An optimizer previously returned from ``amp.initialize``. """ for group in optimizer.param_groups: for p in group['params']: yield p
from . import compat import functools import itertools import torch def get_cuda_version(): return tuple(int(x) for x in torch.version.cuda.split('.')) def is_fp_tensor(x): if is_nested(x): # Fast-fail version of all(is_fp_tensor) for y in x: if not is_fp_tensor(y): return False return True return compat.is_tensor_like(x) and compat.is_floating_point(x) def is_nested(x): return isinstance(x, tuple) or isinstance(x, list) def should_cache(x): if is_nested(x): # Fast-fail version of all(should_cache) for y in x: if not should_cache(y): return False return True return isinstance(x, torch.nn.parameter.Parameter) and \ type_string(x) == 'FloatTensor' def collect_fp_tensor_types(args, kwargs): def collect_types(x, types): if is_nested(x): for y in x: collect_types(y, types) else: types.add(type_string(x)) all_args = itertools.chain(args, kwargs.values()) types = set() for x in all_args: if is_fp_tensor(x): collect_types(x, types) return types def type_string(x): return x.type().split('.')[-1] def maybe_half(x, name='', verbose=False): if is_nested(x): return type(x)([maybe_half(y) for y in x]) if not x.is_cuda or type_string(x) == 'HalfTensor': return x else: if verbose: print('Float->Half ({})'.format(name)) return x.half() def maybe_float(x, name='', verbose=False): if is_nested(x): return type(x)([maybe_float(y) for y in x]) if not x.is_cuda or type_string(x) == 'FloatTensor': return x else: if verbose: print('Half->Float ({})'.format(name)) return x.float() # NB: returneds casted `args`, mutates `kwargs` in-place def casted_args(cast_fn, args, kwargs): new_args = [] for x in args: if is_fp_tensor(x): new_args.append(cast_fn(x)) else: new_args.append(x) for k in kwargs: val = kwargs[k] if is_fp_tensor(val): kwargs[k] = cast_fn(val) return new_args def cached_cast(cast_fn, x, cache): if is_nested(x): return type(x)([cached_cast(y) for y in x]) if x in cache: cached_x = cache[x] if x.requires_grad and cached_x.requires_grad: # Make sure x is actually cached_x's autograd parent. if cached_x.grad_fn.next_functions[1][0].variable is not x: raise RuntimeError("x and cache[x] both require grad, but x is not " "cache[x]'s parent. This is likely an error.") # During eval, it's possible to end up caching casted weights with # requires_grad=False. On the next training iter, if cached_x is found # and reused from the cache, it will not actually have x as its parent. # Therefore, we choose to invalidate the cache (and force refreshing the cast) # if x.requires_grad and cached_x.requires_grad do not match. # # During eval (i.e. running under with torch.no_grad()) the invalidation # check would cause the cached value to be dropped every time, because # cached_x would always be created with requires_grad=False, while x would # still have requires_grad=True. This would render the cache effectively # useless during eval. Therefore, if we are running under the no_grad() # context manager (torch.is_grad_enabled=False) we elide the invalidation # check, and use the cached value even though its requires_grad flag doesn't # match. During eval, we don't care that there's no autograd-graph # connection between x and cached_x. 
if torch.is_grad_enabled() and x.requires_grad != cached_x.requires_grad: del cache[x] else: return cached_x casted_x = cast_fn(x) cache[x] = casted_x return casted_x def verbosify(cast_fn, fn_name, verbose): if verbose: return functools.partial(cast_fn, name=fn_name, verbose=verbose) else: return cast_fn def as_inplace(fns): for x in fns: yield x + '_' def has_func(mod, fn): if isinstance(mod, torch.nn.backends.backend.FunctionBackend): return fn in mod.function_classes elif isinstance(mod, dict): return fn in mod else: return hasattr(mod, fn) def get_func(mod, fn): if isinstance(mod, torch.nn.backends.backend.FunctionBackend): return mod.function_classes[fn] elif isinstance(mod, dict): return mod[fn] else: return getattr(mod, fn) def set_func(mod, fn, new_fn): if isinstance(mod, torch.nn.backends.backend.FunctionBackend): mod.function_classes[fn] = new_fn elif isinstance(mod, dict): mod[fn] = new_fn else: setattr(mod, fn, new_fn) def set_func_save(handle, mod, fn, new_fn): cur_fn = get_func(mod, fn) handle._save_func(mod, fn, cur_fn) set_func(mod, fn, new_fn) # A couple problems get solved here: # - The flat_weight buffer is disconnected from autograd graph, # so the fp16 weights need to be derived from the input weights # to this forward call, not the flat buffer. # - The ordering of weights in the flat buffer is...idiosyncratic. # First problem is solved with combination of set_ (to set up # correct storage) and copy_ (so the fp16 weight derives from the # fp32 one in autograd. # Second is solved by doing ptr arithmetic on the fp32 weights # to derive the correct offset. # # TODO: maybe this should actually use # `torch._cudnn_rnn_flatten_weight`? But then I need to call # on first iter and cache the right offsets. Ugh. def synthesize_flattened_rnn_weights(fp32_weights, fp16_flat_tensor, rnn_fn='', verbose=False): fp16_weights = [] fp32_base_ptr = fp32_weights[0][0].data_ptr() for layer_weights in fp32_weights: fp16_layer_weights = [] for w_fp32 in layer_weights: w_fp16 = w_fp32.new().half() offset = (w_fp32.data_ptr() - fp32_base_ptr) // w_fp32.element_size() w_fp16.set_(fp16_flat_tensor.storage(), offset, w_fp32.shape) w_fp16.copy_(w_fp32) if verbose: print('Float->Half ({})'.format(rnn_fn)) fp16_layer_weights.append(w_fp16) fp16_weights.append(fp16_layer_weights) return fp16_weights # Roughly same as above, just the `fp32_weights` aren't nested. # Code kept separate for readability. def new_synthesize_flattened_rnn_weights(fp32_weights, fp16_flat_tensor, rnn_fn='', verbose=False): fp16_weights = [] fp32_base_ptr = fp32_weights[0].data_ptr() for w_fp32 in fp32_weights: w_fp16 = w_fp32.new().half() offset = (w_fp32.data_ptr() - fp32_base_ptr) // w_fp32.element_size() w_fp16.set_(fp16_flat_tensor.storage(), offset, w_fp32.shape) w_fp16.copy_(w_fp32) if verbose: print('Float->Half ({})'.format(rnn_fn)) fp16_weights.append(w_fp16) return fp16_weights
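# --- Illustrative usage sketch (not part of the library source) ---
# A hedged sketch of the casting helpers defined above: maybe_half recurses
# into lists/tuples and leaves non-CUDA tensors alone, while casted_args
# applies a cast to every floating-point tensor in args/kwargs. The tensors
# are toy values.
import torch

a = torch.randn(2, 2, device="cuda")    # CUDA FloatTensor -> will be halved
b = torch.randn(2, 2)                   # CPU tensor       -> left untouched
nested = [a, (a, b)]

halved = maybe_half(nested)                      # container types preserved
new_args = casted_args(maybe_half, (a, b), {})   # list of casted positional args
print(type_string(halved[0]), type_string(new_args[1]))   # HalfTensor FloatTensor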
from . import compat from . import utils from ._amp_state import _amp_state from . import rnn_compat import functools import torch def make_cast_wrapper(orig_fn, cast_fn, handle, try_caching=False): @functools.wraps(orig_fn) def wrapper(*args, **kwargs): if not handle.is_active(): return orig_fn(*args, **kwargs) if try_caching and handle.has_cache: args = list(args) for i in range(len(args)): if utils.should_cache(args[i]): args[i] = utils.cached_cast(cast_fn, args[i], handle.cache) for k in kwargs: if utils.should_cache(kwargs[k]): kwargs[k] = utils.cached_cast(cast_fn, kwargs[k], handle.cache) new_args = utils.casted_args(cast_fn, args, kwargs) return orig_fn(*new_args, **kwargs) return wrapper def cached_cast(mod, fn, cast_fn, handle, try_caching=False, verbose=False): if not utils.has_func(mod, fn): return orig_fn = utils.get_func(mod, fn) cast_fn = utils.verbosify(cast_fn, fn, verbose) wrapper = make_cast_wrapper(orig_fn, cast_fn, handle, try_caching) utils.set_func_save(handle, mod, fn, wrapper) # `handle` arg is unused, but simplifies API to make `make_cast_wrapper` # Annoyingly, make_promote_wrapper still uses the global handle. Once everyone # is on the new API and I am free to get rid of handle, I can clean this up. def make_promote_wrapper(orig_fn, cast_fn, handle=None): @functools.wraps(orig_fn) def wrapper(*args, **kwargs): if not _amp_state.handle.is_active(): return orig_fn(*args, **kwargs) types = utils.collect_fp_tensor_types(args, kwargs) if len(types) <= 1: return orig_fn(*args, **kwargs) elif len(types) == 2 and types == set(['HalfTensor', 'FloatTensor']): new_args = utils.casted_args(cast_fn, args, kwargs) return orig_fn(*new_args, **kwargs) else: raise NotImplementedError('Do not know how to handle ' + 'these types to promote: {}' .format(types)) return wrapper def promote(mod, fn, handle, verbose=False): orig_fn = utils.get_func(mod, fn) maybe_float = utils.verbosify(utils.maybe_float, fn, verbose) wrapper = make_promote_wrapper(orig_fn, maybe_float) utils.set_func_save(handle, mod, fn, wrapper) def sequence_promote(mod, fn, handle, verbose=False): orig_fn = utils.get_func(mod, fn) maybe_float = utils.verbosify(utils.maybe_float, fn, verbose) @functools.wraps(orig_fn) def wrapper(seq, *args, **kwargs): if not _amp_state.handle.is_active(): return orig_fn(seq, *args, **kwargs) types = set([utils.type_string(x) for x in seq]) if len(types) <= 1: return orig_fn(seq, *args, **kwargs) elif types == set(['HalfTensor', 'FloatTensor']): cast_seq = utils.casted_args(maybe_float, seq, {}) return orig_fn(cast_seq, *args, **kwargs) else: # TODO: other mixed-type cases aren't due to amp. # Just pass through? 
return orig_fn(seq, *args, **kwargs) utils.set_func_save(handle, mod, fn, wrapper) def promote_match_arg0(mod, fn, handle, verbose=False): if not utils.has_func(mod, fn): return orig_fn = utils.get_func(mod, fn) @functools.wraps(orig_fn) def wrapper(arg0, *args, **kwargs): assert compat.is_tensor_like(arg0) if not _amp_state.handle.is_active(): return orig_fn(arg0, *args, **kwargs) if utils.type_string(arg0) == 'HalfTensor': cast_fn = utils.maybe_half elif utils.type_string(arg0) == 'FloatTensor': cast_fn = utils.maybe_float else: return orig_fn(arg0, *args, **kwargs) cast_fn = utils.verbosify(cast_fn, fn, verbose) new_args = utils.casted_args(cast_fn, args, kwargs) return orig_fn(arg0, *new_args, **kwargs) utils.set_func_save(handle, mod, fn, wrapper) def err_if_any_half(mod, fn, handle, custom_err_msg=None): if not utils.has_func(mod, fn): return orig_fn = utils.get_func(mod, fn) @functools.wraps(orig_fn) def wrapper(*args, **kwargs): types = utils.collect_fp_tensor_types(args, kwargs) if 'HalfTensor' in types: if custom_err_msg: raise NotImplementedError(custom_err_msg) else: raise NotImplementedError('Cannot call in-place function ' + '{} with fp16 arguments.'.format(fn)) else: return orig_fn(*args, **kwargs) utils.set_func_save(handle, mod, fn, wrapper) def err_if_arg0_half(mod, fn, handle, verbose=False): if not utils.has_func(mod, fn): return orig_fn = utils.get_func(mod, fn) @functools.wraps(orig_fn) def wrapper(arg0, *args, **kwargs): assert compat.is_tensor_like(arg0) if utils.type_string(arg0) == 'HalfTensor': raise NotImplementedError('Cannot call in-place method ' + '{} on fp16 Tensors.'.format(fn)) else: cast_fn = utils.verbosify(utils.maybe_float, fn, verbose) new_args = utils.casted_args(cast_fn, args, kwargs) return orig_fn(arg0, *new_args, **kwargs) utils.set_func_save(handle, mod, fn, wrapper) # Current RNN approach: # - Wrap top-level `RNN` function in thnn backend # - Will call into either CudnnRNN or AutogradRNN # - Each of these are factory functions that return a per-iter # `forward` function # - We interpose on the factory function to: # 1) Interpose on the actual forward function and put in casts # 2) Insert an fp16 `flat_weight` if necessary def rnn_cast(backend, fn, handle, verbose=False): orig_rnn = utils.get_func(backend, fn) @functools.wraps(orig_rnn) def rnn_wrapper(*args, **kwargs): flat_weight = kwargs.get('flat_weight') if flat_weight is not None: # We replace `flat_weight` with an uninitialized fp16 # Tensor. The "actual" weight tensors (provided in `forward`), # will then be set up as ptrs into the buffer and have the # corresponding fp32 values copied in. # We need to call `copy` on the "actual" weights so that the # autograd graph correctly backprops from the wgrads computed # inside cuDNN (on fp16 weights) into the fp32 weights. assert utils.type_string(flat_weight) == 'FloatTensor' if compat.tensor_is_float_tensor() or compat.tensor_is_variable(): # Pre-0.4. A little slower, since it zeros out memory. 
flat_weight_fp16 = flat_weight.new().half().resize_(flat_weight.shape) else: flat_weight_fp16 = torch.empty_like(flat_weight, dtype=torch.float16) kwargs['flat_weight'] = flat_weight_fp16 else: flat_weight_fp16 = None forward = orig_rnn(*args, **kwargs) @functools.wraps(forward) def fwd_wrapper(*fargs, **fkwargs): assert len(fargs) == 3 or len(fargs) == 4 inputs, weights, hiddens = fargs[:3] assert utils.is_fp_tensor(inputs) assert isinstance(weights, list) cast_fn = utils.verbosify(utils.maybe_half, fn, verbose) new_args = [] # 0) Inputs new_args.append(cast_fn(inputs)) # 1) Weights if flat_weight_fp16 is not None: fp16_weights = utils.synthesize_flattened_rnn_weights( weights, flat_weight_fp16, fn, verbose) else: fp16_weights = [[cast_fn(w) for w in layer] for layer in weights] new_args.append(fp16_weights) # 2) Inputs: either a tuple (for LSTM) or single tensor if isinstance(hiddens, tuple): new_args.append(tuple(cast_fn(x) for x in hiddens)) elif utils.is_fp_tensor(hiddens): new_args.append(cast_fn(hiddens)) else: # Hiddens can, in principle, be `None` -- pass through new_args.append(hiddens) # 3) Batch sizes (0.4 or later only) if len(fargs) == 4: new_args.append(fargs[3]) return forward(*new_args, **fkwargs) return fwd_wrapper utils.set_func_save(handle, backend, fn, rnn_wrapper) def new_rnn_cast(fn, handle, verbose=False): # Forward+backward compatibility around https://github.com/pytorch/pytorch/pull/15744 # For rnn backend calls that route through _rnn_impls, we must patch the ref # that _rnn_impls stashed. For rnn backend calls that directly invoke # _VF.<backend>, e.g. _VF.lstm, we can patch onto VariableFunctionsShim, # which in turn has patched the ref named "_VF" in torch.nn.modules.rnn. if utils.has_func(torch.nn.modules.rnn._rnn_impls, fn): mod = torch.nn.modules.rnn._rnn_impls else: mod = torch.nn.modules.rnn._VF assert isinstance(mod, rnn_compat.VariableFunctionsShim) fn = fn.lower() orig_fn = utils.get_func(mod, fn) cast_fn = utils.verbosify(utils.maybe_half, fn, verbose) @functools.wraps(orig_fn) def wrapper(*args, **kwargs): # Exact call signature from modules/rnn.py assert len(args) == 9 assert len(kwargs) == 0 if not _amp_state.handle.is_active(): return orig_fn(*args, **kwargs) if isinstance(args[6], bool): params_idx = 2 # Not PackedSequence case else: params_idx = 3 # PackedSequence case new_args = [] for i, arg in enumerate(args): if i == params_idx: num_params = sum([x.numel() for x in arg]) fp16_weight_buf = args[0].new_empty((num_params,), dtype=torch.half) casted_weights = utils.new_synthesize_flattened_rnn_weights( arg, fp16_weight_buf, fn, verbose) new_args.append(casted_weights) elif utils.is_fp_tensor(arg): new_args.append(cast_fn(arg)) else: new_args.append(arg) return orig_fn(*new_args) utils.set_func_save(handle, mod, fn, wrapper) def disable_casts(mod, fn, handle): if not utils.has_func(mod, fn): return orig_fn = utils.get_func(mod, fn) @functools.wraps(orig_fn) def wrapper(*args, **kwargs): with handle._disable_casts(): return orig_fn(*args, **kwargs) utils.set_func_save(handle, mod, fn, wrapper)
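# --- Illustrative usage sketch (not part of the library source) ---
# A hedged sketch of the monkey-patching performed by the wrappers above:
# once a handle exists, whitelisted functions such as torch.mm have been
# swapped (via set_func_save) for casting wrappers, so FP32 CUDA inputs
# produce FP16 results while amp is active.
import torch
from apex import amp

handle = amp.init(enabled=True)
x = torch.randn(4, 4, device="cuda")   # FP32 input
y = torch.mm(x, x)                     # patched: inputs cast to FP16 internally
print(y.dtype)                         # expected torch.float16 while amp is active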
from . import utils, wrap import torch _VF = torch._C._VariableFunctions RNN_NAMES = ['rnn_relu', 'rnn_tanh', 'gru', 'lstm'] def _gen_VF_wrapper(name): def wrapper(*args, **kwargs): return getattr(_VF, name)(*args, **kwargs) return wrapper # Some python magic to generate an object that has the rnn cell functions # defined on it, all of which call into corresponding _VF version. # Intended to patch torch.nn.modules.rnn._VF (aka, the ref named "_VF" # imported at module scope within torch.nn.modules.rnn). This should # not affect third-party importers of _VF.py. class VariableFunctionsShim(object): def __init__(self): for name in RNN_NAMES: for suffix in ['', '_cell']: fn_name = name + suffix setattr(self, fn_name, _gen_VF_wrapper(fn_name)) def has_old_rnns(): try: torch.nn.backends.thnn.backend.LSTMCell return True except: return False def whitelist_rnn_cells(handle, verbose): # Different module + function names in old/new RNN cases if has_old_rnns(): fn_names = ['RNNReLUCell', 'RNNTanhCell', 'LSTMCell', 'GRUCell'] mod = torch.nn.backends.thnn.backend else: fn_names = [x + '_cell' for x in RNN_NAMES] mod = torch.nn.modules.rnn._VF assert isinstance(mod, VariableFunctionsShim) # Insert casts on cell functions for fn in fn_names: wrap.cached_cast(mod, fn, utils.maybe_half, handle, try_caching=True, verbose=verbose) if has_old_rnns(): # Special handling of `backward` for fused gru / lstm: # The `backward` method calls Tensor.sum() (blacklist) internally, # and then the resulting grad_input has the wrong type. # TODO: where else is this a problem? for rnn_type in ['GRUFused', 'LSTMFused']: mod = getattr(torch.nn._functions.thnn.rnnFusedPointwise, rnn_type) wrap.disable_casts(mod, 'backward', handle)
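# --- Illustrative usage sketch (not part of the library source) ---
# A hedged sketch of why the shim above exists: the raw
# torch._C._VariableFunctions object cannot be patched attribute-by-attribute,
# so amp installs this shim as torch.nn.modules.rnn._VF and then overrides
# individual RNN entry points on it.
shim = VariableFunctionsShim()
print(callable(shim.lstm), callable(shim.gru_cell))   # True True

original_lstm = shim.lstm
shim.lstm = lambda *args, **kwargs: original_lstm(*args, **kwargs)   # now patchable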
import torch from .. import utils MODULE = torch FP16_FUNCS = [ # Low level functions wrapped by torch.nn layers. # The wrapper layers contain the weights which are then passed in as a parameter # to these functions. 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d', 'conv_tbc', 'prelu', # BLAS 'addmm', 'addmv', 'addr', 'matmul', 'mm', 'mv', ] FP32_FUNCS = [ # Pointwise 'acos', 'asin', 'cosh', 'erfinv', 'exp', 'expm1', 'log', 'log10', 'log2', 'reciprocal', 'rsqrt', 'sinh', 'tan', # Other math 'pow', # Reduction 'cumprod', 'cumsum', 'dist', # 'mean', 'norm', 'prod', 'std', 'sum', 'var', # Misc 'renorm' ] version_strings = torch.__version__.split('.') version_major = version_strings[0] version_minor = version_strings[1] version_num = float(version_major + "." + version_minor) # Before torch 1.1, mean must be blacklisted. if version_num < 1.1: FP32_FUNCS.append('mean') # Before CUDA 9.1, batched matmul was missing fast FP16 kernels. We # check the CUDA version -- if at least 9.1, then put the bmm # functions on the fp16 list. Otherwise, put them on the fp32 list. _bmms = ['addbmm', 'baddbmm', 'bmm'] if utils.get_cuda_version() >= (9, 1, 0): FP16_FUNCS.extend(_bmms) else: FP32_FUNCS.extend(_bmms) # Multi-tensor fns that may need type promotion CASTS = [ # Multi-tensor math 'addcdiv', 'addcmul', 'atan2', 'cross', 'bilinear', 'dot', # Element-wise _or_ tensor-wise math 'add', 'div', 'mul', # Comparison 'eq', 'equal', 'ge', 'gt', 'le', 'lt', 'ne' ] # Functions that take sequence arguments. We need to inspect the whole # sequence and cast to the widest type. SEQUENCE_CASTS = [ 'cat', 'stack' ]
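# --- Illustrative usage sketch (not part of the library source) ---
# A hedged example of overriding the list-based defaults above: if a
# whitelisted op (say, torch.bmm) should run in FP32 on your setup, register
# it before init() rather than editing these lists in place.
import torch
from apex import amp

amp.register_float_function(torch, 'bmm')
handle = amp.init(enabled=True)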
# TODO: think about the following two. They do weird things. # - torch.nn.utils.clip_grad (but it should always be fp32 anyway) # - torch.nn.utils.weight_norm # Notes: # F.instance_norm uses batch_norm internally. Which correctly handles # fp16 in/out with fp32 weights. So we shouldn't do anything for # either of these. # F.normalize calls `input.norm()` internally, so it's redundant, but # kept here in case impl. changes. # F.cosine_similarity is same: calls `x.norm()` internally. import torch.nn.functional MODULE = torch.nn.functional FP16_FUNCS = [ 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d', 'conv_tbc', # Undocumented / maybe new? 'linear', ] FP32_FUNCS = [ # Interpolation/Upsampling TODO: Remove for 1.2 'interpolate', 'grid_sample', # Pointwise 'softplus', 'softmin', 'log_softmax', 'softmax', # Normalization 'layer_norm', 'group_norm', 'local_response_norm', 'normalize', 'cosine_similarity', # Loss functions # TODO: which of these can be fp16? 'poisson_nll_loss', 'cosine_embedding_loss', 'cross_entropy', 'hinge_embedding_loss', 'kl_div', 'l1_loss', 'mse_loss', 'margin_ranking_loss', 'multilabel_margin_loss', 'multilabel_soft_margin_loss', 'multi_margin_loss', 'nll_loss', 'binary_cross_entropy_with_logits', 'smooth_l1_loss', 'soft_margin_loss', 'triplet_margin_loss' ] BANNED_FUNCS = [ ('binary_cross_entropy', ("\namp does not work out-of-the-box with `F.binary_cross_entropy` or `torch.nn.BCELoss.` " "It requires that the output of the previous function be already a FloatTensor. \n\n" "Most models have a Sigmoid right before BCELoss. In that case, you can use\n" " torch.nn.BCEWithLogitsLoss\nto combine Sigmoid+BCELoss into a single layer " "that is compatible with amp.\nAnother option is to add\n" " amp.register_float_function(torch, 'sigmoid')\nbefore calling `amp.init()`.\n" "If you _really_ know what you are doing, you can disable this warning by passing " "allow_banned=True to `amp.init()`.")) ]
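# --- Illustrative usage sketch (not part of the library source) ---
# A hedged sketch of the workaround described in the BANNED_FUNCS message
# above: replace Sigmoid + BCELoss with BCEWithLogitsLoss, or register
# torch.sigmoid as an FP32 function before init(). Logits and targets are
# toy placeholders.
import torch
from apex import amp

logits = torch.randn(4, 1, device="cuda")
targets = torch.rand(4, 1, device="cuda")

# Preferred: fused sigmoid + BCE, which is safe under amp.
criterion = torch.nn.BCEWithLogitsLoss()
loss = criterion(logits, targets)

# Alternative mentioned in the message above:
# amp.register_float_function(torch, 'sigmoid')
# handle = amp.init()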
from .. import compat from . import torch_overrides import importlib import torch # if compat.variable_is_tensor() and not compat.tensor_is_variable(): MODULE = torch.Tensor # else: # MODULE = torch.autograd.Variable FP16_FUNCS = [ '__matmul__', ] FP32_FUNCS = [ '__ipow__', '__pow__', '__rpow__', # Cast to fp32 before transfer to CPU 'cpu', ] CASTS = [ '__add__', '__div__', '__eq__', '__ge__', '__gt__', '__iadd__', '__idiv__', '__imul__', '__isub__', '__itruediv__', '__le__', '__lt__', '__mul__', '__ne__', '__radd__', '__rdiv__', '__rmul__', '__rsub__', '__rtruediv__', '__sub__', '__truediv__', ] # None of these, but here to make code cleaner. SEQUENCE_CASTS = [] # We need to grab all the methods from torch_overrides and add them to # the Tensor lists as well, as almost all methods are duplicated # between `torch` and `torch.Tensor` (and check with `hasattr`, # because a few random ones aren't defined on Tensor) _self_mod = importlib.import_module(__name__) for attrname in ['FP16_FUNCS', 'FP32_FUNCS', 'CASTS', 'SEQUENCE_CASTS']: lst = getattr(_self_mod, attrname) for fn in getattr(torch_overrides, attrname): if hasattr(MODULE, fn): lst.append(fn)
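# --- Illustrative usage sketch (not part of the library source) ---
# A hedged sketch of the promotion lists above: with amp active, mixed
# Half/Float operands to methods in CASTS (e.g. __add__) are promoted to FP32
# rather than producing a type-mismatch error on the PyTorch versions apex
# targeted.
import torch
from apex import amp

handle = amp.init(enabled=True)
a = torch.randn(4, device="cuda").half()
b = torch.randn(4, device="cuda")        # float32
c = a + b                                # promoted by the patched __add__
print(c.dtype)                           # expected torch.float32 while amp is active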