## @package optimizer_test_util
# Module caffe2.python.optimizer_test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace, cnn
class OptimizerTestBase(object):
"""
This is an abstract base class. It deliberately does not inherit from
unittest.TestCase and is not named 'Test*'; classes that inherit from it
should do both of those things (see the sketch after this class).
"""
def testDense(self):
perfect_model = np.array([2, 6, 5, 0, 1]).astype(np.float32)
np.random.seed(123) # make test deterministic
data = np.random.randint(
2,
size=(20, perfect_model.size)).astype(np.float32)
label = np.dot(data, perfect_model)[:, np.newaxis]
model = cnn.CNNModelHelper("NCHW", name="test")
out = model.FC(
'data', 'fc', perfect_model.size, 1, ('ConstantFill', {}),
('ConstantFill', {}), axis=0
)
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['fc_w'], core.BlobReference)
optimizer = self.build_optimizer(model)
workspace.FeedBlob('data', data[0])
workspace.FeedBlob('label', label[0])
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
idx = np.random.randint(data.shape[0])
workspace.FeedBlob('data', data[idx])
workspace.FeedBlob('label', label[idx])
workspace.RunNet(model.net.Proto().name)
np.testing.assert_allclose(
perfect_model[np.newaxis, :],
workspace.FetchBlob('fc_w'),
atol=1e-2
)
self.check_optimizer(optimizer)
def testSparse(self):
# To test duplicated indices we assign two indices to each weight, so
# each weight may be counted once or twice.
DUPLICATION = 2
perfect_model = np.array([2, 6, 5, 0, 1]).astype(np.float32)
np.random.seed(123) # make test deterministic
data = np.random.randint(
2,
size=(20, perfect_model.size * DUPLICATION)).astype(np.float32)
label = np.dot(data, np.repeat(perfect_model, DUPLICATION))
model = cnn.CNNModelHelper("NCHW", name="test")
# Imitate what the model wrapper does
w = model.param_init_net.ConstantFill(
[], 'w', shape=[perfect_model.size], value=0.0)
model.params.append(w)
picked = model.net.Gather([w, 'indices'], 'gather')
out = model.ReduceFrontSum(picked, 'sum')
sq = model.SquaredL2Distance([out, 'label'])
loss = model.AveragedLoss(sq, "avg_loss")
grad_map = model.AddGradientOperators([loss])
self.assertIsInstance(grad_map['w'], core.GradientSlice)
optimizer = self.build_optimizer(model)
workspace.CreateBlob('indices')
workspace.CreateBlob('label')
for indices_type in [np.int32, np.int64]:
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
for _ in range(2000):
idx = np.random.randint(data.shape[0])
# transform into indices of binary features
indices = np.repeat(np.arange(perfect_model.size),
DUPLICATION)[data[idx] == 1]
if indices.size == 0:
continue
workspace.FeedBlob(
'indices',
indices.reshape((indices.size,)).astype(indices_type)
)
workspace.FeedBlob('label',
np.array(label[idx]).astype(np.float32))
workspace.RunNet(model.net.Proto().name)
np.testing.assert_allclose(
perfect_model,
workspace.FetchBlob('w'),
atol=1e-2
)
self.check_optimizer(optimizer)
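# Illustrative sketch (not part of the original module): a concrete test case
# is expected to mix OptimizerTestBase with unittest.TestCase and provide
# build_optimizer / check_optimizer. The build_sgd call below is just one
# possible optimizer choice.
#
#   import unittest
#   from caffe2.python import optimizer
#
#   class TestSgd(OptimizerTestBase, unittest.TestCase):
#       def build_optimizer(self, model):
#           return optimizer.build_sgd(model, base_learning_rate=0.1)
#
#       def check_optimizer(self, optimizer):
#           pass  # e.g. inspect the optimizer's auxiliary parameters here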
## @package muji
# Module caffe2.python.muji
"""muji.py does multi-gpu training for caffe2 with no need to change the c++
side code. Everything is defined on the computation graph level.
Currently, here are the assumptions: we only support the following use cases:
- 2 gpus, where peer access is enabled between them.
- 4 gpus, where peer access are enabled between all of them.
- 8 gpus, where peer access are enabled in two groups,
between {1, 2, 3, 4} and {5, 6, 7, 8}.
"""
from caffe2.python import core
from caffe2.proto import caffe2_pb2
def OnGPU(gpu_id):
"""A utility function that returns a device option protobuf of the
specified gpu id.
"""
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = gpu_id
return device_option
def OnCPU():
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
return device_option
def Allreduce(net, blobs, reduced_affix="_reduced", gpu_indices=None):
"""The general Allreduce interface that reroutes the function calls.
"""
if gpu_indices is None:
gpu_indices = range(len(blobs))
if len(gpu_indices) != len(blobs):
raise RuntimeError(
"gpu_indices length and blobs length mismatch: %d vs %d" %
(len(gpu_indices), len(blobs))
)
if len(blobs) == 2:
return Allreduce2(net, blobs, reduced_affix, gpu_indices)
elif len(blobs) == 4:
return Allreduce4(net, blobs, reduced_affix, gpu_indices)
elif len(blobs) == 8:
return Allreduce8(net, blobs, reduced_affix, gpu_indices)
else:
return AllreduceFallback(net, blobs, reduced_affix, gpu_indices)
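# Illustrative usage sketch (not part of the original module); the net name,
# blob names and the 2-GPU layout are hypothetical:
#
#   from caffe2.python import core, muji
#   net = core.Net("allreduce_example")
#   # "grad_gpu0" / "grad_gpu1" are assumed to already live on GPUs 0 and 1.
#   reduced = muji.Allreduce(net, ["grad_gpu0", "grad_gpu1"], gpu_indices=[0, 1])
#   # reduced[i] names the summed blob residing on gpu_indices[i].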
def Allreduce2(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 2 gpus.
Algorithm: 0r <- 0 + 1, 1r <- 0r, where r means "reduced"
"""
a, b = blobs
gpu_a, gpu_b = gpu_indices
a_reduced = net.Add([a, b], a + reduced_affix, device_option=OnGPU(gpu_a))
b_reduced = a_reduced.Copy(
[],
b + reduced_affix,
device_option=OnGPU(gpu_b)
)
return a_reduced, b_reduced
def Allreduce4(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 4 gpus.
Algorithm: 2 level reduction.
0r <- 0 + 1, 2r <- 2 + 3
0r <- 0r + 2r
2r <- 0r,
1r <- 0r, 3r <- 2r
"""
a, b, c, d = blobs
gpu_a, gpu_b, gpu_c, gpu_d = gpu_indices
# a_reduced <- a+b, c_reduced <- c + d
a_reduced = net.Add(
[a, b],
str(a) + reduced_affix,
device_option=OnGPU(gpu_a)
)
c_reduced = net.Add(
[c, d],
str(c) + reduced_affix,
device_option=OnGPU(gpu_c)
)
# a_reduced <- a_reduced + c_reduced
a_reduced = a_reduced.Add(c_reduced, a_reduced, device_option=OnGPU(gpu_a))
# broadcast a_reduced to c_reduced
c_reduced = a_reduced.Copy([], c_reduced, device_option=OnGPU(gpu_c))
# broadcast to b and d
b_reduced = a_reduced.Copy(
[],
str(b) + reduced_affix,
device_option=OnGPU(gpu_b)
)
d_reduced = c_reduced.Copy(
[],
str(d) + reduced_affix,
device_option=OnGPU(gpu_d)
)
return a_reduced, b_reduced, c_reduced, d_reduced
def Allreduce8(net, blobs, reduced_affix, gpu_indices):
"""Allreduce for 8 gpus.
Algorithm: 3 level reduction.
0r <- 0 + 1, 2r <- 2 + 3, 4r <- 4 + 5, 6r <- 6 + 7
0r <- 0r + 2r, 4r <- 4r + 6r
0r <- 0r + 4r
4r <- 0r
2r <- 0r, 6r <- 4r
1r <- 0r, 3r <- 2r, 5r <- 4r, 7r <- 6r
"""
reduced = [None] * 8
# Reduction level 1
for i in [0, 2, 4, 6]:
reduced[i] = net.Add(
[blobs[i], blobs[i + 1]],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
# Reduction level 2
for i in [0, 4]:
reduced[i] = net.Add(
[reduced[i], reduced[i + 2]],
str(blobs[i]) + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
# Reduction level 3: this involves a copy.
reduced_4_copy = reduced[4].Copy(
[],
str(reduced[4]) + '_copy',
device_option=OnGPU(gpu_indices[0])
)
reduced[0] = reduced[0].Add(
reduced_4_copy,
reduced[0],
device_option=OnGPU(gpu_indices[0])
)
# Broadcast level 1
reduced[4] = reduced[0].Copy(
[],
reduced[4],
device_option=OnGPU(gpu_indices[4])
)
# Broadcast level 2
for i in [2, 6]:
reduced[i] = reduced[i - 2].Copy(
[],
reduced[i],
device_option=OnGPU(gpu_indices[i])
)
# Broadcast level 3
for i in [1, 3, 5, 7]:
reduced[i] = reduced[i - 1].Copy(
[],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
return reduced
def AllreduceFallback(net, blobs, reduced_affix, gpu_indices):
"""A fallback option for Allreduce with no assumption on p2p.
Algorithm: a flat operation on gpu 0
0r <- 0
0r <- 0r + i for i in gpu_indices[1:]
ir <- 0r for i in gpu_indices[1:]
"""
reduced = [None] * len(gpu_indices)
# copy first
reduced[0] = net.Copy(
blobs[0],
blobs[0] + reduced_affix,
device_option=OnGPU(gpu_indices[0])
)
# do temp copy and add
temp_name = reduced[0] + '_temp_copy'
for i in range(1, len(gpu_indices)):
temp = net.Copy(
blobs[i],
temp_name,
device_option=OnGPU(gpu_indices[0])
)
reduced[0] = reduced[0].Add(
temp,
reduced[0],
device_option=OnGPU(gpu_indices[0])
)
# Broadcast to everyone else
for i in range(1, len(gpu_indices)):
reduced[i] = net.Copy(
reduced[0],
blobs[i] + reduced_affix,
device_option=OnGPU(gpu_indices[i])
)
return reduced
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, data_parallel_model, cnn, rnn_cell
from caffe2.python.test_util import TestCase
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class GPUDataParallelModelTest(TestCase):
def run_model(self, gpu_devices):
'''
Helper function for test_equiv
'''
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
fc = model.FC("data", "fc", 16, 1,
("ConstantFill", {}), ("ConstantFill", {}))
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
return [loss]
def param_update_fun(model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
for param in model.GetParams():
grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, grad, LR], param)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
order="NHWC",
name="test{}".format(gpu_devices),
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
)
np.random.seed(2603)
# Each run gets the same input, independent of the number of gpus
batch_size = 64
for i in range(0, 10):
full_data = np.random.rand(batch_size, 16)
full_labels = np.round(full_data[:, 0])
batch_per_device = batch_size // len(gpu_devices)
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
data = full_data[st:en, :].astype(np.float32)
labels = full_labels[st:en].astype(np.float32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/data".format(g), data)
workspace.FeedBlob("gpu_{}/label".format(g), labels)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
print(i, workspace.FetchBlob("gpu_0/fc_w").flatten()[:5])
workspace.RunNet(model.net.Proto().name)
return workspace.FetchBlob("gpu_0/fc_w")
def test_equiv(self):
'''
Test that the model produces exactly the same results for a given
total batch size, independent of the number of GPUs.
'''
result_2gpus = self.run_model([0, 1])
result_1gpus = self.run_model([0])
self.assertTrue(np.allclose(result_1gpus, result_2gpus))
if workspace.NumCudaDevices() >= 4:
result_4gpus = self.run_model(range(4))
self.assertTrue(np.allclose(result_1gpus, result_4gpus))
if workspace.NumCudaDevices() >= 8:
result_8gpus = self.run_model(range(8))
self.assertTrue(np.allclose(result_1gpus, result_8gpus))
def test_checkpoint_params(self):
def add_input_ops(model):
pass
def add_model_ops(model, loss_scale):
model.NHWC2NCHW("data", "data_nchw")
model.Conv("data_nchw", 'conv1', 3, 64,
weight_init=("MSRAFill", {}), kernel=7,
stride=2, pad=3, no_bias=0)
model.SpatialBN('conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3)
model.Relu('conv1_spatbn_relu', 'conv1_spatbn_relu')
model.MaxPool('conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
model.FC('pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
model.Sigmoid('fc', 'fc_sigm')
model.Softmax('fc_sigm', 'softmax')
model.LabelCrossEntropy(['softmax', 'label'], 'xent')
loss = model.AveragedLoss('xent', 'loss')
# Add a duplicate param init to ensure it does not cause issues
model.param_init_net.ConstantFill(
[], ["fc_w"], shape=((64 * 56 * 56), 1000)
)
return [loss]
def add_parameter_update_ops(model):
model.Iter("ITER")
LR = model.param_init_net.ConstantFill(
[], 'LR', shape=[1], value=0.1
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
)
model = cnn.CNNModelHelper(
order="NHWC",
name="test",
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=add_input_ops,
forward_pass_builder_fun=add_model_ops,
param_update_builder_fun=add_parameter_update_ops,
devices=[1, 2, 3],
)
# Only gpu_1 params should be returned (gpu_1 is the first gpu)
checkpoint_params = data_parallel_model.GetCheckpointParams(model)
for p in model.GetParams("gpu_1/"):
self.assertTrue(p in checkpoint_params)
self.assertTrue(p + "_momentum" in checkpoint_params)
for p in model.GetParams("gpu_2/"):
self.assertFalse(p in checkpoint_params)
for c in model.GetComputedParams("gpu_1/"):
self.assertTrue(c in checkpoint_params)
for c in model.GetComputedParams("gpu_2/"):
self.assertFalse(c in checkpoint_params)
self.assertFalse(core.BlobReference("gpu_1/data") in checkpoint_params)
self.assertTrue(core.BlobReference("gpu_1/ITER") in checkpoint_params)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class RecurrentNetworkParallelTest(TestCase):
def run_model(self, gpu_devices):
'''
Helper function for test_equiv
'''
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
workspace.FeedBlob(
core.ScopedBlobReference("seq_lengths"),
np.array([self.T] * self.batch_per_device, dtype=np.int32)
)
model.param_init_net.ConstantFill(
[],
"hidden_init",
value=0.0,
shape=[1, self.batch_per_device, self.hidden_dim]
)
model.param_init_net.ConstantFill(
[],
"cell_init",
value=0.0,
shape=[1, self.batch_per_device, self.hidden_dim]
)
output, _last_hidden, _, _last_state, = rnn_cell.LSTM(
model=model,
input_blob="data",
seq_lengths="seq_lengths",
initial_states=("hidden_init", "cell_init"),
dim_in=self.input_dim,
dim_out=self.hidden_dim,
scope="partest",
)
# A silly loss function
loss = model.AveragedLoss(
model.Sub([output, "target"], "dist"),
"loss",
)
loss = model.Scale(loss, "loss_scaled", scale=loss_scale)
return [loss]
def param_update_fun(model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, param_grad, LR], param)
assert len(model.GetParams()) == len(model.params) // len(model._devices)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
name="recurrent_test{}".format(gpu_devices),
)
self.T = 8
self.batch_size = 64
self.input_dim = 8
self.hidden_dim = 31
self.batch_per_device = self.batch_size // len(gpu_devices)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
optimize_gradient_memory=True,
)
# Change all initialization to ConstantFills so that
# everything is deterministic
for op in model.param_init_net.Proto().op:
if op.type.endswith('Fill'):
op.type = 'ConstantFill'
# Each run gets the same input, independent of the number of gpus
np.random.seed(20150210)
for i in range(0, 10):
full_data = np.random.rand(self.T, self.batch_size, self.input_dim)
full_target = np.random.rand(
self.T, self.batch_size, self.hidden_dim
)
for (j, g) in enumerate(gpu_devices):
st = j * self.batch_per_device
en = st + self.batch_per_device
data = full_data[:, st:en, :].astype(np.float32)
targets = full_target[:, st:en, :].astype(np.float32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/data".format(g), data)
workspace.FeedBlob("gpu_{}/target".format(g), targets)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.RunNet(model.net.Proto().name)
return workspace.FetchBlob("gpu_0/partest/i2h_w")
def test_equiv_recurrent(self):
'''
Test that the model produces exactly the same results for a given
total batch size, independent of the number of GPUs.
'''
result_2gpus = self.run_model([0, 1])
result_1gpus = self.run_model([0])
print("result 1", result_1gpus.flatten()[:5])
print("result 2", result_2gpus.flatten()[:5])
self.assertTrue(np.allclose(result_1gpus, result_2gpus))
if workspace.NumCudaDevices() >= 4:
result_4gpus = self.run_model(range(4))
self.assertTrue(np.allclose(result_1gpus, result_4gpus))
if workspace.NumCudaDevices() >= 8:
result_8gpus = self.run_model(range(8))
self.assertTrue(np.allclose(result_1gpus, result_8gpus))
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class SparseDataParallelModelTest(TestCase):
'''
Create and run the model. We try storing the indices for Gather
both on CPU and on GPU.
'''
def run_model(self, V, gpu_devices, cpu_indices):
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
if cpu_indices:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
gathered_cpu = model.net.Gather(
[self.vecs, 'indices'], 'gathered_cpu')
gathered = model.CopyCPUToGPU(gathered_cpu, "gathered")
else:
gpu_vecs = model.param_init_net.CopyCPUToGPU(
self.vecs, "gpuvecs",
)
model.params.append(gpu_vecs)
gathered = model.net.Gather([gpu_vecs, 'indices'], 'gathered')
flattened = model.Flatten(gathered, "flattened")
fc = model.FC(flattened, "fc", 16 * 16, 1,
("ConstantFill", {}), ("ConstantFill", {}))
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
return [loss]
def param_update_fun(model):
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
LR = model.CopyCPUToGPU(self.LR, "LR")
for param in model.GetParams():
param_grad = model.param_to_grad[param]
if not isinstance(param_grad, core.GradientSlice):
model.WeightedSum([param, ONE, param_grad, LR], param)
else:
param_momentum = model.param_init_net.ConstantFill(
[param],
param + '_momentum',
value=0.0,
)
model.net.SparseMomentumSGDUpdate(
[
param_grad.values,
param_momentum,
LR,
param,
param_grad.indices,
],
[
param_grad.values, param_momentum, param
],
momentum=0.1,
nesterov=0,
)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
order="NHWC",
name="sparse_test{}".format(gpu_devices),
)
with core.NameScope("cpu"):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
self.ITER = model.Iter("ITER")
self.LR = model.net.LearningRate(
[self.ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
self.vecs = model.param_init_net.UniformFill(
[], "vecs", shape=[V, 16])
if cpu_indices:
model.params.append(self.vecs)
self.ONE_CPU = model.param_init_net.ConstantFill(
[], "ONE_CPU", shape=[1], value=1.0,
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
)
# Update the vecs
if cpu_indices:
with core.NameScope("cpu"):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
for param in model.GetParams():
param_grad = model.param_to_grad[param]
model.ScatterWeightedSum([param, self.ONE_CPU,
param_grad.indices,
param_grad.values,
self.LR],
self.vecs)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
model.CopyGPUToCPU("gpu_0/gpuvecs", self.vecs)
np.random.seed(2603)
# Each run gets the same input, independent of the number of gpus
batch_size = 64
for i in range(0, 10):
full_indices = np.random.permutation(V)[:batch_size * 16].reshape(
batch_size, 16
)
full_labels = full_indices[:, 0] % 2
batch_per_device = batch_size // len(gpu_devices)
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
indices = full_indices[st:en, :].astype(np.int32)
labels = full_labels[st:en].astype(np.float32)
device_for_indices = core.DeviceOption(caffe2_pb2.CPU)
if not cpu_indices:
device_for_indices = core.DeviceOption(caffe2_pb2.CUDA, g)
with core.DeviceScope(device_for_indices):
workspace.FeedBlob("gpu_{}/indices".format(g), indices)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/label".format(g), labels)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
# Force vecs to be same on all runs
orig_vecs = np.random.rand(V, 16).astype(np.float32)
workspace.FeedBlob(
self.vecs,
orig_vecs
)
if not cpu_indices:
for g in gpu_devices:
workspace.FeedBlob(
"gpu_{}/gpuvecs".format(g),
orig_vecs,
device_option=core.DeviceOption(caffe2_pb2.CUDA, g),
)
workspace.CreateNet(model.net)
workspace.RunNet(model.net.Proto().name)
if len(gpu_devices) == 2:
open("dump.txt", "w").write(str(model.net.Proto()))
if not cpu_indices:
idx = workspace.FetchBlob("gpu_0/indices")
idx = list(idx.flatten())
n = len(idx)
nu = len(set(idx))
assert n == nu, "We cannot have duplicate indices"
# Sanity check to see the vecs were updated
self.assertFalse(
np.allclose(workspace.FetchBlob(self.vecs), orig_vecs))
return [workspace.FetchBlob(self.vecs if cpu_indices else "gpu_0/gpuvecs"),
workspace.FetchBlob("gpu_0/fc_w")]
def _test_equiv_sparse(self, cpu_indices):
'''
Test that the model produces exactly the same results for a given
total batch size, independent of the number of GPUs.
'''
V = 10000
result_2gpus = self.run_model(V, [0, 1], cpu_indices)
result_1gpus = self.run_model(V, [0], cpu_indices)
self.assertTrue(np.allclose(result_1gpus[0], result_2gpus[0]))
self.assertTrue(np.allclose(result_1gpus[1], result_2gpus[1]))
if workspace.NumCudaDevices() >= 4:
result_4gpus = self.run_model(V, range(4), cpu_indices)
self.assertTrue(np.allclose(result_1gpus[0], result_4gpus[0]))
self.assertTrue(np.allclose(result_1gpus[1], result_4gpus[1]))
if workspace.NumCudaDevices() >= 8:
result_8gpus = self.run_model(V, range(8), cpu_indices)
self.assertTrue(np.allclose(result_1gpus[0], result_8gpus[0]))
self.assertTrue(np.allclose(result_1gpus[1], result_8gpus[1]))
def test_equiv_sparse(self):
self._test_equiv_sparse(True)
self._test_equiv_sparse(False)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class ParallelizeGPUBMUFTest(TestCase):
def _run_model(self, gpu_devices):
'''
Helper function for test_equiv
'''
def input_builder_fun(model):
return None
def _model_build_fun(self, model, loss_scale):
fc = model.FC(
"data", "fc", 16, 1, ("ConstantFill", {}), ("ConstantFill", {})
)
fc_fl = model.FlattenToVec(fc, "fc_fl")
sigm = model.Sigmoid(fc_fl, "sigm")
sq = model.SquaredL2Distance([sigm, "label"], "sq")
loss = model.AveragedLoss(sq, "loss")
loss = model.Scale(loss, scale=loss_scale)
return [loss]
def _param_update_fun(self, model):
ITER = model.Iter("ITER")
LR = model.net.LearningRate(
[ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
for param in model.GetParams():
grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, grad, LR], param)
def _generate_data(self, gpu_devices):
np.random.seed(26)
# Each run gets the same input, independent of the number of gpus
batch_size = 64
for _ in range(0, 10):
full_data = np.random.rand(batch_size, 16)
full_labels = np.round(full_data[:, 0])
batch_per_device = batch_size // len(gpu_devices)
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
data = full_data[st:en, :].astype(np.float32)
labels = full_labels[st:en].astype(np.float32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/data".format(g), data)
workspace.FeedBlob("gpu_{}/label".format(g), labels)
def test_parallelize_gpu_bmuf(self):
model = cnn.CNNModelHelper(
order="NHWC",
name="test"
)
gpu_ids = [0, 1]
def input_builder_fun(model):
return None
self._generate_data(gpu_ids)
data_parallel_model.Parallelize_GPU_BMUF(
model,
input_builder_fun,
self._model_build_fun,
self._param_update_fun,
devices=gpu_ids,
)
data_parallel_model.RunInitNet(model)
# Check initial momentum params are zeros
self.assertEqual(list(model._device_grouped_blobs.keys()), ['fc_w', 'fc_b'])
self.assertEqual(workspace.FetchBlob('gpu_0/fc_b_v'), 0)
np.testing.assert_equal(
workspace.FetchBlob('gpu_0/fc_w_v'),
np.zeros(16).astype(np.float32).reshape(1, 16)
)
# Run the algorithm for one iteration to have non-zero params.
data_parallel_model.RunNet(model, 1)
# Save this iteration's momentum and the post-local-update params
v_b_ = workspace.FetchBlob('gpu_0/fc_b_v')
v_w_ = workspace.FetchBlob('gpu_0/fc_w_v')
workspace.RunNetOnce(model.net)
b_0_ = workspace.FetchBlob('gpu_0/fc_b')
w_0_ = workspace.FetchBlob('gpu_0/fc_w')
b_1_ = workspace.FetchBlob('gpu_1/fc_b')
w_1_ = workspace.FetchBlob('gpu_1/fc_w')
def getBlockAvg(param_name):
param_0 = workspace.FetchBlob("gpu_0/{}".format(param_name))
param_1 = workspace.FetchBlob("gpu_1/{}".format(param_name))
return (param_0 + param_1) / 2
# Compute block gradients.
b_g_ = workspace.FetchBlob('gpu_0/fc_b_g')
w_g_ = workspace.FetchBlob('gpu_0/fc_w_g')
workspace.RunNetOnce(model._global_model_param_updates_net)
g_b = (b_0_ + b_1_) / 2 - b_g_
g_w = (w_0_ + w_1_) / 2 - w_g_
v_b = workspace.FetchBlob('gpu_0/fc_b_v')
v_w = workspace.FetchBlob('gpu_0/fc_w_v')
w_g = workspace.FetchBlob('gpu_0/fc_w_g')
b_g = workspace.FetchBlob('gpu_0/fc_b_g')
w_0 = workspace.FetchBlob('gpu_0/fc_w')
b_0 = workspace.FetchBlob('gpu_0/fc_b')
w_1 = workspace.FetchBlob('gpu_1/fc_w')
b_1 = workspace.FetchBlob('gpu_1/fc_b')
# Check momentum update step
np.testing.assert_equal(v_b, 0.5 * v_b_ + g_b)
np.testing.assert_equal(v_w, 0.5 * v_w_ + g_w)
np.testing.assert_equal(w_g, w_0)
np.testing.assert_equal(w_g, w_1)
np.testing.assert_equal(b_g, b_0)
np.testing.assert_equal(b_g, b_1)
# Check params update step
np.testing.assert_equal(w_0, w_g_ + v_w)
np.testing.assert_equal(b_0, b_g_ + v_b)
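# Reading aid (added, not in the original test): with block momentum 0.5 and
# block learning rate 1.0, the assertions above verify, for each param p:
#   g = avg_over_gpus(p_local) - p_global_prev   (block gradient)
#   v = 0.5 * v_prev + g                         (momentum update)
#   p_global = p_global_prev + v                 (parameter update)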
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
@unittest.skipIf(workspace.NumCudaDevices() < 2, "Need at least 2 GPUs.")
class SparseDataParallelModelTestWithSharedIndices(TestCase):
'''
Create and run a model in which several Gather ops share the same
indices blob on the GPU.
'''
def run_model(self, V, gpu_devices):
def input_builder_fun(model):
return None
def model_build_fun(model, loss_scale):
gpu_vecs_gathered = []
gpu_vecs = []
for num, vec in enumerate(self.vecs):
gpu_vec = model.param_init_net.CopyCPUToGPU(
vec, 'gpuvec_{}'.format(num),
)
if num != 2:
model.params.append(gpu_vec)
gpu_vecs.append(gpu_vec)
for num, gpu_vec in enumerate(gpu_vecs):
gpu_vec_gathered = model.net.Gather(
[gpu_vec, 'indices'],
['gpu_vec_gathered_{}'.format(num)]
)
gpu_vecs_gathered.append(gpu_vec_gathered)
assert len(gpu_vecs_gathered) == 3
fc = model.net.FC(
[
gpu_vecs_gathered[2],
gpu_vecs_gathered[0],
gpu_vecs_gathered[1],
],
['fc'],
)
_, loss = model.net.SoftmaxWithLoss(
[fc, 'label'],
['ce_loss', 'avg_loss'],
only_loss=True,
)
loss = model.Scale(loss, scale=loss_scale)
model.net.Print(loss, [], limit=10)
return [loss]
def param_update_fun(model):
ONE = model.param_init_net.ConstantFill(
[], "ONE", shape=[1], value=1.0,
)
LR = model.CopyCPUToGPU(self.LR, "LR")
for param in model.GetParams():
param_grad = model.param_to_grad[param]
if not isinstance(param_grad, core.GradientSlice):
model.WeightedSum([param, ONE, param_grad, LR], param)
else:
model.net.ScatterWeightedSum(
[
param,
ONE,
param_grad.indices,
param_grad.values,
ONE,
],
param,
)
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(
order="NHWC",
name="sparse_test{}".format(gpu_devices),
)
batch_size = 32
batch_per_device = batch_size // len(gpu_devices)
with core.NameScope("cpu"):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
self.ITER = model.Iter("ITER")
self.LR = model.net.LearningRate(
[self.ITER],
"LR",
base_lr=(-0.1),
policy="fixed",
)
'''
self.vecs consists of 3 big blobs on which we call Gather:
1) FC weights, shape=(V, 16)
2) FC bias, shape=(V)
3) FC input, shape=(batch_per_device, 16)
'''
self.vecs = [
model.param_init_net.UniformFill(
[], "vec_{}".format(num), shape=[V, 16])
for num in range(2)
]
self.vecs.append(
model.param_init_net.UniformFill(
[],
"vec_2", shape=[batch_per_device, 16]
)
)
self.ONE_CPU = model.param_init_net.ConstantFill(
[], "ONE_CPU", shape=[1], value=1.0,
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=input_builder_fun,
forward_pass_builder_fun=model_build_fun,
param_update_builder_fun=param_update_fun,
devices=gpu_devices,
)
# Update the vecs
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
for num, vec in enumerate(self.vecs[:-1]):
model.CopyGPUToCPU("gpu_0/gpuvec_{}".format(num), vec)
# Each run gets the same input, independent of the number of gpus
for i in range(0, 10):
np.random.seed(2603)
full_indices = np.random.permutation(V)[:batch_size].reshape(
batch_size
)
full_labels = full_indices[:] % batch_per_device
for (j, g) in enumerate(gpu_devices):
st = j * batch_per_device
en = st + batch_per_device
indices = full_indices[st:en].astype(np.int32)
labels = full_labels[st:en].astype(np.int32)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, g)):
workspace.FeedBlob("gpu_{}/indices".format(g), indices)
workspace.FeedBlob("gpu_{}/label".format(g), labels)
if i == 0:
workspace.RunNetOnce(model.param_init_net)
# Force vecs to be same on all runs
orig_vecs = [
np.random.rand(V, 16).astype(np.float32),
np.random.rand(V).astype(np.float32),
np.random.rand(V, 16).astype(np.float32),
]
for vec, orig_vec in zip(self.vecs, orig_vecs):
workspace.FeedBlob(
vec,
orig_vec
)
for g in gpu_devices:
for num, orig_vec in enumerate(orig_vecs):
workspace.FeedBlob(
"gpu_{}/gpuvec_{}".format(g, num),
orig_vec,
device_option=core.DeviceOption(
caffe2_pb2.CUDA, g),
)
workspace.CreateNet(model.net)
workspace.RunNet(model.net.Proto().name)
idx = workspace.FetchBlob('gpu_0/indices')
grad_slices = [
workspace.FetchBlob(
'gpu_{}/gpu_vec_gathered_{}_grad'.format(g, num))
for g in gpu_devices for num in range(2)
]
for grad_slice in grad_slices:
# print (len(idx), len(grad_slice))
assert len(idx) == len(grad_slice), (
'Number of indices {} is not same as number of gradient '
'slices {}. This might lead to illegal memory access'.format(
len(idx), len(grad_slice)
)
)
def test_sparse_shared_indices_gpu(self):
'''
Test that the model has the same number of indices and gradient rows
for a given total batch size, independent of the number of GPUs.
'''
V = 10000
self.run_model(V, [0, 1])
self.run_model(V, [0])
if workspace.NumCudaDevices() >= 4:
self.run_model(V, range(4))
if workspace.NumCudaDevices() >= 8:
self.run_model(V, range(8))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
import numpy as np
import numpy.testing as npt
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import (
layer_model_instantiator,
schema,
workspace,
)
from caffe2.python.layers.layers import (
InstantiationContext,
)
from caffe2.python.layers.tags import Tags
from caffe2.python.layer_test_util import (
LayersTestCase,
OpSpec,
)
from caffe2.python.layers.layers import (
set_request_only,
is_request_only_scalar,
)
class TestLayers(LayersTestCase):
def testFCWithoutBias(self):
output_dims = 2
fc_without_bias = self.model.FCWithoutBias(
self.model.input_feature_schema.float_features, output_dims)
self.model.output_schema = fc_without_bias
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
fc_without_bias
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
]
)
mat_mul_spec = OpSpec(
"MatMul",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
],
fc_without_bias.field_blobs()
)
self.assertNetContainOps(train_net, [mat_mul_spec])
predict_net = self.get_predict_net()
self.assertNetContainOps(predict_net, [mat_mul_spec])
def testSamplingTrain(self):
output_dims = 1000
indices = self.new_record(schema.Scalar((np.int32, (10,))))
sampling_prob = self.new_record(schema.Scalar((np.float32, (10, ))))
sampled_fc = self.model.SamplingTrain(
schema.Struct(
('input', self.model.input_feature_schema.float_features),
('indices', indices),
('sampling_prob', sampling_prob),
),
"FC",
output_dims,
)
self.model.output_schema = sampled_fc
# Check that we don't add prediction layer into the model
self.assertEqual(1, len(self.model.layers))
self.assertEqual(
schema.Scalar((np.float32, (output_dims, ))),
sampled_fc
)
train_init_net, train_net = self.get_training_nets()
init_ops = self.assertNetContainOps(
train_init_net,
[
OpSpec("UniformFill", None, None),
OpSpec("UniformFill", None, None),
]
)
sampled_fc_layer = self.model.layers[0]
gather_w_spec = OpSpec(
"Gather",
[
init_ops[0].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[0]
]
)
gather_b_spec = OpSpec(
"Gather",
[
init_ops[1].output[0],
indices(),
],
[
sampled_fc_layer._prediction_layer.train_param_blobs[1]
]
)
train_fc_spec = OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
] + sampled_fc_layer._prediction_layer.train_param_blobs,
sampled_fc.field_blobs()
)
log_spec = OpSpec("Log", [sampling_prob()], [None])
sub_spec = OpSpec(
"Sub",
[sampled_fc.field_blobs()[0], None],
sampled_fc.field_blobs()
)
train_ops = self.assertNetContainOps(
train_net,
[gather_w_spec, gather_b_spec, train_fc_spec, log_spec, sub_spec])
self.assertEqual(train_ops[3].output[0], train_ops[4].input[1])
predict_net = self.get_predict_net()
self.assertNetContainOps(
predict_net,
[
OpSpec(
"FC",
[
self.model.input_feature_schema.float_features(),
init_ops[0].output[0],
init_ops[1].output[0],
],
sampled_fc.field_blobs()
)
]
)
def testBatchLRLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
('weight', schema.Scalar((np.float64, (1,))))
))
loss = self.model.BatchLRLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchMSELoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float64, (1,)))),
('prediction', schema.Scalar((np.float32, (2,)))),
))
loss = self.model.BatchMSELoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSigmoidCrossEntropyLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, (32,)))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSigmoidCrossEntropyLoss(input_record)
self.assertEqual(schema.Scalar((np.float32, tuple())), loss)
def testBatchSoftmaxLoss(self):
input_record = self.new_record(schema.Struct(
('label', schema.Scalar((np.float32, tuple()))),
('prediction', schema.Scalar((np.float32, (32,))))
))
loss = self.model.BatchSoftmaxLoss(input_record)
self.assertEqual(schema.Struct(
('softmax', schema.Scalar((np.float32, (32,)))),
('loss', schema.Scalar(np.float32)),
), loss)
@given(
X=hu.arrays(dims=[5, 2]),
num_to_collect=st.integers(min_value=1, max_value=10),
)
def testLastNWindowCollector(self, X, num_to_collect):
input_record = self.new_record(schema.Scalar(np.float32))
schema.FeedRecord(input_record, [X])
last_n = self.model.LastNWindowCollector(input_record, num_to_collect)
self.run_train_net_forward_only()
output_record = schema.FetchRecord(last_n)
start = max(0, 5 - num_to_collect)
npt.assert_array_equal(X[start:], output_record())
def testUniformSampling(self):
input_record = self.new_record(schema.Scalar(np.int32))
input_array = np.array([3, 10, 11, 15, 20, 99], dtype=np.int32)
schema.FeedRecord(input_record, [input_array])
num_samples = 20
num_elements = 100
uniform_sampling_output = self.model.UniformSampling(
input_record, num_samples, num_elements)
self.model.loss = uniform_sampling_output
self.run_train_net()
samples = workspace.FetchBlob(uniform_sampling_output.samples())
sampling_prob = workspace.FetchBlob(
uniform_sampling_output.sampling_prob())
self.assertEqual(num_samples, len(samples))
np.testing.assert_array_equal(input_array, samples[:len(input_array)])
np.testing.assert_almost_equal(
np.array([float(num_samples) / num_elements] * num_samples,
dtype=np.float32),
sampling_prob
)
def testGatherRecord(self):
indices = np.array([1, 3, 4], dtype=np.int32)
dense = np.array(range(20), dtype=np.float32).reshape(10, 2)
lengths = np.array(range(10), dtype=np.int32)
items = np.array(range(lengths.sum()), dtype=np.int64)
items_lengths = np.array(range(lengths.sum()), dtype=np.int32)
items_items = np.array(range(items_lengths.sum()), dtype=np.int64)
record = self.new_record(schema.Struct(
('dense', schema.Scalar(np.float32)),
('sparse', schema.Struct(
('list', schema.List(np.int64)),
('list_of_list', schema.List(schema.List(np.int64))),
)),
('empty_struct', schema.Struct())
))
indices_record = self.new_record(schema.Scalar(np.int32))
input_record = schema.Struct(
('indices', indices_record),
('record', record),
)
schema.FeedRecord(
input_record,
[indices, dense, lengths, items, lengths, items_lengths,
items_items])
gathered_record = self.model.GatherRecord(input_record)
self.assertTrue(schema.equal_schemas(gathered_record, record))
self.run_train_net_forward_only()
gathered_dense = workspace.FetchBlob(gathered_record.dense())
np.testing.assert_array_equal(
np.concatenate([dense[i:i + 1] for i in indices]), gathered_dense)
gathered_lengths = workspace.FetchBlob(
gathered_record.sparse.list.lengths())
np.testing.assert_array_equal(
np.concatenate([lengths[i:i + 1] for i in indices]),
gathered_lengths)
gathered_items = workspace.FetchBlob(
gathered_record.sparse.list.items())
offsets = lengths.cumsum() - lengths
np.testing.assert_array_equal(
np.concatenate([
items[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]), gathered_items)
gathered_items_lengths = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.lengths())
np.testing.assert_array_equal(
np.concatenate([
items_lengths[offsets[i]: offsets[i] + lengths[i]]
for i in indices
]),
gathered_items_lengths
)
nested_offsets = []
nested_lengths = []
nested_offset = 0
j = 0
for l in lengths:
nested_offsets.append(nested_offset)
nested_length = 0
for _i in range(l):
nested_offset += items_lengths[j]
nested_length += items_lengths[j]
j += 1
nested_lengths.append(nested_length)
gathered_items_items = workspace.FetchBlob(
gathered_record.sparse.list_of_list.items.items())
np.testing.assert_array_equal(
np.concatenate([
items_items[nested_offsets[i]:
nested_offsets[i] + nested_lengths[i]]
for i in indices
]),
gathered_items_items
)
def testMapToRange(self):
input_record = self.new_record(schema.Scalar(np.int32))
map_to_range_output = self.model.MapToRange(input_record,
max_index=100)
self.model.output_schema = schema.Struct()
train_init_net, train_net = self.get_training_nets()
schema.FeedRecord(
input_record,
[np.array([10, 3, 20, 99, 15, 11, 3, 11], dtype=np.int32)]
)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 3, 4, 5, 6, 2, 6], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 10, 15], dtype=np.int32)]
)
workspace.RunNetOnce(train_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 1, 5], dtype=np.int32),
indices
)
eval_net = self.get_eval_net()
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 7, 8, 9, 5, 0], dtype=np.int32),
indices
)
schema.FeedRecord(
input_record,
[np.array([10, 3, 23, 15, 101, 115], dtype=np.int32)]
)
workspace.RunNetOnce(eval_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([1, 2, 7, 5, 0, 0], dtype=np.int32),
indices
)
predict_net = self.get_predict_net()
schema.FeedRecord(
input_record,
[np.array([3, 3, 20, 23, 151, 35, 60, 15, 200], dtype=np.int32)]
)
workspace.RunNetOnce(predict_net)
indices = workspace.FetchBlob(map_to_range_output())
np.testing.assert_array_equal(
np.array([2, 2, 3, 7, 0, 8, 9, 5, 0], dtype=np.int32),
indices
)
def testSelectRecordByContext(self):
float_features = self.model.input_feature_schema.float_features
float_array = np.array([1.0, 2.0], dtype=np.float32)
schema.FeedRecord(float_features, [float_array])
with Tags(Tags.EXCLUDE_FROM_PREDICTION):
log_float_features, = self.model.Log(float_features, 1)
joined = self.model.SelectRecordByContext(
schema.Struct(
(InstantiationContext.PREDICTION, float_features),
(InstantiationContext.TRAINING, log_float_features),
# TODO: TRAIN_ONLY layers are also generated in eval
(InstantiationContext.EVAL, log_float_features),
)
)
# model.output_schema has to be a struct
self.model.output_schema = schema.Struct((
'joined', joined
))
predict_net = layer_model_instantiator.generate_predict_net(self.model)
workspace.RunNetOnce(predict_net)
predict_output = schema.FetchRecord(predict_net.output_record())
npt.assert_array_equal(float_array,
predict_output['joined']())
eval_net = layer_model_instantiator.generate_eval_net(self.model)
workspace.RunNetOnce(eval_net)
eval_output = schema.FetchRecord(eval_net.output_record())
npt.assert_array_equal(np.log(float_array),
eval_output['joined']())
_, train_net = (
layer_model_instantiator.generate_training_nets_forward_only(
self.model
)
)
workspace.RunNetOnce(train_net)
train_output = schema.FetchRecord(train_net.output_record())
npt.assert_array_equal(np.log(float_array),
train_output['joined']())
def testFunctionalLayer(self):
def normalize(net, in_record, out_record):
mean = net.ReduceFrontMean(in_record(), 1)
net.Sub(
[in_record(), mean],
out_record[0](),
broadcast=1)
normalized = self.model.Functional(
self.model.input_feature_schema.float_features, 1,
normalize, name="normalizer")
# Attach metadata to one of the outputs and use it in FC
normalized[0].set_type((np.float32, 32))
self.model.output_schema = self.model.FC(normalized[0], 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelper(self):
mean = self.model.ReduceFrontMean(
self.model.input_feature_schema.float_features, 1)
normalized = self.model.Sub(
schema.Tuple(
self.model.input_feature_schema.float_features, mean[0]),
1, broadcast=1)
# Attach metadata to one of the outputs and use it in FC
normalized[0].set_type((np.float32, (32,)))
self.model.output_schema = self.model.FC(normalized[0], 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 3
assert ops[0].type == "ReduceFrontMean"
assert ops[1].type == "Sub"
assert ops[2].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[1].output) == 1
assert ops[1].output[0] in ops[2].input
def testFunctionalLayerHelperAutoInference(self):
softsign = self.model.Softsign(
schema.Tuple(self.model.input_feature_schema.float_features),
1)
assert len(softsign.field_types()) == 1
assert softsign.field_types()[0].base == np.float32
assert softsign.field_types()[0].shape == (32,)
self.model.output_schema = self.model.FC(softsign[0], 2)
predict_net = layer_model_instantiator.generate_predict_net(
self.model)
ops = predict_net.Proto().op
assert len(ops) == 2
assert ops[0].type == "Softsign"
assert ops[1].type == "FC"
assert len(ops[0].input) == 1
assert ops[0].input[0] ==\
self.model.input_feature_schema.float_features()
assert len(ops[0].output) == 1
assert ops[0].output[0] in ops[1].input
def testFunctionalLayerHelperAutoInferenceScalar(self):
loss = self.model.AveragedLoss(self.model.input_feature_schema, 1)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual(tuple(), loss.field_types()[0].shape)
def testFunctionalLayerInputCoercion(self):
one = self.model.global_constants['ONE']
two = self.model.Add([one, one], 1)
self.model.loss = two
self.run_train_net()
data = workspace.FetchBlob(two.field_blobs()[0])
np.testing.assert_array_equal([2.0], data)
def testFunctionalLayerWithOutputNames(self):
k = 3
topk = self.model.TopK(
self.model.input_feature_schema,
output_names_or_num=['values', 'indices'],
k=k,
)
self.assertEqual(2, len(topk.field_types()))
self.assertEqual(np.float32, topk.field_types()[0].base)
self.assertEqual((k,), topk.field_types()[0].shape)
self.assertEqual(np.int32, topk.field_types()[1].base)
self.assertEqual((k,), topk.field_types()[1].shape)
self.assertEqual(['TopK/values', 'TopK/indices'], topk.field_blobs())
def testFunctionalLayerWithOutputDtypes(self):
loss = self.model.AveragedLoss(
self.model.input_feature_schema,
1,
output_dtypes=(np.float32, (1,)),
)
self.assertEqual(1, len(loss.field_types()))
self.assertEqual(np.float32, loss.field_types()[0].base)
self.assertEqual((1,), loss.field_types()[0].shape)
def testPropagateRequestOnly(self):
# test case when output is request only
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (32, )))),
('input2', schema.Scalar((np.float32, (64, )))),
('input3', schema.Scalar((np.float32, (16, )))),
))
set_request_only(input_record)
concat_output = self.model.Concat(input_record)
self.assertEqual(is_request_only_scalar(concat_output), True)
# test case when output is not request only
input_record2 = self.new_record(schema.Struct(
('input4', schema.Scalar((np.float32, (100, ))))
)) + input_record
concat_output2 = self.model.Concat(input_record2)
self.assertEqual(is_request_only_scalar(concat_output2), False)
def testSetRequestOnly(self):
input_record = schema.Scalar(np.int64)
schema.attach_metadata_to_scalars(
input_record,
schema.Metadata(
categorical_limit=100000000,
expected_value=99,
feature_specs=schema.FeatureSpec(
feature_ids=[1, 100, 1001]
)
)
)
set_request_only(input_record)
self.assertEqual(input_record.metadata.categorical_limit, 100000000)
self.assertEqual(input_record.metadata.expected_value, 99)
self.assertEqual(
input_record.metadata.feature_specs.feature_ids,
[1, 100, 1001]
)
# TODO(jiayq): as more and more tests are moving to hypothesis test, we
# can gradually remove this test script. DO NOT ADD MORE TESTS TO THIS
# FILE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import \
core, device_checker, gradient_checker, test_util, workspace, cnn
import caffe2.python.hypothesis_test_util as hu
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
import collections
import unittest
if workspace.has_gpu_support and workspace.NumCudaDevices() > 0:
gpu_device_option = caffe2_pb2.DeviceOption()
gpu_device_option.device_type = caffe2_pb2.CUDA
cpu_device_option = caffe2_pb2.DeviceOption()
gpu_device_checker = device_checker.DeviceChecker(
0.01, [gpu_device_option]
)
device_checker = device_checker.DeviceChecker(
0.01, [gpu_device_option, cpu_device_option]
)
gpu_gradient_checkers = [
gradient_checker.GradientChecker(
0.005, 0.05, gpu_device_option, "gpu_checker_ws"
),
]
gradient_checkers = [
gradient_checker.GradientChecker(
0.005, 0.05, gpu_device_option, "gpu_checker_ws"
),
gradient_checker.GradientChecker(
0.01, 0.05, cpu_device_option, "cpu_checker_ws"
),
]
else:
cpu_device_option = caffe2_pb2.DeviceOption()
gpu_device_option = None
gpu_device_checker = device_checker.DeviceChecker(
0.01, []
)
device_checker = device_checker.DeviceChecker(0.01, [cpu_device_option])
gradient_checkers = [
gradient_checker.GradientChecker(
0.01, 0.05, cpu_device_option, "cpu_checker_ws"
)
]
gpu_gradient_checkers = []
class TestLRN(test_util.TestCase):
def setUp(self):
self.test_configs = [(6, 10), (3, 13), ]
def testLRN(self):
for input_size, depth in self.test_configs:
op = core.CreateOperator("LRN",
["X"],
["Y", "Y_scale"],
size=11,
alpha=0.001,
beta=0.5,
bias=2.0,
order="NHWC"
)
X = np.random.rand(2, input_size, input_size,
depth).astype(np.float32)
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestFlatten(test_util.TestCase):
def testFlatten(self):
op = core.CreateOperator("Flatten", ["X"], ["Y"])
X = np.random.rand(2, 3, 4, 5).astype(np.float32)
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestConcat(test_util.TestCase):
def setUp(self):
self.test_configs = [
# input_size, depth1, depth2, depth3, depth4
(3, 2, 3, 4, 5),
(4, 5, 4, 3, 2),
]
def testConcatNHWC(self):
for input_size, d1, d2, d3, d4 in self.test_configs:
op = core.CreateOperator("Concat",
["X1", "X2", "X3", "X4"],
["Y", "Y_dims"],
order="NHWC"
)
Xs = [
np.random.rand(2, input_size, input_size,
d1).astype(np.float32),
np.random.rand(2, input_size, input_size,
d2).astype(np.float32),
np.random.rand(2, input_size, input_size,
d3).astype(np.float32),
np.random.rand(2, input_size, input_size, d4).astype(np.float32)
]
for i in range(4):
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
def testConcatNCHW(self):
for input_size, d1, d2, d3, d4 in self.test_configs:
op = core.CreateOperator("Concat",
["X1", "X2", "X3", "X4"],
["Y", "Y_dims"],
order="NCHW"
)
Xs = [
np.random.rand(2, d1, input_size,
input_size).astype(np.float32),
np.random.rand(2, d2, input_size,
input_size).astype(np.float32),
np.random.rand(2, d3, input_size,
input_size).astype(np.float32),
np.random.rand(2, d4, input_size, input_size).astype(np.float32)
]
for i in range(4):
res = device_checker.CheckSimple(op, Xs, [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, Xs, i,
[0])
self.assertTrue(res)
class TestRelu(test_util.TestCase):
def setUp(self):
self.test_configs = [
# input size
# (0, 1),
(1, 1),
(2, 1),
(1, 3, 3, 1),
(2, 3, 3, 1),
(1, 5, 5, 3),
(2, 5, 5, 3),
]
def testRelu(self):
for input_size in self.test_configs:
op = core.CreateOperator("Relu", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32)
# Move away from the origin to avoid kink problems (Relu is not differentiable at 0)
X += 0.01 * np.sign(X)
X[X == 0] = 0.01
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestTanh(test_util.TestCase):
def setUp(self):
self.test_configs = [
# (0, 1),
(1, 1),
(2, 1),
(1, 2, 3, 4),
]
def testTanh(self):
for input_size in self.test_configs:
op = core.CreateOperator("Tanh", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestExp(test_util.TestCase):
def setUp(self):
self.test_configs = [
# (0, 1),
(1, 1),
(2, 1),
(1, 2, 3, 4),
]
def testExp(self):
for input_size in self.test_configs:
op = core.CreateOperator("Exp", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestSigmoid(test_util.TestCase):
def setUp(self):
self.test_configs = [
# (0, 1),
(1, 1),
(2, 1),
(1, 2, 3, 4),
]
def testSigmoid(self):
for input_size in self.test_configs:
op = core.CreateOperator("Sigmoid", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestSum(test_util.TestCase):
def setUp(self):
self.test_configs = [
# ((0, 1), False),
((1, 2, 3, 4), True),
((1, 2, 3, 4), False)]
def testSum(self):
for (input_size, in_place) in self.test_configs:
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1 = np.random.rand(*input_size).astype(np.float32) - 0.5
X2 = np.random.rand(*input_size).astype(np.float32) - 0.5
res = device_checker.CheckSimple(op, [X1, X2], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(
op, [X1, X2], 0, [0])
self.assertTrue(res)
class TestMakeTwoClass(test_util.TestCase):
def setUp(self):
self.test_configs = [
# input size
# (0, 1),
(1,),
(7,),
(1, 3),
(2, 5),
]
def testMakeTwoClass(self):
for input_size in self.test_configs:
op = core.CreateOperator("MakeTwoClass", ["X"], ["Y"])
X = np.random.rand(*input_size).astype(np.float32)
# step a little to avoid gradient problems
X[X < 0.01] += 0.01
X[X > 0.99] -= 0.01
res = device_checker.CheckSimple(op, [X], [0])
self.assertTrue(res)
for checker in gradient_checkers:
res, grad, grad_estimated = checker.CheckSimple(op, [X], 0, [0])
self.assertTrue(res)
class TestNetGradientChecker(test_util.TestCase):
def test_net_gradient_checker(self):
model = cnn.CNNModelHelper(name="test")
const = model.net.AddExternalInputs("const1", "const2")
fc = model.FC(dim_in=3, dim_out=4, blob_in="X", blob_out="Y", axis=0)
dist = [model.net.SquaredL2Distance([fc, c]) for c in const]
losses = [model.net.AveragedLoss(d) for d in dist] # using two losses here
workspace.RunNetOnce(model.param_init_net)
gradient_checker.NetGradientChecker.Check(
model.net,
outputs_with_grad=losses,
input_values={"X": np.array([1, 2, 3], dtype="float32"),
const[0]: np.array([1, 1, 1, 1], dtype="float32"),
const[1]: np.array([2, 2, 2, 2], dtype="float32")},
input_to_check="X",
)
if __name__ == '__main__':
workspace.GlobalInit(["python"])
unittest.main()
## @package attention
# Module caffe2.python.attention
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew
class AttentionType:
Regular, Recurrent = range(2)
def s(scope, name):
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
# c_i = \sum_j w_{ij}\textbf{s}_j
def _calc_weighted_context(
model,
encoder_outputs_transposed,
encoder_output_dim,
attention_weights_3d,
scope,
):
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = model.net.BatchMatMul(
[encoder_outputs_transposed, attention_weights_3d],
s(scope, 'attention_weighted_encoder_context'),
)
# [batch_size, encoder_output_dim]
attention_weighted_encoder_context, _ = model.net.Reshape(
attention_weighted_encoder_context,
[
attention_weighted_encoder_context,
s(scope, 'attention_weighted_encoder_context_old_shape'),
],
shape=[1, -1, encoder_output_dim],
)
return attention_weighted_encoder_context
# Calculate a softmax over the passed in attention energy logits
def _calc_attention_weights(
model,
attention_logits_transposed,
scope,
):
# TODO: we could try to force some attention weights to be zeros,
# based on encoder_lengths.
# [batch_size, encoder_length, 1]
attention_weights_3d = brew.softmax(
model,
attention_logits_transposed,
s(scope, 'attention_weights_3d'),
engine='CUDNN',
axis=1,
)
return attention_weights_3d
# e_{ij} = \textbf{v}^T tanh \alpha(\textbf{h}_{i-1}, \textbf{s}_j)
def _calc_attention_logits_from_sum_match(
model,
decoder_hidden_encoder_outputs_sum,
encoder_output_dim,
scope,
):
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Tanh(
decoder_hidden_encoder_outputs_sum,
decoder_hidden_encoder_outputs_sum,
)
attention_v = model.param_init_net.XavierFill(
[],
s(scope, 'attention_v'),
shape=[1, encoder_output_dim],
)
model.add_param(attention_v)
attention_zeros = model.param_init_net.ConstantFill(
[],
s(scope, 'attention_zeros'),
value=0.0,
shape=[1],
)
# [encoder_length, batch_size, 1]
attention_logits = model.net.FC(
[decoder_hidden_encoder_outputs_sum, attention_v, attention_zeros],
[s(scope, 'attention_logits')],
axis=2,
)
# [batch_size, encoder_length, 1]
attention_logits_transposed = model.Transpose(
attention_logits,
s(scope, 'attention_logits_transposed'),
axes=[1, 0, 2],
)
return attention_logits_transposed
# \textbf{W}^\alpha used in the context of \alpha_{sum}(a,b)
def _apply_fc_weight_for_sum_match(
model,
input,
dim_in,
dim_out,
scope,
name,
):
output = brew.fc(
model,
input,
s(scope, name),
dim_in=dim_in,
dim_out=dim_out,
axis=2,
)
output = model.net.Squeeze(
output,
output,
dims=[0],
)
return output
# Implements RecAtt as described in section 4.1 of http://arxiv.org/abs/1601.03317
def apply_recurrent_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
attention_weighted_encoder_context_t_prev,
scope,
):
weighted_prev_attention_context = _apply_fc_weight_for_sum_match(
model=model,
input=attention_weighted_encoder_context_t_prev,
dim_in=encoder_output_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_prev_attention_context',
)
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state',
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum_tmp = model.net.Add(
[
weighted_prev_attention_context,
weighted_decoder_hidden_state,
],
s(scope, 'decoder_hidden_encoder_outputs_sum_tmp'),
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[
weighted_encoder_outputs,
decoder_hidden_encoder_outputs_sum_tmp,
],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope,
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope,
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope,
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum,
]
def apply_regular_attention(
model,
encoder_output_dim,
encoder_outputs_transposed,
weighted_encoder_outputs,
decoder_hidden_state_t,
decoder_hidden_state_dim,
scope,
):
weighted_decoder_hidden_state = _apply_fc_weight_for_sum_match(
model=model,
input=decoder_hidden_state_t,
dim_in=decoder_hidden_state_dim,
dim_out=encoder_output_dim,
scope=scope,
name='weighted_decoder_hidden_state',
)
# [encoder_length, batch_size, encoder_output_dim]
decoder_hidden_encoder_outputs_sum = model.net.Add(
[weighted_encoder_outputs, weighted_decoder_hidden_state],
s(scope, 'decoder_hidden_encoder_outputs_sum'),
broadcast=1,
use_grad_hack=1,
)
attention_logits_transposed = _calc_attention_logits_from_sum_match(
model=model,
decoder_hidden_encoder_outputs_sum=decoder_hidden_encoder_outputs_sum,
encoder_output_dim=encoder_output_dim,
scope=scope,
)
# [batch_size, encoder_length, 1]
attention_weights_3d = _calc_attention_weights(
model=model,
attention_logits_transposed=attention_logits_transposed,
scope=scope,
)
# [batch_size, encoder_output_dim, 1]
attention_weighted_encoder_context = _calc_weighted_context(
model=model,
encoder_outputs_transposed=encoder_outputs_transposed,
encoder_output_dim=encoder_output_dim,
attention_weights_3d=attention_weights_3d,
scope=scope,
)
return attention_weighted_encoder_context, attention_weights_3d, [
decoder_hidden_encoder_outputs_sum,
]
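# Illustrative sketch only, not part of the attention API: one way to wire
# apply_regular_attention into a single decoder step. The model type, blob
# names and dimensions below are assumptions for the example; real callers
# (e.g. recurrent decoders) prepare these blobs themselves.
def _example_apply_regular_attention():
    from caffe2.python import cnn
    model = cnn.CNNModelHelper("NCHW", name="attention_example")
    context, weights, _ = apply_regular_attention(
        model=model,
        encoder_output_dim=256,
        # [batch_size, encoder_output_dim, encoder_length]
        encoder_outputs_transposed='encoder_outputs_transposed',
        # encoder outputs pre-multiplied by W^alpha:
        # [encoder_length, batch_size, encoder_output_dim]
        weighted_encoder_outputs='weighted_encoder_outputs',
        decoder_hidden_state_t='decoder_hidden_t',
        decoder_hidden_state_dim=512,
        scope='decoder',
    )
    return context, weights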
|
## @package task
# Module caffe2.python.task
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, context
from caffe2.python.schema import Field, from_blob_list
from collections import defaultdict
from copy import copy
def _merge_node_kwargs(a, b):
# TODO(azzolini): consistency checks
if a is None:
return b
if b is None:
return a
c = copy(a)
c.update(b)
return c
@context.define_context(allow_default=True)
class Cluster(object):
"""
Context that keeps track of all the node names used.
    Users shouldn't have to use it directly, since a Cluster is automatically
generated at the first usage of 'Node'.
"""
def __init__(self):
# list instead of set to keep order
self._nodes = []
self._node_kwargs = {}
def add_node(self, node):
if str(node) not in self._nodes:
self._nodes.append(str(node))
self._node_kwargs[str(node)] = _merge_node_kwargs(
node.kwargs(),
self._node_kwargs.get(str(node)))
def nodes(self):
"""
Returns the list of unique node names used within this context.
"""
return self._nodes
def node_kwargs(self):
return self._node_kwargs
@context.define_context(allow_default=True)
class Node(object):
"""
A Node context is used to indicate that all Tasks instantiated within will
run on the given node name. (Only the name of the node actually counts.)
Example:
with TaskGroup() as tg:
with Node('node1'):
s1 = execution_step(...)
Task(step=s1)
with Node('node2'):
s2 = execution_step(...)
with Node('node1'):
s3 = execution_step(...)
In this example, all three execution steps will run in parallel.
Moreover, s1 and s3 will run on the same node, and can see each
    other's blobs.
Additionally, a Node can be passed implementation-specific kwargs,
in order to specify properties of the node.
"""
def __init__(self, node='local', **kwargs):
self._name = str(node)
self._kwargs = kwargs
Cluster.current().add_node(self)
def __str__(self):
return self._name
def kwargs(self):
return self._kwargs
class WorkspaceType(object):
"""
Determines whether tasks of a TaskGroup will run directly at the global
workspace, which is kept alive across runs, or whether a new child
workspace will be created for the run and destroyed afterwards.
"""
PRIVATE = 'private'
GLOBAL = 'global'
def get_setup_nets(key, steps_or_nets, target):
init_net = core.Net(key + '/init')
exit_net = core.Net(key + '/exit')
init_nets = []
exit_nets = []
objs = []
for step_or_net in steps_or_nets:
if hasattr(step_or_net, 'get_all_attributes'):
objs += step_or_net.get_all_attributes(key)
elif hasattr(step_or_net, 'get_attributes'):
objs += step_or_net.get_attributes(key)
for obj in objs:
# these are needed in order to allow nesting of TaskGroup, which
# is a feature not yet implemented.
if hasattr(obj, '_setup_used') and obj._setup_used:
continue
if hasattr(obj, '_setup_target') and obj._setup_target != target:
continue
if hasattr(obj, 'setup'):
nets = obj.setup(init_net)
if isinstance(nets, (list, tuple)):
init_nets += nets
elif isinstance(nets, (core.Net, core.ExecutionStep)):
init_nets.append(nets)
elif nets is not None:
raise TypeError('Unsupported type for setup: %s' % type(nets))
obj._setup_used = True
if hasattr(obj, 'exit'):
nets = obj.exit(exit_net)
if isinstance(nets, (list, tuple)):
exit_nets += nets
elif isinstance(nets, (core.Net, core.ExecutionStep)):
exit_nets.append(nets)
elif nets is not None:
raise TypeError('Unsupported type for setup: %s' % type(nets))
obj._setup_used = True
if len(init_net.Proto().op) > 0:
init_nets.insert(0, init_net)
if len(exit_net.Proto().op) > 0:
exit_nets.insert(0, exit_net)
return init_nets, exit_nets
@context.define_context(allow_default=False)
class TaskGroup(object):
"""
Context that gathers tasks which will run concurrently, potentially on
multiple nodes. All tasks in the same node will share the same workspace
and thus can share blobs, while tasks running in different nodes won't
be able to directly share data.
All tasks of the task group will start concurrently, and the task group
will finish execution when the last task of the group finishes.
Example:
        # suppose that s1 ... s5 are execution steps or nets.
with TaskGroup() as tg:
# these tasks go to default node 'local'
Task(step=s1)
Task(step=s2)
with Node('n2'):
Task(step=s3)
with Node('n1'):
Task(step=s4)
with Node('n2'):
Task(step=s5)
# this will run all steps in parallel.
# s1 and s2 will run at default node 'local'
# s3 and s5 will run at node 'n2'
# s4 will run at node 'n1'
session.run(tg)
"""
LOCAL_SETUP = 'local_setup'
def __init__(self, workspace_type=None):
self._plan_cache = None
self._tasks = []
self._already_used = False
self._prev_active = None
self._tasks_to_add = []
self._report_nets = {}
self._report_steps = []
self._workspace_type = workspace_type
self._tasks_by_node = None
def add(self, task):
assert not self._already_used, (
'Cannot add Task to an already used TaskGroup.')
assert (
self._workspace_type is None or
task._workspace_type is None or
self._workspace_type == task._workspace_type)
if task._workspace_type is None:
task._workspace_type = (
self._workspace_type or WorkspaceType.PRIVATE)
if self._workspace_type is None:
self._workspace_type = task._workspace_type
task._notify_used()
self._tasks.append(task)
def tasks(self):
for task in self._tasks_to_add:
self.add(task)
self._tasks_to_add = []
self._already_used = True
return self._tasks
def num_registered_tasks(self):
return len(self._tasks_to_add) + len(self._tasks)
def used_nodes(self):
# use list to keep order
used = []
for task in self._tasks + self._tasks_to_add:
if task.node not in used:
used.append(task.node)
return used
def report_step(self, step=None, node=None, interval_ms=1000):
"""
Add a "report step" to this TaskGroup. This step will run repeatedly
every `interval_ms` milliseconds for the duration of the TaskGroup
execution on each of the nodes. It is guaranteed that this step
will be run at least once after every Task in the node has finished.
"""
step = core.to_execution_step(step)
step.RunEveryMillis(interval_ms)
self._report_steps.append((str(node or Node.current(node)), step))
def report_net(self, net=None, node=None, report_interval=5):
"""
DEPRECATED. Use report_step instead.
"""
node = str(node or Node.current(node))
assert net is None or node not in self._report_nets
if node not in self._report_nets:
self._report_nets[node] = (
net if net else core.Net('%s/reporter' % node),
report_interval)
return self._report_nets[node][0]
def tasks_by_node(self, node_remap=None):
# tasks_by_node can't be called twice because the setup won't
# work properly a second time.
node_map = {}
for task in self.tasks():
node_map[task.node] =\
node_remap(task.node) if node_remap else task.node
if self._tasks_by_node is not None:
tasks_by_node, prev_node_map = self._tasks_by_node
assert prev_node_map == node_map, (
'Cannot call tasks_by_node multiple times.')
return tasks_by_node
        # Convert the deprecated report_nets into report_steps.
for node, (net, interval) in self._report_nets.items():
self.report_step(net, node=node, interval_ms=interval * 1000)
self._report_nets = {}
tasks_by_node = defaultdict(list)
for task in self.tasks():
mapped_node = node_map[task.node]
tasks_by_node[mapped_node].append(task)
report_steps_by_node = defaultdict(list)
for original_node, step in self._report_steps:
report_steps_by_node[node_map[original_node]].append(step)
grouped_by_node = TaskGroup()
for node, tasks in tasks_by_node.items():
report_steps = report_steps_by_node[node]
node_inits, node_exits = get_setup_nets(
TaskGroup.LOCAL_SETUP,
[t.get_step() for t in tasks] + report_steps,
self)
# shortcut for single task with no queue
steps = report_steps
outputs = []
workspace_type = tasks[0].workspace_type()
for task in tasks:
step = task.get_step()
if step is not None:
steps.append(step)
outputs += task.outputs()
assert workspace_type == task.workspace_type(), (
'All tasks for a given node need same workspace type.')
if len(steps) == 0:
steps.append(core.execution_step('empty', []))
if len(steps) == 1:
step = steps[0]
else:
step = core.execution_step(
'%s:body' % node, steps, concurrent_substeps=True)
if len(node_inits) > 0 or len(node_exits) > 0:
steps = []
if len(node_inits) > 0:
steps.append(
core.execution_step('%s:init' % node, node_inits))
steps.append(step)
if len(node_exits) > 0:
steps.append(
core.execution_step('%s:exit' % node, node_exits))
step = core.execution_step(node, steps)
Task(
node=node, step=step, outputs=outputs,
name='grouped_by_node',
group=grouped_by_node, workspace_type=workspace_type)
self._tasks_by_node = (grouped_by_node, node_map)
return grouped_by_node
def to_task(self, node=None):
node = str(Node.current(node))
tasks = self.tasks_by_node(lambda x: node).tasks()
if len(tasks) == 0:
return Task()
return tasks[0]
class TaskOutput(object):
"""
Represents the output of a task. An output can be a blob,
    a list of blobs, or a record.
"""
def __init__(self, names):
self._schema = None
self._is_scalar = False
if isinstance(names, Field):
self._schema = names
names = self._schema.field_blobs()
self._is_scalar = type(names) not in (tuple, list)
if self._is_scalar:
names = [names]
self.names = names
self._values = None
def set(self, values, _fetch_func=None):
assert len(values) == len(self.names)
self._values = values
self._fetch_func = _fetch_func
def get(self):
assert self._values is not None, 'Output value not set yet.'
if self._is_scalar:
return self._values[0]
elif self._schema:
return from_blob_list(self._schema, self._values)
else:
return self._values
def fetch(self):
assert self._fetch_func is not None, (
'Cannot fetch value for this output.')
fetched_vals = [self._fetch_func(v) for v in self._values]
if self._is_scalar:
return fetched_vals[0]
elif self._schema:
return from_blob_list(self._schema, fetched_vals)
else:
return fetched_vals
def final_output(blob_or_record):
"""
Adds an output to the current Task, or if no task is active,
create a dummy task that returns the given blob or record
to the client. This will return the value of the blob or record when
the last task of the TaskGroup for a given node finishes.
"""
cur_task = Task.current(required=False) or Task()
return cur_task.add_output(blob_or_record)
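# For example (illustrative; 'session' and 'loss_blob' are assumptions):
#
#   with TaskGroup() as tg:
#       with Task():
#           ...
#           loss_output = final_output(loss_blob)
#   session.run(tg)
#   loss_value = loss_output.fetch()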
class TaskOutputList(object):
""" Keeps a list of outputs for a task """
def __init__(self, outputs=None):
self.outputs = outputs or []
def names(self):
"""
        Retrieve the output names.
TODO(azzolini): make this schema-based.
"""
names = []
for o in self.outputs:
names += o.names
return names
def set_values(self, values, _fetch_func=None):
offset = 0
for o in self.outputs:
num = len(o.names)
o.set(values[offset:offset + num], _fetch_func)
offset += num
assert offset == len(values), 'Wrong number of output values.'
@context.define_context()
class Task(object):
"""
A Task is composed of an execution step and zero or more outputs.
Tasks are executed in the context of a TaskGroup, which, in turn, can
be run by a Session.
Task outputs are fetched by the session at the end of the run.
"""
TASK_SETUP = 'task_setup'
REPORT_STEP = 'report_step'
_global_names_used = set()
@staticmethod
def _get_next_name(node, group, name):
basename = str(node) + '/' + str(name)
names_used = (
Task._global_names_used
if group is None else
set(t.name for t in group._tasks_to_add))
cur_name = basename
i = 0
while cur_name in names_used:
i += 1
cur_name = '%s:%d' % (basename, i)
return cur_name
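    # e.g. repeated Task(name='train') instances on node 'local' are named
    # 'local/train', 'local/train:1', 'local/train:2', ...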
def __init__(
self, step=None, outputs=None,
workspace_type=None, group=None, node=None, name=None):
"""
Instantiate a Task and add it to the current TaskGroup and Node.
"""
if not name and isinstance(step, core.ExecutionStep):
name = step.Proto().name
if not name:
name = 'task'
# register this node name with active context
self.node = str(Node.current(None if node is None else Node(node)))
self.group = TaskGroup.current(group, required=False)
self.name = Task._get_next_name(self.node, self.group, name)
        # may need to be temporarily removed later if Task is used as a context
if self.group is not None:
self.group._tasks_to_add.append(self)
self._already_used = False
self._step = None
self._step_with_setup = None
self._outputs = []
if step is not None:
self.set_step(step)
if outputs is not None:
self.add_outputs(outputs)
self._pipeline = None
self._is_pipeline_context = False
self._workspace_type = workspace_type
self._report_net = None
def __enter__(self):
# temporarily remove from _tasks_to_add to ensure correct order
if self.group is not None:
self.group._tasks_to_add.remove(self)
self._assert_not_used()
assert self._step is None, 'This Task already has an execution step.'
from caffe2.python import net_builder
self._net_builder = net_builder.NetBuilder(_fullname=self.name)
self._net_builder.__enter__()
return self
def __exit__(self, type, value, traceback):
self._net_builder.__exit__(type, value, traceback)
if type is None:
self.set_step(self._net_builder)
if self.group is not None:
self.group._tasks_to_add.append(self)
self._net_builder = None
def workspace_type(self):
return self._workspace_type
def _assert_not_used(self):
assert not self._already_used, (
            'Cannot modify task since it has already been used.')
def add_output(self, output):
self._assert_not_used()
output = (
output if isinstance(output, TaskOutput) else TaskOutput(output))
self._outputs.append(output)
return output
def add_outputs(self, outputs):
self._assert_not_used()
if type(outputs) not in (list, tuple):
return self.add_output(outputs)
else:
return [self.add_output(output) for output in outputs]
def set_step(self, step):
self._assert_not_used()
self._step = core.to_execution_step(step)
def get_step(self):
if self._step is not None and self._step_with_setup is None:
            # Wrapped in list() so report_steps can be iterated and
            # concatenated below (filter() is lazy under Python 3).
            report_steps = list(filter(
                lambda s: not hasattr(s, '_report_step_used'),
                self._step.get_all_attributes(Task.REPORT_STEP)))
for step in report_steps:
step._report_step_used = True
if not step.Proto().run_every_ms:
step.RunEveryMillis(1000)
init_nets, exit_nets = get_setup_nets(
Task.TASK_SETUP, [self._step] + report_steps, self)
if len(self._outputs) == 0:
output_net = core.Net('%s:output' % self.name)
self.add_output(output_net.ConstantFill(
[], 1, dtype=core.DataType.INT32, value=0))
exit_nets.append(output_net)
body = self._step if not report_steps else core.execution_step(
                '%s:body' % self.name, report_steps + [self._step])
self._step_with_setup = core.execution_step(
self.name,
[
core.execution_step('%s:init' % self.name, init_nets),
body,
core.execution_step('%s:exit' % self.name, exit_nets),
]
)
elif self._step_with_setup is None:
self._step_with_setup = core.execution_step(self.name, [])
return self._step_with_setup
def output_list(self):
return TaskOutputList(self._outputs)
def outputs(self):
return self._outputs
def _notify_used(self):
self.get_step()
self._already_used = True
class SetupNets(object):
"""
    Allows registering a list of nets to be run at the initialization
and finalization of Tasks or TaskGroups.
For example, let's say you have the following:
init_net = core.Net('init')
my_val = init_net.ConstantFill([], 'my_val', value=0)
net = core.Net('counter')
net.Add([my_val, net.Const(1),], [my_val])
with TaskGroup() as task_group:
with Node('trainer'):
my_task = Task(step=[net])
In order to have `init_net` run once before `net` runs for the
first time, you can do one of the following:
net.add_object(Task.TASK_SETUP, SetupNets([init_net]))
or
net.add_object(TaskGroup.LOCAL_SETUP, SetupNets([init_net]))
- With Task.TASK_SETUP, init_net will run once at my_task startup.
- With TaskGroup.LOCAL_SETUP, init_net will run once on node 'trainer',
before any task of the task group is run on that node.
The same SetupNets object can be added to multiple nets. It will only
run once per Task/TaskGroup run.
"""
def __init__(self, init_nets=None, exit_nets=None):
self.init_nets = init_nets
self.exit_nets = exit_nets
def setup(self, init_net):
return self.init_nets
def exit(self, exit_net):
return self.exit_nets
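# Illustrative sketch only, not part of this module: a complete Task whose
# init net is attached via SetupNets, mirroring the docstring above. Running
# the TaskGroup requires a Session, which lives outside this module.
def _example_task_with_setup():
    init_net = core.Net('init')
    counter = init_net.ConstantFill([], 'counter', shape=[1], value=0.0)
    step_net = core.Net('counter_step')
    step_net.Add([counter, step_net.Const(1.0)], [counter])
    step_net.add_object(Task.TASK_SETUP, SetupNets([init_net]))
    with TaskGroup() as tg:
        with Node('trainer'):
            task = Task(step=[step_net], outputs=[counter])
    return tg, task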
|
import unittest
from caffe2.python import convnet_benchmarks as cb
from caffe2.python import test_util, workspace
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestConvnetBenchmarks(test_util.TestCase):
def testConvnetBenchmarks(self):
all_args = [
'--batch_size 16 --order NCHW --iterations 1 '
'--warmup_iterations 1',
'--batch_size 16 --order NCHW --iterations 1 '
'--warmup_iterations 1 --forward_only',
]
for model in [cb.AlexNet, cb.OverFeat, cb.VGGA, cb.Inception]:
for arg_str in all_args:
args = cb.GetArgumentParser().parse_args(arg_str.split(' '))
cb.Benchmark(model, args)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python.optimizer import build_sgd, build_ftrl, build_adagrad, build_adam
from caffe2.python.optimizer_test_util import OptimizerTestBase
from caffe2.python.test_util import TestCase
from caffe2.python import workspace
import numpy as np
class TestSgd(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_sgd(model, base_learning_rate=0.1)
def check_optimizer(self, optimizer):
self.assertTrue(optimizer.get_auxiliary_parameters().shared)
self.assertFalse(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().shared:
tensor = workspace.FetchBlob(param)
np.testing.assert_allclose(np.array([1.0]), tensor, atol=1e-5)
class TestFtrl(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_ftrl(
model, engine=None, alpha=1.0, beta=0.1, lambda1=0.0, lambda2=0.0)
def check_optimizer(self, optimizer):
self.assertFalse(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
class TestAdagrad(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_adagrad(model, base_learning_rate=1.0)
def check_optimizer(self, optimizer):
self.assertFalse(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
class TestAdam(OptimizerTestBase, TestCase):
def build_optimizer(self, model):
return build_adam(model, base_learning_rate=0.1)
def check_optimizer(self, optimizer):
self.assertTrue(optimizer.get_auxiliary_parameters().shared)
self.assertTrue(optimizer.get_auxiliary_parameters().local)
self.assertTrue(workspace.HasBlob("optimizer_iteration"))
iteration_tensor = workspace.FetchBlob("optimizer_iteration")
np.testing.assert_allclose(np.array([2000]),
iteration_tensor,
atol=1e-5)
for param in optimizer.get_auxiliary_parameters().shared:
workspace.FetchBlob(param)
for param in optimizer.get_auxiliary_parameters().local:
workspace.FetchBlob(param)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import scope, core
from caffe2.proto import caffe2_pb2
import unittest
import threading
import time
SUCCESS_COUNT = 0
def thread_runner(idx, testobj):
global SUCCESS_COUNT
testobj.assertEquals(scope.CurrentNameScope(), "")
testobj.assertEquals(scope.CurrentDeviceScope(), None)
namescope = "namescope_{}".format(idx)
dsc = core.DeviceOption(caffe2_pb2.CUDA, idx)
with scope.DeviceScope(dsc):
with scope.NameScope(namescope):
testobj.assertEquals(scope.CurrentNameScope(), namescope + "/")
testobj.assertEquals(scope.CurrentDeviceScope(), dsc)
time.sleep(0.01 + idx * 0.01)
testobj.assertEquals(scope.CurrentNameScope(), namescope + "/")
testobj.assertEquals(scope.CurrentDeviceScope(), dsc)
testobj.assertEquals(scope.CurrentNameScope(), "")
testobj.assertEquals(scope.CurrentDeviceScope(), None)
SUCCESS_COUNT += 1
class TestScope(unittest.TestCase):
def testNamescopeBasic(self):
self.assertEquals(scope.CurrentNameScope(), "")
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
self.assertEquals(scope.CurrentNameScope(), "")
def testNamescopeAssertion(self):
self.assertEquals(scope.CurrentNameScope(), "")
try:
with scope.NameScope("test_scope"):
self.assertEquals(scope.CurrentNameScope(), "test_scope/")
raise Exception()
except Exception:
pass
self.assertEquals(scope.CurrentNameScope(), "")
def testDevicescopeBasic(self):
self.assertEquals(scope.CurrentDeviceScope(), None)
dsc = core.DeviceOption(caffe2_pb2.CUDA, 9)
with scope.DeviceScope(dsc):
self.assertEquals(scope.CurrentDeviceScope(), dsc)
self.assertEquals(scope.CurrentDeviceScope(), None)
def testDevicescopeAssertion(self):
self.assertEquals(scope.CurrentDeviceScope(), None)
dsc = core.DeviceOption(caffe2_pb2.CUDA, 9)
try:
with scope.DeviceScope(dsc):
self.assertEquals(scope.CurrentDeviceScope(), dsc)
raise Exception()
except Exception:
pass
self.assertEquals(scope.CurrentDeviceScope(), None)
def testMultiThreaded(self):
"""
Test that name/device scope are properly local to the thread
and don't interfere
"""
global SUCCESS_COUNT
self.assertEquals(scope.CurrentNameScope(), "")
self.assertEquals(scope.CurrentDeviceScope(), None)
threads = []
for i in range(4):
threads.append(threading.Thread(
target=thread_runner,
args=(i, self),
))
for t in threads:
t.start()
with scope.NameScope("master"):
self.assertEquals(scope.CurrentDeviceScope(), None)
self.assertEquals(scope.CurrentNameScope(), "master/")
for t in threads:
t.join()
self.assertEquals(scope.CurrentNameScope(), "master/")
self.assertEquals(scope.CurrentDeviceScope(), None)
# Ensure all threads succeeded
self.assertEquals(SUCCESS_COUNT, 4)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.core import CreatePythonOperator
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
def SubFunctionThatThrowsRuntimeError():
raise RuntimeError("This is an intentional exception.")
def MainOpFunctionThatThrowsRuntimeError(inputs, _):
return SubFunctionThatThrowsRuntimeError()
class PythonOpTest(hu.HypothesisTestCase):
@given(x=hu.tensor())
def test_feed(self, x):
def f(inputs, _):
self.assertEqual(x.shape, inputs[0].shape)
self.assertEqual(type(inputs[0].shape), tuple)
self.assertEqual(type(inputs[0].data), np.ndarray)
np.testing.assert_almost_equal(x, inputs[0].data)
op = CreatePythonOperator(f, ["x"], [])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
def test_exception(self):
op = CreatePythonOperator(MainOpFunctionThatThrowsRuntimeError, [], [])
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
@given(x=hu.tensor())
def test_feed_with_helper_function(self, x):
def f(inputs, _):
self.assertEqual(x.shape, inputs[0].shape)
self.assertEqual(type(inputs[0].shape), tuple)
self.assertEqual(type(inputs[0].data), np.ndarray)
np.testing.assert_almost_equal(x, inputs[0].data)
net = core.Net("test")
net.Python(f)(["x"], [])
workspace.FeedBlob("x", x)
workspace.RunNetOnce(net)
@given(x=hu.tensor())
def test_feed_with_gc(self, x):
def f(inputs, _):
self.assertEqual(x.shape, inputs[0].shape)
np.testing.assert_almost_equal(x, inputs[0].data)
op = CreatePythonOperator(f, ["x"], [])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
del f
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
@given(x=hu.tensor())
def test_reshape(self, x):
def f(inputs, outputs):
outputs[0].reshape(inputs[0].shape)
self.assertEqual(x.shape, inputs[0].shape)
self.assertEqual(x.shape, outputs[0].shape)
outputs[0].data[...] = inputs[0].data
op = CreatePythonOperator(f, ["x"], ["y"])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
y = workspace.FetchBlob("y")
np.testing.assert_almost_equal(x, y)
@given(x=hu.tensor())
def test_workspace_manipulation(self, x):
"""
Verify that python op can manipulate workspace directly
"""
def f(inputs, outputs, ws):
fetched = ws.blobs['internal'].fetch()
np.testing.assert_almost_equal(fetched, x)
ws = workspace.C.Workspace()
net = core.Net("test")
net.GivenTensorFill([], ['internal'], values=x, shape=x.shape)
net.Python(f, pass_workspace=True)([], [])
ws.run(net)
@given(x=hu.tensor())
def test_caught_exception_doesnt_terminate(self, x):
def f(inputs, outputs):
try:
raise Exception("Exception in handler")
except Exception:
pass
op = CreatePythonOperator(f, ["x"], ["y"])
workspace.FeedBlob("x", x)
workspace.RunOperatorOnce(op)
@given(x=hu.tensor(),
n=st.integers(min_value=1, max_value=20),
w=st.integers(min_value=1, max_value=20))
def test_multithreaded_evaluation(self, x, n, w):
def f(inputs, outputs):
outputs[0].reshape(inputs[0].shape)
outputs[0].data[...] = inputs[0].data
ops = [CreatePythonOperator(f, ["x"], [str(i)]) for i in range(n)]
net = core.Net("net")
net.Proto().op.extend(ops)
net.Proto().type = "dag"
net.Proto().num_workers = w
iters = 100
plan = core.Plan("plan")
plan.AddStep(core.ExecutionStep("test-step", net, iters))
workspace.FeedBlob("x", x)
workspace.RunPlan(plan.Proto().SerializeToString())
for i in range(n):
y = workspace.FetchBlob(str(i))
np.testing.assert_almost_equal(x, y)
@given(x=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_gradient(self, x, in_place, gc, dc):
def f(inputs, outputs):
outputs[0].reshape(inputs[0].shape)
outputs[0].data[...] = inputs[0].data * 2
def grad_f(inputs, outputs):
# Ordering is [inputs, outputs, grad_outputs]
grad_output = inputs[2]
grad_input = outputs[0]
grad_input.reshape(grad_output.shape)
grad_input.data[...] = grad_output.data * 2
op = CreatePythonOperator(
f, ["x"], ["x" if in_place else "y"], grad_f=grad_f)
self.assertGradientChecks(gc, op, [x], 0, [0])
self.assertDeviceChecks(dc, op, [x], [0])
@given(inputs=hu.tensors(n=2), **hu.gcs)
def test_gradient_multiple(self, inputs, gc, dc):
(x1, x2) = inputs
def f(inputs, outputs):
for idx in [0, 1]:
self.assertEqual(type(inputs[idx].shape), tuple)
outputs[idx].reshape(inputs[idx].shape)
outputs[idx].data[...] = inputs[idx].data * 2
def grad_f(inputs, outputs):
# Ordering is [inputs, outputs, grad_outputs]
self.assertEqual(len(inputs), 6)
self.assertEqual(len(outputs), 2)
for (grad_output_idx, grad_input_idx) in [(4, 0), (5, 1)]:
grad_output = inputs[grad_output_idx]
grad_input = outputs[grad_input_idx]
grad_input.reshape(grad_output.shape)
grad_input.data[...] = grad_output.data * 2
op = CreatePythonOperator(f, ["x1", "x2"], ["y1", "y2"], grad_f=grad_f)
for idx in [0, 1]:
self.assertGradientChecks(gc, op, [x1, x2], idx, [0, 1])
self.assertDeviceChecks(dc, op, [x1, x2], [0, 1])
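# Illustrative sketch (not one of the tests above): the minimal pattern for a
# forward-only Python op, using only names already imported in this file.
def _example_python_op():
    def double(inputs, outputs):
        # Allocate the output with the input's shape, then fill it.
        outputs[0].reshape(inputs[0].shape)
        outputs[0].data[...] = inputs[0].data * 2
    op = CreatePythonOperator(double, ["x"], ["y"])
    workspace.FeedBlob("x", np.ones((2, 3), dtype=np.float32))
    workspace.RunOperatorOnce(op)
    return workspace.FetchBlob("y")  # an array of 2.0s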
|
## @package hsm_util
# Module caffe2.python.hsm_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import hsm_pb2
'''
Hierarchical softmax utility methods that can be used to:
    1) create a TreeProto structure given a list of word_ids or NodeProtos
    2) create a HierarchyProto structure from a user-provided TreeProto
'''
def create_node_with_words(words, name='node'):
node = hsm_pb2.NodeProto()
node.name = name
for word in words:
node.word_ids.append(word)
return node
def create_node_with_nodes(nodes, name='node'):
node = hsm_pb2.NodeProto()
node.name = name
for child_node in nodes:
new_child_node = node.children.add()
new_child_node.MergeFrom(child_node)
return node
def create_hierarchy(tree_proto):
max_index = 0
def create_path(path, word):
path_proto = hsm_pb2.PathProto()
path_proto.word_id = word
for entry in path:
new_path_node = path_proto.path_nodes.add()
new_path_node.index = entry[0]
new_path_node.length = entry[1]
new_path_node.target = entry[2]
return path_proto
def recursive_path_builder(node_proto, path, hierarchy_proto, max_index):
node_proto.offset = max_index
path.append([max_index,
len(node_proto.word_ids) + len(node_proto.children), 0])
max_index += len(node_proto.word_ids) + len(node_proto.children)
if hierarchy_proto.size < max_index:
hierarchy_proto.size = max_index
for target, node in enumerate(node_proto.children):
path[-1][2] = target
max_index = recursive_path_builder(node, path, hierarchy_proto,
max_index)
for target, word in enumerate(node_proto.word_ids):
path[-1][2] = target + len(node_proto.children)
path_entry = create_path(path, word)
new_path_entry = hierarchy_proto.paths.add()
new_path_entry.MergeFrom(path_entry)
del path[-1]
return max_index
node = tree_proto.root_node
hierarchy_proto = hsm_pb2.HierarchyProto()
path = []
max_index = recursive_path_builder(node, path, hierarchy_proto, max_index)
return hierarchy_proto
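# Illustrative sketch, not part of this module: building a two-level tree and
# deriving its HierarchyProto. The word ids here are arbitrary; TreeProto is
# assumed to expose a 'root_node' field, as used by create_hierarchy above.
def _example_create_hierarchy():
    leaf_a = create_node_with_words([0, 1], name='leaf_a')
    leaf_b = create_node_with_words([2, 3], name='leaf_b')
    root = create_node_with_nodes([leaf_a, leaf_b], name='root')
    tree = hsm_pb2.TreeProto()
    tree.root_node.MergeFrom(root)
    # One PathProto per word, each path node carrying (index, length, target).
    return create_hierarchy(tree)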
|
## @package memonger
# Module caffe2.python.memonger
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import networkx as nx
import collections
import time
import heapq
import copy
from caffe2.python import workspace
from caffe2.proto import caffe2_pb2
import enum
import logging
import numpy as np
log = logging.getLogger("memonger")
log.setLevel(logging.INFO)
LiveRange = collections.namedtuple('LiveRange', ["defined", "used", "size"])
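# A blob's live range: 'defined' is the index of the first op that outputs
# the blob, 'used' is the index of the last op that reads it, and 'size' is
# the blob's size when blob sizes are provided (see compute_ranges below).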
def share_grad_blobs(
net,
losses,
param_grads,
namescope,
dont_share_blobs=None,
share_activations=True,
blob_shapes=None,
):
'''
    Implements an optimization similar to Torch's shareGradInput():
for the gradients that are passed between layers, share blobs between
operators when possible. This yields significant memory savings with
deep networks.
Returns an optimized protobuf (assign to net._net)
'''
def is_grad_blob(b):
name = str(b)
        # Note: also match the "_{namescope}" prefix, which arises from
        # auto-split gradient blobs.
return "_grad" in name and (name.startswith(namescope) or
name.startswith("_" + namescope)) and name not in param_grads
def is_grad_op(op):
# TODO: something smarter
for b in list(op.input) + list(op.output):
if is_grad_blob(b):
return True
return False
log.warn("NOTE: Executing memonger to optimize gradient memory")
# Collect ops that have something to do with gradients
if not namescope.endswith("/"):
namescope += "/"
netproto = copy.deepcopy(net.Proto())
activations = []
external_output = set(net.Proto().external_output)
# Hacky way to get activations, think of a better way
for op in net.Proto().op:
for b in op.output:
if b + "_w" in op.input and b not in external_output:
activations.append(b)
    # Drop the last two activations, as they are usually accessed externally
activations = set(activations[:-2])
# Gradient ops
grad_ops = [op for op in netproto.op if is_grad_op(op)]
return _compute_blob_recycling_for_dag(
netproto,
losses,
grad_ops,
lambda b: is_grad_blob(b) or (share_activations and b in activations),
namescope,
{} if dont_share_blobs is None else dont_share_blobs,
blob_shapes
)
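# Illustrative usage sketch (assumptions: 'model' is a model helper on which
# AddGradientOperators has been run, "loss" is its loss blob, and "namescope"
# prefixes its gradient blobs):
#
#   optim_proto = share_grad_blobs(
#       model.net, ["loss"], set(model.param_to_grad.values()), "namescope")
#   model.net._net = optim_proto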
def optimize_inference_for_dag(net, input_blobs, namescope=""):
netproto = copy.deepcopy(net.Proto())
external_input = set(net.Proto().external_input)
external_output = set(net.Proto().external_output)
def is_activation_blob(b):
return b not in external_input and b not in external_output
seen_as_output = set()
ops = list(net.Proto().op)
    # Sanity check: verify that all external inputs are properly accounted
    # for and that no gradient ops are included in 'net'
for op in ops:
for b in op.input:
if is_activation_blob(b) and b not in seen_as_output:
assert False, "{} not in external input".format(b)
seen_as_output = seen_as_output.union(set(op.output))
assert not op.is_gradient_op, \
"You can only pass inference-only nets to optimize_inference_for_dag"
return _compute_blob_recycling_for_dag(
netproto, input_blobs, ops, is_activation_blob,
namescope, set(), None,
)
def _compute_blob_recycling_for_dag(
netproto, heads, ops, is_shareable,
namescope, dont_share_blobs, blob_shapes=None,
):
'''
    Computes a blob-recycling assignment by traversing the computation DAG.
    The resulting model can be executed safely on a DAGNet.
'''
start_time = time.time()
# Create mapping from blobs to ops
blobs_to_ops = collections.defaultdict(lambda: [])
blob_input_count = collections.defaultdict(lambda: 0)
op_inputs = collections.defaultdict(lambda: 0)
op_visit_count = collections.defaultdict(lambda: 0)
share_counts = collections.defaultdict(lambda: 0)
blob_sizes = {} if blob_shapes is not None else None
# First figure out which of the shareable blobs
# are 'internal' to the optimization. For example, if optimizing
# only gradient ops, then activation blobs will be 'external' as they
# are not output by these ops.
optim_op_outputs = set()
for op in ops:
optim_op_outputs.update(set(op.output))
for i, op in enumerate(ops):
for inp in op.input:
if is_shareable(inp) or inp in heads:
if inp in optim_op_outputs:
blobs_to_ops[inp].append(i)
op_inputs[i] += 1
else:
# For external blobs, we don't increase the op_inputs
# count.
blobs_to_ops[inp].append(i)
share_counts[inp] = 1
# Traverse operators starting from the heads' blobs.
# Keep tabs on when blobs are seen first and last, and also
# when operators have their input satisfied. Share blobs only
    # within the same branch, to avoid problems with parallel workers.
output_blobs = set()
mapping = {}
unknown_shapes = set()
def infer_blob_size(b):
if b in blob_shapes:
return np.prod(blob_shapes[b])
else:
unknown_shapes.add(b)
return 0
saved_count = 0
def descend(op_idx, free_blobs):
cur_op = ops[op_idx]
new_free_blobs = set()
unused_free_blobs = set(free_blobs)
saved = 0
for inp in cur_op.input:
if is_shareable(inp):
blob_input_count[inp] += 1
if blob_input_count[inp] == len(blobs_to_ops[inp]):
actual_blob = inp if inp not in mapping else mapping[inp]
if actual_blob not in dont_share_blobs:
new_free_blobs.add(
(-share_counts[actual_blob], actual_blob),
)
for outp in cur_op.output:
if is_shareable(outp):
if outp not in output_blobs:
                    # First time this blob is seen as an output; we can assign a free blob
if len(free_blobs) > 0:
if blob_sizes is None:
(negcnt, freeb) = heapq.heappop(free_blobs)
else:
bsize = infer_blob_size(outp)
best_blob = None
best_size = -1
# Heuristic to choose the most suitably sized blob
for b in free_blobs:
sz = blob_sizes[b]
if sz >= best_size:
if best_size < bsize or best_size >= sz:
best_size = sz
best_blob = b
assert best_blob is not None
freeb = best_blob
# blob_sizes[freeb] = max(best_size, bsize)
free_blobs.remove(freeb)
saved += bsize
mapping[outp] = freeb
if freeb in unused_free_blobs:
unused_free_blobs.remove(freeb)
share_counts[freeb] += 1
output_blobs.add(outp)
for (cnt, nf) in new_free_blobs:
if blob_sizes is None:
heapq.heappush(free_blobs, (cnt, nf))
else:
if nf not in blob_sizes:
blob_sizes[nf] = infer_blob_size(outp)
assert nf not in free_blobs, \
"Blob {} double-inserted to free_blobs".format(nf)
free_blobs.append(nf)
free_blobs_fwd = free_blobs
for outp in cur_op.output:
for inp_op_idx in blobs_to_ops[outp]:
op_visit_count[inp_op_idx] += 1
# Descend only if we have satisfied all inputs
if op_visit_count[inp_op_idx] == op_inputs[inp_op_idx]:
(unused, saved_desc) = descend(inp_op_idx, free_blobs_fwd)
saved += saved_desc
unused_free_blobs = unused.intersection(unused_free_blobs)
# We can pass unused free blobs to other branch
free_blobs_fwd = list(
unused.intersection(set(free_blobs_fwd))
)
return (unused_free_blobs, saved)
    # Start the DFS from the head blobs (losses or inputs)
for head_blob in heads:
for op_idx in blobs_to_ops[head_blob]:
(_, saved) = descend(op_idx, [])
saved_count += saved
# Rename the shared blobs
shared_blobs = set(mapping.values())
renamed = {}
for j, b in enumerate(shared_blobs):
if b in optim_op_outputs:
renamed[b] = namescope + "__m{}_shared".format(j)
else:
renamed[b] = b
# Add the originators
mapping.update(renamed)
if saved_count > 0:
log.info("Remapping {} blobs, using {} shared; saved apprx {} MB".format(
len(mapping), len(renamed), int(saved_count * 4 / 1024 / 1024),
))
log.info("Could not infer sizes for: {}".format(unknown_shapes))
else:
log.info("Remapping {} blobs, using {} shared".format(
len(mapping), len(renamed),
))
apply_assignments(netproto, mapping)
log.info("Memonger memory optimization took {} secs".format(
time.time() - start_time),
)
return netproto
def _find_source_nodes(g):
''' Return nodes without predecessors '''
ret = []
for cn in g:
cur_pred = g.predecessors(cn)
if not cur_pred:
ret.append(cn)
return ret
def _find_target_nodes(g):
''' Return nodes without successors '''
ret = []
for cn in g:
cur_succ = g.successors(cn)
if not cur_succ:
ret.append(cn)
return ret
def _add_single_target_ifneeded(g):
targets = _find_target_nodes(g)
assert len(targets) >= 1
if len(targets) == 1:
return g
ret = copy.deepcopy(g)
def _next_available_idx(g):
ret = -1
for cn in g:
if cn > ret:
ret = cn
ret += 1
return ret
target_node_idx = _next_available_idx(g)
ret.add_node(target_node_idx)
for cn in targets:
ret.add_edge(cn, target_node_idx)
return ret
def _get_path(pred_list, dist_list):
''' Get the path from nx.bellman_ford()'s output '''
# distances are negative
assert all(dist_list[x] <= 0 for x in dist_list)
# node with longest distance to source is the target
target = min(dist_list, key=lambda x: dist_list[x])
ret = []
cur = target
while cur is not None:
ret.append(cur)
cur = pred_list[cur]
return list(reversed(ret))
def _get_longest_paths(g, source_nodes):
''' Get the longest path for nodes in 'source_nodes'
        Find it with bellman_ford() by setting every edge weight to -1
'''
ng = copy.deepcopy(g)
for u, v in ng.edges():
ng[u][v]["weight"] = -1
ret = {}
for cn in source_nodes:
pred, dist = nx.bellman_ford(ng, cn, weight="weight")
path = _get_path(pred, dist)
assert path[0] == cn
assert len(path) - 1 == -dist[path[-1]]
ret[cn] = path
return ret
def _build_tree(paths):
''' Build a tree for given paths based on common elements.
    The last element of every path is the same node; it becomes the root of the tree.
'''
assert all(cp[-1] == paths[0][-1] for cp in paths)
g = nx.DiGraph()
node_set = {y for x in paths for y in x}
g.add_nodes_from(node_set)
for cp in paths:
for ce in zip(cp[0:-1], cp[1:]):
g.add_edge(ce[1], ce[0])
root = paths[0][-1]
_compute_tree_height(g, root)
return (g, root)
def _compute_tree_height(g, root):
    ''' Compute the height of every node in the tree.
        Leaves have height 0.
'''
def _get_height(root):
children = g.successors(root)
height = 0
if children:
child_heights = [_get_height(x) for x in children]
height = max(child_heights) + 1
g.node[root]["height"] = height
return height
_get_height(root)
def _sort_tree_leaves(g, root):
''' For each node, sort its child nodes based on the height of the nodes.
Return the leaf nodes of the tree after sorting.
'''
def _get_height(root):
return g.node[root]["height"]
def _get_sorted_leaves(root):
children = g.successors(root)
if not children:
return [root]
child_heights = [_get_height(x) for x in children]
order = sorted(range(len(children)), key=lambda x: child_heights[x])
ret = []
for co in order:
cr = children[co]
ret += _get_sorted_leaves(cr)
return ret
return _get_sorted_leaves(root)
def topological_sort_traversal_longest_path(g):
    ''' The graph 'g' may contain several source nodes (nodes without an
        incoming edge), which could be in any order and still yield a valid
topological sorting result. We would like to arrange these source nodes
so that the average live spans of the computed blobs are shorter.
The idea is to sort the source nodes based on the length of their path to
the target node so that the one with longer path is used first.
This is done by:
- Add a single target node if there are multiple target nodes in 'g'.
- Find the longest path between each source and the target node.
- Convert the longest paths to a tree with the target node being the root
and source nodes being the leaves.
- Sort the nodes of the tree based on the height of the tree.
'''
gt = _add_single_target_ifneeded(g)
source_nodes = _find_source_nodes(gt)
lpaths = _get_longest_paths(gt, source_nodes)
tree, root = _build_tree(lpaths.values())
sorted_sources = _sort_tree_leaves(tree, root)
assert(sorted(sorted_sources) == sorted(source_nodes))
ret = nx.topological_sort(g, sorted_sources)
assert(len(ret) == len(g.node))
return ret
def topological_sort_traversal(g):
return nx.topological_sort(g)
def compute_ranges(linearized_ops, blob_sizes=None):
if not blob_sizes:
log.warning('Provide blob sizes to get more accurate assignments.')
blobs = collections.defaultdict(
lambda: LiveRange(defined=None, used=None, size=None))
for i, op in enumerate(linearized_ops):
for blob in op.input:
used = blobs[blob].used
if used is None:
used = i
else:
used = max(used, i)
blobs[blob] = blobs[blob]._replace(used=used)
blob_size = blob_sizes[blob] if blob_sizes else None
assert not blob_sizes or blob_size is not None
blobs[blob] = blobs[blob]._replace(size=blob_size)
for blob in op.output:
defined = blobs[blob].defined
if defined is None:
defined = i
else:
defined = min(defined, i)
blobs[blob] = blobs[blob]._replace(defined=defined)
blob_size = blob_sizes[blob] if blob_sizes else None
assert not blob_sizes or blob_size is not None
blobs[blob] = blobs[blob]._replace(size=blob_size)
return blobs
def is_compatible(candidate_range, assignment, static_blobs):
(name, range_) = assignment[-1]
if name in static_blobs:
return False
if candidate_range.defined is None or range_.defined is None \
or range_.used is None:
return False
return candidate_range.defined > range_.used
def compute_blob_assignments(assignments):
blob_assignments = {}
for assignment in assignments:
if len(assignment) == 1:
continue
last_blob, _ = assignment[-1]
for (blob, _) in assignment:
blob_assignments[blob] = last_blob
return blob_assignments
def _get_max_size(assignment):
if not assignment:
return 0
ret = max([x[1].size for x in assignment])
ret = 0 if ret is None else ret
return ret
def get_memory_usage(assignments):
ret = 0
for cur in assignments:
ret += _get_max_size(cur)
return ret
def compute_assignments_greedy(ranges_sorted, init_assignments=None):
assignments = init_assignments or []
visited = {y[0] for x in assignments for y in x}
for (name, range_) in ranges_sorted:
if name in visited:
continue
assigned = False
best_assignment = 0
min_dist = float("inf")
candidate_size = range_.size or 0
for idx, assignment in enumerate(assignments):
if is_compatible(range_, assignment, []):
assigned = True
dist = abs(_get_max_size(assignment) - candidate_size)
if dist < min_dist:
min_dist = dist
best_assignment = idx
if assigned:
assignment = assignments[best_assignment]
assignment.append((name, range_))
else:
assignments.append([(name, range_)])
return assignments
def _get_count(assignments):
''' Return number of blobs in assignments '''
if assignments:
return sum([len(x) for x in assignments])
return 0
def compute_assignments_dp(ranges_sorted, init_assignment, counter=None):
''' Compute assignment for blobs in 'ranges_sorted' on top of 'init_assignment'
using dynamic programming + recursion.
ranges_sorted: blobs sorted by 'used'
init_assignment: assignment to start with, blobs in 'ranges_sorted' should
not be used in 'init_assignment'
Using f(b, k, init) to represent the best assignment for blobs b[0:k]
given initial assignment 'init', we have
f(b, k, init) = f(b, j, init) +
find_best(b[j:k], f(b, j, init))
where j is the index of the last best assignment that is independent of
blob b[k - 1] (b[k - 1] is compatible with all assignments in
f(b, j, init)), and find_best(b1, init1) gives the best assignment
for blobs in 'b1' based on the initial assignment 'init1', and blobs
    b1[0:-1] should be incompatible with b1[-1]. f(b, len(b), []) gives
the best assignment for blobs 'b'.
For find_best(b, init), since b[0:-1] are not compatible with b[-1], we
could reduce it to a smaller problem to find best assignment for b[0:-1]
as
find_best(b, init) = min {
f(b[0:-1], len(b) - 1, init - x) + [x, b[-1]] for x in init, or
f(b[0:-1], len(b) - 1, init) + [b[-1]]
}
where min{} gives the assignment with minimum memory usage.
'''
def _get_compatible_prev(candidate_range, best_assignments, cur_idx):
        ''' Find the closest position k in best_assignments such that
        candidate_range is compatible with all assignments in
        best_assignments[k].
Return -1 if not found.
'''
def is_compatible_all(candidate_range, assignments):
            ''' Return True if candidate_range is compatible with every assignment. '''
return all([is_compatible(candidate_range[1], x, []) for x in assignments])
ii = cur_idx - 1
while ii >= 0:
cba = best_assignments[ii]
if is_compatible_all(candidate_range, cba):
return ii
ii -= 1
return -1
def _find_best(ranges, init_assignment, prev_best_assignment, counter):
''' Find the best assignment for blobs 'ranges' given an initialized
assignment 'init_assignment'.
        Blobs in ranges[0:-1] should be incompatible with blob ranges[-1].
'prev_best_assignment': best assignment for blobs in ranges[:-1]
By assigning ranges[-1] to each assignment k in 'init_assignment' or
in a new assignment, the problem becomes a smaller problem to find
the best assignment for ranges[0:-1] given the initial assignment
        init_assignment[0:k] + init_assignment[(k+1):].
'''
# Blob to check
find_range = ranges[-1]
# Blobs in ranges[0:-1] are incompatible with ranges[-1] so that we can
# reduce it to a smaller problem.
assert all(not is_compatible(x[1], [find_range], []) for x in ranges[0:-1])
sz = len(init_assignment)
best_candidates = []
# Try to assign 'find_range' to each assignment in init_assignment
for ii in range(sz):
if not is_compatible(find_range[1], init_assignment[ii], []):
continue
cur_best = copy.deepcopy(init_assignment)
cur_best[ii].append(find_range)
if len(ranges) > 1:
cur_best_tmp = [x for i, x in enumerate(cur_best) if i != ii]
# reduce to a smaller dp problem
cur_best_tmp = compute_assignments_dp(
ranges[:-1], cur_best_tmp, counter)
cur_best = cur_best_tmp + [cur_best[ii]]
best_candidates.append(cur_best)
# Try to put 'find_range' in a new assignment
best_candidates.append(prev_best_assignment + [[find_range]])
ret = min(best_candidates, key=lambda x: get_memory_usage(x))
return ret
if not counter:
counter = [0]
counter[0] += 1
if counter and counter[0] % 5000 == 0:
rs = [ranges_sorted[0][1].defined, ranges_sorted[-1][1].used]
log.info('Finding assignments {} ({} -> {})...'.format(
counter[0], rs[0], rs[1]))
init_assignment = init_assignment or []
# best_assignments[k]: best assignments for first k blobs ranges_sorted[0:(k+1)]
best_assignments = []
# Find best assignment for blobs ranges_sorted[0:ii]
for ii, cur_range in enumerate(ranges_sorted):
# closest best_assignment that is independent of ranges_sorted[ii]
prev_idx = _get_compatible_prev(cur_range, best_assignments, ii)
prev_best = copy.deepcopy(init_assignment) if prev_idx < 0 else \
copy.deepcopy(best_assignments[prev_idx])
# Need to find best assignment for blobs in 'ranges_part'
ranges_part = ranges_sorted[(prev_idx + 1):(ii + 1)]
cur_best = _find_best(
ranges_part, prev_best,
best_assignments[-1] if best_assignments else init_assignment,
counter)
assert _get_count(cur_best) == _get_count(prev_best) + len(ranges_part)
best_assignments.append(copy.deepcopy(cur_best))
assert len(best_assignments) == len(ranges_sorted)
best = best_assignments[-1]
return best
def get_updated_ranges(ranges, max_live=None):
''' Set LiveRange.defined = -1 if it is None
Set LiveRange.used = max_live if it is None
        Set LiveRange.size = 1 if it is None
'''
def _get_max_live(ranges):
        max_live = max(x[1].used for x in ranges if x[1].used is not None) + 1
return max_live
def _update_range(x, max_live, size):
cx = x
if x[1].defined is None:
cx = (cx[0], cx[1]._replace(defined=-1))
if x[1].used is None:
cx = (cx[0], cx[1]._replace(used=max_live))
if x[1].size is None:
cx = (cx[0], cx[1]._replace(size=size))
return cx
if max_live is None:
max_live = _get_max_live(ranges)
ranges = [_update_range(x, max_live, 1) for x in ranges]
return ranges
def compute_assignments(ranges, static_blobs, algo):
'''
algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
cost of more computation.
AssignmentAlgorithm.GREEDY may be better in the case 'blob_sizes' is
not provided.
'''
# Sort the ranges based on when they are last used.
# If LiveRange.used is None, then the blob is never used and could
# be consumed externally. Sort these to the end of the list as opposed
# to the beginning so that they can be shared as well.
ranges = sorted(
list(ranges.items()),
key=lambda p: (p[1].used is None, p[1].used),
)
# Update None values
ranges = get_updated_ranges(ranges)
# Sharable blobs
ranges_sharable = [x for x in ranges if x[0] not in static_blobs]
# Static blobs, not sharable
ranges_static = [x for x in ranges if x[0] in static_blobs]
log.info("Total sharable blobs {}".format(len(ranges_sharable)))
best_assignment = []
if algo == AssignmentAlgorithm.DYNAMIC_PROGRAMMING:
best_assignment = compute_assignments_dp(ranges_sharable, [])
elif algo == AssignmentAlgorithm.GREEDY:
best_assignment = compute_assignments_greedy(ranges_sharable, [])
else:
assert "Invalid algo name {}".format(algo)
best_assignment += [[x] for x in ranges_static]
# verify_assignments(best_assignment)
return best_assignment
def verify_assignments(assignments):
for cur in assignments:
for x, y in zip(cur[0:-1], cur[1:]):
assert x[1].used < y[1].defined
def compute_interference_graph(ops):
g = nx.DiGraph()
for i, op in enumerate(ops):
g.add_node(i, op=op)
for i, parent_op in enumerate(ops):
for j, child_op in enumerate(ops):
if i == j:
continue
if any(output in child_op.input for output in parent_op.output):
deps = set(child_op.input).intersection(parent_op.output)
g.add_edge(i, j, deps=deps)
assert nx.is_directed_acyclic_graph(g), child_op
return g
Optimization = collections.namedtuple(
'Optimization', ['net', 'assignments', 'blob_assignments'])
def apply_assignments(net, blob_assignments):
def canonical_name(blob):
if blob not in blob_assignments:
return blob
return blob_assignments[blob]
for op in net.op:
# Descend into subnets of the recurrent network
if op.type.startswith('RecurrentNetwork'):
apply_recurrent_blob_assignments(op, blob_assignments, canonical_name)
for i, input_ in enumerate(op.input):
op.input[i] = canonical_name(input_)
for i, output in enumerate(op.output):
op.output[i] = canonical_name(output)
def apply_recurrent_blob_assignments(op, blob_assignments, canonical_name):
log.debug("Applying assignments to recurrent op: {}".format(op.type))
import google.protobuf.text_format as protobuftx
step_args = [a for a in op.arg if a.name.endswith("step_net")]
for step_arg in step_args:
step_proto = caffe2_pb2.NetDef()
protobuftx.Merge(step_arg.s, step_proto)
apply_assignments(step_proto, blob_assignments)
for i, einp in enumerate(step_proto.external_input):
if einp in blob_assignments:
step_proto.external_input[i] = canonical_name(einp)
step_arg.s = str(step_proto)
# Store renamings
for blob, renamed in blob_assignments.items():
if blob in list(op.input) + list(op.output):
a = caffe2_pb2.Argument()
a.name = blob + ".rename"
a.s = str(renamed)
op.arg.extend([a])
class AssignmentAlgorithm(enum.Enum):
GREEDY = 0
DYNAMIC_PROGRAMMING = 1
def optimize_interference(net, static_blobs,
ordering_function=topological_sort_traversal,
blob_sizes=None,
algo=AssignmentAlgorithm.GREEDY):
"""
ordering_function: topological_sort_traversal or
topological_sort_traversal_longest_path.
topological_sort_traversal_longest_path gives better
results but needs a bit more computation.
algo: Method used to find assignments (AssignmentAlgorithm.GREEDY or
AssignmentAlgorithm.DYNAMIC_PROGRAMMING).
AssignmentAlgorithm.DYNAMIC_PROGRAMMING gives optimal solution at the
cost of more computation.
AssignmentAlgorithm.GREEDY may be better in the case 'blob_sizes' is
not provided.
"""
"""
1) Use a BFS traversal of the execution graph to generate an
ordering of the node executions.
2) Generate use-def ranges for each `blob` in the BFS traversal
order.
3) Assign blobs to `canonical blobs`
4) Rename blobs to canonical blobs
"""
net = copy.deepcopy(net)
g = compute_interference_graph(net.op)
ordering = ordering_function(g)
linearized_ops = [net.op[i] for i in ordering]
    # Reorder ops in net based on the computed linearized order.
# If the graph has multiple topological orderings and if the NetDef's
# ordering differs from the order used to compute ranges, then the
# runtime might end up overwriting blobs before they are used.
del net.op[:]
net.op.extend(linearized_ops)
ranges = compute_ranges(linearized_ops, blob_sizes)
assignments = compute_assignments(ranges, static_blobs, algo)
blob_assignments = compute_blob_assignments(assignments)
apply_assignments(net, blob_assignments)
return Optimization(
net=net,
blob_assignments=blob_assignments,
assignments=assignments)
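# Illustrative usage sketch (assumption: 'net_proto' is an inference NetDef
# whose external inputs/outputs must keep their names):
#
#   static = set(net_proto.external_input) | set(net_proto.external_output)
#   result = optimize_interference(net_proto, static)
#   optimized_proto, renaming = result.net, result.blob_assignments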
Statistics = collections.namedtuple(
'Statistics', ['baseline_nbytes', 'optimized_nbytes'])
def compute_statistics(assignments):
def blob_nbytes(blob):
return workspace.FetchBlob(blob).nbytes
blob_bytes = {
blob: blob_nbytes(blob) for assignment in assignments
for (blob, _) in assignment}
baseline_nbytes = sum(v for _, v in blob_bytes.items())
optimized_nbytes = sum(
max(blob_bytes[blob] for (blob, _) in assignment)
for assignment in assignments)
return Statistics(
baseline_nbytes=baseline_nbytes,
optimized_nbytes=optimized_nbytes)
def collect_blob_sizes(net):
''' Collect blob sizes from workspace '''
def blob_nbytes(blob):
return workspace.FetchBlob(blob).nbytes
blobs = {}
for op in net.op:
for blob in op.input:
blobs[blob] = blob_nbytes(blob)
for blob in op.output:
blobs[blob] = blob_nbytes(blob)
return blobs
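# The sketch below is illustrative only (not part of the original module): it
# shows how optimize_interference and compute_statistics are typically
# combined. The tiny model and blob names are assumptions for the example;
# memonger_test.py holds the authoritative, runnable version.
def _example_optimize_interference_usage():
    from caffe2.python import cnn
    m = cnn.CNNModelHelper()
    fc1 = m.FC("data", "fc1", dim_in=4, dim_out=4)
    m.FC(fc1, "fc2", dim_in=4, dim_out=4)
    # Blobs that must never be shared: model inputs/outputs and parameters.
    static_blobs = ["data", "fc2"] + \
        [o for op in m.param_init_net.Proto().op for o in op.output]
    optimization = optimize_interference(m.net.Proto(), static_blobs)
    # After running both the original and optimized nets in a workspace,
    # compute_statistics(optimization.assignments) reports the bytes saved.
    return optimization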
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.cnn import CNNModelHelper
import unittest
import numpy as np
class BrewTest(unittest.TestCase):
def setUp(self):
def myhelper(model, val=-1):
return val
if not brew.has_helper(myhelper):
brew.Register(myhelper)
self.myhelper = myhelper
def myhelper2(model, val=-1):
return val
if not brew.has_helper(myhelper2):
brew.Register(myhelper2)
self.myhelper2 = myhelper2
self.model = ModelHelper(name="test_model")
def test_dropout(self):
p = 0.2
X = np.ones((100, 100)).astype(np.float32) - p
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.dropout(model, "x", "out")
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertLess(abs(out.mean() - (1 - p)), 0.05)
def test_fc(self):
m, n, k = (15, 15, 15)
X = np.random.rand(m, k).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
brew.fc(model, "x", "out_1", k, n)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
def test_arg_scope(self):
myhelper = self.myhelper
myhelper2 = self.myhelper2
n = 15
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
with brew.arg_scope([myhelper, myhelper2], val=n):
res1 = brew.myhelper(self.model)
res2 = brew.myhelper2(self.model)
self.assertEqual([n, n], [res1, res2])
def test_arg_scope_single(self):
X = np.random.rand(64, 3, 32, 32).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
model = ModelHelper(name="test_model")
with brew.arg_scope(
brew.conv,
stride=2,
pad=2,
weight_init=('XavierFill', {}),
bias_init=('ConstantFill', {})
):
brew.conv(
model=model,
blob_in="x",
blob_out="out",
dim_in=3,
dim_out=64,
kernel=3,
)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertEqual(out.shape, (64, 64, 17, 17))
def test_arg_scope_nested(self):
myhelper = self.myhelper
n = 16
with brew.arg_scope([myhelper], val=-3), \
brew.arg_scope([myhelper], val=-2):
with brew.arg_scope([myhelper], val=n):
res = brew.myhelper(self.model)
self.assertEqual(n, res)
res = brew.myhelper(self.model)
self.assertEqual(res, -2)
res = brew.myhelper(self.model, val=15)
self.assertEqual(res, 15)
def test_double_register(self):
myhelper = self.myhelper
with self.assertRaises(AttributeError):
brew.Register(myhelper)
def test_has_helper(self):
self.assertTrue(brew.has_helper(brew.conv))
self.assertTrue(brew.has_helper("conv"))
def myhelper3():
pass
self.assertFalse(brew.has_helper(myhelper3))
def test_model_helper(self):
X = np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
my_arg_scope = {'order': 'NHWC'}
model = ModelHelper(name="test_model", arg_scope=my_arg_scope)
with brew.arg_scope(
brew.conv,
stride=2,
pad=2,
weight_init=('XavierFill', {}),
bias_init=('ConstantFill', {})
):
brew.conv(
model=model,
blob_in="x",
blob_out="out",
dim_in=3,
dim_out=64,
kernel=3,
)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
out = workspace.FetchBlob("out")
self.assertEqual(out.shape, (64, 17, 17, 64))
def test_cnn_model_helper_deprecated(self):
X = np.random.rand(64, 32, 32, 3).astype(np.float32) - 0.5
workspace.FeedBlob("x", X)
# CNNModelHelper is going to be deprecated soon. This test is only
        # covering some CNNModelHelper logic.
model = CNNModelHelper(name="test_model", order='NHWC')
self.assertEqual(model.arg_scope['order'], 'NHWC')
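# Illustrative sketch of the registration pattern exercised in setUp above,
# shown outside a TestCase. The helper name `scale_by` is hypothetical; any
# callable taking `model` as its first argument can be registered and then
# invoked through `brew`.
def _example_register_custom_helper():
    def scale_by(model, factor=1.0):
        return factor
    if not brew.has_helper(scale_by):
        brew.Register(scale_by)
    model = ModelHelper(name="example_model")
    return brew.scale_by(model, factor=2.0)  # returns 2.0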
|
## @package layer_test_util
# Module caffe2.python.layer_test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from caffe2.python import (
core,
layer_model_instantiator,
layer_model_helper,
schema,
test_util,
workspace,
)
import numpy as np
OpSpec = namedtuple("OpSpec", "type input output")
class LayersTestCase(test_util.TestCase):
def setUp(self):
super(LayersTestCase, self).setUp()
self.setup_example()
def setup_example(self):
"""
        This is an undocumented feature in hypothesis; see
https://github.com/HypothesisWorks/hypothesis-python/issues/59
"""
workspace.ResetWorkspace()
self.reset_model()
def reset_model(self, input_feature_schema=None, trainer_extra_schema=None):
input_feature_schema = input_feature_schema or schema.Struct(
('float_features', schema.Scalar((np.float32, (32,)))),
)
trainer_extra_schema = trainer_extra_schema or schema.Struct()
self.model = layer_model_helper.LayerModelHelper(
'test_model',
input_feature_schema=input_feature_schema,
trainer_extra_schema=trainer_extra_schema)
def new_record(self, schema_obj):
return schema.NewRecord(self.model.net, schema_obj)
def get_training_nets(self):
"""
We don't use
layer_model_instantiator.generate_training_nets_forward_only()
        here because it includes initialization of global constants, which makes
        testing tricky.
"""
train_net = core.Net('train_net')
train_init_net = core.Net('train_init_net')
for layer in self.model.layers:
layer.add_operators(train_net, train_init_net)
return train_init_net, train_net
def get_eval_net(self):
return layer_model_instantiator.generate_eval_net(self.model)
def get_predict_net(self):
return layer_model_instantiator.generate_predict_net(self.model)
def run_train_net(self):
self.model.output_schema = schema.Struct()
train_init_net, train_net = \
layer_model_instantiator.generate_training_nets(self.model)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
def run_train_net_forward_only(self):
self.model.output_schema = schema.Struct()
train_init_net, train_net = \
layer_model_instantiator.generate_training_nets_forward_only(
self.model)
workspace.RunNetOnce(train_init_net)
workspace.RunNetOnce(train_net)
def assertBlobsEqual(self, spec_blobs, op_blobs):
"""
        spec_blobs can either be None or a list of blob names. If it's None,
        then no assertion is performed. The elements of the list can also be
        None, in which case the corresponding position will not be checked.
"""
if spec_blobs is None:
return
self.assertEqual(len(spec_blobs), len(op_blobs))
for spec_blob, op_blob in zip(spec_blobs, op_blobs):
if spec_blob is None:
continue
self.assertEqual(spec_blob, op_blob)
def assertNetContainOps(self, net, op_specs):
"""
        Given a net and a list of OpSpec's, check that the net matches the spec.
"""
ops = net.Proto().op
self.assertEqual(len(op_specs), len(ops))
for op, op_spec in zip(ops, op_specs):
self.assertEqual(op_spec.type, op.type)
self.assertBlobsEqual(op_spec.input, op.input)
self.assertBlobsEqual(op_spec.output, op.output)
return ops
|
import numpy as np
import unittest
import sys
from caffe2.proto import caffe2_pb2, caffe2_legacy_pb2
from caffe2.python import core, cnn, workspace, device_checker, test_util
class TestMiniAlexNet(test_util.TestCase):
def _MiniAlexNetNoDropout(self, order):
# First, AlexNet using the cnn wrapper.
model = cnn.CNNModelHelper(order, name="alexnet")
conv1 = model.Conv(
"data",
"conv1",
3,
16,
11,
("XavierFill", {}),
("ConstantFill", {}),
stride=4,
pad=0
)
relu1 = model.Relu(conv1, "relu1")
norm1 = model.LRN(relu1, "norm1", size=5, alpha=0.0001, beta=0.75)
pool1 = model.MaxPool(norm1, "pool1", kernel=3, stride=2)
conv2 = model.GroupConv(
pool1,
"conv2",
16,
32,
5,
("XavierFill", {}),
("ConstantFill", {"value": 0.1}),
group=2,
stride=1,
pad=2
)
relu2 = model.Relu(conv2, "relu2")
norm2 = model.LRN(relu2, "norm2", size=5, alpha=0.0001, beta=0.75)
pool2 = model.MaxPool(norm2, "pool2", kernel=3, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
32,
64,
3,
("XavierFill", {'std': 0.01}),
("ConstantFill", {}),
pad=1
)
relu3 = model.Relu(conv3, "relu3")
conv4 = model.GroupConv(
relu3,
"conv4",
64,
64,
3,
("XavierFill", {}),
("ConstantFill", {"value": 0.1}),
group=2,
pad=1
)
relu4 = model.Relu(conv4, "relu4")
conv5 = model.GroupConv(
relu4,
"conv5",
64,
32,
3,
("XavierFill", {}),
("ConstantFill", {"value": 0.1}),
group=2,
pad=1
)
relu5 = model.Relu(conv5, "relu5")
pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2)
fc6 = model.FC(
pool5, "fc6", 1152, 1024, ("XavierFill", {}),
("ConstantFill", {"value": 0.1})
)
relu6 = model.Relu(fc6, "relu6")
fc7 = model.FC(
relu6, "fc7", 1024, 1024, ("XavierFill", {}),
("ConstantFill", {"value": 0.1})
)
relu7 = model.Relu(fc7, "relu7")
fc8 = model.FC(
relu7, "fc8", 1024, 5, ("XavierFill", {}),
("ConstantFill", {"value": 0.0})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
loss = model.AveragedLoss([xent], ["loss"])
model.AddGradientOperators([loss])
return model
def _testMiniAlexNet(self, order):
# First, we get all the random initialization of parameters.
model = self._MiniAlexNetNoDropout(order)
workspace.ResetWorkspace()
workspace.RunNetOnce(model.param_init_net)
inputs = dict(
[(str(name), workspace.FetchBlob(str(name))) for name in
model.params]
)
if order == "NCHW":
inputs["data"] = np.random.rand(4, 3, 227, 227).astype(np.float32)
else:
inputs["data"] = np.random.rand(4, 227, 227, 3).astype(np.float32)
inputs["label"] = np.array([1, 2, 3, 4]).astype(np.int32)
cpu_device = caffe2_pb2.DeviceOption()
cpu_device.device_type = caffe2_pb2.CPU
gpu_device = caffe2_pb2.DeviceOption()
gpu_device.device_type = caffe2_pb2.CUDA
checker = device_checker.DeviceChecker(0.05, [cpu_device, gpu_device])
ret = checker.CheckNet(
model.net.Proto(),
inputs,
# The indices sometimes may be sensitive to small numerical
# differences in the input, so we ignore checking them.
ignore=['_pool1_idx', '_pool2_idx', '_pool5_idx']
)
self.assertEqual(ret, True)
@unittest.skipIf(not workspace.has_gpu_support,
"No GPU support. Skipping test.")
def testMiniAlexNetNCHW(self):
self._testMiniAlexNet("NCHW")
# No Group convolution support for NHWC right now
#@unittest.skipIf(not workspace.has_gpu_support,
# "No GPU support. Skipping test.")
#def testMiniAlexNetNHWC(self):
# self._testMiniAlexNet("NHWC")
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import net_printer
from caffe2.python.checkpoint import Job
from caffe2.python.net_builder import ops
from caffe2.python.task import Task, final_output
import unittest
def example_loop():
with Task():
total = ops.Const(0)
total_large = ops.Const(0)
total_small = ops.Const(0)
total_tiny = ops.Const(0)
with ops.loop(10) as loop:
outer = ops.Mul([loop.iter(), ops.Const(10)])
with ops.loop(loop.iter()) as inner:
val = ops.Add([outer, inner.iter()])
with ops.If(ops.GE([val, ops.Const(80)])) as c:
ops.Add([total_large, val], [total_large])
with c.Elif(ops.GE([val, ops.Const(50)])) as c:
ops.Add([total_small, val], [total_small])
with c.Else():
ops.Add([total_tiny, val], [total_tiny])
ops.Add([total, val], total)
def example_task():
with Task():
with ops.task_init():
one = ops.Const(1)
two = ops.Add([one, one])
with ops.task_init():
three = ops.Const(3)
accum = ops.Add([two, three])
# here, accum should be 5
with ops.task_exit():
# here, accum should be 6, since this executes after lines below
seven_1 = ops.Add([accum, one])
six = ops.Add([accum, one])
ops.Add([accum, one], [accum])
seven_2 = ops.Add([accum, one])
o6 = final_output(six)
o7_1 = final_output(seven_1)
o7_2 = final_output(seven_2)
return o6, o7_1, o7_2
def example_job():
with Job() as job:
with job.init_group:
example_loop()
example_task()
return job
class TestNetPrinter(unittest.TestCase):
def test_print(self):
self.assertTrue(len(net_printer.to_string(example_job())) > 0)
def test_valid_job(self):
job = example_job()
with job:
with Task():
# distributed_ctx_init_* ignored by analyzer
ops.Add(['distributed_ctx_init_a', 'distributed_ctx_init_b'])
net_printer.analyze(example_job())
def test_undefined_blob(self):
job = example_job()
with job:
with Task():
ops.Add(['a', 'b'])
with self.assertRaises(AssertionError):
net_printer.analyze(job)
def test_multiple_definition(self):
job = example_job()
with job:
with Task():
ops.Add([ops.Const(0), ops.Const(1)], 'out1')
with Task():
ops.Add([ops.Const(2), ops.Const(3)], 'out1')
with self.assertRaises(AssertionError):
net_printer.analyze(job)
|
## @package dataio
# Module caffe2.python.dataio
"""
Defines the base interface for reading and writing operations.
Readers/Writers are objects that produce operations that read/write sequences
of data. Each operation reads or writes a list of BlobReferences.
Readers and Writers must be implemented such that read and write operations
are atomic and thread safe.
Examples of possible Readers and Writers:
QueueReader, QueueWriter,
    DatasetReader, DatasetWriter.
See `dataset.py` for an example of implementation.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.schema import Field, Struct, from_blob_list
import numpy as np
class Reader(object):
    """
    Reader is an abstract class to be implemented in order to provide
    operations capable of iterating through a dataset or stream of data.
    A Reader must implement at least one operation, `read`, which
    adds operations to a net that read the next batch of data. Readers can
    optionally support the `reset` operation, which is useful when multiple
    passes over the data are required.
    """
def __init__(self, schema=None):
if schema is not None:
assert isinstance(schema, Field)
self._schema = schema
def schema(self):
"""
Return the schema associated with the Reader
"""
assert self._schema is not None, 'Schema not provided for this reader.'
return self._schema
def _set_schema(self, schema):
self._schema = schema
def setup_ex(self, init_net, finish_net):
"""Nets to be executed once at startup and finish.
Experimental extension. Don't use yet"""
pass
def read_ex(self, local_init_net, local_finish_net):
"""Experimental extension to the interface. Don't use yet"""
read_net = core.Net('reader_body')
return ([read_net], ) + self.read(read_net)
def read_record_ex(self, local_init_net, local_finish_net):
"""Experimental extension to the interface. Don't use yet"""
nets, should_stop, fields = self.read_ex(
local_init_net, local_finish_net)
if self._schema:
fields = from_blob_list(self._schema, fields)
return nets, should_stop, fields
"""
Reader is a abstract class to be implemented in order to provide
operations capable of iterating through a dataset or stream of data.
A Reader must implement at least one operation, `read`, which
adds operations to a net that read the next batch of data. Readers can
optionally support the `reset` operation, which is useful when multiple
passes over the data are required.
"""
def read(self, read_net):
"""
        Add operations to read_net that will read the next batch of data
and return a list of BlobReference representing the blobs that will
contain the batches produced.
Operations added to `read_net` must be thread safe and atomic, that is,
it should be possible to clone `read_net` and run multiple instances of
it in parallel.
Args:
read_net: the net that will be appended with read operations
Returns:
A tuple (should_stop, fields), with:
                should_stop: BlobReference pointing to a boolean scalar
                             blob that indicates whether the read operation
                             was successful or whether the end of data has
                             been reached.
fields: A tuple of BlobReference containing the latest batch
of data that was read.
"""
raise NotImplementedError('Readers must implement `read`.')
def reset(self, net):
"""Append operations to `net` that will reset the reader.
This can be used to read the data multiple times.
Not all readers support this operation.
"""
        raise NotImplementedError('This reader cannot be reset.')
def read_record(self, read_net):
should_stop, fields = self.read(read_net)
if self._schema:
fields = from_blob_list(self._schema, fields)
return should_stop, fields
def execution_step(self, reader_net_name=None, external_should_stop=None):
"""Create an execution step with a net containing read operators.
The execution step will contain a `stop_blob` that knows how to stop
        the execution loop when the end of data is reached.
E.g.:
read_step, fields = reader.execution_step()
consume_net = core.Net('consume')
consume_net.Print(fields[0], [])
p = core.Plan('reader')
p.AddStep(read_step.AddNet(consume_net))
core.RunPlan(p)
Args:
            reader_net_name: (optional) the name of the reader_net to be
                             created. The execution step will
                             be named accordingly.
            external_should_stop: (optional) a blob that, when True, will also
                             stop the read loop.
Returns:
A tuple (read_step, fields), with:
read_step: A newly created execution step containing a net with
read operations. The step will have `stop_blob` set,
in order to stop the loop on end of data.
fields: A tuple of BlobReference containing the latest batch
of data that was read.
"""
reader_net = core.Net(reader_net_name or 'reader')
should_stop, fields = self.read_record(reader_net)
if external_should_stop is not None:
should_stop = reader_net.Or([external_should_stop, should_stop])
read_step = core.execution_step(
            '{}_step'.format(reader_net_name or 'reader'),
reader_net,
should_stop_blob=should_stop)
return (read_step, fields)
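# Minimal illustrative Reader (a sketch, not used elsewhere in this module):
# it produces a constant scalar on every read and never signals end of data,
# demonstrating the (should_stop, fields) contract of `read`. The blob values
# are arbitrary assumptions.
class _ConstReaderExample(Reader):
    def read(self, read_net):
        should_stop = read_net.ConstantFill(
            [], 1, shape=[], value=False, dtype=core.DataType.BOOL)
        value = read_net.ConstantFill([], 1, shape=[], value=1.0)
        return should_stop, [value]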
class Writer(object):
"""
    Writer is an abstract class to be implemented in order to provide
    operations capable of feeding a data stream or a dataset.
    A Writer must implement 2 operations:
    `write`, which adds operations to a net that write the next batch of
    data, and `commit`, which adds operations to a net in order to indicate
    that no more data will be written.
"""
_schema = None
def schema(self):
return self._schema
def write(self, writer_net, fields):
"""Add operations to `writer_net` that write the next batch of data.
Operations added to the net must be thread-safe and unique, that is:
multiple writers must be able to write to the dataset in parallel.
Args:
fields: a tuple of BlobReference containing the batch of data to
write.
"""
raise NotImplementedError('Writers must implement write.')
def write_record(self, writer_net, fields):
if isinstance(fields, Field):
self._schema = fields
fields = fields.field_blobs()
self.write(writer_net, fields)
def setup_ex(self, init_net, finish_net):
"""Experimental, don't use yet"""
self.commit(finish_net)
def write_ex(self, fields, local_init_net, local_finish_net, stop_blob):
"""Experimental extension to the interface. Don't use yet"""
write_net = core.Net('write_net')
self.write(write_net, fields)
return [write_net]
def write_record_ex(
self, fields, local_init_net, local_finish_net, stop_blob=None):
"""Experimental extension to the interface. Don't use yet."""
if isinstance(fields, Field):
self._schema = fields
fields = fields.field_blobs()
if stop_blob is None:
stop_blob = local_init_net.NextName("dequeue_status")
write_nets = self.write_ex(
fields, local_init_net, local_finish_net, stop_blob)
return (write_nets, stop_blob)
def commit(self, finish_net):
"""Add operations to `finish_net` that signal end of data.
This must be implemented by all Writers, but may be no-op for some
of them.
"""
pass
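# Minimal illustrative Writer (a sketch, not used elsewhere in this module):
# it logs each written blob via the Print op and inherits the no-op `commit`
# from the base class.
class _PrintWriterExample(Writer):
    def write(self, writer_net, fields):
        for field in fields:
            writer_net.Print(field, [])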
class ReaderBuilder(object):
""" Allow usage of a reader in distributed fashion. """
def schema(self):
raise NotImplementedError()
def enqueue_splits(self, net, split_queue):
raise NotImplementedError()
def splits(self, net):
raise NotImplementedError()
def new_reader(self, split_queue):
raise NotImplementedError()
class PipedReaderBuilder(ReaderBuilder):
"""
    ReaderBuilder that modifies the underlying builder by calling the `piper`
    function on each new reader produced and returning the result of
    the call. This way, it is possible to append data processing
pipelines that will be replicated for each reader that gets created.
E.g.:
PipedReaderBuilder(
ReaderBuilder(...),
lambda reader: pipe(reader, processor=my_proc))
"""
def __init__(self, builder, piper):
self._builder = builder
self._piper = piper
def schema(self):
return self._builder.schema()
def enqueue_splits(self, net, split_queue):
return self._builder.enqueue_splits(net, split_queue)
def splits(self, net):
return self._builder.splits(net)
def new_reader(self, split_queue):
output = self._piper(self._builder.new_reader(split_queue))
return output if isinstance(output, Reader) else output.reader()
class Pipe(object):
def __init__(self, schema=None, obj_key=None):
self._num_writers = 0
self._num_readers = 0
self._schema = schema
self._obj_key = obj_key
def schema(self):
return self._schema
def setup(self, global_init_net):
pass
def reader(self):
raise NotImplementedError()
def writer(self):
raise NotImplementedError()
def num_readers(self):
return self._num_readers
def num_writers(self):
return self._num_writers
def _new_writer(self, writer_schema, writer_init_net):
if writer_schema is not None and self._schema is None:
self._schema = writer_schema
self._num_writers += 1
if self._obj_key is not None:
writer_init_net.add_attribute(self._obj_key, self)
def _new_reader(self, reader_init_net):
self._num_readers += 1
if self._obj_key is not None:
reader_init_net.add_attribute(self._obj_key, self)
class CounterReader(Reader):
""" Reader that produces increasing integers. """
def __init__(self):
Reader.__init__(self, schema=Struct(('iter', np.int64)))
self.counter = None
self.should_stop = None
def setup_ex(self, global_init_net, global_finish_net):
if self.counter is None:
self.counter = global_init_net.CreateCounter([], init_count=0)
self.should_stop = global_init_net.ConstantFill(
[], shape=[], dtype=core.DataType.BOOL, value=False)
def read_ex(self, local_init_net, local_finish_net):
count_net = core.Net('limited_reader_counter')
value = count_net.CountUp([self.counter], 1)
return [count_net], self.should_stop, [value]
class ReaderWithLimit(Reader):
"""
Reader that stops after `num_iter` calls.
    If `num_iter` is None, it becomes a simple reader that exports a global
    flag for "out of data".
"""
def __init__(self, reader, num_iter=1):
Reader.__init__(self, schema=reader._schema)
self.reader = reader
self.counter = None
self.num_iter = num_iter
net = core.Net('reader_with_limit')
self._data_finished = net.AddExternalInput(
net.NextName('data_finished'))
if self.num_iter is not None:
self.counter = net.AddExternalInput(net.NextName('counter'))
def setup_ex(self, global_init_net, global_finish_net):
if self.counter:
global_init_net.CreateCounter(
[], [self.counter], init_count=int(self.num_iter))
self.reader.setup_ex(global_init_net, global_finish_net)
global_init_net.ConstantFill(
[], [self._data_finished],
shape=[], value=False, dtype=core.DataType.BOOL)
def read_ex(self, local_init_net, local_finish_net):
""" 1. check if we reached number of iterations and populate the same
should_stop blob """
count_net = core.Net('limited_reader_counter')
if self.counter:
should_stop = count_net.CountDown([self.counter], 1)
else:
should_stop = count_net.ConstantFill(
[], 1,
shape=[], value=False, dtype=core.DataType.BOOL)
""" 2. call original reader """
nets, local_data_finished, fields = self.reader.read_ex(
local_init_net, local_finish_net)
self._set_schema(self.reader._schema)
""" 3. check if original reader is done. """
check_done_net = core.Net('limited_reader_post')
# copy to the same blob as the counter output to trigger reader
# stopping
check_done_net.Copy(local_data_finished, should_stop)
# update global flag that underlying reader is done
check_done_net.Or([self._data_finished, local_data_finished],
[self._data_finished])
# this relies on `should_stop` being called after each net.
return [count_net] + nets + [check_done_net], should_stop, fields
def data_finished(self):
"""
Return a blob that can be checked after the end of the reading task,
        which will contain a scalar boolean indicating whether the underlying
        reader has been exhausted (True) or whether we stopped because we
        reached the limit of iterations (False).
"""
return self._data_finished
def CountUntil(num_iter):
return ReaderWithLimit(CounterReader(), num_iter)
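# Usage sketch for CountUntil (illustrative only; the net names below are
# arbitrary): build, but do not run, the nets for a reader limited to 5 reads.
# Note that this exercises the experimental `*_ex` interface defined above.
def _example_count_until_usage():
    reader = CountUntil(5)
    init_net = core.Net('example_init')
    finish_net = core.Net('example_finish')
    reader.setup_ex(init_net, finish_net)
    nets, should_stop, fields = reader.read_ex(
        core.Net('example_local_init'), core.Net('example_local_finish'))
    return nets, should_stop, fields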
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
import numpy as np
import unittest
import pickle
class TestDB(unittest.TestCase):
def testPicklable(self):
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.List(schema.Scalar(dtype=str)))
)
s2 = pickle.loads(pickle.dumps(s))
for r in (s, s2):
self.assertTrue(isinstance(r.field1, schema.Scalar))
self.assertTrue(isinstance(r.field2, schema.List))
self.assertTrue(getattr(r, 'non_existent', None) is None)
def testNormalizeField(self):
s = schema.Struct(('field1', np.int32), ('field2', str))
self.assertEquals(
s,
schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.Scalar(dtype=str))
)
)
def testTuple(self):
s = schema.Tuple(np.int32, str, np.float32)
s2 = schema.Struct(
('field_0', schema.Scalar(dtype=np.int32)),
('field_1', schema.Scalar(dtype=np.str)),
('field_2', schema.Scalar(dtype=np.float32))
)
self.assertEquals(s, s2)
self.assertEquals(s[0], schema.Scalar(dtype=np.int32))
self.assertEquals(s[1], schema.Scalar(dtype=np.str))
self.assertEquals(s[2], schema.Scalar(dtype=np.float32))
self.assertEquals(
s[2, 0],
schema.Struct(
('field_2', schema.Scalar(dtype=np.float32)),
('field_0', schema.Scalar(dtype=np.int32)),
)
)
# test iterator behavior
for i, (v1, v2) in enumerate(zip(s, s2)):
self.assertEquals(v1, v2)
self.assertEquals(s[i], v1)
self.assertEquals(s2[i], v1)
def testRawTuple(self):
s = schema.RawTuple(2)
self.assertEquals(
s, schema.Struct(
('field_0', schema.Scalar()), ('field_1', schema.Scalar())
)
)
self.assertEquals(s[0], schema.Scalar())
self.assertEquals(s[1], schema.Scalar())
def testStructIndexing(self):
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', schema.List(schema.Scalar(dtype=str))),
('field3', schema.Struct()),
)
self.assertEquals(s['field2'], s.field2)
self.assertEquals(s['field2'], schema.List(schema.Scalar(dtype=str)))
self.assertEquals(s['field3'], schema.Struct())
self.assertEquals(
s['field2', 'field1'],
schema.Struct(
('field2', schema.List(schema.Scalar(dtype=str))),
('field1', schema.Scalar(dtype=np.int32)),
)
)
def testListInStructIndexing(self):
a = schema.List(schema.Scalar(dtype=str))
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', a)
)
self.assertEquals(s['field2:lengths'], a.lengths)
self.assertEquals(s['field2:items'], a.items)
with self.assertRaises(KeyError):
s['fields2:items:non_existent']
with self.assertRaises(KeyError):
s['fields2:non_existent']
def testMapInStructIndexing(self):
a = schema.Map(
schema.Scalar(dtype=np.int32),
schema.Scalar(dtype=np.float32),
)
s = schema.Struct(
('field1', schema.Scalar(dtype=np.int32)),
('field2', a)
)
self.assertEquals(s['field2:keys'], a.keys)
self.assertEquals(s['field2:values'], a.values)
with self.assertRaises(KeyError):
s['fields2:keys:non_existent']
def testPreservesMetadata(self):
s = schema.Struct(
('a', schema.Scalar(np.float32)), (
'b', schema.Scalar(
np.int32,
metadata=schema.Metadata(categorical_limit=5)
)
), (
'c', schema.List(
schema.Scalar(
np.int32,
metadata=schema.Metadata(categorical_limit=6)
)
)
)
)
# attach metadata to lengths field
s.c.lengths.set_metadata(schema.Metadata(categorical_limit=7))
self.assertEqual(None, s.a.metadata)
self.assertEqual(5, s.b.metadata.categorical_limit)
self.assertEqual(6, s.c.value.metadata.categorical_limit)
self.assertEqual(7, s.c.lengths.metadata.categorical_limit)
sc = s.clone()
self.assertEqual(None, sc.a.metadata)
self.assertEqual(5, sc.b.metadata.categorical_limit)
self.assertEqual(6, sc.c.value.metadata.categorical_limit)
self.assertEqual(7, sc.c.lengths.metadata.categorical_limit)
sv = schema.from_blob_list(
s, [
np.array([3.4]), np.array([2]), np.array([3]),
np.array([1, 2, 3])
]
)
self.assertEqual(None, sv.a.metadata)
self.assertEqual(5, sv.b.metadata.categorical_limit)
self.assertEqual(6, sv.c.value.metadata.categorical_limit)
self.assertEqual(7, sv.c.lengths.metadata.categorical_limit)
def testDupField(self):
with self.assertRaises(ValueError):
schema.Struct(
('a', schema.Scalar()),
('a', schema.Scalar()))
def testAssignToField(self):
with self.assertRaises(TypeError):
s = schema.Struct(('a', schema.Scalar()))
s.a = schema.Scalar()
def testPreservesEmptyFields(self):
s = schema.Struct(
('a', schema.Scalar(np.float32)),
('b', schema.Struct()),
)
sc = s.clone()
self.assertIn("a", sc.fields)
self.assertIn("b", sc.fields)
sv = schema.from_blob_list(s, [np.array([3.4])])
self.assertIn("a", sv.fields)
self.assertIn("b", sv.fields)
self.assertEqual(0, len(sv.b.fields))
def testStructAddition(self):
s1 = schema.Struct(
('a', schema.Scalar())
)
s2 = schema.Struct(
('b', schema.Scalar())
)
s = s1 + s2
self.assertIn("a", s.fields)
self.assertIn("b", s.fields)
with self.assertRaises(TypeError):
s1 + s1
with self.assertRaises(TypeError):
s1 + schema.Scalar()
def testStructNestedAddition(self):
s1 = schema.Struct(
('a', schema.Scalar()),
('b', schema.Struct(
('c', schema.Scalar())
)),
)
s2 = schema.Struct(
('b', schema.Struct(
('d', schema.Scalar())
))
)
s = s1 + s2
self.assertEqual(['a', 'b:c', 'b:d'], s.field_names())
s3 = schema.Struct(
('b', schema.Scalar()),
)
with self.assertRaises(TypeError):
s = s1 + s3
def testGetFieldByNestedName(self):
st = schema.Struct(
('a', schema.Scalar()),
('b', schema.Struct(
('c', schema.Struct(
('d', schema.Scalar()),
)),
)),
)
self.assertRaises(KeyError, st.__getitem__, '')
self.assertRaises(KeyError, st.__getitem__, 'x')
self.assertRaises(KeyError, st.__getitem__, 'x:y')
self.assertRaises(KeyError, st.__getitem__, 'b:c:x')
a = st['a']
self.assertTrue(isinstance(a, schema.Scalar))
bc = st['b:c']
self.assertIn('d', bc.fields)
bcd = st['b:c:d']
self.assertTrue(isinstance(bcd, schema.Scalar))
def testAddFieldByNestedName(self):
f_a = schema.Scalar(blob=core.BlobReference('blob1'))
f_b = schema.Struct(
('c', schema.Struct(
('d', schema.Scalar(blob=core.BlobReference('blob2'))),
)),
)
f_x = schema.Struct(
('x', schema.Scalar(blob=core.BlobReference('blob3'))),
)
with self.assertRaises(TypeError):
st = schema.Struct(
('a', f_a),
('b', f_b),
('b:c:d', f_x),
)
with self.assertRaises(TypeError):
st = schema.Struct(
('a', f_a),
('b', f_b),
('b:c:d:e', f_x),
)
st = schema.Struct(
('a', f_a),
('b', f_b),
('e:f', f_x),
)
self.assertEqual(['a', 'b:c:d', 'e:f:x'], st.field_names())
self.assertEqual(['blob1', 'blob2', 'blob3'], st.field_blobs())
st = schema.Struct(
('a', f_a),
('b:c:e', f_x),
('b', f_b),
)
self.assertEqual(['a', 'b:c:e:x', 'b:c:d'], st.field_names())
self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())
st = schema.Struct(
('a:a1', f_a),
('b:b1', f_b),
('a', f_x),
)
self.assertEqual(['a:a1', 'a:x', 'b:b1:c:d'], st.field_names())
self.assertEqual(['blob1', 'blob3', 'blob2'], st.field_blobs())
def testContains(self):
st = schema.Struct(
('a', schema.Scalar()),
('b', schema.Struct(
('c', schema.Struct(
('d', schema.Scalar()),
)),
)),
)
self.assertTrue('a' in st)
self.assertTrue('b:c' in st)
self.assertTrue('b:c:d' in st)
self.assertFalse('' in st)
self.assertFalse('x' in st)
self.assertFalse('b:c:x' in st)
self.assertFalse('b:c:d:x' in st)
|
## @package checkpoint
# Module caffe2.python.checkpoint
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import logging
from caffe2.python import core, context
from caffe2.python.net_builder import ops
from caffe2.python.task import Node, Task, TaskGroup, TaskOutput, WorkspaceType
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# The name of the special net that is used to store all the blob names in the
# workspace.
__BLOB_NAMES_NET__ = 'get_blob_list'
@context.define_context()
class Job(object):
"""
A Job defines three TaskGroups: the `init_group`, the `epoch_group` and the
`exit_group` which will be run by a JobRunner.
The `init_group` will be run only once at startup. Its role is to
initialize globally persistent blobs such as model weights, accumulators
and data file lists.
The `epoch_group` will be run in a loop after init_group. The loop will
exit when any of the stop signals added with `add_stop_signal` is True
at the end of an epoch.
The `exit_group` will be run only once at the very end of the job, when one
    of the stopping criteria for `epoch_group` was met. The role of this group
    is to save the results of training at the end of the job.
Jobs are context-driven, so that Tasks can be added to the active Job
without having to explicitly pass the job object around.
Example of usage:
def build_reader(partitions):
with Job.current().init_group:
reader = HiveReader(init_reader, ..., partitions)
Task(step=init_reader)
with Job.current().epoch_group:
limited_reader = ReaderWithLimit(reader, num_iter=10000)
data_queue = pipe(limited_reader, num_threads=8)
Job.current().add_stop_signal(limited_reader.data_finished())
return data_queue
def build_hogwild_trainer(reader, model):
with Job.current().init_group:
Task(step=model.param_init_net)
with Job.current().epoch_group:
pipe(reader, processor=model, num_threads=8)
with Job.current().exit_group:
Task(step=model.save_model_net)
with Job() as job:
reader = build_reader(partitions)
model = build_model(params)
build_hogwild_trainer(reader, model)
"""
def __init__(self,
init_group=None, epoch_group=None,
exit_group=None, stop_signals=None,
nodes_to_checkpoint=None):
self.init_group = init_group or TaskGroup(
workspace_type=WorkspaceType.GLOBAL)
self.epoch_group = epoch_group or TaskGroup()
self.exit_group = exit_group or TaskGroup()
self.stop_signals = stop_signals or []
self._nodes_to_checkpoint = nodes_to_checkpoint
def nodes_to_checkpoint(self):
if self._nodes_to_checkpoint:
return self._nodes_to_checkpoint
else:
return self.init_group.used_nodes()
def compile(self, session_class):
return Job(
init_group=session_class.compile(self.init_group),
epoch_group=session_class.compile(self.epoch_group),
exit_group=session_class.compile(self.exit_group),
stop_signals=self.stop_signals,
nodes_to_checkpoint=self.nodes_to_checkpoint())
def __enter__(self):
self.epoch_group.__enter__()
return self
def __exit__(self, *args):
self.epoch_group.__exit__()
def add_stop_signal(self, output):
if isinstance(output, core.BlobReference):
t = Task(outputs=[output], group=self.epoch_group)
output = t.outputs()[0]
assert isinstance(output, TaskOutput)
self.stop_signals.append(output)
class CheckpointManager(object):
"""
Controls saving and loading of workspaces on every epoch boundary of a job.
If a CheckpointManager instance is passed to JobRunner, then JobRunner will
call `init`, `read` and `save` at different moments in between epoch runs.
"""
def __init__(self, db, db_type):
self._db = db
self._db_type = db_type
# make sure these blobs are the first in the checkpoint file.
self._net = core.Net('!!checkpoint_mngr')
self._blob_names = self._net.AddExternalInput('blob_names')
self._names_output = None
def init(self, nodes=None, retrieve_from_epoch=None):
"""
Build a Task that will be run once after the job's `init_group` is run.
This task will determine which blobs need to be checkpointed.
If retrieve_from_epoch is not None, then the checkpoint metadata is
retrieved from a previously saved checkpoint.
"""
assert nodes is None or len(nodes) == 1, (
            'CheckpointManager only supports a single node.')
with Task(outputs=[self._blob_names]) as task:
if retrieve_from_epoch is None:
ops.GetAllBlobNames(
[],
self._blob_names,
include_shared=False)
else:
ops.Load(
[], self._blob_names,
db=self._db_name(retrieve_from_epoch),
db_type=self._db_type,
absolute_path=True)
self._names_output = task.outputs()[0]
return task
def blob_list(self):
assert self._names_output
return self._names_output.fetch().tolist()
def _db_name(self, epoch):
return '%s.%06d' % (self._db, epoch)
def load(self, epoch):
"""
Build a Task that will be run by JobRunner when the job is to be
resumed from a given epoch. This task will run a Load op that will
load and deserialize all relevant blobs from a persistent storage.
"""
logger.info('Load from %s' % self._db_name(epoch))
with Task() as task:
ops.Load(
[],
self.blob_list(),
db=self._db_name(epoch),
db_type=self._db_type,
absolute_path=True)
return task
def load_blobs_from_checkpoint(self, blob_names, epoch):
"""
Builds a Task that loads only the necessary blobs from a checkpoint of
the given epoch. The necessary blobs are given in the blob_names
argument.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: The checkpoint epoch to load from.
Returns:
A Task which loads the specified blobs from the checkpoint of the
given epoch.
"""
logger.info('Load from %s' % self._db_name(epoch))
with Task() as task:
ops.Load(
[],
blob_names,
db=self._db_name(epoch),
db_type=self._db_type,
absolute_path=True,
allow_incomplete=True)
return task
def check_db_exists(self, epoch):
logger.info('Check existence of %s' % self._db_name(epoch))
with Task() as task:
existence = ops.Const(False)
ops.DBExists(
[],
[existence],
db_name=self._db_name(epoch),
db_type=self._db_type,
absolute_path=True)
task.add_output(existence)
return task
def save(self, epoch):
"""
Build a Task that is run once after `init_group` and after each
        epoch is run. This will execute a Save op to serialize and persist
        blobs present in the global workspace.
"""
logger.info('Save to %s' % self._db_name(epoch))
with Task() as task:
ops.Save(
self.blob_list(), [], db=self._db_name(epoch),
db_type=self._db_type, absolute_path=True)
return task
class MultiNodeCheckpointManager(object):
"""
    Coordinates checkpointing across multiple nodes.
Each of `init`, `load` and `save` will build TaskGroups which will
trigger checkpointing on each of the nodes involved in a distributed job.
"""
def __init__(
self, db_prefix, db_type, node_manager_class=CheckpointManager):
self._node_manager_class = node_manager_class
self._node_managers = None
self._db_prefix = db_prefix
self._db_type = db_type
def _task_group(self, func, *args, **kw):
assert self._node_managers is not None, 'init must be called first.'
with TaskGroup(WorkspaceType.GLOBAL) as task_group:
for node, manager in self._node_managers:
with Node(node):
func(manager, *args, **kw)
return task_group
def init(self, nodes, retrieve_from_epoch=None):
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
return
self._node_managers = []
for node in nodes:
with Node(node):
manager = self._node_manager_class(
db=os.path.join(self._db_prefix, node),
db_type=self._db_type)
self._node_managers.append((node, manager))
return self._task_group(
self._node_manager_class.init,
nodes=[node],
retrieve_from_epoch=retrieve_from_epoch)
def load(self, epoch):
return self._task_group(self._node_manager_class.load, epoch)
def load_blobs_locally(self, nodes, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints to the current node.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the Load ops.
"""
if self._node_managers is not None:
assert [node for node, _ in self._node_managers] == nodes
else:
self._node_managers = []
for node in nodes:
with Node(node):
manager = self._node_manager_class(
db=os.path.join(self._db_prefix, node),
db_type=self._db_type)
self._node_managers.append((node, manager))
assert self._node_managers is not None, 'must initialize node managers'
for _, manager in self._node_managers:
existence_task = manager.check_db_exists(epoch)
session.run(existence_task)
existence = existence_task.outputs()[0].fetch()
if not existence:
logger.info('DB %s does not exist!' % manager._db_name(epoch))
return False
load_task = manager.load_blobs_from_checkpoint(blob_names, epoch)
session.run(load_task)
logger.info('Successfully loaded from checkpoints.')
return True
def save(self, epoch):
return self._task_group(self._node_manager_class.save, epoch)
class JobRunner(object):
"""
Implement the runtime logic for jobs with checkpointing at the level of
    epochs. Can be used to run either single-host or distributed jobs. Job
runner is a callable to be called once from the client, passing a Session
as argument. This call will block until the Job execution is complete.
If a checkpoint_manager is passed, checkpoints will be taken after
initialization and after each epoch execution. If, in addition,
`resume_from_epoch` is an epoch number, the corresponding checkpoint will
be loaded and job execution will continue from the given epoch. In
this case, the job's init_group will not be run.
Refer to checkpoint_test.py for an example.
"""
def __init__(self, job, checkpoint_manager=None, resume_from_epoch=None):
self.resume_from_epoch = resume_from_epoch
self.checkpoint = checkpoint_manager
self.job = job
def __call__(self, client):
from_scratch = self.resume_from_epoch is None
if from_scratch:
client.run(self.job.init_group)
if self.checkpoint:
logger.info('Preparing checkpoint ...')
client.run(self.checkpoint.init(
self.job.nodes_to_checkpoint(),
retrieve_from_epoch=self.resume_from_epoch))
if from_scratch:
logger.info('Saving first checkpoint ...')
client.run(self.checkpoint.save(0))
logger.info('First checkpoint saved.')
else:
logger.info('Loading checkpoint for epoch {} ...'.format(
self.resume_from_epoch))
client.run(self.checkpoint.load(self.resume_from_epoch))
logger.info('Checkpoint loaded.')
epoch = 1 if from_scratch else self.resume_from_epoch + 1
while True:
logger.info('Starting epoch %d.' % epoch)
client.run(self.job.epoch_group)
logger.info('Ran epoch %d.' % epoch)
stop_signals = [o.fetch() for o in self.job.stop_signals]
if self.checkpoint:
logger.info('Saving checkpoint ...')
client.run(self.checkpoint.save(epoch))
logger.info('Checkpoint saved.')
if any(stop_signals):
logger.info('Stopping.')
break
epoch += 1
client.run(self.job.exit_group)
return epoch
def load_blobs_from_checkpoints(self, blob_names, epoch, session):
"""Loads the necessary blobs from the checkpoints.
Checkpoints store the snapshots of the workspace in each node.
Sometimes we only need to load a subset of the blobs from the
checkpoints. One common scenario is to load only the model blobs from
the checkpoints for evaluation purpose. Given the names of the necessary
blobs, this function goes over all the checkpoints of all the nodes, but
only loads the blobs specified in the blob_names to the current
workspace.
Args:
blob_names: A list of strings. Each string is the name of a
blob.
epoch: An integer. The checkpoint epoch to load from.
session: A Session object to execute the load ops.
Raises:
ValueError: When the checkpoint manager is invalid.
"""
if not self.checkpoint:
raise ValueError('Checkpoint manager is None')
logger.info('Loading checkpoint for epoch {} ...'.format(epoch))
return self.checkpoint.load_blobs_locally(self.job.nodes_to_checkpoint(),
blob_names, epoch, session)
def epoch_limiter(num_epochs):
"""
Creates a task that will output True when a given
number of epochs has finished.
"""
with Job.current().init_group:
init_net = core.Net('epoch_counter_init')
counter = init_net.CreateCounter([], init_count=num_epochs - 1)
Task(step=init_net)
epoch_net = core.Net('epoch_countdown')
finished = epoch_net.CountDown(counter)
output = Task(step=epoch_net, outputs=finished).outputs()[0]
Job.current().add_stop_signal(output)
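def _example_job_runner_usage(session, job):
    """Illustrative sketch only: wire a Job to checkpointing and run it.
    Both `session` (any Session implementation) and `job` (a fully built Job)
    are assumptions of this example, and the db path below is arbitrary.
    """
    checkpoint = CheckpointManager(db='/tmp/model_ckpt', db_type='minidb')
    runner = JobRunner(job, checkpoint_manager=checkpoint)
    # Blocks until one of the job's stop signals fires; returns the last epoch.
    return runner(session)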
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import workspace, cnn, memonger, core
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
from hypothesis import given
def has_blob(proto, needle):
for op in proto.op:
for inp in op.input:
if inp == needle:
return True
for outp in op.output:
if outp == needle:
return True
return False
def count_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len(blobs)
class MemongerTest(hu.HypothesisTestCase):
@given(input_dim=st.integers(min_value=1, max_value=10),
output_dim=st.integers(min_value=1, max_value=10),
batch_size=st.integers(min_value=1, max_value=10),
do=st.sampled_from(hu.device_options),
algo=st.sampled_from(memonger.AssignmentAlgorithm))
def test_simple_memonger(self, input_dim, output_dim, batch_size, do, algo):
m = cnn.CNNModelHelper()
fc1 = m.FC("data", "fc1", dim_in=input_dim, dim_out=output_dim)
fc2 = m.FC(fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
fc3 = m.FC(fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
fc3.Relu([], fc3)\
.Softmax([], "pred") \
.LabelCrossEntropy(["label"], ["xent"]) \
.AveragedLoss([], "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.net.Proto().device_option.CopyFrom(do)
m.param_init_net.Proto().device_option.CopyFrom(do)
static_blobs = \
[o for op in m.param_init_net.Proto().op for o in op.output] + \
["data", "label", "loss", input_to_grad["fc1_w"]]
optimization = memonger.optimize_interference(
m.Proto(), static_blobs, algo=algo)
data = np.random.randn(batch_size, input_dim).astype(np.float32)
label = np.random.randint(
low=0, high=output_dim, size=(batch_size,)).astype(np.int32)
workspace.RunNetOnce(m.param_init_net)
workspace.FeedBlob("data", data, device_option=do)
workspace.FeedBlob("label", label, device_option=do)
workspace.RunNetOnce(m.net)
loss = workspace.FetchBlob("loss")
grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
workspace.RunNetOnce(optimization.net)
optimized_loss = workspace.FetchBlob("loss")
optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
np.testing.assert_almost_equal(loss, optimized_loss)
np.testing.assert_almost_equal(grad, optimized_grad)
stats = memonger.compute_statistics(optimization.assignments)
self.assertLess(stats.optimized_nbytes, stats.baseline_nbytes)
# run with blob sizes
blob_sizes = memonger.collect_blob_sizes(m.Proto())
optimization1 = memonger.optimize_interference(
m.Proto(), static_blobs, blob_sizes=blob_sizes, algo=algo)
workspace.RunNetOnce(optimization1.net)
optimized_loss = workspace.FetchBlob("loss")
optimized_grad = workspace.FetchBlob(str(input_to_grad["fc1_w"]))
np.testing.assert_almost_equal(loss, optimized_loss)
np.testing.assert_almost_equal(grad, optimized_grad)
stats = memonger.compute_statistics(optimization1.assignments)
self.assertLessEqual(stats.optimized_nbytes, stats.baseline_nbytes)
@given(input_dim=st.integers(min_value=1, max_value=4),
output_dim=st.integers(min_value=1, max_value=4),
batch_size=st.integers(min_value=1, max_value=4))
def test_gradient_optim(self, input_dim, output_dim, batch_size):
m = cnn.CNNModelHelper()
with core.NameScope("name_x"):
fc1 = m.FC("data", "fc1", dim_in=input_dim, dim_out=output_dim)
fc2 = m.FC(fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
fc3 = m.FC(fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
fc4 = m.FC(fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
fc5 = m.FC(fc4, "fc5", dim_in=output_dim, dim_out=output_dim)
fc5.Relu([], fc5)\
.Softmax([], "pred") \
.LabelCrossEntropy(["label"], ["xent"]) \
.AveragedLoss([], "loss")
input_to_grad = m.AddGradientOperators(["name_x/loss"])
blobs_before = count_blobs(m.net.Proto())
optim_proto = memonger.share_grad_blobs(
m.net,
["name_x/loss"],
set(m.param_to_grad.values()),
"name_x/",
share_activations=False,
)
blobs_after = count_blobs(optim_proto)
self.assertLess(blobs_after, blobs_before)
optim_proto_wacts = memonger.share_grad_blobs(
m.net,
["name_x/loss"],
set(m.param_to_grad.values()),
"name_x/",
share_activations=True,
)
blobs_wact_optim = count_blobs(optim_proto_wacts)
self.assertLessEqual(blobs_wact_optim, blobs_after)
# Check that the last activations are not shared
self.assertTrue(has_blob(optim_proto, "name_x/fc5"))
self.assertTrue(
has_blob(optim_proto_wacts, "name_x/fc5"),
"Dont remap final activation",
)
# Test networks produce exactly same gradients
data = np.random.randn(batch_size, input_dim).astype(np.float32)
label = np.random.randint(
low=0, high=output_dim, size=(batch_size,)).astype(np.int32)
workspace.RunNetOnce(m.param_init_net)
workspace.FeedBlob("name_x/data", data)
workspace.FeedBlob("name_x/label", label)
workspace.RunNetOnce(m.net)
loss = workspace.FetchBlob("name_x/loss")
grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
workspace.RunNetOnce(optim_proto)
optimized_loss = workspace.FetchBlob("name_x/loss")
optimized_grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
np.testing.assert_almost_equal(loss, optimized_loss)
np.testing.assert_almost_equal(grad, optimized_grad)
# Run with the forward optimization
workspace.RunNetOnce(optim_proto_wacts)
optimized_loss = workspace.FetchBlob("name_x/loss")
optimized_grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
np.testing.assert_almost_equal(loss, optimized_loss)
np.testing.assert_almost_equal(grad, optimized_grad)
@given(input_dim=st.integers(min_value=4, max_value=4),
output_dim=st.integers(min_value=4, max_value=4),
batch_size=st.integers(min_value=4, max_value=4))
def test_gradient_optim_tree(self, input_dim, output_dim, batch_size):
m = cnn.CNNModelHelper()
with core.NameScope("name_x"):
fc1 = m.FC("data", "fc1", dim_in=input_dim, dim_out=output_dim)
fc2 = m.FC(fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
fc3 = m.FC(fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
fc4 = m.FC(fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
fc5 = m.FC(fc4, "fc5", dim_in=output_dim, dim_out=output_dim)
fc5.Relu([], fc5) \
.Softmax([], "pred1") \
.LabelCrossEntropy(["label"], ["xent1"]) \
.AveragedLoss([], "loss1")
fc6 = m.FC(fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
fc6.Relu([], fc6) \
.Softmax([], "pred2") \
.LabelCrossEntropy(["label"], ["xent2"]) \
.AveragedLoss([], "loss2")
input_to_grad = m.AddGradientOperators(["name_x/loss1", "name_x/loss2"])
blobs_before = count_blobs(m.net.Proto())
optim_proto = memonger.share_grad_blobs(
m.net,
["name_x/loss1", "name_x/loss2"],
set(m.param_to_grad.values()),
"name_x", # "name_x//shared_gradinp_0_shared" if using "name_x/"
share_activations=True,
dont_share_blobs=set(['name_x/fc6', 'name_x/fc5']),
)
blobs_after = count_blobs(optim_proto)
self.assertLess(blobs_after, blobs_before)
self.assertTrue(has_blob(optim_proto, "name_x/fc6"))
# Test networks produce exactly same gradients
data = np.random.randn(batch_size, input_dim).astype(np.float32)
label = np.random.randint(
low=0, high=output_dim, size=(batch_size,)).astype(np.int32)
workspace.RunNetOnce(m.param_init_net)
workspace.FeedBlob("name_x/data", data)
workspace.FeedBlob("name_x/label", label)
workspace.RunNetOnce(m.net)
loss1 = workspace.FetchBlob("name_x/loss1")
loss2 = workspace.FetchBlob("name_x/loss2")
grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob("name_x/loss1")
optimized_loss2 = workspace.FetchBlob("name_x/loss2")
optimized_grad = workspace.FetchBlob(str(input_to_grad["name_x/fc1_w"]))
np.testing.assert_almost_equal(loss1, optimized_loss1)
np.testing.assert_almost_equal(loss2, optimized_loss2)
np.testing.assert_almost_equal(grad, optimized_grad)
@given(input_dim=st.integers(min_value=4, max_value=4),
output_dim=st.integers(min_value=4, max_value=4),
batch_size=st.integers(min_value=4, max_value=4))
def test_forward_optim_tree_daggy(self, input_dim, output_dim, batch_size):
m = cnn.CNNModelHelper()
m.Proto().type = "dag"
m.Proto().num_workers = 4
with core.NameScope("name_x"):
fc1 = m.FC("data", "fc1", dim_in=input_dim, dim_out=output_dim)
fc2 = m.FC(fc1, "fc2", dim_in=output_dim, dim_out=output_dim)
fc3 = m.FC(fc2, "fc3", dim_in=output_dim, dim_out=output_dim)
fc4 = m.FC(fc3, "fc4", dim_in=output_dim, dim_out=output_dim)
fc5 = m.FC(fc4, "fc5", dim_in=output_dim, dim_out=output_dim)
# Branch
fc3b = m.FC(fc2, "fc3b", dim_in=output_dim, dim_out=output_dim)
fc4b = m.FC(fc3b, "fc4b", dim_in=output_dim, dim_out=output_dim)
fc5b = m.FC(fc4b, "fc5b", dim_in=output_dim, dim_out=output_dim)
fc5sum = m.Sum([fc5, fc5b], "fc5sum")
fc5.Relu([], fc5sum) \
.Softmax([], "pred1") \
.LabelCrossEntropy(["label"], ["xent1"]) \
.AveragedLoss([], "loss1")
fc6 = m.FC(fc5, "fc6", dim_in=output_dim, dim_out=output_dim)
fc6.Relu([], fc6) \
.Softmax([], "pred2") \
.LabelCrossEntropy(["label"], ["xent2"]) \
.AveragedLoss([], "loss2")
blobs_before = count_blobs(m.net.Proto())
optim_proto = memonger.optimize_inference_for_dag(
m.net, ["name_x/data"], "name_x"
)
blobs_after = count_blobs(optim_proto)
self.assertLess(blobs_after, blobs_before)
# Test networks produce exactly same results
data = np.random.randn(batch_size, input_dim).astype(np.float32)
label = np.random.randint(
low=0, high=output_dim, size=(batch_size,)).astype(np.int32)
workspace.RunNetOnce(m.param_init_net)
workspace.FeedBlob("name_x/data", data)
workspace.FeedBlob("name_x/label", label)
workspace.RunNetOnce(m.net)
loss1 = workspace.FetchBlob("name_x/loss1")
loss2 = workspace.FetchBlob("name_x/loss2")
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob("name_x/loss1")
optimized_loss2 = workspace.FetchBlob("name_x/loss2")
np.testing.assert_almost_equal(loss1, optimized_loss1)
np.testing.assert_almost_equal(loss2, optimized_loss2)
def test_topological_sort_longest_path(self):
m = cnn.CNNModelHelper()
# 0
m.Copy("conv0_w_comp", "conv0_w")
# 1
conv0 = m.Conv("data", "conv0", 32, 32, 4)
# 2
m.Copy("conv2_w", "conv2_w")
# 3
m.Conv(conv0, "conv2", 16, 32, 4)
g = memonger.compute_interference_graph(m.net.Proto().op)
orders_org = memonger.topological_sort_traversal(g)
orders_gt_org = [2, 0, 1, 3]
self.assertEqual(orders_gt_org, orders_org)
orders = memonger.topological_sort_traversal_longest_path(g)
# longer path is in front of the shorter one
orders_gt = [0, 1, 2, 3]
self.assertEqual(orders_gt, orders)
def test_topological_sort_longest_path_multi_target(self):
# two outputs: conv2 and data4
m = cnn.CNNModelHelper()
# 0
m.Copy("conv0_w_comp", "conv0_w")
# 1
conv0 = m.Conv("data", "conv0", 32, 32, 4)
# 2
m.Copy("conv2_w", "conv2_w")
# 3
m.Conv(conv0, "conv2", 16, 32, 4)
# 4
m.Copy("data1", "data2")
# 5
m.Copy("data2", "data3")
g = memonger.compute_interference_graph(m.net.Proto().op)
orders_org = memonger.topological_sort_traversal(g)
orders_gt_org = [4, 5, 2, 0, 1, 3]
self.assertEqual(orders_gt_org, orders_org)
orders = memonger.topological_sort_traversal_longest_path(g)
# longer path is in front of the shorter one
orders_gt = [0, 1, 2, 3, 4, 5]
self.assertEqual(orders_gt, orders)
def test_topological_sort_longest_path_single_node(self):
# single node
m = cnn.CNNModelHelper()
# 0
m.Copy("conv0_w_comp", "conv0_w")
g = memonger.compute_interference_graph(m.net.Proto().op)
orders_org = memonger.topological_sort_traversal(g)
orders_gt_org = [0]
self.assertEqual(orders_gt_org, orders_org)
orders = memonger.topological_sort_traversal_longest_path(g)
# longer path is in front of the shorter one
orders_gt = [0]
self.assertEqual(orders_gt, orders)
def test_compute_assignments_greedy(self):
LiveRange = memonger.LiveRange
ranges_sorted = [
('b1', LiveRange(1, 3, 10)),
('b2', LiveRange(3, 4, 1)),
('b3', LiveRange(5, 6, 1)),
('b4', LiveRange(5, 7, 10)),
]
assignment_gt = [
[ranges_sorted[0], ranges_sorted[3]],
[ranges_sorted[1], ranges_sorted[2]],
]
best = memonger.compute_assignments_greedy(ranges_sorted, None)
self.assertEqual(memonger.get_memory_usage(best), 11)
self.assertEqual(best, assignment_gt)
def test_compute_assignments_dp(self):
LiveRange = memonger.LiveRange
ranges_sorted = [
('b1', LiveRange(1, 3, 10)),
('b2', LiveRange(3, 4, 1)),
('b3', LiveRange(5, 6, 1)),
('b4', LiveRange(5, 7, 10)),
]
best = memonger.compute_assignments_dp(ranges_sorted, None)
self.assertEqual(memonger.get_memory_usage(best), 11)
def test_compute_assignments_dp1(self):
LiveRange = memonger.LiveRange
ranges_sorted = [
('b1', LiveRange(1, 2, 10)),
('b2', LiveRange(4, 6, 1)),
('b3', LiveRange(5, 6, 10)),
]
best = memonger.compute_assignments_dp(ranges_sorted, [])
self.assertEqual(memonger.get_memory_usage(best), 11)
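# Standalone sketch of the greedy assignment API exercised above: two blobs
# with non-overlapping live ranges share a single allocation, so peak memory
# equals one blob's size. The names and sizes are arbitrary.
def _example_greedy_assignment():
    LiveRange = memonger.LiveRange
    ranges_sorted = [
        ('a', LiveRange(0, 1, 4)),
        ('b', LiveRange(2, 3, 4)),
    ]
    best = memonger.compute_assignments_greedy(ranges_sorted, None)
    assert memonger.get_memory_usage(best) == 4
    return best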
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import errno
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
import os
import shutil
import tempfile
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace
if workspace.has_gpu_support:
DEVICES = [caffe2_pb2.CPU, caffe2_pb2.CUDA]
max_gpuid = workspace.NumCudaDevices() - 1
else:
DEVICES = [caffe2_pb2.CPU]
max_gpuid = 0
# Utility class for other loading tests; don't add test functions here.
# Inherit from this class instead. If you add a test here,
# each derived class will inherit it as well and cause test duplication
class TestLoadSaveBase(test_util.TestCase):
def __init__(self, methodName, db_type='minidb'):
super(TestLoadSaveBase, self).__init__(methodName)
self._db_type = db_type
@given(src_device_type=st.sampled_from(DEVICES),
src_gpu_id=st.integers(min_value=0, max_value=max_gpuid),
dst_device_type=st.sampled_from(DEVICES),
dst_gpu_id=st.integers(min_value=0, max_value=max_gpuid))
def load_save(self, src_device_type, src_gpu_id,
dst_device_type, dst_gpu_id):
workspace.ResetWorkspace()
dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
np.int16, np.int32, np.int64, np.uint8, np.uint16]
arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
for T in dtypes]
src_device_option = core.DeviceOption(
src_device_type, src_gpu_id)
dst_device_option = core.DeviceOption(
dst_device_type, dst_gpu_id)
for i, arr in enumerate(arrays):
self.assertTrue(workspace.FeedBlob(str(i), arr, src_device_option))
self.assertTrue(workspace.HasBlob(str(i)))
try:
# Saves the blobs to a local db.
tmp_folder = tempfile.mkdtemp()
op = core.CreateOperator(
"Save",
[str(i) for i in range(len(arrays))], [],
absolute_path=1,
db=os.path.join(tmp_folder, "db"), db_type=self._db_type)
self.assertTrue(workspace.RunOperatorOnce(op))
# Reset the workspace so that anything we load is surely loaded
# from the serialized proto.
workspace.ResetWorkspace()
self.assertEqual(len(workspace.Blobs()), 0)
def _LoadTest(keep_device, device_type, gpu_id, blobs, loadAll):
"""A helper subfunction to test keep and not keep."""
op = core.CreateOperator(
"Load",
[], blobs,
absolute_path=1,
db=os.path.join(tmp_folder, "db"), db_type=self._db_type,
device_option=dst_device_option,
keep_device=keep_device,
load_all=loadAll)
self.assertTrue(workspace.RunOperatorOnce(op))
for i, arr in enumerate(arrays):
self.assertTrue(workspace.HasBlob(str(i)))
fetched = workspace.FetchBlob(str(i))
self.assertEqual(fetched.dtype, arr.dtype)
np.testing.assert_array_equal(
workspace.FetchBlob(str(i)), arr)
proto = caffe2_pb2.BlobProto()
proto.ParseFromString(workspace.SerializeBlob(str(i)))
self.assertTrue(proto.HasField('tensor'))
self.assertEqual(proto.tensor.device_detail.device_type,
device_type)
if device_type == caffe2_pb2.CUDA:
self.assertEqual(proto.tensor.device_detail.cuda_gpu_id,
gpu_id)
blobs = [str(i) for i in range(len(arrays))]
# Load using device option stored in the proto, i.e.
# src_device_option
_LoadTest(1, src_device_type, src_gpu_id, blobs, 0)
# Load again, but this time load into dst_device_option.
_LoadTest(0, dst_device_type, dst_gpu_id, blobs, 0)
# Load back to the src_device_option to see if both paths are able
# to reallocate memory.
_LoadTest(1, src_device_type, src_gpu_id, blobs, 0)
# Reset the workspace, and load directly into the dst_device_option.
workspace.ResetWorkspace()
_LoadTest(0, dst_device_type, dst_gpu_id, blobs, 0)
# Test load all which loads all blobs in the db into the workspace.
workspace.ResetWorkspace()
_LoadTest(1, src_device_type, src_gpu_id, [], 1)
# Load again making sure that overwrite functionality works.
_LoadTest(1, src_device_type, src_gpu_id, [], 1)
# Load again with different device.
_LoadTest(0, dst_device_type, dst_gpu_id, [], 1)
workspace.ResetWorkspace()
_LoadTest(0, dst_device_type, dst_gpu_id, [], 1)
finally:
# clean up temp folder.
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def saveFile(self, tmp_folder, db_type):
dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
np.int16, np.int32, np.int64, np.uint8, np.uint16]
arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
for T in dtypes]
for i, arr in enumerate(arrays):
self.assertTrue(workspace.FeedBlob(str(i), arr))
self.assertTrue(workspace.HasBlob(str(i)))
# Saves the blobs to a local db.
tmp_file = os.path.join(tmp_folder, "db")
op = core.CreateOperator(
"Save",
[str(i) for i in range(len(arrays))], [],
absolute_path=1,
db=tmp_file, db_type=db_type)
workspace.RunOperatorOnce(op)
return tmp_file, arrays
class TestLoadSave(TestLoadSaveBase):
def testLoadSave(self):
self.load_save()
def testRepeatedArgs(self):
dtypes = [np.float16, np.float32, np.float64, np.bool, np.int8,
np.int16, np.int32, np.int64, np.uint8, np.uint16]
arrays = [np.random.permutation(6).reshape(2, 3).astype(T)
for T in dtypes]
for i, arr in enumerate(arrays):
self.assertTrue(workspace.FeedBlob(str(i), arr))
self.assertTrue(workspace.HasBlob(str(i)))
# Saves the blobs to a local db.
tmp_folder = tempfile.mkdtemp()
op = core.CreateOperator(
"Save",
[str(i) for i in range(len(arrays))] * 2, [],
absolute_path=1,
db=os.path.join(tmp_folder, "db"), db_type=self._db_type)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def testLoadExcessblobs(self):
tmp_folder = tempfile.mkdtemp()
tmp_file, arrays = self.saveFile(tmp_folder, self._db_type)
op = core.CreateOperator(
"Load",
[], [str(i) for i in range(len(arrays))] * 2,
absolute_path=1,
db=tmp_file, db_type=self._db_type,
load_all=False)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def testTruncatedFile(self):
tmp_folder = tempfile.mkdtemp()
tmp_file, arrays = self.saveFile(tmp_folder, self._db_type)
with open(tmp_file, 'wb+') as fdest:
fdest.seek(20, os.SEEK_END)
fdest.truncate()
op = core.CreateOperator(
"Load",
[], [str(i) for i in range(len(arrays))],
absolute_path=1,
db=tmp_file, db_type=self._db_type,
load_all=False)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
op = core.CreateOperator(
"Load",
[], [],
absolute_path=1,
db=tmp_file, db_type=self._db_type,
load_all=True)
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def testBlobNameOverrides(self):
original_names = ['blob_a', 'blob_b', 'blob_c']
new_names = ['x', 'y', 'z']
blobs = [np.random.permutation(6) for i in range(3)]
for i, blob in enumerate(blobs):
self.assertTrue(workspace.FeedBlob(original_names[i], blob))
self.assertTrue(workspace.HasBlob(original_names[i]))
self.assertEqual(len(workspace.Blobs()), 3)
try:
# Saves the blobs to a local db.
tmp_folder = tempfile.mkdtemp()
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(
core.CreateOperator(
"Save", original_names, [],
absolute_path=1,
strip_prefix='.temp',
blob_name_overrides=new_names,
db=os.path.join(tmp_folder, "db"),
db_type=self._db_type
)
)
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"Save", original_names, [],
absolute_path=1,
blob_name_overrides=new_names,
db=os.path.join(tmp_folder, "db"),
db_type=self._db_type
)
)
)
self.assertTrue(workspace.ResetWorkspace())
self.assertEqual(len(workspace.Blobs()), 0)
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"Load", [], [],
absolute_path=1,
db=os.path.join(tmp_folder, "db"),
db_type=self._db_type,
load_all=1
)
)
)
self.assertEqual(len(workspace.Blobs()), 3)
for i, name in enumerate(new_names):
self.assertTrue(workspace.HasBlob(name))
self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())
# moved here per @cxj's suggestion
load_new_names = ['blob_x', 'blob_y', 'blob_z']
# load 'x' into 'blob_x'
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"Load", [], load_new_names[0:1],
absolute_path=1,
db=os.path.join(tmp_folder, "db"),
db_type=self._db_type,
source_blob_names=new_names[0:1]
)
)
)
# we should have 'blob_a/b/c/' and 'blob_x' now
self.assertEqual(len(workspace.Blobs()), 4)
for i, name in enumerate(load_new_names[0:1]):
self.assertTrue(workspace.HasBlob(name))
self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())
self.assertTrue(
workspace.RunOperatorOnce(
core.CreateOperator(
"Load", [], load_new_names[0:3],
absolute_path=1,
db=os.path.join(tmp_folder, "db"),
db_type=self._db_type,
source_blob_names=new_names[0:3]
)
)
)
# we should have 'blob_a/b/c/' and 'blob_x/y/z' now
self.assertEqual(len(workspace.Blobs()), 6)
for i, name in enumerate(load_new_names[0:3]):
self.assertTrue(workspace.HasBlob(name))
self.assertTrue((workspace.FetchBlob(name) == blobs[i]).all())
finally:
# clean up temp folder.
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def testMissingFile(self):
tmp_folder = tempfile.mkdtemp()
tmp_file = os.path.join(tmp_folder, "missing_db")
op = core.CreateOperator(
"Load",
[], [],
absolute_path=1,
db=tmp_file, db_type=self._db_type,
load_all=True)
with self.assertRaises(RuntimeError):
try:
workspace.RunOperatorOnce(op)
except RuntimeError as e:
print(e)
raise
try:
shutil.rmtree(tmp_folder)
except OSError as e:
if e.errno != errno.ENOENT:
raise
if __name__ == '__main__':
unittest.main()
|
## @package hypothesis_test_util
# Module caffe2.python.hypothesis_test_util
"""
The Hypothesis library uses *property-based testing* to check
invariants of the code under test across a variety of random inputs.
The key idea here is to express properties of the code under test
(e.g. that it passes a gradient check, that it implements a reference
function, etc), and then generate random instances and verify they
satisfy these properties.
The main functions of interest are exposed on `HypothesisTestCase`.
You can usually just add a short function in this class to generate an
arbitrary number of test cases for your operator.
The key functions are:
- `assertDeviceChecks(devices, op, inputs, outputs)`. This asserts that the
operator computes the same outputs, regardless of which device it is executed
on.
- `assertGradientChecks(device, op, inputs, output_,
outputs_with_grads)`. This implements a standard numerical gradient checker
for the operator in question.
- `assertReferenceChecks(device, op, inputs, reference)`. This runs the
reference function (effectively calling `reference(*inputs)`) and comparing
that to the output of the operator.
`hypothesis_test_util.py` exposes some useful pre-built samplers.
- `hu.gcs` - a gradient checker device (`gc`) and device checker devices (`dc`)
- `hu.gcs_cpu_only` - a CPU-only gradient checker device (`gc`) and
device checker devices (`dc`). Used when your operator is only
implemented on the CPU.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import (
workspace, device_checker, gradient_checker, test_util, core)
import contextlib
import copy
import functools
import hypothesis
import hypothesis.extra.numpy
import hypothesis.strategies as st
import logging
import numpy as np
import os
def is_sandcastle():
if os.getenv('SANDCASTLE') == '1':
return True
elif os.getenv('TW_JOB_USER') == 'sandcastle':
return True
return False
hypothesis.settings.register_profile(
"sandcastle",
hypothesis.settings(
derandomize=True,
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=100,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.register_profile(
"dev",
hypothesis.settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=10,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.register_profile(
"debug",
hypothesis.settings(
suppress_health_check=[hypothesis.HealthCheck.too_slow],
database=None,
max_examples=1000,
verbosity=hypothesis.Verbosity.verbose))
hypothesis.settings.load_profile(
'sandcastle' if is_sandcastle() else os.getenv('CAFFE2_HYPOTHESIS_PROFILE',
'dev')
)
def dims(min_value=1, max_value=5):
return st.integers(min_value=min_value, max_value=max_value)
def elements_of_type(dtype=np.float32, filter_=None):
elems = None
if dtype in (np.float16, np.float32, np.float64):
elems = st.floats(min_value=-1.0, max_value=1.0)
elif dtype is np.int32:
elems = st.integers(min_value=0, max_value=2 ** 31 - 1)
elif dtype is np.int64:
elems = st.integers(min_value=0, max_value=2 ** 63 - 1)
elif dtype is np.bool:
elems = st.booleans()
else:
raise ValueError("Unexpected dtype without elements provided")
return elems if filter_ is None else elems.filter(filter_)
def arrays(dims, dtype=np.float32, elements=None):
if elements is None:
elements = elements_of_type(dtype)
return hypothesis.extra.numpy.arrays(dtype, dims, elements=elements)
def tensor(min_dim=1, max_dim=4, dtype=np.float32, elements=None, **kwargs):
dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
return dims_.flatmap(lambda dims: arrays(dims, dtype, elements))
def segment_ids(size, is_sorted):
if size == 0:
return st.just(np.empty(shape=[0], dtype=np.int32))
if is_sorted:
return arrays(
[size],
dtype=np.int32,
elements=st.booleans()).map(
lambda x: np.cumsum(x, dtype=np.int32) - x[0])
else:
return arrays(
[size],
dtype=np.int32,
elements=st.integers(min_value=0, max_value=2 * size))
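# A worked example of the sorted branch above: for the boolean draw
# x = [1, 0, 1, 0], np.cumsum(x) - x[0] = [0, 0, 1, 1], a non-decreasing
# segment id per element that always starts at 0.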
def lengths(size, min_segments=None, max_segments=None, **kwargs):
# First generate the number of borders between segments,
# then create border values and add 0 and size.
# By sorting and computing diff we convert them to lengths,
# which may have 0 values
if min_segments is None:
min_segments = 0
if max_segments is None:
max_segments = size
assert min_segments >= 0
assert min_segments <= max_segments
if size == 0 and max_segments == 0:
return st.just(np.empty(shape=[0], dtype=np.int32))
assert max_segments > 0, "size is not 0, need at least one segment"
return st.integers(
min_value=max(min_segments - 1, 0), max_value=max_segments - 1
).flatmap(
lambda num_borders:
hypothesis.extra.numpy.arrays(
np.int32, num_borders, elements=st.integers(
min_value=0, max_value=size
)
)
).map(
lambda x: np.append(x, np.array([0, size], dtype=np.int32))
).map(sorted).map(np.diff)
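# A worked example of the construction above: with size = 5 and a border
# draw of [3, 1], appending [0, size] gives [3, 1, 0, 5]; sorting yields
# [0, 1, 3, 5] and np.diff produces lengths [1, 2, 2], which always sum to
# size and may contain zeros.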
def segmented_tensor(
min_dim=1,
max_dim=4,
dtype=np.float32,
is_sorted=True,
elements=None,
segment_generator=segment_ids,
allow_empty=False,
**kwargs
):
gen_empty = st.booleans() if allow_empty else st.just(False)
data_dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
data_dims_ = st.tuples(
gen_empty, data_dims_
).map(lambda pair: ([0] if pair[0] else []) + pair[1])
return data_dims_.flatmap(lambda data_dims: st.tuples(
arrays(data_dims, dtype, elements),
segment_generator(data_dims[0], is_sorted=is_sorted),
))
def lengths_tensor(min_segments=None, max_segments=None, *args, **kwargs):
gen = functools.partial(
lengths, min_segments=min_segments, max_segments=max_segments)
return segmented_tensor(*args, segment_generator=gen, **kwargs)
def sparse_segmented_tensor(min_dim=1, max_dim=4, dtype=np.float32,
is_sorted=True, elements=None, allow_empty=False,
segment_generator=segment_ids, **kwargs):
gen_empty = st.booleans() if allow_empty else st.just(False)
data_dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
all_dims_ = st.tuples(gen_empty, data_dims_).flatmap(
lambda pair: st.tuples(
st.just(pair[1]),
(st.integers(min_value=1, max_value=pair[1][0]) if not pair[0]
else st.just(0)),
))
return all_dims_.flatmap(lambda dims: st.tuples(
arrays(dims[0], dtype, elements),
arrays(dims[1], dtype=np.int64, elements=st.integers(
min_value=0, max_value=dims[0][0] - 1)),
segment_generator(dims[1], is_sorted=is_sorted),
))
def sparse_lengths_tensor(**kwargs):
return sparse_segmented_tensor(segment_generator=lengths, **kwargs)
def tensors(n, min_dim=1, max_dim=4, dtype=np.float32, elements=None, **kwargs):
dims_ = st.lists(dims(**kwargs), min_size=min_dim, max_size=max_dim)
return dims_.flatmap(
lambda dims: st.lists(arrays(dims, dtype, elements),
min_size=n, max_size=n))
cpu_do = caffe2_pb2.DeviceOption()
gpu_do = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA)
device_options = [cpu_do] + ([gpu_do] if workspace.has_gpu_support else [])
# Include device option for each GPU
expanded_device_options = [cpu_do] + (
[caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA, cuda_gpu_id=i)
for i in range(workspace.NumCudaDevices())]
if workspace.has_gpu_support else [])
def device_checker_device_options():
return st.just(device_options)
def gradient_checker_device_option():
return st.sampled_from(device_options)
gcs = dict(
gc=gradient_checker_device_option(),
dc=device_checker_device_options()
)
gcs_cpu_only = dict(gc=st.sampled_from([cpu_do]), dc=st.just([cpu_do]))
gcs_gpu_only = dict(gc=st.sampled_from([gpu_do]), dc=st.just([gpu_do]))
@contextlib.contextmanager
def temp_workspace(name=b"temp_ws"):
old_ws_name = workspace.CurrentWorkspace()
workspace.SwitchWorkspace(name, True)
yield
workspace.ResetWorkspace()
workspace.SwitchWorkspace(old_ws_name)
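# Usage sketch (blob name and value are illustrative): feed and run inside a
# scratch workspace that is reset and switched away from on exit.
#
# with temp_workspace():
#     workspace.FeedBlob('scratch_x', np.ones(2, dtype=np.float32))
#     # ... run ops against 'scratch_x' ...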
def runOpBenchmark(
device_option,
op,
inputs,
input_device_options=None,
iterations=10,
):
if input_device_options is None:
input_device_options = {}
op = copy.deepcopy(op)
op.device_option.CopyFrom(device_option)
net = caffe2_pb2.NetDef()
net.op.extend([op])
net.name = op.name if op.name else "test"
with temp_workspace():
for (n, b) in zip(op.input, inputs):
workspace.FeedBlob(
n,
b,
device_option=input_device_options.get(n, device_option)
)
workspace.CreateNet(net)
ret = workspace.BenchmarkNet(net.name, 1, iterations, True)
return ret
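# Usage sketch (the operator and input are illustrative): benchmark a single
# op for 10 iterations on the CPU; BenchmarkNet returns the measured timings.
#
# op = core.CreateOperator("Relu", ["X"], ["Y"])
# times = runOpBenchmark(
#     cpu_do, op, [np.random.randn(64, 64).astype(np.float32)])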
class HypothesisTestCase(test_util.TestCase):
"""
A unittest.TestCase subclass with some helper functions for
utilizing the `hypothesis` (hypothesis.readthedocs.io) library.
"""
def assertDeviceChecks(
self,
device_options,
op,
inputs,
outputs_to_check,
input_device_options=None,
threshold=0.01
):
"""
Asserts that the operator computes the same outputs, regardless of
which device it is executed on.
Useful for checking the consistency of GPU and CPU
implementations of operators.
Usage example:
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
"""
dc = device_checker.DeviceChecker(
threshold,
device_options=device_options
)
self.assertTrue(
dc.CheckSimple(op, inputs, outputs_to_check, input_device_options)
)
def assertGradientChecks(
self,
device_option,
op,
inputs,
outputs_to_check,
outputs_with_grads,
grad_ops=None,
threshold=0.005,
stepsize=0.05,
input_device_options=None,
):
"""
Implements a standard numerical gradient checker for the operator
in question.
Useful for checking the consistency of the forward and
backward implementations of operators.
Usage example:
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
"""
gc = gradient_checker.GradientChecker(
stepsize=stepsize,
threshold=threshold,
device_option=device_option,
workspace_name=str(device_option),
)
res, grad, grad_estimated = gc.CheckSimple(
op, inputs, outputs_to_check, outputs_with_grads,
grad_ops=grad_ops,
input_device_options=input_device_options
)
self.assertEqual(grad.shape, grad_estimated.shape)
self.assertTrue(
res,
"Gradient check failed for input " + str(op.input[outputs_to_check])
)
def _assertGradReferenceChecks(
self,
op,
inputs,
ref_outputs,
output_to_grad,
grad_reference,
threshold=1e-4,
):
grad_blob_name = output_to_grad + '_grad'
grad_ops, grad_map = core.GradientRegistry.GetBackwardPass(
[op], {output_to_grad: grad_blob_name})
output_grad = workspace.FetchBlob(output_to_grad)
grad_ref_outputs = grad_reference(output_grad, ref_outputs, inputs)
workspace.FeedBlob(grad_blob_name, workspace.FetchBlob(output_to_grad))
workspace.RunOperatorsOnce(grad_ops)
self.assertEqual(len(grad_ref_outputs), len(inputs))
for (n, ref) in zip(op.input, grad_ref_outputs):
grad_names = grad_map.get(n)
if not grad_names:
# no grad for this input
self.assertIsNone(ref)
else:
if isinstance(grad_names, core.BlobReference):
# dense gradient
ref_vals = ref
ref_indices = None
val_name = grad_names
else:
# sparse gradient
ref_vals, ref_indices = ref
val_name = grad_names.values
vals = workspace.FetchBlob(str(val_name))
np.testing.assert_allclose(
vals,
ref_vals,
atol=threshold,
rtol=threshold,
err_msg='Gradient {0} is not matching the reference'.format(
val_name,
),
)
if ref_indices is not None:
indices = workspace.FetchBlob(str(grad_names.indices))
np.testing.assert_allclose(indices, ref_indices,
atol=1e-4, rtol=1e-4)
def _assertInferTensorChecks(self, name, shapes, types, output):
if name not in shapes:
# No inferred shape or type available
return
output = workspace.FetchBlob(name)
if type(output) is np.ndarray:
if output.dtype == np.dtype('float64'):
correct_type = caffe2_pb2.TensorProto.DOUBLE
elif output.dtype == np.dtype('float32'):
correct_type = caffe2_pb2.TensorProto.FLOAT
elif output.dtype == np.dtype('int32'):
correct_type = caffe2_pb2.TensorProto.INT32
elif output.dtype == np.dtype('int64'):
correct_type = caffe2_pb2.TensorProto.INT64
else:
correct_type = "unknown {}".format(np.dtype)
else:
correct_type = str(type(output))
try:
np.testing.assert_array_equal(
np.array(shapes[name]).astype(np.int32),
np.array(output.shape).astype(np.int32),
err_msg='Shape {} mismatch: {} vs. {}'.format(
name,
shapes[name],
output.shape))
# BUG: Workspace blob type not being set correctly T16121392
if correct_type != caffe2_pb2.TensorProto.INT32:
return
np.testing.assert_equal(
types[name],
correct_type,
err_msg='Type {} mismatch: {} vs. {}'.format(
name, types[name], correct_type,
)
)
except AssertionError as e:
# Temporarily catch these assertion errors when validating
# inferred shape and type info
logging.warning(str(e))
if os.getenv('CAFFE2_ASSERT_SHAPEINFERENCE') == '1':
raise e
def assertReferenceChecks(
self,
device_option,
op,
inputs,
reference,
input_device_options=None,
threshold=1e-4,
output_to_grad=None,
grad_reference=None,
atol=None,
outputs_to_check=None,
):
"""
This runs the reference Python function implementation
(effectively calling `reference(*inputs)`) and compares that
to the output of the operator, with an absolute/relative tolerance
given by the `threshold` parameter.
Useful for checking the implementation matches the Python
(typically NumPy) implementation of the same functionality.
Usage example:
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator(
"Softsign", ["X"], ["X" if inplace else "Y"])
def softsign(X):
return (X / (1 + np.abs(X)),)
self.assertReferenceChecks(gc, op, [X], softsign)
"""
if input_device_options is None:
input_device_options = {}
op = copy.deepcopy(op)
op.device_option.CopyFrom(device_option)
with temp_workspace():
for (n, b) in zip(op.input, inputs):
workspace.FeedBlob(
n,
b,
device_option=input_device_options.get(n, device_option)
)
print("Input", n, input_device_options.get(n, device_option))
net = core.Net("opnet")
net.Proto().op.extend([op])
test_shape_inference = False
try:
(shapes, types) = workspace.InferShapesAndTypes([net])
test_shape_inference = True
except RuntimeError as e:
# Temporarily catch runtime errors when inferring shape
# and type info
logging.warning(str(e))
if os.getenv('CAFFE2_ASSERT_SHAPEINFERENCE') == '1':
raise e
workspace.RunNetOnce(net)
reference_outputs = reference(*inputs)
if not (isinstance(reference_outputs, tuple) or
isinstance(reference_outputs, list)):
raise RuntimeError(
"You are providing a wrong reference implementation. A "
"proper one should return a tuple/list of numpy arrays.")
if not outputs_to_check:
self.assertEqual(len(reference_outputs), len(op.output))
outputs_to_check = range(len(op.output))
outs = []
for (output_index, ref) in zip(outputs_to_check, reference_outputs):
output_blob_name = op.output[output_index]
output = workspace.FetchBlob(output_blob_name)
if output.dtype.kind in ('S', 'O'):
np.testing.assert_array_equal(output, ref)
else:
if atol is None:
atol = threshold
np.testing.assert_allclose(
output, ref, atol=atol, rtol=threshold,
err_msg=(
'Output {0} is not matching the reference'.format(
output_blob_name,
)),
)
if test_shape_inference:
self._assertInferTensorChecks(
output_blob_name, shapes, types, output)
outs.append(output)
if grad_reference and output_to_grad:
with core.DeviceScope(device_option):
self._assertGradReferenceChecks(
op, inputs, reference_outputs,
output_to_grad, grad_reference)
return outs
def assertValidationChecks(
self,
device_option,
op,
inputs,
validator,
input_device_options=None,
as_kwargs=True
):
if input_device_options is None:
input_device_options = {}
if as_kwargs:
assert len(set(list(op.input) + list(op.output))) == \
len(op.input) + len(op.output), \
"in-place ops are not supported in as_kwargs mode"
op = copy.deepcopy(op)
op.device_option.CopyFrom(device_option)
with temp_workspace():
for (n, b) in zip(op.input, inputs):
workspace.FeedBlob(
n,
b,
device_option=input_device_options.get(n, device_option)
)
workspace.RunOperatorOnce(op)
outputs = [workspace.FetchBlob(n) for n in op.output]
if as_kwargs:
validator(**dict(zip(
list(op.input) + list(op.output), inputs + outputs)))
else:
validator(inputs=inputs, outputs=outputs)
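# A minimal end-to-end sketch of how these helpers compose (kept as a
# comment so this utility module stays import-safe; the test class and
# strategies below are illustrative):
#
# from hypothesis import given
# from caffe2.python import core
# import caffe2.python.hypothesis_test_util as hu
#
# class TestSum(hu.HypothesisTestCase):
#     @given(inputs=hu.tensors(n=2), **hu.gcs)
#     def test_sum(self, inputs, gc, dc):
#         X1, X2 = inputs
#         op = core.CreateOperator("Sum", ["X1", "X2"], ["Y"])
#         self.assertReferenceChecks(gc, op, [X1, X2],
#                                    lambda x1, x2: (x1 + x2,))
#         self.assertDeviceChecks(dc, op, [X1, X2], [0])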
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
class TestSparseToDenseMask(TestCase):
def test_sparse_to_dense_mask_float(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=[999999999, 2, 6])
workspace.FeedBlob(
'indices',
np.array([2, 4, 6, 1, 2, 999999999, 2], dtype=np.int32))
workspace.FeedBlob(
'values',
np.array([1, 2, 3, 4, 5, 6, 7], dtype=np.float))
workspace.FeedBlob('default', np.array(-1, dtype=np.float))
workspace.FeedBlob('lengths', np.array([3, 4], dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
expected = np.array([[-1, 1, 3], [6, 7, -1]], dtype=np.float)
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_mask_subtensor(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=[999999999, 2, 888, 6])
workspace.FeedBlob(
'indices',
np.array([2, 4, 6, 999999999, 2], dtype=np.int64))
workspace.FeedBlob(
'values',
np.array([[[1, -1]], [[2, -2]], [[3, -3]], [[4, -4]], [[5, -5]]],
dtype=np.float))
workspace.FeedBlob('default', np.array([[-1, 0]], dtype=np.float))
workspace.FeedBlob('lengths', np.array([2, 3], dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
expected = np.array([
[[[-1, 0]], [[1, -1]], [[-1, 0]], [[-1, 0]]],
[[[4, -4]], [[5, -5]], [[-1, 0]], [[3, -3]]]], dtype=np.float)
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_mask_string(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output'],
mask=[999999999, 2, 6])
workspace.FeedBlob(
'indices',
np.array([2, 4, 6, 1, 2, 999999999, 2], dtype=np.int32))
workspace.FeedBlob(
'values',
np.array(['1', '2', '3', '4', '5', '6', '7'], dtype=np.str))
workspace.FeedBlob('default', np.array('-1', dtype=np.str))
workspace.FeedBlob('lengths', np.array([3, 4], dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
expected = np.array([['-1', '1', '3'], ['6', '7', '-1']], dtype=np.str)
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_mask_empty_lengths(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default'],
['output'],
mask=[1, 2, 6])
workspace.FeedBlob('indices', np.array([2, 4, 6], dtype=np.int32))
workspace.FeedBlob('values', np.array([1, 2, 3], dtype=np.float))
workspace.FeedBlob('default', np.array(-1, dtype=np.float))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
expected = np.array([-1, 1, 3], dtype=np.float)
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_mask_no_lengths(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default'],
['output'],
mask=[1, 2, 6])
workspace.FeedBlob('indices', np.array([2, 4, 6], dtype=np.int32))
workspace.FeedBlob('values', np.array([1, 2, 3], dtype=np.float))
workspace.FeedBlob('default', np.array(-1, dtype=np.float))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
expected = np.array([-1, 1, 3], dtype=np.float)
self.assertEqual(output.shape, expected.shape)
np.testing.assert_array_equal(output, expected)
def test_sparse_to_dense_mask_presence_mask(self):
op = core.CreateOperator(
'SparseToDenseMask',
['indices', 'values', 'default', 'lengths'],
['output', 'presence_mask'],
mask=[11, 12],
return_presence_mask=True)
workspace.FeedBlob('indices', np.array([11, 12, 13], dtype=np.int32))
workspace.FeedBlob('values', np.array([11, 12, 13], dtype=np.float))
workspace.FeedBlob('default', np.array(-1, dtype=np.float))
workspace.FeedBlob('lengths', np.array([1, 2], dtype=np.int32))
workspace.RunOperatorOnce(op)
output = workspace.FetchBlob('output')
presence_mask = workspace.FetchBlob('presence_mask')
expected_output = np.array([[11, -1], [-1, 12]], dtype=np.float)
expected_presence_mask = np.array(
[[True, False], [False, True]],
dtype=np.bool)
self.assertEqual(output.shape, expected_output.shape)
np.testing.assert_array_equal(output, expected_output)
self.assertEqual(presence_mask.shape, expected_presence_mask.shape)
np.testing.assert_array_equal(presence_mask, expected_presence_mask)
|
## @package crf
# Module caffe2.python.crf
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, recurrent
from caffe2.python.cnn import CNNModelHelper
import numpy as np
'''
Due to a limitation in RecurrentNetworkOp, this layer only supports batch_size=1.
In order to support batch_size > 1, we will have to implement the CRFUnit
and its gradient in C++ and handle the different batches there.
'''
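# In short, crf_loss below computes the negative log-likelihood of the
# labeled path: log(sum over all paths of exp(score(path))) minus the score
# of the labeled path, where a path score decomposes into unary scores
# gathered from the logits and binary scores gathered from the transitions.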
class CRFWithLoss(object):
def __init__(self, model, num_classes, transitions_blob=None):
self.model = model
self.num_classes = num_classes
self.num_classes_padded = num_classes + 2 # After adding BOS and EOS
if not transitions_blob:
transitions_blob = self.model.param_init_net.UniformFill(
[],
[core.ScopedBlobReference('crf_transitions')],
shape=[self.num_classes_padded, self.num_classes_padded],
min=-1.0,
max=1.0
)
self.transitions = transitions_blob
self.model.params.append(self.transitions)
def crf_loss(self, predictions, labels, seq_lengths=None):
# Since the transitions matrix is a shared parameter, we need to
# take a snapshot of it at the beginning, since it can be updated
# in between the operators that use it when doing parallel updates
transitions_snapshot = self.model.net.Copy(
self.transitions, core.ScopedBlobReference('transitions_snapshot')
)
# Compute best path unary score from the logits
path_unary_score = self._gather_entries_sum(
predictions, labels, self.num_classes
)
# Append BOS and EOS entries to the predictions and labels
predictions = self._pad_predictions(predictions)
labels = self._pad_labels(labels)
# Compute best path binary scores from the transitions matrix
path_binary_score = self._path_binary_scores(
labels, transitions_snapshot, seq_lengths
)
path_total_score = self.model.net.Add(
[path_binary_score, path_unary_score],
core.ScopedBlobReference('path_total')
)
# Compute all paths score
zero_index = self.model.param_init_net.ConstantFill(
[], shape=[1], value=0
)
initial_state = self.model.net.Gather(
[predictions, zero_index],
core.ScopedBlobReference('rnn_initial'),
dense_gradient=True
)
input_data, _ = self.model.net.RemovePadding(
[predictions],
padding_width=1,
end_padding_width=0,
outputs=2,
)
input_data = self.model.net.ExpandDims(
[input_data],
core.ScopedBlobReference('rnn_input_data'),
dims=[1]
)
# Due to a bug in RecurrentNetworkGradientOp, we need to copy the
# transitions blob before sending it to the recurrent network
transitions_copy = self.model.net.Copy(
transitions_snapshot, core.ScopedBlobReference('transitions_copy')
)
all_paths_scores = self._crf_forward(
input_data, initial_state, transitions_copy
)
loss = self.model.net.Sub(
[all_paths_scores, path_total_score],
core.ScopedBlobReference('crf_loss')
)
return loss
def _pad_predictions(self, predictions):
# This function introduces two labels, for beginning of sequence
# and end of sequence, and makes the necessary updates to
# the predictions blob
low_score = -1000.0  # An arbitrary very low number
b_scores = np.array(
[[low_score] * self.num_classes + [0, low_score]]
).astype(np.float32)
e_scores = np.array(
[[low_score] * self.num_classes + [low_score, 0]]
).astype(np.float32)
b_scores = self.model.param_init_net.GivenTensorFill(
[], "b_scores", shape=[1, self.num_classes_padded], values=b_scores
)
e_scores = self.model.param_init_net.GivenTensorFill(
[], "e_scores", shape=[1, self.num_classes_padded], values=e_scores
)
zero_index = self.model.net.ConstantFill(
[], shape=[1, ], value=0
)
length = self.model.net.Gather(
[self.model.net.Shape([predictions]), zero_index],
)
length = self.model.net.Cast(length, to='int32')
t_range = self.model.net.LengthsRangeFill(length)
padding = self.model.net.ConstantFill([t_range], value=low_score)
padding = self.model.net.ExpandDims(padding, dims=[1])
padded_predictions, _ = self.model.net.Concat(
[predictions, padding, padding],
outputs=2,
axis=1
)
padded_predictions_concat, _ = self.model.net.Concat(
[b_scores, padded_predictions, e_scores],
outputs=2,
axis=0
)
return padded_predictions_concat
def _pad_labels(self, labels):
bos_i = self.num_classes
eos_i = self.num_classes + 1
bos_i_b = self.model.param_init_net.ConstantFill(
[], shape=[1], value=bos_i
)
eos_i_b = self.model.param_init_net.ConstantFill(
[], shape=[1], value=eos_i
)
labels = self.model.net.Cast([labels], to='int64')
padded_labels, _ = self.model.net.Concat(
[bos_i_b, labels, eos_i_b],
axis=0,
outputs=2
)
return padded_labels
def _path_binary_scores(self, labels, transitions, seq_lengths=None):
column_ids, _ = self.model.net.RemovePadding(
[labels],
outputs=2,
padding_width=1,
end_padding_width=0
)
row_ids, _ = self.model.net.RemovePadding(
[labels],
outputs=2,
padding_width=0,
end_padding_width=1
)
# Since there is no multi-dimensional gather, I flatten the matrix to
# a 1-d vector and transform the ids to (row_ids * num_columns +
# column_ids) and do gather in 1-d
num_columns_blob = self.model.net.ConstantFill(
[row_ids],
value=self.num_classes_padded,
)
flattened_ids = self.model.net.Mul([row_ids, num_columns_blob])
flattened_ids = self.model.net.Add([flattened_ids, column_ids])
flattened_transitions = self.model.net.FlattenToVec([transitions])
entries = self.model.net.Gather(
[flattened_transitions, flattened_ids],
dense_gradient=True
)
return self.model.ReduceFrontSum(entries)
def _gather_entries_sum(self, in_data, indices, index_size):
indices = self.model.net.Cast([indices], to='int64')
index_size_blob = self.model.param_init_net.ConstantFill(
[],
shape=[1],
value=index_size,
)
query_one_hot = self.model.net.OneHot(
[indices, index_size_blob]
)
flattend_query = self.model.net.FlattenToVec(query_one_hot)
flattend_data = self.model.net.FlattenToVec(in_data)
query_scores = self.model.net.DotProduct(
[flattend_query, flattend_data]
)
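# A worked example of this one-hot trick: with in_data = [[1, 2], [3, 4]]
# and indices = [1, 0], the one-hot queries are [[0, 1], [1, 0]]; flattening
# both and taking the dot product gives 0*1 + 1*2 + 1*3 + 0*4 = 5, i.e. the
# sum of the gathered entries.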
final_sum = self.model.net.ReduceFrontSum([query_scores])
return final_sum
def _crf_forward(
self,
input_blob,
initial_state,
transitions_copy,
seq_lengths=None
):
# Build the RNN net and get the last timestep output
out_last = self.build_crf_net(
input_blob, initial_state, transitions_copy
)
out_last, _ = self.model.net.Reshape(
[out_last],
outputs=2,
shape=(self.num_classes_padded,)
)
zero_segment_id = self.model.param_init_net.ConstantFill(
[],
value=0,
shape=[self.num_classes_padded],
dtype=core.DataType.INT32,
)
# Compute the accumulated total score of all the paths
accum_score = self.model.net.SortedSegmentRangeLogSumExp(
[out_last, zero_segment_id]
)
accum_score, _ = self.model.net.Reshape(
accum_score,
outputs=2,
shape=()
)
return accum_score
def build_crf_net(self, input_blob, initial_state, transitions):
'''
Adds the crf_net recurrent operator to the model.
model: CNNModelHelper object to which new operators are added
input_blob: the input sequence in a format T x N x D
where T is the sequence size, N the batch size and D the input dimension
##Only supports batch-size 1##
seq_lengths: blob containing sequence lengths (unused)
'''
scope = 'crf_net'
def s(name):
''
# We have to manually scope due to our internal/external blob
# relationships.
return "{}/{}".format(str(scope), str(name))
step_model = CNNModelHelper(name='crf_step', param_model=self.model)
input_t, cell_t_prev, _ = (
step_model.net.AddExternalInputs(
'input_t', 'cell_t_prev', transitions
)
)
zero_segment_id = step_model.param_init_net.ConstantFill(
[],
[s('zero_segment_id')],
value=0,
shape=[self.num_classes_padded],
dtype=core.DataType.INT32,
)
# A hack to bypass model cloning for test
step_model.param_init_net.AddExternalOutput(zero_segment_id)
""" the CRF step """
# Do tile
prev_transpose = step_model.Transpose(
cell_t_prev,
[s('prev_transpose')],
axes=(0, 2, 1),
)
prev_tiled = step_model.net.Tile(
prev_transpose,
[s('prev_tiled')],
tiles=self.num_classes_padded,
axis=2,
)
input_t_tiled = step_model.net.Tile(
input_t,
[s('input_t_tiled')],
tiles=self.num_classes_padded,
axis=1,
)
input_with_prev = step_model.net.Add(
[prev_tiled, input_t_tiled],
[s('input_with_prev')]
)
all_with_transitions = step_model.net.Add(
[input_with_prev, transitions],
[s('prev_with_transitions')],
broadcast=1,
use_grad_hack=1,
)
all_with_transitions_reshaped, _ = step_model.net.Reshape(
all_with_transitions,
[s('all_with_transitions_reshaped'), s('all_with_transitions_orig')],
shape=(self.num_classes_padded, self.num_classes_padded)
)
cell_t = step_model.net.SortedSegmentRangeLogSumExp(
[all_with_transitions_reshaped, zero_segment_id],
[s('cell_t')],
)
step_model.net.AddExternalOutputs(cell_t)
""" recurrent network """
cell_input_blob = initial_state
out_all, out_last = recurrent.recurrent_net(
net=self.model.net,
cell_net=step_model.net,
inputs=[(input_t, input_blob)],
initial_cell_inputs=[
(cell_t_prev, cell_input_blob),
],
links={
cell_t_prev: cell_t,
},
scope=scope,
outputs_with_grads=(1,)
)
return out_last
def update_predictions(self, classes):
def crf_update_predictions_op(inputs, outputs):
# This operator computes the best path of classes by performing
# Viterbi decoding and then updates the predictions so that the tag
# on the best path has the highest score among the others
predictions = inputs[0].data
transitions = inputs[1].data
predictions_shape = inputs[0].shape
outputs[0].reshape(predictions_shape)
trellis = np.zeros(predictions_shape)
backpointers = np.zeros(predictions_shape, dtype=np.int32)
trellis[0] = predictions[0]
for t in range(1, predictions_shape[0]):
v = np.expand_dims(trellis[t - 1], 1) + transitions
trellis[t] = predictions[t] + np.max(v, 0)
backpointers[t] = np.argmax(v, 0)
viterbi = [np.argmax(trellis[-1])]
for bp in reversed(backpointers[1:]):
viterbi.append(bp[viterbi[-1]])
viterbi.reverse()
new_predictions = np.zeros(predictions_shape)
old_bests = []
for i, w_predictions in enumerate(predictions):
# Get the current tag with the maximum score
new_predictions[i] = predictions[i]
old_best = np.argmax(w_predictions)
old_bests.append(old_best)
# Swap the scores of the current best tag and the tag on the
# Viterbi path
w_predictions[viterbi[i]], w_predictions[old_best] = \
w_predictions[old_best], w_predictions[viterbi[i]]
new_predictions[i] = w_predictions
# Remove the BOS and EOS entries from the predictions matrix
orig_predictions = new_predictions[1:-1, 0:-2]
outputs[0].reshape(orig_predictions.shape)
outputs[0].data[...] = orig_predictions
padded_classes = self._pad_predictions(classes)
new_classes = self.model.net.Python(crf_update_predictions_op)(
[padded_classes, self.transitions],
core.ScopedBlobReference('post_crf_classes')
)
return new_classes
|
## @package recurrent
# Module caffe2.python.recurrent
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.scope import CurrentNameScope
def recurrent_net(
net, cell_net, inputs, initial_cell_inputs,
links, timestep=None, scope=None, outputs_with_grads=(0,),
recompute_blobs_on_backward=None, forward_only=False,
):
'''
net: the main net operator should be added to
cell_net: cell_net which is executed in a recurrent fashion
inputs: sequences to be fed into the recurrent net. Currently only one input
is supported. It has to be in a format T x N x (D1...Dk) where T is the length
of the sequence, N is the batch size and (D1...Dk) are the rest of the dimensions
initial_cell_inputs: inputs of the cell_net for the 0th timestep.
Format for each input is:
(cell_net_input_name, external_blob_with_data)
links: a dictionary mapping cell_net input names at moment t+1 to
output names at moment t. Currently we assume that each output becomes
an input for the next timestep.
timestep: name of the timestep blob to be used. If not provided "timestep"
is used.
scope: Internal blobs are going to be scoped in a format
<scope_name>/<blob_name>
If not provided we generate a scope name automatically
outputs_with_grads : position indices of output blobs which will receive
error gradient (from outside recurrent network) during backpropagation
recompute_blobs_on_backward: specify a list of blobs that will be
recomputed for the backward pass, and thus need not be
stored for each forward timestep.
forward_only: if True, only forward steps are executed
'''
assert len(inputs) == 1, "Only one input blob is supported so far"
# Validate scoping
for einp in cell_net.Proto().external_input:
assert einp.startswith(CurrentNameScope()), \
'''
Cell net external inputs are not properly scoped, use
AddScopedExternalInputs() when creating them
'''
input_blobs = [str(i[0]) for i in inputs]
initial_input_blobs = [str(x[1]) for x in initial_cell_inputs]
op_name = net.NextName('recurrent')
def s(name):
# We have to manually scope due to our internal/external blob
# relationships.
scope_name = op_name if scope is None else scope
return "{}/{}".format(str(scope_name), str(name))
# determine inputs that are considered to be references;
# these are the ones not referred to in inputs or initial_cell_inputs
known_inputs = map(str, input_blobs + initial_input_blobs)
known_inputs += [str(x[0]) for x in initial_cell_inputs]
if timestep is not None:
known_inputs.append(str(timestep))
references = [
core.BlobReference(b) for b in cell_net.Proto().external_input
if b not in known_inputs]
inner_outputs = list(cell_net.Proto().external_output)
# These gradients are expected to be available during the backward pass
inner_outputs_map = {o: o + '_grad' for o in inner_outputs}
# compute the backward pass of the cell net
if not forward_only:
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
cell_net.Proto().op, inner_outputs_map)
backward_mapping = {str(k): v for k, v in backward_mapping.items()}
backward_cell_net = core.Net("RecurrentBackwardStep")
del backward_cell_net.Proto().op[:]
if recompute_blobs_on_backward is not None:
# Insert operators to re-compute the specified blobs.
# They are added in the same order as for the forward pass, thus
# the order is correct.
recompute_blobs_on_backward = {str(b) for b in
recompute_blobs_on_backward}
for op in cell_net.Proto().op:
if not recompute_blobs_on_backward.isdisjoint(set(op.output)):
backward_cell_net.Proto().op.extend([op])
# This fires if outputs other than the declared ones
# are computed by the ops that are recomputed
assert set(op.output).issubset(recompute_blobs_on_backward)
backward_cell_net.Proto().op.extend(backward_ops)
# compute blobs used but not defined in the backward pass
backward_ssa, backward_blob_versions = core.get_ssa(
backward_cell_net.Proto())
undefined = core.get_undefined_blobs(backward_ssa)
# also add to the output list the intermediate outputs of fwd_step that
# are used by backward.
ssa, blob_versions = core.get_ssa(cell_net.Proto())
scratches = [
blob for (blob, ver) in blob_versions.items()
if ver > 0 and
blob in undefined and
blob not in cell_net.Proto().external_output]
backward_cell_net.Proto().external_input.extend(scratches)
backward_cell_net.Proto().type = 'simple'
else:
backward_cell_net = None
all_inputs = [i[1] for i in inputs] + [
x[1] for x in initial_cell_inputs] + references
all_outputs = []
cell_net.Proto().type = 'rnn'
# Internal arguments used by RecurrentNetwork operator
# Links are in the format blob_name, recurrent_states, offset.
# At moment t we know that the corresponding data block is at
# position t + offset in the recurrent_states tensor
forward_links = []
backward_links = []
# Aliases are used to expose outputs to the external world
# Format: (internal_blob, external_blob, offset)
# A negative offset means counting from the end,
# a positive one from the beginning
aliases = []
# States hold the inputs to the cell net
recurrent_states = []
for cell_input, _ in initial_cell_inputs:
cell_input = str(cell_input)
# Recurrent_states is going to be (T + 1) x ...
# It stores all inputs and outputs of the cell net over time.
# Or their gradients in the case of the backward pass.
state = s(cell_input + "_states")
states_grad = state + "_grad"
cell_output = links[str(cell_input)]
forward_links.append((cell_input, state, 0))
forward_links.append((cell_output, state, 1))
aliases.append((state, cell_output + "_all", 1))
aliases.append((state, cell_output + "_last", -1))
all_outputs.extend([cell_output + "_all", cell_output + "_last"])
recurrent_states.append(state)
if backward_cell_net is not None:
backward_links.append((cell_output + "_grad", states_grad, 1))
backward_cell_net.Proto().external_input.append(
str(cell_output) + "_grad")
recurrent_input_grad = cell_input + "_grad"
if not backward_blob_versions.get(recurrent_input_grad, 0):
# If nobody writes to this recurrent input gradient, we need
# to make sure it gets to the states grad blob after all.
# We do this by using backward_links, which triggers an alias.
# This logic is used, for example, in the SumOp case
backward_links.append(
(backward_mapping[cell_input], states_grad, 0))
else:
backward_links.append((cell_input + "_grad", states_grad, 0))
for input_t, input_blob in inputs:
forward_links.append((str(input_t), str(input_blob), 0))
if backward_cell_net is not None:
for input_t, input_blob in inputs:
backward_links.append((
backward_mapping[str(input_t)], str(input_blob) + "_grad", 0
))
backward_cell_net.Proto().external_input.extend(
cell_net.Proto().external_input)
backward_cell_net.Proto().external_input.extend(
cell_net.Proto().external_output)
def unpack_triple(x):
if x:
a, b, c = zip(*x)
return a, b, c
return [], [], []
# Splitting into separate lists so we can pass them to C++,
# where we assemble them back
link_internal, link_external, link_offset = unpack_triple(forward_links)
alias_src, alias_dst, alias_offset = unpack_triple(aliases)
recurrent_inputs = [str(x[1]) for x in initial_cell_inputs]
backward_args = {}
if backward_cell_net is not None:
backward_link_internal, backward_link_external, backward_link_offset = \
unpack_triple(backward_links)
params = [x for x in references if x in backward_mapping.keys()]
param_grads = [str(backward_mapping[x])
for x in references
if x in backward_mapping.keys()]
if recompute_blobs_on_backward is None:
recompute_blobs_on_backward = set()
backward_args = {
'param': map(all_inputs.index, params),
'backward_link_internal': map(str, backward_link_internal),
'backward_link_external': map(str, backward_link_external),
'backward_link_offset': backward_link_offset,
'backward_step_net': str(backward_cell_net.Proto()),
'outputs_with_grads': outputs_with_grads,
'recompute_blobs_on_backward': map(
str, recompute_blobs_on_backward),
'param_grads': param_grads,
}
# Make sure that recurrent gradients accumulate with internal gradients
# (if a blob in the backward_cell_net receives gradient from both an
# external connection as well as from within the backward_cell_net,
# those gradients need to be added together, rather than one overwriting
# the other)
if backward_cell_net is not None:
proto = backward_cell_net.Proto()
operators = []
while len(proto.op) > 0:
operators.append(proto.op.pop())
for op in operators[::-1]:
proto.op.extend([op])
for j, output_blob in enumerate(op.output):
if output_blob in proto.external_input:
accum_blob = '{}_accum'.format(output_blob)
proto.op[-1].output[j] = accum_blob
backward_cell_net.Sum(
[output_blob, accum_blob],
[output_blob],
)
results = net.RecurrentNetwork(
all_inputs,
all_outputs + [s("step_workspaces")],
alias_src=alias_src,
alias_dst=map(str, alias_dst),
alias_offset=alias_offset,
recurrent_states=recurrent_states,
initial_recurrent_state_ids=map(all_inputs.index, recurrent_inputs),
link_internal=map(str, link_internal),
link_external=map(str, link_external),
link_offset=link_offset,
step_net=str(cell_net.Proto()),
timestep="timestep" if timestep is None else str(timestep),
**backward_args
)
# Restore net type since 'rnn' is not recognized outside RNNs
cell_net.Proto().type = 'simple'
# The last output is a list of step workspaces,
# which is only needed internally for gradient propagation
return results[:-1]
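# A minimal usage sketch (kept as a comment; all net and blob names below
# are illustrative, not part of this module): a cell that accumulates its
# input at every timestep. External inputs must be created with
# AddScopedExternalInputs so the scoping assertion above passes.
#
# cell = core.Net('sum_cell')
# input_t, state_prev = cell.AddScopedExternalInputs('input_t', 'state_prev')
# state = cell.Sum([input_t, state_prev], 'state')
# cell.AddExternalOutput(state)
# out_all, out_last = recurrent_net(
#     net=main_net, cell_net=cell,
#     inputs=[(input_t, sequence_blob)],  # T x N x D
#     initial_cell_inputs=[(state_prev, initial_state_blob)],
#     links={state_prev: state},
# )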
|
## @package control
# Module caffe2.python.control
"""
Implement functions for controlling execution of nets and steps, including
Do
DoParallel
For-loop
While-loop
Do-While-loop
Switch
If
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
# Used to generate names of the steps created by the control functions.
# It is actually the internal index of these steps.
_current_idx = 1
_used_step_names = set()
def _get_next_step_name(control_name, base_name):
global _current_idx, _used_step_names
concat_name = '%s/%s' % (base_name, control_name)
next_name = concat_name
while next_name in _used_step_names:
next_name = '%s_%d' % (concat_name, _current_idx)
_current_idx += 1
_used_step_names.add(next_name)
return next_name
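# For example, the first step created via _get_next_step_name('Do', 'train')
# is named 'train/Do'; later calls with the same arguments yield
# 'train/Do_1', 'train/Do_2', and so on.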
def _MakeList(input):
""" input is a tuple.
Example:
(a, b, c) --> [a, b, c]
(a) --> [a]
([a, b, c]) --> [a, b, c]
"""
if len(input) == 0:
raise ValueError(
'input cannot be empty.')
elif len(input) == 1:
output = input[0]
if not isinstance(output, list):
output = [output]
else:
output = list(input)
return output
def _IsNets(nets_or_steps):
if isinstance(nets_or_steps, list):
return all(isinstance(n, core.Net) for n in nets_or_steps)
else:
return isinstance(nets_or_steps, core.Net)
def _PrependNets(nets_or_steps, *nets):
nets_or_steps = _MakeList((nets_or_steps,))
nets = _MakeList(nets)
if _IsNets(nets_or_steps):
return nets + nets_or_steps
else:
return [Do('prepend', nets)] + nets_or_steps
def _AppendNets(nets_or_steps, *nets):
nets_or_steps = _MakeList((nets_or_steps,))
nets = _MakeList(nets)
if _IsNets(nets_or_steps):
return nets_or_steps + nets
else:
return nets_or_steps + [Do('append', nets)]
def GetConditionBlobFromNet(condition_net):
"""
The condition blob is the last external_output that must
be a single bool
"""
assert len(condition_net.Proto().external_output) > 0, (
"Condition net %s must has at least one external output" %
condition_net.Proto.name)
# we need to use a blob reference here instead of a string
# otherwise, it will add another name_scope to the input later
# when we create new ops (such as OR of two inputs)
return core.BlobReference(condition_net.Proto().external_output[-1])
def BoolNet(*blobs_with_bool_value):
"""A net assigning constant bool values to blobs. It is mainly used for
initializing condition blobs, for example, in multi-task learning, we
need to access reader_done blobs before reader_net runs. In that case,
the reader_done blobs must be initialized.
Args:
blobs_with_bool_value: one or more (blob, bool_value) pairs. The net will
assign each bool_value to the corresponding blob.
returns
bool_net: A net assigning constant bool values to blobs.
Examples:
- BoolNet((blob_1, bool_value_1), ..., (blob_n, bool_value_n))
- BoolNet([(blob_1, bool_value_1), ..., (blob_n, bool_value_n)])
- BoolNet((cond_1, bool_value_1))
"""
blobs_with_bool_value = _MakeList(blobs_with_bool_value)
bool_net = core.Net('bool_net')
for blob, bool_value in blobs_with_bool_value:
out_blob = bool_net.ConstantFill(
[],
[blob],
shape=[],
value=bool_value,
dtype=core.DataType.BOOL)
bool_net.AddExternalOutput(out_blob)
return bool_net
def NotNet(condition_blob_or_net):
"""Not of a condition blob or net
Args:
condition_blob_or_net can be either a blob or a net. If condition_blob_or_net
is a Net, the condition is its last external_output,
which must be a single bool.
returns
not_net: the net NOT the input
out_blob: the output blob of the not_net
"""
if isinstance(condition_blob_or_net, core.Net):
condition_blob = GetConditionBlobFromNet(condition_blob_or_net)
else:
condition_blob = condition_blob_or_net
not_net = core.Net('not_net')
out_blob = not_net.Not(condition_blob)
not_net.AddExternalOutput(out_blob)
return not_net, out_blob
def _CopyConditionBlobNet(condition_blob):
"""Make a condition net that copies the condition_blob
Args:
condition_blob is a single bool.
returns
condition_net: the net that copies condition_blob
out_blob: the output blob of condition_net
"""
condition_net = core.Net('copy_condition_blob_net')
out_blob = condition_net.Copy(condition_blob)
condition_net.AddExternalOutput(out_blob)
return condition_net, out_blob
def MergeConditionNets(name, condition_nets, relation):
"""
Merge multiple condition nets into a single condition net.
Args:
name: name of the new condition net.
condition_nets: a list of condition nets. The last external_output
of each condition net must be a single bool value.
relation: can be 'And' or 'Or'.
Returns:
- A new condition net. Its last external output is the relation of all
condition_nets.
"""
if not isinstance(condition_nets, list):
return condition_nets
if len(condition_nets) <= 1:
return condition_nets[0] if condition_nets else None
merged_net = core.Net(name)
for i in range(len(condition_nets)):
net_proto = condition_nets[i].Proto()
assert net_proto.device_option == merged_net.Proto().device_option
assert net_proto.type == merged_net.Proto().type
merged_net.Proto().op.extend(net_proto.op)
merged_net.Proto().external_input.extend(net_proto.external_input)
# discard external outputs as we're combining them together
curr_cond = GetConditionBlobFromNet(condition_nets[i])
if i == 0:
last_cond = curr_cond
else:
last_cond = merged_net.__getattr__(relation)([last_cond, curr_cond])
# merge attributes
for k, v in condition_nets[i]._attr_dict.items():
merged_net._attr_dict[k] += v
merged_net.AddExternalOutput(last_cond)
return merged_net
def CombineConditions(name, condition_nets, relation):
"""
Combine the conditions of multiple nets into a single condition net. Unlike
MergeConditionNets, the actual bodies of condition_nets are not copied into
the combined condition net.
One example involves multiple readers. Each reader net has a reader_done
condition. When we want to check whether all readers are done, we can
use this function to build a new net.
Args:
name: name of the new condition net.
condition_nets: a list of condition nets. The last external_output
of each condition net must be a single bool value.
relation: can be 'And' or 'Or'.
Returns:
- A new condition net. Its last external output is the relation of all
condition_nets.
"""
if not condition_nets:
return None
if not isinstance(condition_nets, list):
raise ValueError('condition_nets must be a list of nets.')
if len(condition_nets) == 1:
condition_blob = GetConditionBlobFromNet(condition_nets[0])
condition_net, _ = _CopyConditionBlobNet(condition_blob)
return condition_net
combined_net = core.Net(name)
for i in range(len(condition_nets)):
curr_cond = GetConditionBlobFromNet(condition_nets[i])
if i == 0:
last_cond = curr_cond
else:
last_cond = combined_net.__getattr__(relation)(
[last_cond, curr_cond])
combined_net.AddExternalOutput(last_cond)
return combined_net
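# An illustrative sketch of the reader example above (net names are
# hypothetical): each reader net exposes a boolean done blob as its last
# external output, and the combined all-readers-done condition is their AND:
#
# all_done_net = CombineConditions(
#     'all_readers_done', [reader_net_1, reader_net_2], 'And')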
def Do(name, *nets_or_steps):
"""
Execute the sequence of nets or steps once.
Examples:
- Do('myDo', net1, net2, ..., net_n)
- Do('myDo', list_of_nets)
- Do('myDo', step1, step2, ..., step_n)
- Do('myDo', list_of_steps)
"""
nets_or_steps = _MakeList(nets_or_steps)
if (len(nets_or_steps) == 1 and isinstance(
nets_or_steps[0], core.ExecutionStep)):
return nets_or_steps[0]
else:
return core.scoped_execution_step(
_get_next_step_name('Do', name), nets_or_steps)
def DoParallel(name, *nets_or_steps):
"""
    Execute the nets or steps in parallel, waiting for all of them to finish.
Examples:
- DoParallel('pDo', net1, net2, ..., net_n)
- DoParallel('pDo', list_of_nets)
- DoParallel('pDo', step1, step2, ..., step_n)
- DoParallel('pDo', list_of_steps)
"""
nets_or_steps = _MakeList(nets_or_steps)
if (len(nets_or_steps) == 1 and isinstance(
nets_or_steps[0], core.ExecutionStep)):
return nets_or_steps[0]
else:
return core.scoped_execution_step(
_get_next_step_name('DoParallel', name),
nets_or_steps,
concurrent_substeps=True)
def _RunOnceIf(name, condition_blob_or_net, nets_or_steps):
"""
Execute nets_or_steps once if condition_blob_or_net evaluates as true.
If condition_blob_or_net is Net, the condition is its last external_output
that must be a single bool. And this net will be executed before
nets_or_steps so as to get the condition.
"""
condition_not_net, stop_blob = NotNet(condition_blob_or_net)
if isinstance(condition_blob_or_net, core.Net):
nets_or_steps = _PrependNets(
nets_or_steps, condition_blob_or_net, condition_not_net)
else:
nets_or_steps = _PrependNets(nets_or_steps, condition_not_net)
def if_step(control_name):
return core.scoped_execution_step(
_get_next_step_name(control_name, name),
nets_or_steps,
should_stop_blob=stop_blob,
only_once=True,
)
if _IsNets(nets_or_steps):
bool_net = BoolNet((stop_blob, False))
return Do(name + '/_RunOnceIf',
bool_net, if_step('_RunOnceIf-inner'))
else:
return if_step('_RunOnceIf')
def _RunOnceIfNot(name, condition_blob_or_net, nets_or_steps):
"""
    Similar to _RunOnceIf() but execute nets_or_steps once if
condition_blob_or_net evaluates as false.
"""
if isinstance(condition_blob_or_net, core.Net):
condition_blob = GetConditionBlobFromNet(condition_blob_or_net)
nets_or_steps = _PrependNets(nets_or_steps, condition_blob_or_net)
else:
copy_net, condition_blob = _CopyConditionBlobNet(condition_blob_or_net)
nets_or_steps = _PrependNets(nets_or_steps, copy_net)
return core.scoped_execution_step(
_get_next_step_name('_RunOnceIfNot', name),
nets_or_steps,
should_stop_blob=condition_blob,
only_once=True,
)
def For(name, nets_or_steps, iter_num):
"""
Execute nets_or_steps iter_num times.
Args:
        nets_or_steps: an ExecutionStep, a Net, a list of ExecutionSteps, or
            a list of Nets.
        iter_num: the number of times to execute the nets_or_steps.
    Returns:
        An ExecutionStep instance.
"""
init_net = core.Net('init-net')
iter_cnt = init_net.CreateCounter([], init_count=iter_num)
iter_net = core.Net('For-iter')
iter_done = iter_net.CountDown([iter_cnt])
for_step = core.scoped_execution_step(
_get_next_step_name('For-inner', name),
_PrependNets(nets_or_steps, iter_net),
should_stop_blob=iter_done)
return Do(name + '/For',
Do(name + '/For-init-net', init_net),
for_step)
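# A minimal usage sketch ('train_net' is a hypothetical core.Net built
# elsewhere): run a net for a fixed number of iterations.
#
#     step = For('train_loop', train_net, iter_num=100)
#     # 'step' is an ExecutionStep; add it to a Plan or a larger step to run.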
def While(name, condition_blob_or_net, nets_or_steps):
"""
    Execute nets_or_steps repeatedly as long as condition_blob_or_net
    returns true.
Args:
condition_blob_or_net: If it is an instance of Net, its last
external_output must be a single bool.
        nets_or_steps: an ExecutionStep, a Net, a list of ExecutionSteps, or
            a list of Nets.
    Returns:
        An ExecutionStep instance.
"""
condition_not_net, stop_blob = NotNet(condition_blob_or_net)
if isinstance(condition_blob_or_net, core.Net):
nets_or_steps = _PrependNets(
nets_or_steps, condition_blob_or_net, condition_not_net)
else:
nets_or_steps = _PrependNets(nets_or_steps, condition_not_net)
def while_step(control_name):
return core.scoped_execution_step(
_get_next_step_name(control_name, name),
nets_or_steps,
should_stop_blob=stop_blob,
)
if _IsNets(nets_or_steps):
# In this case, while_step has sub-nets:
# [condition_blob_or_net, condition_not_net, nets_or_steps]
# If stop_blob is pre-set to True (this may happen when While() is
# called twice), the loop will exit after executing
        # condition_blob_or_net. So we use BoolNet to set stop_blob to
        # False.
bool_net = BoolNet((stop_blob, False))
return Do(name + '/While', bool_net, while_step('While-inner'))
else:
return while_step('While')
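# A minimal usage sketch (hypothetical nets): loop a body net as long as a
# condition net keeps producing True. 'cond_net' is assumed to be a core.Net
# whose last external output is a single bool.
#
#     loop_step = While('my_loop', cond_net, body_net)
#     # cond_net is re-run before body_net on every iteration, so the
#     # condition is re-evaluated each time around the loop.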
def Until(name, condition_blob_or_net, nets_or_steps):
"""
    Similar to While() but execute nets_or_steps as long as
    condition_blob_or_net returns false.
"""
if isinstance(condition_blob_or_net, core.Net):
stop_blob = GetConditionBlobFromNet(condition_blob_or_net)
nets_or_steps = _PrependNets(nets_or_steps, condition_blob_or_net)
else:
stop_blob = core.BlobReference(str(condition_blob_or_net))
return core.scoped_execution_step(
_get_next_step_name('Until', name),
nets_or_steps,
should_stop_blob=stop_blob)
def DoWhile(name, condition_blob_or_net, nets_or_steps):
"""
    Execute nets_or_steps repeatedly as long as condition_blob_or_net returns
    true. It will execute nets_or_steps before evaluating
    condition_blob_or_net.
    Args:
        condition_blob_or_net: if it is an instance of Net, its last
            external_output must be a single bool.
        nets_or_steps: an ExecutionStep, a Net, a list of ExecutionSteps, or
            a list of Nets.
    Returns:
        An ExecutionStep instance.
"""
condition_not_net, stop_blob = NotNet(condition_blob_or_net)
if isinstance(condition_blob_or_net, core.Net):
nets_or_steps = _AppendNets(
nets_or_steps, condition_blob_or_net, condition_not_net)
else:
nets_or_steps = _AppendNets(nets_or_steps, condition_not_net)
# If stop_blob is pre-set to True (this may happen when DoWhile() is
# called twice), the loop will exit after executing the first net/step
    # in nets_or_steps. This is not what we want. So we use BoolNet to
    # set stop_blob to False.
bool_net = BoolNet((stop_blob, False))
return Do(name + '/DoWhile', bool_net, core.scoped_execution_step(
_get_next_step_name('DoWhile-inner', name),
nets_or_steps,
should_stop_blob=stop_blob,
))
def DoUntil(name, condition_blob_or_net, nets_or_steps):
"""
    Similar to DoWhile() but execute nets_or_steps as long as
    condition_blob_or_net returns false. It will execute
nets_or_steps before evaluating condition_blob_or_net.
Special case: if condition_blob_or_net is a blob and is pre-set to
    true, then only the first net/step of nets_or_steps will be executed and
    the loop is exited. So you need to be careful about the initial value of
    the condition blob when using DoUntil(), especially when DoUntil() is
    called twice.
"""
if not isinstance(condition_blob_or_net, core.Net):
stop_blob = core.BlobReference(condition_blob_or_net)
return core.scoped_execution_step(
_get_next_step_name('DoUntil', name),
nets_or_steps,
should_stop_blob=stop_blob)
nets_or_steps = _AppendNets(nets_or_steps, condition_blob_or_net)
stop_blob = GetConditionBlobFromNet(condition_blob_or_net)
    # If stop_blob is pre-set to True (this may happen when DoUntil() is
    # called twice), the loop will exit after executing the first net/step
    # in nets_or_steps. This is not what we want. So we use BoolNet to
    # set stop_blob to False.
bool_net = BoolNet((stop_blob, False))
return Do(name + '/DoUntil', bool_net, core.scoped_execution_step(
_get_next_step_name('DoUntil-inner', name),
nets_or_steps,
should_stop_blob=stop_blob,
))
def Switch(name, *conditions):
"""
Execute the steps for which the condition is true.
Each condition is a tuple (condition_blob_or_net, nets_or_steps).
Note:
        1. Multiple steps can be executed if their conditions are true.
        2. The condition_blob_or_net (if it is a Net) of all steps will be
executed once.
Examples:
- Switch('name', (cond_1, net_1), (cond_2, net_2), ..., (cond_n, net_n))
- Switch('name', [(cond_1, net1), (cond_2, net_2), ..., (cond_n, net_n)])
- Switch('name', (cond_1, net_1))
"""
conditions = _MakeList(conditions)
return core.scoped_execution_step(
_get_next_step_name('Switch', name),
[_RunOnceIf(name + '/Switch', cond, step) for cond, step in conditions])
def SwitchNot(name, *conditions):
"""
Similar to Switch() but execute the steps for which the condition is False.
"""
conditions = _MakeList(conditions)
return core.scoped_execution_step(
_get_next_step_name('SwitchNot', name),
[_RunOnceIfNot(name + '/SwitchNot', cond, step)
for cond, step in conditions])
def If(name, condition_blob_or_net,
true_nets_or_steps, false_nets_or_steps=None):
"""
condition_blob_or_net is first evaluated or executed. If the condition is
true, true_nets_or_steps is then executed, otherwise, false_nets_or_steps
is executed.
If condition_blob_or_net is Net, the condition is its last external_output
    that must be a single bool. And this Net will be executed before both
true/false_nets_or_steps so as to get the condition.
"""
if not false_nets_or_steps:
return _RunOnceIf(name + '/If',
condition_blob_or_net, true_nets_or_steps)
if isinstance(condition_blob_or_net, core.Net):
condition_blob = GetConditionBlobFromNet(condition_blob_or_net)
else:
condition_blob = condition_blob_or_net
return Do(
name + '/If',
_RunOnceIf(name + '/If-true',
condition_blob_or_net, true_nets_or_steps),
_RunOnceIfNot(name + '/If-false', condition_blob, false_nets_or_steps)
)
def IfNot(name, condition_blob_or_net,
true_nets_or_steps, false_nets_or_steps=None):
"""
If condition_blob_or_net returns false, executes true_nets_or_steps,
    otherwise executes false_nets_or_steps.
"""
if not false_nets_or_steps:
return _RunOnceIfNot(name + '/IfNot',
condition_blob_or_net, true_nets_or_steps)
if isinstance(condition_blob_or_net, core.Net):
condition_blob = GetConditionBlobFromNet(condition_blob_or_net)
else:
condition_blob = condition_blob_or_net
return Do(
name + '/IfNot',
_RunOnceIfNot(name + '/IfNot-true',
condition_blob_or_net, true_nets_or_steps),
_RunOnceIf(name + '/IfNot-false', condition_blob, false_nets_or_steps)
)
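# A minimal usage sketch (hypothetical blob and net names): branch between
# two nets on a boolean condition blob that is assumed to already exist in
# the workspace.
#
#     branch_step = If('maybe_checkpoint',
#                      'should_checkpoint',  # blob holding a single bool
#                      save_net,             # executed when True
#                      skip_net)             # executed when False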
|
## @package timeout_guard
# Module caffe2.python.timeout_guard
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import threading
import os
import time
import signal
import logging
'''
Sometimes CUDA devices can get stuck, i.e. 'deadlock'. In this case it is
often better just to kill the process automatically. Use this guard to set a
maximum timespan for a python call, such as RunNet(). If it does not complete
in time, the process is killed.
Example usage:
with timeout_guard.CompleteInTimeOrDie(10.0):
core.RunNet(...)
'''
class WatcherThread(threading.Thread):
def __init__(self, timeout_secs):
threading.Thread.__init__(self)
self.timeout_secs = timeout_secs
self.completed = False
self.condition = threading.Condition()
self.daemon = True
self.caller_thread = threading.current_thread()
def run(self):
started = time.time()
self.condition.acquire()
while time.time() - started < self.timeout_secs and not self.completed:
self.condition.wait(self.timeout_secs - (time.time() - started))
self.condition.release()
if not self.completed:
log = logging.getLogger("timeout_guard")
log.error("Call did not finish in time. Timeout:{}s PID: {}".format(
self.timeout_secs,
os.getpid(),
))
            # First try dying cleanly; if that does not work within 10 secs,
            # force-exit
def forcequit():
time.sleep(10.0)
log.info("Prepared output, dumping threads. ")
print("Caller thread was: {}".format(self.caller_thread))
print("-----After force------")
import sys
import traceback
code = []
for threadId, stack in sys._current_frames().items():
if threadId == self.caller_thread.ident:
code.append("\n# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
print("\n".join(code))
log.error("Process did not terminate cleanly in 10 s, forcing")
os._exit(1)
forcet = threading.Thread(target=forcequit, args=())
forcet.daemon = True
forcet.start()
print("Caller thread was: {}".format(self.caller_thread))
print("-----Before forcing------")
import sys
import traceback
code = []
for threadId, stack in sys._current_frames().items():
code.append("\n# ThreadID: %s" % threadId)
for filename, lineno, name, line in traceback.extract_stack(stack):
code.append('File: "%s", line %d, in %s' % (filename, lineno, name))
if line:
code.append(" %s" % (line.strip()))
print("\n".join(code))
os.kill(os.getpid(), signal.SIGINT)
@contextlib.contextmanager
def CompleteInTimeOrDie(timeout_secs):
watcher = WatcherThread(timeout_secs)
watcher.start()
yield
watcher.completed = True
watcher.condition.acquire()
watcher.condition.notify()
watcher.condition.release()
def EuthanizeIfNecessary(timeout_secs=120):
'''
    Call this if you have a problem with the process getting stuck at
    shutdown. It will kill the process if it does not terminate in
It will kill the process if it does not terminate in timeout_secs.
'''
watcher = WatcherThread(timeout_secs)
watcher.start()
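# A minimal usage sketch: call this near the end of a script whose shutdown
# sometimes hangs (e.g. on stuck CUDA calls). The watcher is a daemon thread,
# so a clean exit within timeout_secs is unaffected.
#
#     EuthanizeIfNecessary(timeout_secs=60)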
|
import numpy as np
import os
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util, workspace, cnn
import caffe2.python.hypothesis_test_util as htu
import hypothesis.strategies as st
from hypothesis import given
class TestWorkspace(unittest.TestCase):
def setUp(self):
self.net = core.Net("test-net")
self.testblob_ref = self.net.ConstantFill(
[], "testblob", shape=[1, 2, 3, 4], value=1.0)
workspace.ResetWorkspace()
def testRootFolder(self):
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.RootFolder(), ".")
self.assertEqual(
workspace.ResetWorkspace("/tmp/caffe-workspace-test"), True)
self.assertEqual(workspace.RootFolder(), "/tmp/caffe-workspace-test")
def testWorkspaceHasBlobWithNonexistingName(self):
self.assertEqual(workspace.HasBlob("non-existing"), False)
def testRunOperatorOnce(self):
self.assertEqual(
workspace.RunOperatorOnce(
self.net.Proto().op[0].SerializeToString()
), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
blobs = workspace.Blobs()
self.assertEqual(len(blobs), 1)
self.assertEqual(blobs[0], "testblob")
def testRunNetOnce(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testCurrentWorkspaceWrapper(self):
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertIn("testblob", workspace.C.Workspace.current.blobs)
workspace.ResetWorkspace()
self.assertNotIn("testblob", workspace.C.Workspace.current.blobs)
def testRunPlan(self):
plan = core.Plan("test-plan")
plan.AddStep(core.ExecutionStep("test-step", self.net))
self.assertEqual(
workspace.RunPlan(plan.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testConstructPlanFromSteps(self):
step = core.ExecutionStep("test-step-as-plan", self.net)
self.assertEqual(workspace.RunPlan(step), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
def testResetWorkspace(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.ResetWorkspace(), True)
self.assertEqual(workspace.HasBlob("testblob"), False)
def testTensorAccess(self):
ws = workspace.C.Workspace()
""" test in-place modification """
ws.create_blob("tensor").feed(np.array([1.1, 1.2, 1.3]))
tensor = ws.blobs["tensor"].tensor()
tensor.data[0] = 3.3
val = np.array([3.3, 1.2, 1.3])
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" test in-place initialization """
tensor.init([2, 3], core.DataType.INT32)
tensor.data[1, 1] = 100
val = np.zeros([2, 3], dtype=np.int32)
val[1, 1] = 100
np.testing.assert_array_equal(tensor.data, val)
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
""" strings cannot be initialized from python """
with self.assertRaises(RuntimeError):
tensor.init([3, 4], core.DataType.STRING)
""" feed (copy) data into tensor """
val = np.array([['abc', 'def'], ['ghi', 'jkl']], dtype=np.object)
tensor.feed(val)
self.assertEquals(tensor.data[0, 0], 'abc')
np.testing.assert_array_equal(ws.blobs["tensor"].fetch(), val)
val = np.array([1.1, 10.2])
tensor.feed(val)
val[0] = 5.2
self.assertEquals(tensor.data[0], 1.1)
""" fetch (copy) data from tensor """
val = np.array([1.1, 1.2])
tensor.feed(val)
val2 = tensor.fetch()
tensor.data[0] = 5.2
val3 = tensor.fetch()
np.testing.assert_array_equal(val, val2)
self.assertEquals(val3[0], 5.2)
def testFetchFeedBlob(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobViaBlobReference(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob(self.testblob_ref)
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob(self.testblob_ref, fetched), True)
fetched_again = workspace.FetchBlob("testblob") # fetch by name now
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testFetchFeedBlobTypes(self):
for dtype in [np.float16, np.float32, np.float64, np.bool,
np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16]:
try:
rng = np.iinfo(dtype).max * 2
except ValueError:
rng = 1000
data = ((np.random.rand(2, 3, 4) - 0.5) * rng).astype(dtype)
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, dtype)
np.testing.assert_array_equal(fetched_back, data)
def testFetchFeedBlobBool(self):
"""Special case for bool to ensure coverage of both true and false."""
data = np.zeros((2, 3, 4)).astype(np.bool)
data.flat[::2] = True
self.assertEqual(workspace.FeedBlob("testblob_types", data), True)
fetched_back = workspace.FetchBlob("testblob_types")
self.assertEqual(fetched_back.shape, (2, 3, 4))
self.assertEqual(fetched_back.dtype, np.bool)
np.testing.assert_array_equal(fetched_back, data)
def testFetchFeedBlobZeroDim(self):
data = np.empty(shape=(2, 0, 3), dtype=np.float32)
self.assertEqual(workspace.FeedBlob("testblob_empty", data), True)
fetched_back = workspace.FetchBlob("testblob_empty")
self.assertEqual(fetched_back.shape, (2, 0, 3))
self.assertEqual(fetched_back.dtype, np.float32)
def testFetchFeedLongStringTensor(self):
# long strings trigger array of object creation
strs = np.array([
' '.join(10 * ['long string']),
' '.join(128 * ['very long string']),
'small \0\1\2 string',
"Hello, world! I have special \0 symbols \1!"])
workspace.FeedBlob('my_str_tensor', strs)
strs2 = workspace.FetchBlob('my_str_tensor')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedShortStringTensor(self):
# small strings trigger NPY_STRING array
strs = np.array(['elem1', 'elem 2', 'element 3'])
workspace.FeedBlob('my_str_tensor_2', strs)
strs2 = workspace.FetchBlob('my_str_tensor_2')
self.assertEqual(strs.shape, strs2.shape)
for i in range(0, strs.shape[0]):
self.assertEqual(strs[i], strs2[i])
def testFetchFeedPlainString(self):
# this is actual string, not a tensor of strings
s = "Hello, world! I have special \0 symbols \1!"
workspace.FeedBlob('my_plain_string', s)
s2 = workspace.FetchBlob('my_plain_string')
self.assertEqual(s, s2)
def testFetchBlobs(self):
s1 = "test1"
s2 = "test2"
workspace.FeedBlob('s1', s1)
workspace.FeedBlob('s2', s2)
fetch1, fetch2 = workspace.FetchBlobs(['s1', 's2'])
self.assertEquals(s1, fetch1)
self.assertEquals(s2, fetch2)
def testFetchFeedViaBlobDict(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.blobs["testblob"]
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
workspace.blobs["testblob"] = fetched
fetched_again = workspace.blobs["testblob"]
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
self.assertTrue("testblob" in workspace.blobs)
self.assertFalse("non_existant" in workspace.blobs)
self.assertEqual(len(workspace.blobs), 1)
for key in workspace.blobs:
self.assertEqual(key, "testblob")
class TestMultiWorkspaces(unittest.TestCase):
def setUp(self):
workspace.SwitchWorkspace("default")
workspace.ResetWorkspace()
def testCreateWorkspace(self):
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True
)
self.assertEqual(workspace.HasBlob("testblob"), True)
self.assertEqual(workspace.SwitchWorkspace("test", True), None)
self.assertEqual(workspace.HasBlob("testblob"), False)
self.assertEqual(workspace.SwitchWorkspace("default"), None)
self.assertEqual(workspace.HasBlob("testblob"), True)
try:
# The following should raise an error.
workspace.SwitchWorkspace("non-existing")
# so this should never happen.
self.assertEqual(True, False)
except RuntimeError:
pass
workspaces = workspace.Workspaces()
self.assertTrue("default" in workspaces)
self.assertTrue("test" in workspaces)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
class TestWorkspaceGPU(test_util.TestCase):
def setUp(self):
workspace.ResetWorkspace()
self.net = core.Net("test-net")
self.net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.net.RunAllOnGPU()
def testFetchBlobGPU(self):
self.assertEqual(
workspace.RunNetOnce(self.net.Proto().SerializeToString()), True)
fetched = workspace.FetchBlob("testblob")
# check if fetched is correct.
self.assertEqual(fetched.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched, 1.0)
fetched[:] = 2.0
self.assertEqual(workspace.FeedBlob("testblob", fetched), True)
fetched_again = workspace.FetchBlob("testblob")
self.assertEqual(fetched_again.shape, (1, 2, 3, 4))
np.testing.assert_array_equal(fetched_again, 2.0)
def testDefaultGPUID(self):
self.assertEqual(workspace.SetDefaultGPUID(0), None)
self.assertEqual(workspace.GetDefaultGPUID(), 0)
def testGetCudaPeerAccessPattern(self):
pattern = workspace.GetCudaPeerAccessPattern()
self.assertEqual(type(pattern), np.ndarray)
self.assertEqual(pattern.ndim, 2)
self.assertEqual(pattern.shape[0], pattern.shape[1])
self.assertEqual(pattern.shape[0], workspace.NumCudaDevices())
@unittest.skipIf(not workspace.C.has_mkldnn, "No MKLDNN support.")
class TestWorkspaceMKLDNN(test_util.TestCase):
def testFeedFetchBlobMKLDNN(self):
arr = np.random.randn(2, 3).astype(np.float32)
workspace.FeedBlob(
"testblob_mkldnn", arr, core.DeviceOption(caffe2_pb2.MKLDNN))
fetched = workspace.FetchBlob("testblob_mkldnn")
np.testing.assert_array_equal(arr, fetched)
class TestImmediate(test_util.TestCase):
def testImmediateEnterExit(self):
workspace.StartImmediate(i_know=True)
self.assertTrue(workspace.IsImmediate())
workspace.StopImmediate()
self.assertFalse(workspace.IsImmediate())
def testImmediateRunsCorrectly(self):
workspace.StartImmediate(i_know=True)
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
self.assertEqual(
workspace.ImmediateBlobs(), ["testblob"])
content = workspace.FetchImmediate("testblob")
# Also, the immediate mode should not invade the original namespace,
# so we check if this is so.
with self.assertRaises(RuntimeError):
workspace.FetchBlob("testblob")
np.testing.assert_array_equal(content, 1.0)
content[:] = 2.0
self.assertTrue(workspace.FeedImmediate("testblob", content))
np.testing.assert_array_equal(
workspace.FetchImmediate("testblob"), 2.0)
workspace.StopImmediate()
with self.assertRaises(RuntimeError):
content = workspace.FetchImmediate("testblob")
def testImmediateRootFolder(self):
workspace.StartImmediate(i_know=True)
# for testing we will look into the _immediate_root_folder variable
# but in normal usage you should not access that.
self.assertTrue(len(workspace._immediate_root_folder) > 0)
root_folder = workspace._immediate_root_folder
self.assertTrue(os.path.isdir(root_folder))
workspace.StopImmediate()
self.assertTrue(len(workspace._immediate_root_folder) == 0)
# After termination, immediate mode should have the root folder
# deleted.
self.assertFalse(os.path.exists(root_folder))
class TestCppEnforceAsException(test_util.TestCase):
def testEnforce(self):
op = core.CreateOperator("Relu", ["X"], ["Y"])
with self.assertRaises(RuntimeError):
workspace.RunOperatorOnce(op)
class TestCWorkspace(htu.HypothesisTestCase):
def test_net_execution(self):
ws = workspace.C.Workspace()
self.assertEqual(ws.nets, {})
self.assertEqual(ws.blobs, {})
net = core.Net("test-net")
net.ConstantFill([], "testblob", shape=[1, 2, 3, 4], value=1.0)
ws.create_net(net)
# If we do not specify overwrite, this should raise an error.
with self.assertRaises(RuntimeError):
ws.create_net(net)
# But, if we specify overwrite, this should pass.
ws.create_net(net, True)
# Overwrite can also be a kwarg.
ws.create_net(net, overwrite=True)
self.assertIn("testblob", ws.blobs)
self.assertEqual(len(ws.nets), 1)
net_name = ws.nets.keys()[0]
self.assertIn("test-net", net_name) # May have a suffix such as "_1"
net = ws.nets[net_name].run()
blob = ws.blobs["testblob"]
np.testing.assert_array_equal(
np.ones((1, 2, 3, 4), dtype=np.float32),
blob.fetch())
@given(name=st.text(), value=st.floats(min_value=-1, max_value=1.0))
def test_operator_run(self, name, value):
name = name.encode("ascii", "ignore")
ws = workspace.C.Workspace()
op = core.CreateOperator(
"ConstantFill", [], [name], shape=[1], value=value)
ws.run(op)
self.assertIn(name, ws.blobs)
np.testing.assert_allclose(
[value], ws.blobs[name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_run(self, blob_name, net_name, value):
blob_name = blob_name.encode("ascii", "ignore")
net_name = net_name.encode("ascii", "ignore")
ws = workspace.C.Workspace()
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.run(net)
self.assertIn(blob_name, ws.blobs)
self.assertNotIn(net_name, ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
plan_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_plan_run(self, blob_name, plan_name, net_name, value):
blob_name = blob_name.encode("ascii", "ignore")
net_name = net_name.encode("ascii", "ignore")
plan_name = plan_name.encode("ascii", "ignore")
ws = workspace.C.Workspace()
plan = core.Plan(plan_name)
net = core.Net(net_name)
net.ConstantFill([], [blob_name], shape=[1], value=value)
plan.AddStep(core.ExecutionStep("step", nets=[net], num_iter=1))
ws.run(plan)
self.assertIn(blob_name, ws.blobs)
self.assertIn(str(net), ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(blob_name=st.text(),
net_name=st.text(),
value=st.floats(min_value=-1, max_value=1.0))
def test_net_create(self, blob_name, net_name, value):
blob_name = blob_name.encode("ascii", "ignore")
net_name = net_name.encode("ascii", "ignore")
ws = workspace.C.Workspace()
net = core.Net(net_name)
net_name = str(net)
net.ConstantFill([], [blob_name], shape=[1], value=value)
ws.create_net(net).run()
self.assertIn(blob_name, ws.blobs)
self.assertIn(net_name, ws.nets)
np.testing.assert_allclose(
[value], ws.blobs[blob_name].fetch(), atol=1e-4, rtol=1e-4)
@given(name=st.text(),
value=htu.tensor(),
device_option=st.sampled_from(htu.device_options))
def test_array_serde(self, name, value, device_option):
name = name.encode("ascii", "ignore")
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value, device_option=device_option)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
np.testing.assert_equal(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
np.testing.assert_equal(value, serde_blob.fetch())
@given(name=st.text(), value=st.text())
def test_string_serde(self, name, value):
name = name.encode("ascii", "ignore")
value = value.encode("ascii", "ignore")
ws = workspace.C.Workspace()
ws.create_blob(name).feed(value)
self.assertIn(name, ws.blobs)
blob = ws.blobs[name]
self.assertEqual(value, ws.blobs[name].fetch())
serde_blob = ws.create_blob("{}_serde".format(name))
serde_blob.deserialize(blob.serialize(name))
self.assertEqual(value, serde_blob.fetch())
def test_exception(self):
ws = workspace.C.Workspace()
with self.assertRaises(RuntimeError):
ws.create_net("...")
class TestPredictor(unittest.TestCase):
def _create_model(self):
m = cnn.CNNModelHelper()
y = m.FC("data", "y",
dim_in=4, dim_out=2,
weight_init=m.ConstantInit(1.0),
bias_init=m.ConstantInit(0.0),
axis=0)
m.net.AddExternalOutput(y)
return m
    # Use this test with a bigger model to see how using Predictor allows one
    # to avoid issues with the low protobuf size limit in Python
#
# def test_predictor_predefined(self):
# workspace.ResetWorkspace()
# path = 'caffe2/caffe2/test/assets/'
# with open(path + 'squeeze_predict_net.pb') as f:
# self.predict_net = f.read()
# with open(path + 'squeeze_init_net.pb') as f:
# self.init_net = f.read()
# self.predictor = workspace.Predictor(self.init_net, self.predict_net)
# inputs = [np.zeros((1, 3, 256, 256), dtype='f')]
# outputs = self.predictor.run(inputs)
# self.assertEqual(len(outputs), 1)
# self.assertEqual(outputs[0].shape, (1, 1000, 1, 1))
# self.assertAlmostEqual(outputs[0][0][0][0][0], 5.19026289e-05)
def test_predictor_memory_model(self):
workspace.ResetWorkspace()
m = self._create_model()
workspace.FeedBlob("data", np.zeros([4], dtype='float32'))
self.predictor = workspace.Predictor(
workspace.StringifyProto(m.param_init_net.Proto()),
workspace.StringifyProto(m.net.Proto()))
inputs = np.array([1, 3, 256, 256], dtype='float32')
outputs = self.predictor.run([inputs])
np.testing.assert_array_almost_equal(np.array([[516, 516]], dtype='float32'), outputs)
if __name__ == '__main__':
unittest.main()
|
## @package experiment_util
# Module caffe2.python.experiment_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import datetime
import time
import logging
import socket
import abc
import six
from collections import OrderedDict
'''
Utilities for logging experiment run stats, such as accuracy
and loss over time for different runs. Runtime arguments are stored
in the log.
Optionally, ModelTrainerLog calls out to a logger to log to
an external log destination.
'''
@six.add_metaclass(abc.ABCMeta)
class ExternalLogger(object):
@abc.abstractmethod
def set_runtime_args(self, runtime_args):
"""
Set runtime arguments for the logger.
runtime_args: dict of runtime arguments.
"""
raise NotImplementedError(
'Must define set_runtime_args function to use this base class'
)
@abc.abstractmethod
def log(self, log_dict):
"""
log a dict of key/values to an external destination
log_dict: input dict
"""
raise NotImplementedError(
'Must define log function to use this base class'
)
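# A minimal concrete logger sketch (hypothetical class, not part of this
# module): subclass ExternalLogger and implement both abstract methods, e.g.
# to append each log_dict as a line of JSON.
#
#     import json
#
#     class JsonFileLogger(ExternalLogger):
#         def __init__(self, path):
#             self.path = path
#
#         def set_runtime_args(self, runtime_args):
#             self.runtime_args = runtime_args
#
#         def log(self, log_dict):
#             with open(self.path, 'a') as f:
#                 f.write(json.dumps(dict(log_dict)) + '\n')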
class ModelTrainerLog():
def __init__(self, expname, runtime_args, external_loggers=None):
now = datetime.datetime.fromtimestamp(time.time())
self.experiment_id = \
"{}_{}".format(expname, now.strftime('%Y%m%d_%H%M%S'))
self.filename = "{}.log".format(self.experiment_id)
self.logstr("# %s" % str(runtime_args))
self.headers = None
self.start_time = time.time()
self.last_time = self.start_time
self.last_input_count = 0
self.external_loggers = None
if external_loggers is not None:
self.external_loggers = external_loggers
if not isinstance(runtime_args, dict):
runtime_args = dict(vars(runtime_args))
runtime_args['experiment_id'] = self.experiment_id
runtime_args['hostname'] = socket.gethostname()
for logger in self.external_loggers:
logger.set_runtime_args(runtime_args)
else:
self.external_loggers = []
    def logstr(self, message):
        # the "with" block closes the file; an explicit close() is redundant
        with open(self.filename, "a") as f:
            f.write(message + "\n")
        logging.getLogger("experiment_logger").info(message)
def log(self, input_count, batch_count, additional_values):
logdict = OrderedDict()
delta_t = time.time() - self.last_time
delta_count = input_count - self.last_input_count
self.last_time = time.time()
self.last_input_count = input_count
logdict['time_spent'] = delta_t
logdict['cumulative_time_spent'] = time.time() - self.start_time
logdict['input_count'] = delta_count
logdict['cumulative_input_count'] = input_count
logdict['cumulative_batch_count'] = batch_count
if delta_t > 0:
logdict['inputs_per_sec'] = delta_count / delta_t
else:
logdict['inputs_per_sec'] = 0.0
for k in sorted(additional_values.keys()):
logdict[k] = additional_values[k]
# Write the headers if they are not written yet
if self.headers is None:
self.headers = logdict.keys()[:]
self.logstr(",".join(self.headers))
self.logstr(",".join([str(v) for v in logdict.values()]))
for logger in self.external_loggers:
try:
logger.log(logdict)
except Exception as e:
                logging.warn(
                    "Failed to call ExternalLogger: {}".format(e))
|
## @package session
# Module caffe2.python.session
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.task import Cluster, Task, TaskGroup, WorkspaceType
class CompiledRunnable(object):
""" Wrapper for compiled runnable returned from session.compile() """
def __init__(self, obj, session_class):
self.obj = obj
self.session_class = session_class
class Session(object):
"""
Allows to run Nets, ExecutionSteps, Plans, Tasks and TaskGroups.
A session can potentially run in multiple nodes concurrently.
Example:
from core import Net
from caffe2.python.task import Task, TaskGroup, WorkspaceType
net = Net('test1')
net.Add([net.Const(1), net.Const(2)])
net2 = net.Clone()
step = core.execution_step('step1', [net2])
with TaskGroup(WorkspaceType.GLOBAL) as init_tg:
with Node('node1'):
n1setup = net.Net('n1setup')
n1msg = n1setup.Const('Hello from node 1.')
Task(step=n1setup)
with TaskGroup() as private_tg:
with Node('node1'):
n1 = net.Net('n1')
n1.Print(n1msg, 0)
Task(step=n1)
with Node('node2'):
n2 = net.Net('n2')
n2.Print(n2.Const('Hello from node 2.'), 0)
Task(step=n2)
session = LocalSession()
session.run(net)
session.run(step)
session.run(init_tg)
session.run(private_tg)
Global Workspace:
    At the beginning of the session, a global workspace is created and kept
alive for the duration of the session.
Private Workspace:
Tasks can be run either directly on the global workspace, or they can
instantiate a private child workspace that is released after each run.
Blob visibility:
Tasks running in different nodes in parallel will always run under
different workspaces, so it must be assumed that they won't be able to
access each other's blobs. On the other hand, tasks running on the same
node are guaranteed to run on the same workspace within a run.
"""
_compiled_cache = {}
def __init__(self):
self._open = True
def is_open(self):
return self._open
@classmethod
def compile(cls, runnable):
if isinstance(runnable, CompiledRunnable):
assert cls == runnable.session_class, (
'Runnable was compiled for different session type. ' +
'Need: %s, got: %s' % (
cls.__name__, runnable.session_class.__name__))
return runnable
if runnable in cls._compiled_cache:
return cls._compiled_cache[runnable]
if isinstance(runnable, TaskGroup):
tg = runnable
else:
tg = TaskGroup(workspace_type=WorkspaceType.GLOBAL)
if isinstance(runnable, Task):
tg.add(runnable)
elif isinstance(runnable, core.ExecutionStep):
tg.add(Task(step=runnable))
else:
step = core.execution_step('runnable', runnable)
tg.add(Task(step=step))
compiled = CompiledRunnable(
cls._compile_task_group(tg), session_class=cls)
cls._compiled_cache[runnable] = compiled
return compiled
def run(self, runnable):
assert self.is_open(), 'Session is closed.'
self._run_compiled(self.compile(runnable).obj)
def close(self):
if self.is_open():
self._do_close()
self._open = False
def fetch_output(self, output):
raise NotImplementedError()
def _run_compiled(self, task_group):
raise NotImplementedError()
@classmethod
def _compile_task_group(cls, task_group):
return task_group
def _do_close(self):
pass
def __enter__(self):
assert self._open, 'Session already closed.'
return self
def __exit__(self, ex_type, value, traceback):
if ex_type is None:
self.close()
class LocalSession(Session):
"""
Session that runs in a single node.
Tasks are all remapped to run in parallel in the 'local' node.
Currently, LocalSession runs all parallel tasks in the same workspace,
but this behavior may change in the future. Only tasks pointing to the
same logical node are guaranteed to always run in the same workspace.
"""
def __init__(self, ws=None):
Session.__init__(self)
self._ws = ws or workspace.C.Workspace.current
@classmethod
def _compile_task_group(cls, task_group):
with Cluster():
task = task_group.to_task()
plan = core.Plan('task_group_plan')
plan.AddStep(task.get_step())
return (plan, task.output_list(), task.workspace_type)
def _run_compiled(self, compiled):
plan, output_list, workspace_type = compiled
# make sure the output blobs belong to the parent workspace
outputs = []
for name in output_list.names():
self._ws.create_blob(str(name))
outputs.append(core.BlobReference(str(name)))
output_list.set_values(outputs, _fetch_func=self._fetch_output)
task_ws = (
workspace.C.Workspace(self._ws)
if workspace_type == WorkspaceType.PRIVATE else self._ws)
with workspace.WorkspaceGuard(task_ws):
task_ws.run(plan)
def _fetch_output(self, output):
return self._ws.blobs[str(output)].fetch()
|
## @package record_queue
# Module caffe2.python.record_queue
"""
Implementation of a queue wrapper.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.dataio import Reader, Writer
from caffe2.python.schema import (
Struct, Field, from_column_list)
class _QueueReader(Reader):
def __init__(self, blobs_queue, schema, name=None):
"""Don't call this directly. Instead, use dataset.reader()"""
super(_QueueReader, self).__init__(schema)
self.blobs_queue = blobs_queue
self.name = name
def read(self, read_net):
with core.NameScope(read_net.NextName(self.name)):
status = read_net.NextName()
fields = read_net.SafeDequeueBlobs(
self.blobs_queue, self._schema.field_names() + [status])
return (fields[-1], fields[:-1])
class _QueueWriter(Writer):
def __init__(self, blobs_queue, schema):
self.blobs_queue = blobs_queue
self.schema = schema
def write(self, writer_net, fields):
if isinstance(fields, Field):
fields = fields.field_blobs()
writer_net.CheckDatasetConsistency(
fields, [], fields=self.schema.field_names())
status = writer_net.NextName()
writer_net.SafeEnqueueBlobs(
[self.blobs_queue] + fields, fields + [status])
return status
class RecordQueue(object):
""" The class is used to feed data with some process from a reader into a
queue and provider a reader interface for data fetching from the queue.
"""
def __init__(self, fields, name=None, capacity=1,
enforce_unique_name=False, num_threads=1):
assert isinstance(fields, list) or isinstance(fields, Struct), (
'fields must be either a Struct or a list of raw field names.')
if isinstance(fields, list):
fields = from_column_list(fields)
self.schema = fields
self.name = name or 'queue'
self.num_threads = num_threads
num_blobs = len(self.schema.field_names())
init_net = core.Net(self.name + '/init_net')
self.blobs_queue = init_net.CreateBlobsQueue(
[], 1,
capacity=capacity,
num_blobs=num_blobs,
enforce_unique_name=enforce_unique_name)
core.workspace.RunNetOnce(init_net)
self.writer = _QueueWriter(self.blobs_queue, self.schema)
reader_name = self.name + '_reader'
self.reader = _QueueReader(self.blobs_queue, self.schema, reader_name)
exit_net = core.Net(self.name + '/exit_net')
exit_net.CloseBlobsQueue(self.blobs_queue, 0)
self.exit_step = core.execution_step(
'{}_close_step'.format(str(exit_net)),
exit_net)
def build(self, reader, process=None):
"""
Build the producer_step to feed data from reader into the queue, and
return the reader interface.
Inputs:
reader: read data which will be stored in the queue.
process: preprocess data before enqueue.
Outputs:
reader: reader to fetch the data from the queue.
            producer_step: the step that inserts the data into the queue.
                Should be run together with the consumer step.
            exit_step: the step to close the queue.
schema: the schema for the reader.
"""
producer_steps = []
for i in range(self.num_threads):
name = 'reader_' + str(i)
net_reader = core.Net(name)
should_stop, fields = reader.read_record(net_reader)
step_read = core.execution_step(name, net_reader)
name = 'queue_writer' + str(i)
net_prod = core.Net(name)
field_blobs = fields.field_blobs()
if process:
field_blobs = process(net_prod, fields).field_blobs()
self.writer.write(net_prod, field_blobs)
step_prod = core.execution_step(name, net_prod)
step = core.execution_step(
'producer_' + str(i),
[step_read, step_prod],
should_stop_blob=should_stop)
producer_steps.append(step)
producer_step = core.execution_step(
'producers',
producer_steps,
concurrent_substeps=True)
return self.reader, producer_step, self.exit_step, self.schema
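# A minimal usage sketch ('some_reader' is a hypothetical dataio.Reader):
# wire a RecordQueue between a data reader and a consumer.
#
#     rq = RecordQueue(fields=['data', 'label'], capacity=8, num_threads=2)
#     reader, producer_step, exit_step, schema = rq.build(some_reader)
#     # Run producer_step concurrently with the consuming step, then run
#     # exit_step to close the queue and unblock pending dequeues.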
|
## @package layer_model_instantiator
# Module caffe2.python.layer_model_instantiator
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import InstantiationContext
from caffe2.python.layers.tags import Tags
def _filter_layers(layers, include_tags):
if include_tags is None:
return layers
include_tags = set(include_tags)
return filter(lambda l: not include_tags.isdisjoint(l.tags), layers)
def shrink_output_schema(net, out_schema):
if len(out_schema.field_names()) <= 1:
return out_schema
exists = [net.BlobIsDefined(blob) for blob in out_schema.field_blobs()]
return schema.from_column_list(
[
col_name for ok, col_name in
zip(exists, out_schema.field_names()) if ok
],
[
col_type for ok, col_type in
zip(exists, out_schema.field_types()) if ok
],
[
col_blob for ok, col_blob in
zip(exists, out_schema.field_blobs()) if ok
],
[
col_meta for ok, col_meta in
zip(exists, out_schema.field_metadata()) if ok
]
)
def generate_predict_net(model, include_tags=None):
predict_net = core.Net('predict_net')
for layer in _filter_layers(model.layers, include_tags):
if Tags.EXCLUDE_FROM_PREDICTION not in layer.tags:
layer.add_operators(
predict_net, context=InstantiationContext.PREDICTION)
predict_net.set_input_record(model.input_feature_schema.clone())
output_schema = shrink_output_schema(
predict_net, model.output_schema.clone()
)
predict_net.set_output_record(output_schema)
return predict_net
def generate_eval_net(model, include_tags=None):
eval_net = core.Net('eval_net')
for layer in _filter_layers(model.layers, include_tags):
if Tags.EXCLUDE_FROM_EVAL not in layer.tags:
layer.add_operators(eval_net, context=InstantiationContext.EVAL)
input_schema = model.input_feature_schema + model.trainer_extra_schema
eval_net.set_input_record(input_schema)
output_schema = shrink_output_schema(
eval_net, model.output_schema + model.metrics_schema
)
eval_net.set_output_record(output_schema)
return eval_net
def _generate_training_net_only(model, include_tags=None):
train_net = core.Net('train_net')
train_init_net = model.create_init_net('train_init_net')
for layer in _filter_layers(model.layers, include_tags):
if Tags.EXCLUDE_FROM_TRAIN not in layer.tags:
layer.add_operators(train_net, train_init_net)
input_schema = model.input_feature_schema + model.trainer_extra_schema
train_net.set_input_record(input_schema)
output_schema = shrink_output_schema(
train_net, model.output_schema + model.metrics_schema
)
train_net.set_output_record(output_schema)
return train_init_net, train_net
def generate_training_nets_forward_only(model, include_tags=None):
train_init_net, train_net = _generate_training_net_only(model, include_tags)
return train_init_net, train_net
def generate_training_nets(model, include_tags=None):
train_init_net, train_net = _generate_training_net_only(model, include_tags)
loss = model.loss
grad_map = train_net.AddGradientOperators(loss.field_blobs())
model.apply_optimizers(train_net, train_init_net, grad_map)
return train_init_net, train_net
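# A minimal usage sketch ('model' is a hypothetical layers-based model with a
# loss): instantiate the three kinds of nets from one model.
#
#     train_init_net, train_net = generate_training_nets(model)
#     eval_net = generate_eval_net(model)
#     predict_net = generate_predict_net(model)
#     # After workspace.RunNetOnce(train_init_net), repeated
#     # workspace.RunNet(train_net) performs training; predict_net contains
#     # only the layers not tagged EXCLUDE_FROM_PREDICTION.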
|
## @package rnn_cell
# Module caffe2.python.rnn_cell
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import random
import functools
from caffe2.python.attention import (
AttentionType,
apply_regular_attention,
apply_recurrent_attention,
)
from caffe2.python import core, recurrent, workspace, brew
from caffe2.python.model_helper import ModelHelper
class RNNCell(object):
'''
Base class for writing recurrent / stateful operations.
One needs to implement 3 methods: _apply, prepare_input and get_state_names.
    In return, the base class provides the apply_over_sequence method, which
    allows you to apply recurrent operations over a sequence of any length.
'''
def __init__(self, name, forward_only=False):
self.name = name
self.recompute_blobs = []
self.forward_only = forward_only
def scope(self, name):
return self.name + '/' + name if self.name is not None else name
def apply_over_sequence(
self,
model,
inputs,
seq_lengths,
initial_states,
outputs_with_grads=None,
):
preprocessed_inputs = self.prepare_input(model, inputs)
step_model = ModelHelper(name=self.name, param_model=model)
input_t, timestep = step_model.net.AddScopedExternalInputs(
'input_t',
'timestep',
)
states_prev = step_model.net.AddScopedExternalInputs(*[
s + '_prev' for s in self.get_state_names()
])
states = self._apply(
model=step_model,
input_t=input_t,
seq_lengths=seq_lengths,
states=states_prev,
timestep=timestep,
)
if outputs_with_grads is None:
outputs_with_grads = [self.get_output_state_index() * 2]
# states_for_all_steps consists of combination of
# states gather for all steps and final states. It looks like this:
# (state_1_all, state_1_final, state_2_all, state_2_final, ...)
states_for_all_steps = recurrent.recurrent_net(
net=model.net,
cell_net=step_model.net,
inputs=[(input_t, preprocessed_inputs)],
initial_cell_inputs=zip(states_prev, initial_states),
links=dict(zip(states_prev, states)),
timestep=timestep,
scope=self.name,
outputs_with_grads=outputs_with_grads,
recompute_blobs_on_backward=self.recompute_blobs,
)
output = self._prepare_output_sequence(
model,
states_for_all_steps,
)
return output, states_for_all_steps
def apply(self, model, input_t, seq_lengths, states, timestep):
input_t = self.prepare_input(model, input_t)
states = self._apply(
model, input_t, seq_lengths, states, timestep)
output = self._prepare_output(model, states)
return output, states
def _apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
extra_inputs,
):
'''
A single step of a recurrent network.
model: ModelHelper object new operators would be added to
input_t: single input with shape (1, batch_size, input_dim)
seq_lengths: blob containing sequence lengths which would be passed to
LSTMUnit operator
states: previous recurrent states
timestep: current recurrent iteration. Could be used together with
seq_lengths in order to determine, if some shorter sequences
in the batch have already ended.
extra_inputs: list of tuples (input, dim). specifies additional input
which is not subject to prepare_input(). (useful when a cell is a
component of a larger recurrent structure, e.g., attention)
'''
raise NotImplementedError('Abstract method')
def prepare_input(self, model, input_blob):
'''
If some operations in _apply method depend only on the input,
not on recurrent states, they could be computed in advance.
model: ModelHelper object new operators would be added to
input_blob: either the whole input sequence with shape
(sequence_length, batch_size, input_dim) or a single input with shape
(1, batch_size, input_dim).
'''
return input_blob
def get_output_state_index(self):
'''
Return index into state list of the "primary" step-wise output.
'''
return 0
def get_state_names(self):
'''
Return the names of the recurrent states.
It's required by apply_over_sequence method in order to allocate
recurrent states for all steps with meaningful names.
'''
raise NotImplementedError('Abstract method')
def get_output_dim(self):
'''
Specifies the dimension (number of units) of stepwise output.
'''
raise NotImplementedError('Abstract method')
def _prepare_output(self, model, states):
'''
Allows arbitrary post-processing of primary output.
'''
return states[self.get_output_state_index()]
def _prepare_output_sequence(self, model, state_outputs):
'''
Allows arbitrary post-processing of primary sequence output.
(Note that state_outputs alternates between full-sequence and final
output for each state, thus the index multiplier 2.)
'''
output_sequence_index = 2 * self.get_output_state_index()
return state_outputs[output_sequence_index]
class LSTMCell(RNNCell):
def __init__(
self,
input_size,
hidden_size,
forget_bias,
memory_optimization,
drop_states=False,
**kwargs
):
super(LSTMCell, self).__init__(**kwargs)
self.input_size = input_size
self.hidden_size = hidden_size
self.forget_bias = float(forget_bias)
self.memory_optimization = memory_optimization
self.drop_states = drop_states
def _apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
extra_inputs=None,
):
hidden_t_prev, cell_t_prev = states
fc_input = hidden_t_prev
fc_input_dim = self.hidden_size
if extra_inputs is not None:
extra_input_blobs, extra_input_sizes = zip(*extra_inputs)
fc_input, _ = model.net.Concat(
[hidden_t_prev] + list(extra_input_blobs),
[
self.scope('gates_concatenated_input_t'),
self.scope('_gates_concatenated_input_t_concat_dims'),
],
axis=2,
)
fc_input_dim += sum(extra_input_sizes)
gates_t = brew.fc(
model,
fc_input,
self.scope('gates_t'),
dim_in=fc_input_dim,
dim_out=4 * self.hidden_size,
axis=2,
)
model.net.Sum([gates_t, input_t], gates_t)
hidden_t, cell_t = model.net.LSTMUnit(
[
hidden_t_prev,
cell_t_prev,
gates_t,
seq_lengths,
timestep,
],
list(self.get_state_names()),
forget_bias=self.forget_bias,
drop_states=self.drop_states,
)
model.net.AddExternalOutputs(hidden_t, cell_t)
if self.memory_optimization:
self.recompute_blobs = [gates_t]
return hidden_t, cell_t
def get_input_params(self):
return {
'weights': self.scope('i2h') + '_w',
'biases': self.scope('i2h') + '_b',
}
def get_recurrent_params(self):
return {
'weights': self.scope('gates_t') + '_w',
'biases': self.scope('gates_t') + '_b',
}
def prepare_input(self, model, input_blob):
return brew.fc(
model,
input_blob,
self.scope('i2h'),
dim_in=self.input_size,
dim_out=4 * self.hidden_size,
axis=2,
)
def get_state_names(self):
return (self.scope('hidden_t'), self.scope('cell_t'))
def get_output_dim(self):
return self.hidden_size
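# A minimal usage sketch (hypothetical dimensions and blob names): unroll an
# LSTMCell over a whole input sequence with apply_over_sequence.
#
#     cell = LSTMCell(input_size=64, hidden_size=128, forget_bias=0.0,
#                     memory_optimization=False, name='lstm')
#     output, states = cell.apply_over_sequence(
#         model,
#         inputs='input_seq',        # shape (T, batch_size, 64)
#         seq_lengths='seq_lengths',
#         initial_states=['h_init', 'c_init'])
#     # 'output' is the full hidden-state sequence; 'states' alternates
#     # (all-steps, final) pairs per state, as noted in apply_over_sequence.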
class MILSTMCell(LSTMCell):
def _apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
extra_inputs=None,
):
hidden_t_prev, cell_t_prev = states
fc_input = hidden_t_prev
fc_input_dim = self.hidden_size
if extra_inputs is not None:
extra_input_blobs, extra_input_sizes = zip(*extra_inputs)
fc_input, _ = model.net.Concat(
[hidden_t_prev] + list(extra_input_blobs),
[
self.scope('gates_concatenated_input_t'),
self.scope('_gates_concatenated_input_t_concat_dims'),
],
axis=2,
)
fc_input_dim += sum(extra_input_sizes)
prev_t = brew.fc(
model,
fc_input,
self.scope('prev_t'),
dim_in=fc_input_dim,
dim_out=4 * self.hidden_size,
axis=2,
)
# defining MI parameters
alpha = model.param_init_net.ConstantFill(
[],
[self.scope('alpha')],
shape=[4 * self.hidden_size],
value=1.0,
)
beta_h = model.param_init_net.ConstantFill(
[],
[self.scope('beta1')],
shape=[4 * self.hidden_size],
value=1.0,
)
beta_i = model.param_init_net.ConstantFill(
[],
[self.scope('beta2')],
shape=[4 * self.hidden_size],
value=1.0,
)
b = model.param_init_net.ConstantFill(
[],
[self.scope('b')],
shape=[4 * self.hidden_size],
value=0.0,
)
model.params.extend([alpha, beta_h, beta_i, b])
# alpha * input_t + beta_h
# Shape: [1, batch_size, 4 * hidden_size]
alpha_by_input_t_plus_beta_h = model.net.ElementwiseLinear(
[input_t, alpha, beta_h],
self.scope('alpha_by_input_t_plus_beta_h'),
axis=2,
)
# (alpha * input_t + beta_h) * prev_t =
# alpha * input_t * prev_t + beta_h * prev_t
# Shape: [1, batch_size, 4 * hidden_size]
alpha_by_input_t_plus_beta_h_by_prev_t = model.net.Mul(
[alpha_by_input_t_plus_beta_h, prev_t],
self.scope('alpha_by_input_t_plus_beta_h_by_prev_t')
)
# beta_i * input_t + b
# Shape: [1, batch_size, 4 * hidden_size]
beta_i_by_input_t_plus_b = model.net.ElementwiseLinear(
[input_t, beta_i, b],
self.scope('beta_i_by_input_t_plus_b'),
axis=2,
)
# alpha * input_t * prev_t + beta_h * prev_t + beta_i * input_t + b
# Shape: [1, batch_size, 4 * hidden_size]
gates_t = model.net.Sum(
[alpha_by_input_t_plus_beta_h_by_prev_t, beta_i_by_input_t_plus_b],
self.scope('gates_t')
)
hidden_t, cell_t = model.net.LSTMUnit(
[hidden_t_prev, cell_t_prev, gates_t, seq_lengths, timestep],
[self.scope('hidden_t_intermediate'), self.scope('cell_t')],
forget_bias=self.forget_bias,
drop_states=self.drop_states,
)
model.net.AddExternalOutputs(
cell_t,
hidden_t,
)
if self.memory_optimization:
self.recompute_blobs = [gates_t]
return hidden_t, cell_t
class DropoutCell(RNNCell):
'''
Wraps arbitrary RNNCell, applying dropout to its output (but not to the
recurrent connection for the corresponding state).
'''
def __init__(self, internal_cell, dropout_ratio=None, **kwargs):
self.internal_cell = internal_cell
self.dropout_ratio = dropout_ratio
super(DropoutCell, self).__init__(**kwargs)
self.prepare_input = internal_cell.prepare_input
self.get_output_state_index = internal_cell.get_output_state_index
self.get_state_names = internal_cell.get_state_names
self.get_output_dim = internal_cell.get_output_dim
def _apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
extra_inputs=None,
):
return self.internal_cell._apply(
model,
input_t,
seq_lengths,
states,
timestep,
extra_inputs,
)
def _prepare_output(self, model, states):
output = states[self.get_output_state_index()]
if self.dropout_ratio is not None:
output = self._apply_dropout(model, output)
return output
def _prepare_output_sequence(self, model, state_outputs):
output_sequence_index = 2 * self.get_output_state_index()
output = state_outputs[output_sequence_index]
if self.dropout_ratio is not None:
output = self._apply_dropout(model, output)
return output
def _apply_dropout(self, model, output):
if self.dropout_ratio and not self.forward_only:
with core.NameScope(self.name or ''):
output, _ = model.net.Dropout(
output,
[
str(output) + '_with_dropout',
str(output) + '_dropout_mask',
],
ratio=float(self.dropout_ratio),
)
return output
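# A minimal usage sketch (hypothetical parameters): apply dropout to the
# output of an LSTMCell without touching its recurrent connection.
#
#     inner = LSTMCell(input_size=64, hidden_size=128, forget_bias=0.0,
#                      memory_optimization=False, name='lstm')
#     cell = DropoutCell(inner, dropout_ratio=0.5, name='lstm_dropout')
#     # When constructed with forward_only=True (inference), the dropout
#     # operator is skipped entirely.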
class MultiRNNCell(RNNCell):
'''
    Multilayer RNN via the composition of RNNCell instances.
    It is the responsibility of calling code to ensure the compatibility
    of the successive layers in terms of input/output dimensionality, etc.,
and to ensure that their blobs do not have name conflicts, typically by
creating the cells with names that specify layer number.
Assumes first state (recurrent output) for each layer should be the input
to the next layer.
'''
def __init__(self, cells, residual_output_layers=None, **kwargs):
'''
cells: list of RNNCell instances, from input to output side.
name: string designating network component (for scoping)
residual_output_layers: list of indices of layers whose input will
        be added elementwise to their output. (It is the
responsibility of the client code to ensure shape compatibility.)
Note that layer 0 (zero) cannot have residual output because of the
timing of prepare_input().
forward_only: used to construct inference-only network.
'''
super(MultiRNNCell, self).__init__(**kwargs)
self.cells = cells
if residual_output_layers is None:
self.residual_output_layers = []
else:
self.residual_output_layers = residual_output_layers
self.state_names = []
for cell in self.cells:
self.state_names.extend(cell.get_state_names())
if len(self.state_names) != len(set(self.state_names)):
duplicates = {
state_name for state_name in self.state_names
if self.state_names.count(state_name) > 1
}
raise RuntimeError(
'Duplicate state names in MultiRNNCell: {}'.format(
list(duplicates),
),
)
def prepare_input(self, model, input_blob):
return self.cells[0].prepare_input(model, input_blob)
def _apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
extra_inputs=None,
):
states_per_layer = [len(cell.get_state_names()) for cell in self.cells]
assert len(states) == sum(states_per_layer)
next_states = []
states_index = 0
layer_input = input_t
for i, layer_cell in enumerate(self.cells):
num_states = states_per_layer[i]
layer_states = states[states_index:(states_index + num_states)]
states_index += num_states
if i > 0:
prepared_input = layer_cell.prepare_input(model, layer_input)
else:
prepared_input = layer_input
layer_next_states = layer_cell._apply(
model,
prepared_input,
seq_lengths,
layer_states,
timestep,
extra_inputs=(None if i > 0 else extra_inputs),
)
# Since we're using here non-public method _apply, instead of apply,
# we have to manually extract output from states
if i != len(self.cells) - 1:
layer_output = layer_cell._prepare_output(
model,
layer_next_states,
)
if i > 0 and i in self.residual_output_layers:
layer_input = model.net.Sum(
[layer_output, layer_input],
[layer_output],
)
else:
layer_input = layer_output
next_states.extend(layer_next_states)
return next_states
def get_state_names(self):
return self.state_names
def get_output_state_index(self):
index = 0
for cell in self.cells[:-1]:
index += len(cell.get_state_names())
index += self.cells[-1].get_output_state_index()
return index
def _prepare_output(self, model, states):
output = self.cells[-1]._prepare_output(
model,
states[-len(self.cells[-1].get_state_names()):],
)
if (len(self.cells) - 1) in self.residual_output_layers:
last_layer_input_index = 0
for cell in self.cells[:-2]:
last_layer_input_index += len(cell.get_state_names())
last_layer_input_index += self.cells[-2].get_output_state_index()
last_layer_input = states[last_layer_input_index]
output = model.net.Sum(
[output, last_layer_input],
[self.scope('residual_output')],
)
return output
def _prepare_output_sequence(self, model, states):
output = self.cells[-1]._prepare_output_sequence(
model,
states[-(2 * len(self.cells[-1].get_state_names())):],
)
if (len(self.cells) - 1) in self.residual_output_layers:
last_layer_input_index = 0
for cell in self.cells[:-2]:
last_layer_input_index += 2 * len(cell.get_state_names())
last_layer_input_index += (
2 * self.cells[-2].get_output_state_index()
)
last_layer_input = states[last_layer_input_index]
output = model.net.Sum(
[output, last_layer_input],
[self.scope('residual_output_sequence')],
)
return output
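# Illustrative sketch (an assumption-laden example, not from the original
# source): stacking two LSTM layers with MultiRNNCell and adding a residual
# connection into layer 1. Residual layers require matching input/output
# dimensions, and layer 0 can never be residual (see the docstring above).
def _example_multi_rnn_cell():
    cells = [
        LSTMCell(input_size=8, hidden_size=16, forget_bias=0.0,
                 memory_optimization=False, name='layer_0',
                 forward_only=False, drop_states=False),
        LSTMCell(input_size=16, hidden_size=16, forget_bias=0.0,
                 memory_optimization=False, name='layer_1',
                 forward_only=False, drop_states=False),
    ]
    return MultiRNNCell(
        cells,
        residual_output_layers=[1],
        name='stacked_lstm',
        forward_only=False,
    )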
class AttentionCell(RNNCell):
def __init__(
self,
encoder_output_dim,
encoder_outputs,
decoder_cell,
decoder_state_dim,
attention_type,
weighted_encoder_outputs,
attention_memory_optimization,
**kwargs
):
super(AttentionCell, self).__init__(**kwargs)
self.encoder_output_dim = encoder_output_dim
self.encoder_outputs = encoder_outputs
self.decoder_cell = decoder_cell
self.decoder_state_dim = decoder_state_dim
self.weighted_encoder_outputs = weighted_encoder_outputs
self.encoder_outputs_transposed = None
assert attention_type in [
AttentionType.Regular,
AttentionType.Recurrent,
]
self.attention_type = attention_type
self.attention_memory_optimization = attention_memory_optimization
def _apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
extra_inputs=None,
):
decoder_prev_states = states[:-1]
attention_weighted_encoder_context_t_prev = states[-1]
assert extra_inputs is None
decoder_states = self.decoder_cell._apply(
model,
input_t,
seq_lengths,
decoder_prev_states,
timestep,
extra_inputs=[(
attention_weighted_encoder_context_t_prev,
self.encoder_output_dim,
)],
)
self.hidden_t_intermediate = self.decoder_cell._prepare_output(
model,
decoder_states,
)
if self.attention_type == AttentionType.Recurrent:
(
attention_weighted_encoder_context_t,
self.attention_weights_3d,
attention_blobs,
) = apply_recurrent_attention(
model=model,
encoder_output_dim=self.encoder_output_dim,
encoder_outputs_transposed=self.encoder_outputs_transposed,
weighted_encoder_outputs=self.weighted_encoder_outputs,
decoder_hidden_state_t=self.hidden_t_intermediate,
decoder_hidden_state_dim=self.decoder_state_dim,
scope=self.name,
attention_weighted_encoder_context_t_prev=(
attention_weighted_encoder_context_t_prev
),
)
else:
(
attention_weighted_encoder_context_t,
self.attention_weights_3d,
attention_blobs,
) = apply_regular_attention(
model=model,
encoder_output_dim=self.encoder_output_dim,
encoder_outputs_transposed=self.encoder_outputs_transposed,
weighted_encoder_outputs=self.weighted_encoder_outputs,
decoder_hidden_state_t=self.hidden_t_intermediate,
decoder_hidden_state_dim=self.decoder_state_dim,
scope=self.name,
)
if self.attention_memory_optimization:
self.recompute_blobs.extend(attention_blobs)
output = list(decoder_states) + [attention_weighted_encoder_context_t]
output[self.decoder_cell.get_output_state_index()] = model.Copy(
output[self.decoder_cell.get_output_state_index()],
self.scope('hidden_t_external'),
)
model.net.AddExternalOutputs(*output)
return output
def get_attention_weights(self):
# [batch_size, encoder_length, 1]
return self.attention_weights_3d
def prepare_input(self, model, input_blob):
if self.encoder_outputs_transposed is None:
self.encoder_outputs_transposed = model.Transpose(
self.encoder_outputs,
self.scope('encoder_outputs_transposed'),
axes=[1, 2, 0],
)
if self.weighted_encoder_outputs is None:
self.weighted_encoder_outputs = brew.fc(
model,
self.encoder_outputs,
self.scope('weighted_encoder_outputs'),
dim_in=self.encoder_output_dim,
dim_out=self.encoder_output_dim,
axis=2,
)
return self.decoder_cell.prepare_input(model, input_blob)
def get_state_names(self):
state_names = list(self.decoder_cell.get_state_names())
state_names[self.get_output_state_index()] = self.scope(
'hidden_t_external',
)
state_names.append(self.scope('attention_weighted_encoder_context_t'))
return state_names
def get_output_dim(self):
return self.decoder_state_dim + self.encoder_output_dim
def get_output_state_index(self):
return self.decoder_cell.get_output_state_index()
def _prepare_output(self, model, states):
attention_context = states[-1]
with core.NameScope(self.name or ''):
output, _ = model.net.Concat(
[self.hidden_t_intermediate, attention_context],
[
'states_and_context_combination',
'_states_and_context_combination_concat_dims',
],
axis=2,
)
return output
def _prepare_output_sequence(self, model, state_outputs):
decoder_output = self.decoder_cell._prepare_output_sequence(
model,
state_outputs[:-2],
)
attention_context_index = 2 * (len(self.get_state_names()) - 1)
with core.NameScope(self.name or ''):
output, _ = model.net.Concat(
[
decoder_output,
state_outputs[attention_context_index],
],
[
'states_and_context_combination',
'_states_and_context_combination_concat_dims',
],
axis=2,
)
return output
class LSTMWithAttentionCell(AttentionCell):
def __init__(
self,
encoder_output_dim,
encoder_outputs,
decoder_input_dim,
decoder_state_dim,
name,
attention_type,
weighted_encoder_outputs,
forget_bias,
lstm_memory_optimization,
attention_memory_optimization,
forward_only=False,
):
decoder_cell = LSTMCell(
input_size=decoder_input_dim,
hidden_size=decoder_state_dim,
forget_bias=forget_bias,
memory_optimization=lstm_memory_optimization,
name='{}/decoder'.format(name),
forward_only=False,
drop_states=False,
)
super(LSTMWithAttentionCell, self).__init__(
encoder_output_dim=encoder_output_dim,
encoder_outputs=encoder_outputs,
decoder_cell=decoder_cell,
decoder_state_dim=decoder_state_dim,
name=name,
attention_type=attention_type,
weighted_encoder_outputs=weighted_encoder_outputs,
attention_memory_optimization=attention_memory_optimization,
forward_only=forward_only,
)
class MILSTMWithAttentionCell(AttentionCell):
def __init__(
self,
encoder_output_dim,
encoder_outputs,
decoder_input_dim,
decoder_state_dim,
name,
attention_type,
weighted_encoder_outputs,
forget_bias,
lstm_memory_optimization,
attention_memory_optimization,
forward_only=False,
):
decoder_cell = MILSTMCell(
input_size=decoder_input_dim,
hidden_size=decoder_state_dim,
forget_bias=forget_bias,
memory_optimization=lstm_memory_optimization,
name='{}/decoder'.format(name),
forward_only=False,
drop_states=False,
)
super(MILSTMWithAttentionCell, self).__init__(
encoder_output_dim=encoder_output_dim,
encoder_outputs=encoder_outputs,
decoder_cell=decoder_cell,
decoder_state_dim=decoder_state_dim,
name=name,
attention_type=attention_type,
weighted_encoder_outputs=weighted_encoder_outputs,
attention_memory_optimization=attention_memory_optimization,
forward_only=forward_only,
)
def _LSTM(
cell_class,
model,
input_blob,
seq_lengths,
initial_states,
dim_in,
dim_out,
scope,
outputs_with_grads=(0,),
return_params=False,
memory_optimization=False,
forget_bias=0.0,
forward_only=False,
drop_states=False,
return_last_layer_only=True,
):
'''
Adds a standard LSTM recurrent network operator to a model.
cell_class: LSTMCell or compatible subclass
model: ModelHelper object new operators would be added to
input_blob: the input sequence in a format T x N x D
where T is sequence size, N - batch size and D - input dimension
seq_lengths: blob containing sequence lengths which would be passed to
LSTMUnit operator
initial_states: a list of (2 * num_layers) blobs representing the initial
hidden and cell states of each layer. If this argument is None,
these states will be added to the model as network parameters.
dim_in: input dimension
dim_out: number of units per LSTM layer
(use int for single-layer LSTM, list of ints for multi-layer)
outputs_with_grads : position indices of output blobs for LAST LAYER which
will receive external error gradient during backpropagation.
These outputs are: (h_all, h_last, c_all, c_last)
return_params: if True, will return a dictionary of parameters of the LSTM
memory_optimization: if enabled, the LSTM step is recomputed on backward
step so that we don't need to store forward activations for each
timestep. Saves memory with cost of computation.
forget_bias: forget gate bias (default 0.0)
    forward_only: if True, create only the forward pass (no backward pass)
drop_states: drop invalid states, passed through to LSTMUnit operator
return_last_layer_only: only return outputs from final layer
        (so that the length of the result does not depend on the number of layers)
'''
if type(dim_out) is not list and type(dim_out) is not tuple:
dim_out = [dim_out]
num_layers = len(dim_out)
cells = []
for i in range(num_layers):
name = '{}/layer_{}'.format(scope, i) if num_layers > 1 else scope
cell = cell_class(
input_size=(dim_in if i == 0 else dim_out[i - 1]),
hidden_size=dim_out[i],
forget_bias=forget_bias,
memory_optimization=memory_optimization,
name=name,
forward_only=forward_only,
drop_states=drop_states,
)
cells.append(cell)
if num_layers > 1:
multicell = MultiRNNCell(
cells,
name=scope,
forward_only=forward_only,
)
else:
multicell = cells[0]
if initial_states is None:
initial_states = []
for i in range(num_layers):
with core.NameScope(scope):
suffix = '_{}'.format(i) if num_layers > 1 else ''
initial_hidden = model.param_init_net.ConstantFill(
[],
'initial_hidden_state' + suffix,
shape=[dim_out[i]],
value=0.0,
)
initial_cell = model.param_init_net.ConstantFill(
[],
'initial_cell_state' + suffix,
shape=[dim_out[i]],
value=0.0,
)
initial_states.extend([initial_hidden, initial_cell])
model.params.extend([initial_hidden, initial_cell])
assert len(initial_states) == 2 * num_layers, \
"Incorrect initial_states, was expecting 2 * num_layers elements" \
+ " but had only {}".format(len(initial_states))
# outputs_with_grads argument indexes into final layer
outputs_with_grads = [4 * (num_layers - 1) + i for i in outputs_with_grads]
_, result = multicell.apply_over_sequence(
model=model,
inputs=input_blob,
seq_lengths=seq_lengths,
initial_states=initial_states,
outputs_with_grads=outputs_with_grads,
)
if return_last_layer_only:
result = result[4 * (num_layers - 1):]
if return_params:
result = list(result) + [{
'input': cell.get_input_params(),
'recurrent': cell.get_recurrent_params(),
}]
return tuple(result)
LSTM = functools.partial(_LSTM, LSTMCell)
MILSTM = functools.partial(_LSTM, MILSTMCell)
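# Illustrative sketch of calling the LSTM helper defined above. A list-typed
# dim_out builds a two-layer stack; with return_last_layer_only=True (the
# default) the result is (h_all, h_last, c_all, c_last) for the final layer.
# ModelHelper and the input blob names are assumptions for this example.
def _example_lstm_helper():
    from caffe2.python.model_helper import ModelHelper
    model = ModelHelper(name='lstm_example')
    h_all, h_last, c_all, c_last = LSTM(
        model,
        input_blob='input_sequence',  # T x N x 8 blob, assumed to exist
        seq_lengths='seq_lengths',
        initial_states=None,          # zero-filled states become parameters
        dim_in=8,
        dim_out=[16, 16],
        scope='example_lstm',
    )
    return h_all, h_last, c_all, c_last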
def GetLSTMParamNames():
weight_params = ["input_gate_w", "forget_gate_w", "output_gate_w", "cell_w"]
bias_params = ["input_gate_b", "forget_gate_b", "output_gate_b", "cell_b"]
return {'weights': weight_params, 'biases': bias_params}
def InitFromLSTMParams(lstm_pblobs, param_values):
'''
Set the parameters of LSTM based on predefined values
'''
weight_params = GetLSTMParamNames()['weights']
bias_params = GetLSTMParamNames()['biases']
for input_type in param_values.keys():
weight_values = [param_values[input_type][w].flatten() for w in weight_params]
wmat = np.array([])
for w in weight_values:
wmat = np.append(wmat, w)
bias_values = [param_values[input_type][b].flatten() for b in bias_params]
bm = np.array([])
for b in bias_values:
bm = np.append(bm, b)
weights_blob = lstm_pblobs[input_type]['weights']
bias_blob = lstm_pblobs[input_type]['biases']
cur_weight = workspace.FetchBlob(weights_blob)
cur_biases = workspace.FetchBlob(bias_blob)
workspace.FeedBlob(
weights_blob,
wmat.reshape(cur_weight.shape).astype(np.float32))
workspace.FeedBlob(
bias_blob,
bm.reshape(cur_biases.shape).astype(np.float32))
def cudnn_LSTM(model, input_blob, initial_states, dim_in, dim_out,
scope, recurrent_params=None, input_params=None,
num_layers=1, return_params=False):
'''
CuDNN version of LSTM for GPUs.
input_blob Blob containing the input. Will need to be available
when param_init_net is run, because the sequence lengths
and batch sizes will be inferred from the size of this
blob.
initial_states tuple of (hidden_init, cell_init) blobs
dim_in input dimensions
dim_out output/hidden dimension
scope namescope to apply
recurrent_params dict of blobs containing values for recurrent
gate weights, biases (if None, use random init values)
See GetLSTMParamNames() for format.
input_params dict of blobs containing values for input
gate weights, biases (if None, use random init values)
See GetLSTMParamNames() for format.
num_layers number of LSTM layers
return_params if True, returns (param_extract_net, param_mapping)
where param_extract_net is a net that when run, will
populate the blobs specified in param_mapping with the
current gate weights and biases (input/recurrent).
Useful for assigning the values back to non-cuDNN
LSTM.
'''
with core.NameScope(scope):
weight_params = GetLSTMParamNames()['weights']
bias_params = GetLSTMParamNames()['biases']
input_weight_size = dim_out * dim_in
upper_layer_input_weight_size = dim_out * dim_out
recurrent_weight_size = dim_out * dim_out
input_bias_size = dim_out
recurrent_bias_size = dim_out
def init(layer, pname, input_type):
input_weight_size_for_layer = input_weight_size if layer == 0 else \
upper_layer_input_weight_size
if pname in weight_params:
sz = input_weight_size_for_layer if input_type == 'input' \
else recurrent_weight_size
elif pname in bias_params:
sz = input_bias_size if input_type == 'input' \
else recurrent_bias_size
else:
assert False, "unknown parameter type {}".format(pname)
return model.param_init_net.UniformFill(
[],
"lstm_init_{}_{}_{}".format(input_type, pname, layer),
shape=[sz])
# Multiply by 4 since we have 4 gates per LSTM unit
first_layer_sz = input_weight_size + recurrent_weight_size + \
input_bias_size + recurrent_bias_size
upper_layer_sz = upper_layer_input_weight_size + \
recurrent_weight_size + input_bias_size + \
recurrent_bias_size
total_sz = 4 * (first_layer_sz + (num_layers - 1) * upper_layer_sz)
weights = model.param_init_net.UniformFill(
[], "lstm_weight", shape=[total_sz])
model.params.append(weights)
model.weights.append(weights)
lstm_args = {
'hidden_size': dim_out,
'rnn_mode': 'lstm',
'bidirectional': 0, # TODO
'dropout': 1.0, # TODO
'input_mode': 'linear', # TODO
'num_layers': num_layers,
'engine': 'CUDNN'
}
param_extract_net = core.Net("lstm_param_extractor")
param_extract_net.AddExternalInputs([input_blob, weights])
param_extract_mapping = {}
# Populate the weights-blob from blobs containing parameters for
# the individual components of the LSTM, such as forget/input gate
        # weights and biases. Also, create a special param_extract_net that
        # can be used to grab those individual params from the black-box
        # weights blob. These results can then be fed to InitFromLSTMParams()
for input_type in ['input', 'recurrent']:
param_extract_mapping[input_type] = {}
p = recurrent_params if input_type == 'recurrent' else input_params
if p is None:
p = {}
for pname in weight_params + bias_params:
for j in range(0, num_layers):
values = p[pname] if pname in p else init(j, pname, input_type)
model.param_init_net.RecurrentParamSet(
[input_blob, weights, values],
weights,
layer=j,
input_type=input_type,
param_type=pname,
**lstm_args
)
if pname not in param_extract_mapping[input_type]:
param_extract_mapping[input_type][pname] = {}
b = param_extract_net.RecurrentParamGet(
[input_blob, weights],
["lstm_{}_{}_{}".format(input_type, pname, j)],
layer=j,
input_type=input_type,
param_type=pname,
**lstm_args
)
param_extract_mapping[input_type][pname][j] = b
(hidden_input_blob, cell_input_blob) = initial_states
output, hidden_output, cell_output, rnn_scratch, dropout_states = \
model.net.Recurrent(
                [input_blob, hidden_input_blob, cell_input_blob, weights],
["lstm_output", "lstm_hidden_output", "lstm_cell_output",
"lstm_rnn_scratch", "lstm_dropout_states"],
seed=random.randint(0, 100000), # TODO: dropout seed
**lstm_args
)
model.net.AddExternalOutputs(
hidden_output, cell_output, rnn_scratch, dropout_states)
if return_params:
param_extract = param_extract_net, param_extract_mapping
return output, hidden_output, cell_output, param_extract
else:
return output, hidden_output, cell_output
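# Illustrative sketch (assumptions: the blob names below already exist in
# the model). cudnn_LSTM expects initial_states as a (hidden_init,
# cell_init) pair and returns the full output sequence plus the final
# hidden and cell states.
def _example_cudnn_lstm():
    from caffe2.python.model_helper import ModelHelper
    model = ModelHelper(name='cudnn_lstm_example')
    output, hidden_out, cell_out = cudnn_LSTM(
        model,
        input_blob='input_sequence',
        initial_states=('hidden_init', 'cell_init'),
        dim_in=8,
        dim_out=16,
        scope='cudnn_lstm',
    )
    return output, hidden_out, cell_out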
def LSTMWithAttention(
model,
decoder_inputs,
decoder_input_lengths,
initial_decoder_hidden_state,
initial_decoder_cell_state,
initial_attention_weighted_encoder_context,
encoder_output_dim,
encoder_outputs,
decoder_input_dim,
decoder_state_dim,
scope,
attention_type=AttentionType.Regular,
outputs_with_grads=(0, 4),
weighted_encoder_outputs=None,
lstm_memory_optimization=False,
attention_memory_optimization=False,
forget_bias=0.0,
forward_only=False,
):
'''
    Adds an LSTM with an attention mechanism to a model.
    The implementation is based on https://arxiv.org/abs/1409.0473, with
    a small difference in the order in which we compute the new attention
    context and the new hidden state, similar to
    https://arxiv.org/abs/1508.04025.
The model uses encoder-decoder naming conventions,
where the decoder is the sequence the op is iterating over,
while computing the attention context over the encoder.
model: ModelHelper object new operators would be added to
decoder_inputs: the input sequence in a format T x N x D
where T is sequence size, N - batch size and D - input dimension
decoder_input_lengths: blob containing sequence lengths
which would be passed to LSTMUnit operator
initial_decoder_hidden_state: initial hidden state of LSTM
initial_decoder_cell_state: initial cell state of LSTM
initial_attention_weighted_encoder_context: initial attention context
encoder_output_dim: dimension of encoder outputs
encoder_outputs: the sequence, on which we compute the attention context
at every iteration
decoder_input_dim: input dimension (last dimension on decoder_inputs)
decoder_state_dim: size of hidden states of LSTM
attention_type: One of: AttentionType.Regular, AttentionType.Recurrent.
Determines which type of attention mechanism to use.
outputs_with_grads : position indices of output blobs which will receive
external error gradient during backpropagation
weighted_encoder_outputs: encoder outputs to be used to compute attention
        weights. In the basic case it's just a linear transformation of
        encoder outputs (that is the default, when weighted_encoder_outputs
        is None). However, it can be something more complicated - like a
        separate encoder network (for example, in case of a convolutional
        encoder)
lstm_memory_optimization: recompute LSTM activations on backward pass, so
we don't need to store their values in forward passes
attention_memory_optimization: recompute attention for backward pass
    forward_only: whether to create only the forward pass
'''
cell = LSTMWithAttentionCell(
encoder_output_dim=encoder_output_dim,
encoder_outputs=encoder_outputs,
decoder_input_dim=decoder_input_dim,
decoder_state_dim=decoder_state_dim,
name=scope,
attention_type=attention_type,
weighted_encoder_outputs=weighted_encoder_outputs,
forget_bias=forget_bias,
lstm_memory_optimization=lstm_memory_optimization,
attention_memory_optimization=attention_memory_optimization,
forward_only=forward_only,
)
_, result = cell.apply_over_sequence(
model=model,
inputs=decoder_inputs,
seq_lengths=decoder_input_lengths,
initial_states=(
initial_decoder_hidden_state,
initial_decoder_cell_state,
initial_attention_weighted_encoder_context,
),
outputs_with_grads=outputs_with_grads,
)
return result
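# Illustrative sketch of wiring LSTMWithAttention into a model. All blob
# names are assumptions (e.g. produced elsewhere by an encoder net) and the
# dimensions are arbitrary. With the default outputs_with_grads=(0, 4),
# result[0] is the decoder hidden-state sequence and result[4] is the
# attention-weighted context sequence.
def _example_lstm_with_attention():
    from caffe2.python.model_helper import ModelHelper
    model = ModelHelper(name='attention_example')
    result = LSTMWithAttention(
        model=model,
        decoder_inputs='decoder_inputs',            # T x N x 8
        decoder_input_lengths='decoder_input_lengths',
        initial_decoder_hidden_state='initial_hidden',
        initial_decoder_cell_state='initial_cell',
        initial_attention_weighted_encoder_context='initial_context',
        encoder_output_dim=32,
        encoder_outputs='encoder_outputs',
        decoder_input_dim=8,
        decoder_state_dim=16,
        scope='decoder',
        attention_type=AttentionType.Regular,
    )
    return result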
def _layered_LSTM(
model, input_blob, seq_lengths, initial_states,
dim_in, dim_out, scope, outputs_with_grads=(0,), return_params=False,
memory_optimization=False, forget_bias=0.0, forward_only=False,
drop_states=False, create_lstm=None):
    params = locals()  # leave this as the first line to grab all params
params.pop('create_lstm')
if not isinstance(dim_out, list):
return create_lstm(**params)
elif len(dim_out) == 1:
params['dim_out'] = dim_out[0]
return create_lstm(**params)
assert len(dim_out) != 0, "dim_out list can't be empty"
assert return_params is False, "return_params not supported for layering"
for i, output_dim in enumerate(dim_out):
params.update({
'dim_out': output_dim
})
output, last_output, all_states, last_state = create_lstm(**params)
params.update({
'input_blob': output,
'dim_in': output_dim,
'initial_states': (last_output, last_state),
'scope': scope + '_layer_{}'.format(i + 1)
})
return output, last_output, all_states, last_state
layered_LSTM = functools.partial(_layered_LSTM, create_lstm=LSTM)
|
## @package _import_c_extension
# Module caffe2.python._import_c_extension
import atexit
import logging
import sys
from caffe2.python import extension_loader
# We will first try to load the gpu-enabled caffe2. If it fails, we will then
# attempt to load the cpu version. The cpu backend is the minimum required, so
# if that still fails, we will exit loudly.
with extension_loader.DlopenGuard():
try:
from caffe2.python.caffe2_pybind11_state_gpu import * # noqa
if num_cuda_devices(): # noqa
has_gpu_support = True
else:
has_gpu_support = False
except ImportError as e:
logging.warning(
'This caffe2 python run does not have GPU support. '
'Will run in CPU only mode.')
logging.warning('Debug message: {0}'.format(str(e)))
has_gpu_support = False
try:
from caffe2.python.caffe2_pybind11_state import * # noqa
except ImportError as e:
logging.critical(
'Cannot load caffe2.python. Error: {0}'.format(str(e)))
sys.exit(1)
# libcaffe2_python contains a global Workspace that we need to properly delete
# when exiting. Otherwise, cudart will cause segfaults sometimes.
atexit.register(on_module_exit) # noqa
# Add functionalities for the TensorCPU interface.
def _TensorCPU_shape(self):
return tuple(self._shape)
def _TensorCPU_reshape(self, shape):
return self._reshape(list(shape))
TensorCPU.shape = property(_TensorCPU_shape) # noqa
TensorCPU.reshape = _TensorCPU_reshape # noqa
|
## @package core
# Module caffe2.python.core
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from collections import OrderedDict
from caffe2.proto import caffe2_pb2
from collections import defaultdict
from caffe2.python import scope, utils, workspace
import caffe2.python._import_c_extension as C
import numpy as np
import sys
# Mac os specific message
if (sys.platform == 'darwin' and 'leveldb' in C.registered_dbs()):
print('If you are using homebrew leveldb on a Mac OS, you might see an '
'error warning you that malloc_zone_unregister() failed. This is '
'not a caffe2 issue but is due to the homebrew leveldb having an '
'incompatible memory allocator. It does not affect usage.')
# Convenience redirections to functions inside scope.
DeviceScope = scope.DeviceScope
NameScope = scope.NameScope
# Bring datatype enums to the main namespace
class DataType:
pass
def _InitDataType():
for name, value in caffe2_pb2.TensorProto.DataType.items():
setattr(DataType, name, value)
_InitDataType()
# Python 2 and 3 compatibility: test if basestring exists
try:
basestring = basestring # NOQA
except NameError:
# This is python3 so we define basestring.
basestring = str
def _GetRegisteredOperators():
return set(workspace.RegisteredOperators())
_REGISTERED_OPERATORS = _GetRegisteredOperators()
def RefreshRegisteredOperators():
global _REGISTERED_OPERATORS
_REGISTERED_OPERATORS = _GetRegisteredOperators()
_GLOBAL_INIT_ARGS = []
def GlobalInit(args):
_GLOBAL_INIT_ARGS.extend(args[1:])
C.global_init(args)
def GetGlobalInitArgs():
return _GLOBAL_INIT_ARGS[:]
_WORKER_INIT_CALLS = []
def worker_init_func(func):
"""
By decorating a function with this, each call to the function will be
    recorded at workflow time and replayed in each of the workers at startup.
    Used, for example, for registering caffe2 python operators.
"""
def call(*args, **kwargs):
_WORKER_INIT_CALLS.append((func, args, kwargs))
return func(*args, **kwargs)
return call
def GetWorkerInitCalls():
return _WORKER_INIT_CALLS[:]
def IsOperator(op_type):
return (op_type in _REGISTERED_OPERATORS)
def IsOperatorWithEngine(op_type, engine):
return (op_type + "_ENGINE_" + engine in _REGISTERED_OPERATORS)
def DeviceOption(device_type, cuda_gpu_id=0, random_seed=None):
option = caffe2_pb2.DeviceOption()
option.device_type = device_type
option.cuda_gpu_id = cuda_gpu_id
if random_seed is not None:
option.random_seed = random_seed
return option
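# Illustrative sketch: building device options for operator placement.
# caffe2_pb2.CPU and caffe2_pb2.CUDA are the standard device type enums;
# the seed value is arbitrary.
def _example_device_option():
    gpu_option = DeviceOption(caffe2_pb2.CUDA, cuda_gpu_id=0)
    cpu_option = DeviceOption(caffe2_pb2.CPU, random_seed=1701)
    return gpu_option, cpu_option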
GradientSlice = namedtuple('GradientSlice', ['indices', 'values'])
class BlobReference(object):
"""A wrapper around a blob in a net.
BlobReference gives us a way to refer to the network that the blob is
generated from. Note that blobs are, essentially, just strings in the
current workspace.
"""
def __init__(self, name, net=None):
"""Initializes a blob reference.
        Note that this does not prepend the namescope. If needed, use
        ScopedBlobReference() to prepend the current namescope.
"""
self._name = name
self._from_net = net
        # meta allows helper functions to put whatever meta-information is
        # needed there.
self.meta = {}
def __hash__(self):
return hash(self._name)
def __eq__(self, other):
if isinstance(other, basestring):
return self._name == other
elif isinstance(other, BlobReference):
return self._name == other._name
else:
return False
def __ne__(self, other):
return not(self == other)
def __str__(self):
return self._name
def __repr__(self):
return 'BlobReference("{}")'.format(self._name)
def __add__(self, other):
if not isinstance(other, basestring):
raise RuntimeError('Cannot add BlobReference to a non-string.')
return BlobReference(self._name + other, self._from_net)
def __radd__(self, other):
if not isinstance(other, basestring):
raise RuntimeError('Cannot add a non-string to BlobReference.')
return BlobReference(other + self._name, self._from_net)
def Net(self):
return self._from_net
def GetNameScope(self):
return self._name[:self._name.rfind(scope._NAMESCOPE_SEPARATOR) + 1]
def _CreateAndAddToNet(self, op_type, inputs=None, *args, **kwargs):
"""Internal function that routes the operator generation to the
network's __getattr__ function.
"""
inputs = [] if inputs is None else inputs
        if isinstance(inputs, (BlobReference, basestring)):
inputs = [inputs]
# add self to the input list.
inputs.insert(0, self)
return self._from_net.__getattr__(op_type)(inputs, *args, **kwargs)
def __getattr__(self, op_type):
"""A wrapper allowing one to initiate operators from a blob reference.
Example: for a blob reference b that comes from network n, doing
b.Relu(...)
is equivalent to doing
net.Relu([b], ...)
"""
if op_type.startswith('__'):
raise AttributeError('Attribute {} not found.'.format(op_type))
if self._from_net is None:
raise RuntimeError(
'You cannot use a blob reference that does not have a net '
'source to create operators. Create the operator from an '
'explicit net object.')
if not IsOperator(op_type):
raise RuntimeError(
'Method ' + op_type + ' is not a registered operator.' +
' Did you mean: [' +
",".join(workspace.C.nearby_opnames(op_type)) + ']'
)
return lambda *args, **kwargs: self._CreateAndAddToNet(
op_type, *args, **kwargs)
def __dir__(self):
additional_methods = [
op
for op in _REGISTERED_OPERATORS
if '_ENGINE_' not in op or '_ENGINE_CUDNN' in op]
return sorted(set(
dir(type(self)) +
            list(self.__dict__.keys()) +
additional_methods))
def ScopedName(name):
"""prefix the name with the current scope."""
return scope.CurrentNameScope() + name
def ScopedBlobReference(name, *args, **kwargs):
"""Returns a blob reference with scope prefixed."""
return BlobReference(ScopedName(name), *args, **kwargs)
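# Illustrative sketch: blob references created from a net can spawn
# operators directly, and ScopedBlobReference picks up the active name
# scope. The net and blob names here are assumptions for the example.
def _example_blob_reference_usage():
    net = Net('blob_ref_example')
    x = net.ConstantFill([], ['x'], shape=[2, 2], value=1.0)
    # Equivalent to net.Relu([x], 'y'): the call is routed through
    # BlobReference.__getattr__ and _CreateAndAddToNet.
    y = x.Relu([], 'y')
    with NameScope('example'):
        z = ScopedBlobReference('z')  # refers to 'example/z'
    return y, z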
def _RectifyInputOutput(blobs, net=None):
"""A helper function to rectify the input or output of the CreateOperator
interface.
"""
if isinstance(blobs, basestring):
# If blobs is a single string, prepend scope.CurrentNameScope()
# and put it as a list.
# TODO(jiayq): enforce using BlobReference instead of raw strings.
return [ScopedBlobReference(blobs, net=net)]
elif type(blobs) is BlobReference:
# If blob is a BlobReference, simply put it as a list.
return [blobs]
elif type(blobs) in (list, tuple):
# If blob is a list, we go through it and type check.
rectified = []
for blob in blobs:
if isinstance(blob, basestring):
rectified.append(ScopedBlobReference(blob, net=net))
elif type(blob) is BlobReference:
rectified.append(blob)
else:
raise TypeError(
"I/O blob #{} of unsupported type: {} of type {}"
.format(len(rectified), str(blob), type(blob)))
return rectified
else:
raise TypeError(
"Unknown input/output type: %s of type %s." %
(str(blobs), type(blobs))
)
def CreateOperator(
operator_type,
inputs,
outputs,
name='',
control_input=None,
device_option=None,
arg=None,
engine=None,
**kwargs
):
"""A function wrapper that allows one to create operators based on the
operator type. The type should be a string corresponding to an operator
registered with Caffe2.
"""
operator = caffe2_pb2.OperatorDef()
operator.type = operator_type
operator.name = name
# Add rectified inputs and outputs
inputs = _RectifyInputOutput(inputs)
outputs = _RectifyInputOutput(outputs)
operator.input.extend([str(i) for i in inputs])
operator.output.extend([str(o) for o in outputs])
if control_input:
control_input = _RectifyInputOutput(control_input)
operator.control_input.extend([str(i) for i in control_input])
# Set device option:
# (1) If device_option is explicitly set, use device_option.
# (2) If not, but scope.CurrentDeviceScope() is set,
# then we use scope.CurrentDeviceScope().
# (3) Otherwise, do not set device option.
if device_option is not None:
operator.device_option.CopyFrom(device_option)
elif scope.CurrentDeviceScope() is not None:
operator.device_option.CopyFrom(scope.CurrentDeviceScope())
if engine is not None:
operator.engine = engine
    # random seed is defined in the device option, so we need to take
    # special care of it.
if 'random_seed' in kwargs:
operator.device_option.random_seed = kwargs['random_seed']
del kwargs['random_seed']
# Add given arguments that do not need parsing
if arg is not None:
operator.arg.extend(arg)
# Add all other arguments
for key, value in kwargs.items():
operator.arg.add().CopyFrom(utils.MakeArgument(key, value))
if workspace.IsImmediate():
workspace.RunOperatorImmediate(operator)
return operator
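# Illustrative sketch: CreateOperator builds a standalone OperatorDef;
# keyword arguments such as kernel/stride/pad become typed Arguments on
# the proto. The blob names are assumptions.
def _example_create_operator():
    op = CreateOperator(
        'Conv',
        ['X', 'w', 'b'],
        ['Y'],
        kernel=3,
        stride=1,
        pad=1,
    )
    return op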
def _RegisterPythonImpl(
f, grad_f=None, python_func_type=None, pass_workspace=False, name=None,
):
if python_func_type:
func = python_func_type(f)
f = func.forward
grad_f = func.backward
else:
if isinstance(f, tuple):
f = f[0](*f[1], **f[2])
if isinstance(grad_f, tuple):
grad_f = grad_f[0](*grad_f[1], **grad_f[2])
token = C.register_python_op(f, pass_workspace, name or '')
if grad_f:
C.register_python_gradient_op(token, grad_f)
return token
def CreatePythonOperator(
f, inputs,
outputs,
grad_f=None,
pass_workspace=False,
python_func_type=None,
name=None,
*args,
**kwargs
):
"""
`f` should have a signature (inputs, outputs)
If `pass_workspace` is True, the signature is changed to
(inputs, outputs, workspace) where `workspace` is the workspace the op
is going to run on. This is potentially dangerous (as the op can manipulate
    the workspace directly), use at your own risk.
"""
kwargs["token"] = _RegisterPythonImpl(
f, grad_f, python_func_type, pass_workspace=pass_workspace, name=name
)
return CreateOperator("Python", inputs, outputs, *args, **kwargs)
def GetIndexFromGradientList(g_list, name):
"""A helper function to get the index from a gradient list, None if not
matching."""
for i, g in enumerate(g_list):
if g == name:
return i
elif type(g) is GradientSlice:
if (g.indices == name or g.values == name):
return i
return None
OpSSA = namedtuple('OpSSA', ['op', 'in_versions', 'out_versions'])
GradGenMeta = namedtuple('GradGenMeta', ['grad_op', 'idx', 'gradient'])
SparseGradGenMeta = namedtuple('SparseGradGenMeta', [
'grad_op_indices', 'idx_indices',
'grad_op_values', 'idx_values',
'gradient',
])
class IR(object):
"""A simple IR class to keep track of all intermediate representations used
in the gradient computation.
"""
def __init__(self, operators):
# The IR class holds multiple metadata from the forward pass:
# a) ssa: a list of [op, in_versions, out_versions] recording the
# input and the output version of each operator, similar
# to a normal SSA form.
# b) input_count: a dictionary specifying for each blob and
# each of its version, how many times it is used as input for another
# op.
# c) frontier: maintaining the current versions of the blobs
# we are having in the workspace, after the execution of all the ops
# added to the IR so far. This is useful because if a gradient is
# trying to access an earlier version of a blob, we can sanity check
# that it is no longer there, and thus throw an error.
# d) gradient_frontier: maps the names of blobs to its version that the
# gradient corresponds to.
# e) gradient_generators: for each blob and each of its version, maps to
# a list of operators that generates its gradient together with the
# gradient name.
self.ssa = []
self.input_usages = defaultdict(lambda: defaultdict(list))
self.frontier = defaultdict(int)
self.gradient_frontier = {}
self.gradient_generators = defaultdict(lambda: defaultdict(list))
for op in operators:
self.Play(op)
def Play(self, op):
""""Adds an op to the current IR, and update the internal states to
reflect the blobs and versions after the execution of the op.
"""
# For input, they are the current version in the dict.
in_versions = {}
for s in op.input:
in_versions[s] = self.frontier[s]
self.input_usages[s][self.frontier[s]].append(len(self.ssa))
# For output, they are the current version plus one. If this is a
# newly created blob, its version starts with zero.
out_versions = {}
for s in op.output:
if s in self.frontier:
self.frontier[s] += 1
out_versions[s] = self.frontier[s]
# Add to SSA for bookkeeping.
self.ssa.append(OpSSA(op, in_versions, out_versions))
def CheckGradientOperatorInput(
self, grad_op_input, g_output, fwd_op_idx, locally_generated_blobs):
"""Checks if the gradient operators can be correctly carried out."""
forward_op, in_versions, out_versions = self.ssa[fwd_op_idx]
original_index = GetIndexFromGradientList(g_output, grad_op_input)
# If it is a dense or sparse gradient name, it should match the
# version of the corresponding output.
if original_index is not None:
original_name = forward_op.output[original_index]
if (out_versions[original_name] !=
self.gradient_frontier[original_name]):
raise RuntimeError(
'Gradient name "%s" is expected to correspond '
'to version %d of "%s", but currently we have '
'version %d.' % (
grad_op_input, out_versions[original_name],
original_name,
self.gradient_frontier[original_name]))
# If it is an output name, the current version should match the
# version when the operator was run.
elif grad_op_input in out_versions:
if self.frontier[grad_op_input] != out_versions[grad_op_input]:
raise RuntimeError(
'Gradient operator needs output "%s" at version'
' %d, but currently we have version %d.' % (
grad_op_input, out_versions[grad_op_input],
self.frontier[grad_op_input]
)
)
# If it is an input name, the current version should match the
# version when the operator was run.
elif grad_op_input in in_versions:
if (self.frontier[grad_op_input] != in_versions[grad_op_input]):
raise RuntimeError(
'Gradient operator needs input "%s" at version '
'%d, but currently we have version %d.' % (
grad_op_input, in_versions[grad_op_input],
self.frontier[grad_op_input]
)
)
# If it is none of the above, it should be a blob that is
# generated locally by one of the previous gradient operators.
else:
if grad_op_input not in locally_generated_blobs:
raise RuntimeError(
'Blob name "%s" not in the scope of operator: '
'%s\nand is not generated by any of the local '
'gradient operators.' % (grad_op_input, str(forward_op))
)
def AppendSparseGenerators(self, sparse_generators):
# merge indices and values generators for sparse gradients
for name, input_generators in sparse_generators.items():
for version, generators in input_generators.items():
if len(generators) == 1:
# either indices or values are generated (but not both)
generator = generators[0]
else:
# both indices and values are generated
assert(len(generators) == 2)
op1_i, idx1_i, op1_v, idx1_v, g1 = generators[0]
op2_i, idx2_i, op2_v, idx2_v, g2 = generators[1]
assert(g1 == g2)
assert(op1_i is None or op2_i is None)
assert(op1_v is None or op2_v is None)
assert(idx1_i == 0 or idx2_i == 0)
assert(idx1_v == 0 or idx2_v == 0)
generator = SparseGradGenMeta(
op1_i or op2_i, idx1_i + idx2_i,
op1_v or op2_v, idx1_v + idx2_v,
g1)
self.gradient_generators[name][version].append(generator)
def BuildGradientGenerators( # NOQA
self, fwd_op_idx, gradient_ops, g_output, g_input):
"""Updates gradient_generators and gradient_frontier"""
forward_op, in_versions, out_versions = self.ssa[fwd_op_idx]
locally_generated_blobs = []
sparse_generators = defaultdict(lambda: defaultdict(list))
for grad_op in gradient_ops:
# (1) check that inputs are valid
for s in grad_op.input:
self.CheckGradientOperatorInput(
s, g_output, fwd_op_idx, locally_generated_blobs)
# (2) add outputs to the locally generated blobs
# If an output corresponds to the gradient of an input, we also
# record it to gradient_generators
locally_generated_blobs.extend([str(s) for s in grad_op.output])
for i, output in enumerate(grad_op.output):
input_index = GetIndexFromGradientList(g_input, output)
if input_index is not None:
input_name = forward_op.input[input_index]
input_version = in_versions[input_name]
g = g_input[input_index]
if type(g) is GradientSlice:
# the output corresponds either to the indices or the
# values of the sparse gradient. In either case we
# create a (partial) SparseGradGenMeta. If necessary,
# we'll merge indices and values generators
# corresponding to the same gradient in step (3)
if g.indices == output:
m = SparseGradGenMeta(grad_op, i, None, 0, g)
else:
assert(g.values == output)
m = SparseGradGenMeta(None, 0, grad_op, i, g)
sparse_generators[input_name][input_version].append(m)
else:
self.gradient_generators[input_name][input_version] \
.append(GradGenMeta(
grad_op, i, g))
# (3) merge indices and values generators for sparse gradients, and
# add them to gradient_generators
self.AppendSparseGenerators(sparse_generators)
# (4) for ops (e.g., Add, Sum, Sub) which have gradient outputs directly
    # passed from inputs (not computed from gradient ops), we create a
    # GradGenMeta with None grad_op and idx so that the gradient_generators
# knows where the gradients are coming from. This is needed for creating
# Sum op to accumulate the gradients from multiple parents.
for input_index, g in enumerate(g_input):
input_name = forward_op.input[input_index]
input_version = in_versions[input_name]
if not g:
continue
if type(g) is GradientSlice:
if str(g.indices) not in locally_generated_blobs and \
str(g.values) not in locally_generated_blobs:
self.gradient_generators[input_name][input_version].append(
SparseGradGenMeta(None, 0, None, 0, g))
else:
if str(g) not in locally_generated_blobs:
self.gradient_generators[input_name][input_version].append(
GradGenMeta(None, 0, g))
# Finally, for the gradients specified in g_input, we update the
# gradient frontier to reflect the input versions that the gradients
# correspond to.
for i, g in enumerate(g_input):
if g is not None:
input_name = forward_op.input[i]
input_version = in_versions[input_name]
self.gradient_frontier[input_name] = input_version
def _GetSumOpOutputName(self, generator, input_name):
def remove_suffix(s, suffix):
if s.endswith(suffix):
return s[:-len(suffix)]
return s
for g in generator:
if type(g) is GradGenMeta:
grad_op, idx, _ = g
if grad_op:
return grad_op.output[idx]
else:
assert(type(g) is SparseGradGenMeta)
op_i, idx_i, op_v, idx_v, _ = g
if op_i:
return remove_suffix(op_i.output[idx_i], '_indices')
if op_v:
return remove_suffix(op_v.output[idx_v], '_values')
return input_name + '_grad'
def _SetSumOpsDeviceOption(self, sum_ops, generators):
# we already checked that device options are consistent so we can just
# use the first one we find
for generator in generators:
grad_op = generator.grad_op if type(generator) is GradGenMeta \
else generator.grad_op_values or generator.grad_op_indices
if grad_op:
if grad_op.HasField('device_option'):
for op in sum_ops:
op.device_option.CopyFrom(grad_op.device_option)
break
def _DisambiguateGradOpOutput(self, grad_op, idx, cnt):
grad_op.output[idx] = (
'_' + grad_op.output[idx] + '_autosplit_{}'.format(cnt))
return grad_op.output[idx], cnt + 1
def _CheckSumOpsConflict(self, out_base_name, g):
if str(out_base_name) == str(g):
# TODO not sure what this message really means
raise RuntimeError(
'The gradient output of empty gradient op can not '
'be the same as the normal name of the current '
'input gradient.')
def _MakeDenseSumOps(self, generators, out_base_name):
sum_op_input = []
cnt = 0
for generator in generators:
grad_op, idx, g = generator
assert(type(g) is not GradientSlice)
if grad_op:
out, cnt = self._DisambiguateGradOpOutput(grad_op, idx, cnt)
sum_op_input.append(out)
else:
self._CheckSumOpsConflict(out_base_name, g)
sum_op_input.append(str(g))
        sum_ops = [CreateOperator(
            "Sum",
            list(map(BlobReference, sum_op_input)),
            BlobReference(out_base_name))]
return sum_ops, out_base_name
def _MakeSparseSumOps(self, generators, out_base_name):
indices_concat_input = []
values_concat_input = []
cnt_i = 0
cnt_v = 0
for generator in generators:
assert(type(generator) is SparseGradGenMeta)
op_i, idx_i, op_v, idx_v, g = generator
if op_i:
out, cnt_i = self._DisambiguateGradOpOutput(op_i, idx_i, cnt_i)
indices_concat_input.append(out)
else:
self._CheckSumOpsConflict(out_base_name, g.indices)
indices_concat_input.append(g.indices)
if op_v:
out, cnt_v = self._DisambiguateGradOpOutput(op_v, idx_v, cnt_v)
values_concat_input.append(out)
else:
self._CheckSumOpsConflict(out_base_name, g.values)
values_concat_input.append(g.values)
indices_concat_output = out_base_name + '_indices_concat'
indices_concat_split = out_base_name + '_indices_concat_split'
values_concat_output = out_base_name + '_values_concat'
values_concat_split = out_base_name + '_values_concat_split'
# Sum the given sparse representations by simply concatenating the
# indices (resp. values) tensors together. We don't do any deduplication
# of indices at this point. This will be done as needed before the
# optimizer is called
        sum_ops = [
            CreateOperator(
                "Concat",
                list(map(BlobReference, indices_concat_input)),
                list(map(BlobReference,
                         [indices_concat_output, indices_concat_split])),
                axis=0
            ),
            CreateOperator(
                "Concat",
                list(map(BlobReference, values_concat_input)),
                list(map(BlobReference,
                         [values_concat_output, values_concat_split])),
                axis=0
            ),
        ]
sum_op_output = GradientSlice(
indices=indices_concat_output,
values=values_concat_output,
)
return sum_ops, sum_op_output
def _MakeSumOps(self, input_name, input_version):
generators = self.gradient_generators[input_name][input_version]
out_base_name = self._GetSumOpOutputName(generators, input_name)
types = list(set(type(x) for x in generators))
assert(len(types) == 1)
if types[0] is GradGenMeta:
sum_ops, g = self._MakeDenseSumOps(generators, out_base_name)
else:
assert(types[0] is SparseGradGenMeta)
sum_ops, g = self._MakeSparseSumOps(generators, out_base_name)
self._SetSumOpsDeviceOption(sum_ops, generators)
return sum_ops, g
def _VerifyGradientGenerators(self, generator):
# (1) check if all gradients are of the same type. Aggregating a mix of
# sparse and dense gradients is not supported yet
if len({type(g) for g in generator}) > 1:
raise RuntimeError(
'Automatic aggregation of a mix of sparse and dense gradients '
'is not supported yet')
        # If, among all the operators that used the input, none or only one
        # produced the gradient, then no additional sum needs to be carried
        # out.
if len(generator) < 2:
return False
all_gradient_names = []
all_device_options = []
for g in generator:
if type(g) is GradGenMeta:
if g.grad_op:
all_gradient_names.append(g.gradient)
all_device_options.append(g.grad_op.device_option)
else:
assert(type(g) is SparseGradGenMeta)
if g.grad_op_indices:
all_device_options.append(g.grad_op_indices.device_option)
if g.grad_op_values:
all_device_options.append(g.grad_op_values.device_option)
all_gradient_names.append(g.gradient.values)
# Check if all grad names are the same.
if len(set(all_gradient_names)) > 1:
raise RuntimeError('Unexpected behavior: not all grad output '
'names are the same.')
# Check if all grad op device options are the same.
if len(all_device_options) >= 2 and not all(
d == all_device_options[0] for d in all_device_options[1:]):
            raise RuntimeError('Unexpected behavior: not all grad ops '
                               'have the same device option.')
return True
def DoGradientAccumulation(self, fwd_op_idx):
"""For each input name in the forward op, check if we will need to
add gradient accumulation. If so, do gradient accumulation and return
the list of gradient operators.
The criteria for doing gradient accumulation is:
(1) the specific input version has been used by multiple operators.
        (2) the current fwd_op_idx is the first to use that input, i.e. in
            the backward pass it is the last that can generate the gradient
            for the input.
(3) For the operators that used the input, their gradient operators
have generated more than 1 gradient.
        When accumulating gradients, our current solution is to rename all the
created gradients with an internal intermediate name, and then add a
Sum() operator that adds up all the gradients. This may use more memory
due to intermediate storage, but is usually the fastest approach as one
can do one single sum for multiple intermediate gradients.
"""
forward_op, in_versions, out_versions = self.ssa[fwd_op_idx]
additional_sum_ops = []
grad_map = {}
for _i, input_name in enumerate(set(forward_op.input)):
input_version = in_versions[input_name]
input_usage = self.input_usages[input_name][input_version]
if (len(input_usage) <= 1 or fwd_op_idx != input_usage[0]):
# We do not need to do gradient accumulation yet.
continue
generator = self.gradient_generators[input_name][input_version]
try:
if not self._VerifyGradientGenerators(generator):
continue
except RuntimeError as err:
raise RuntimeError(
"Gradients for param ''{}'' failed to verify: {}".format(
input_name,
err
)
)
# Finally, let's create the sum operator.
sum_ops, g = self._MakeSumOps(input_name, input_version)
additional_sum_ops.extend(sum_ops)
grad_map[input_name] = g
return additional_sum_ops, grad_map
def _GetInitGradients(self, ys):
input_to_grad = {}
gradient_ops = []
for y, g in ys.items():
if g is None:
autograd_op = CreateOperator(
"ConstantFill", [y], [str(y) + "_autogen_grad"],
value=1.0)
gradient_ops.append(autograd_op)
g = autograd_op.output[0]
            # Since the C++ gradient registry does not have a notion of
            # NameScopes, we will convert all references to strings.
input_to_grad[str(y)] = (
GradientSlice(str(g[0]), str(g[1]))
if isinstance(g, GradientSlice) else str(g))
return input_to_grad, gradient_ops
def _GenerateGradientsForForwardOp(
self, forward_op_idx, input_to_grad):
new_input_to_grad = {}
gradient_ops = []
forward_op, in_versions, out_versions = self.ssa[forward_op_idx]
g_output = list(
input_to_grad.get(name, None) for name in forward_op.output)
if not all(g is None for g in g_output):
gradient_ops, g_input = GradientRegistry.GetGradientForOp(
forward_op, g_output)
# Check if the gradient operators are legal, and update
# gradient_generators and gradient_frontier
self.BuildGradientGenerators(
forward_op_idx, gradient_ops, g_output, g_input)
# Record the gradient map to all_input_to_grad.
for name, grad in zip(forward_op.input, g_input):
# Do not overwrite an existing gradient with a None
# unless the input is also an output of the op, since
# we update the blob version when blob is output of an
# operator.
if grad is not None or \
name not in input_to_grad or \
name in list(forward_op.output):
new_input_to_grad[name] = grad
return new_input_to_grad, gradient_ops
def GetBackwardPass(self, ys):
"""Gets the backward pass that computes the derivatives of given blobs.
Inputs:
ys: a list or a dictionary specifying what blobs we want to compute
derivatives of. If the input is a list, we will automatically
generate their gradients with all-one values; if the input is a
dictionary, for any dictionary entries that are not None, we will
take the corresponding blobs as their gradients; for all those
that are None, we will auto-fill them with 1.
"""
if isinstance(ys, list):
ys = dict((y, None) for y in ys)
elif not isinstance(ys, dict):
raise TypeError("ys should either be a list or a dict.")
# Set the gradient frontier with the initialized external
# gradients.
for y, _ in ys.items():
self.gradient_frontier[y] = self.frontier[y]
all_input_to_grad, all_gradient_ops = self._GetInitGradients(ys)
        # (2) Now, after the virtual play above, we play the ops backwards,
        # creating the gradients along the path. Note that although we are
        # playing it backwards, we cannot refer to variables that are at a
        # version older than their current version, because they have
        # already been overwritten.
for forward_op_idx in reversed(range(len(self.ssa))):
input_to_grad, gradient_ops = self._GenerateGradientsForForwardOp(
forward_op_idx, all_input_to_grad)
all_input_to_grad.update(input_to_grad)
all_gradient_ops += gradient_ops
            # If there are blobs with multiple uses, do gradient accumulation.
additional_sum_ops, grad_map = self.DoGradientAccumulation(
forward_op_idx)
# This line is so that if in an accumulation some of the operators
# have not produced gradients, they still do not overwrite the
# general all_input_to_grad map.
all_input_to_grad.update(grad_map)
all_gradient_ops += additional_sum_ops
# (3) Post-processing.
# After we have done computation for each op, we now have the gradient
# operators ready. For the output map, we will convert everything to
# BlobReferences for easier handling in python.
all_input_to_grad_out = {}
for key, val in all_input_to_grad.items():
if val is not None:
all_input_to_grad_out[BlobReference(key)] = (
BlobReference(val) if isinstance(val, basestring) else
GradientSlice(BlobReference(val[0]), BlobReference(val[1])))
return all_gradient_ops, all_input_to_grad_out
class GradientRegistry(object):
"""GradientRegistry holds the mapping from operators to their gradients."""
gradient_registry_ = {}
@classmethod
def RegisterGradient(cls, op_type):
"""A decorator for registering gradient mappings."""
def Wrapper(func):
cls.gradient_registry_[op_type] = func
return func
return Wrapper
@classmethod
def _GetGradientForOpCC(cls, op_def, g_output):
# TODO(tulloch) - Propagate GradientWrapper up through the stack.
def from_untyped(grad):
if grad is None:
w = C.GradientWrapper()
assert w.is_empty()
return w
try:
(indices, values) = grad
w = C.GradientWrapper()
w.indices = indices
w.values = values
assert w.is_sparse()
return w
except ValueError:
w = C.GradientWrapper()
w.dense = grad
assert w.is_dense()
return w
g_output = [from_untyped(grad) for grad in g_output]
grad_defs_str, g_input = C.get_gradient_defs(
op_def.SerializeToString(), g_output)
def to_untyped(grad_wrapper):
if grad_wrapper.is_empty():
return None
if grad_wrapper.is_sparse():
return GradientSlice(grad_wrapper.indices, grad_wrapper.values)
assert grad_wrapper.is_dense()
return grad_wrapper.dense
g_input = [to_untyped(grad_wrapper) for grad_wrapper in g_input]
grad_defs = []
for grad_def_str in grad_defs_str:
grad_def = caffe2_pb2.OperatorDef()
grad_def.ParseFromString(grad_def_str)
grad_defs.append(grad_def)
return grad_defs, g_input
@classmethod
def GetGradientForOp(cls, op, g_output):
try:
gradient_ops, g_input = cls._GetGradientForOpCC(op, g_output)
except Exception as e:
# Not supported in C++; will try python registration next.
try:
gradient_ops, g_input = cls.gradient_registry_[op.type](
op, g_output)
except KeyError:
raise Exception(
"No gradient registered for {}. ".format(op.type) +
"Exception from creating the gradient op: {}.".format(e))
if gradient_ops is None:
return [], g_input
if type(gradient_ops) is not list:
gradient_ops = [gradient_ops]
return gradient_ops, g_input
@classmethod
def GetBackwardPass(cls, operators, ys):
"""Gets the backward pass for the list of operators.
Args:
operators: a list of operators constituting the forward pass.
ys: a list or a dictionary specifying what blobs we want to compute
derivatives of. If the input is a list, we will automatically
generate their gradients with all-one values; if the input is a
dictionary, for any dictionary entries that are not None, we'll
take the corresponding blobs as their gradients; for all those
that are None, we will auto-fill them with 1.
Returns:
gradient_ops: a list of gradient operators to run.
all_input_to_grads: a map from input to their corresponding
gradients.
"""
ir = IR(operators)
return ir.GetBackwardPass(ys)
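# Illustrative sketch (hypothetical operator type 'ExampleScale'): a
# Python-side gradient registration. The registered function receives the
# forward OperatorDef and the gradients of its outputs, and returns a pair
# (gradient_ops, g_input) aligned with the forward op's inputs.
def _example_register_gradient():
    @GradientRegistry.RegisterGradient('ExampleScale')
    def AddExampleScaleGradient(op, g_output):
        g_out = g_output[0]
        g_in = str(op.input[0]) + '_grad'
        grad_op = CreateOperator('Scale', [g_out], [g_in], scale=2.0)
        return [grad_op], [g_in]
    return AddExampleScaleGradient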
def get_ssa(net, blob_versions=None):
"""
Given a net, return a structure containing the version of each input and
output blob used by each operator.
Args:
net: either a Net or a NetDef
blob_versions: (optional) map with current version number for given
blob names. If not provided or blob not found, start
from version 0.
Returns:
Tuple (ssa, blob_versions)
ssa: list of tuples (versioned_inputs, versioned_outputs)
for each op in the net. A versioned input is a tuple
(blob_name, version).
blob_versions: updated map with latest version of each blob found in
the net.
"""
    if blob_versions is None:
        blob_versions = {}
    if isinstance(net, list):
        return [get_ssa(n, blob_versions) for n in net], blob_versions
    proto = net.Proto() if isinstance(net, Net) else net
    assert isinstance(proto, caffe2_pb2.NetDef)
for i in proto.external_input:
if i not in blob_versions:
blob_versions[str(i)] = 0
ssa = []
for op in proto.op:
if not proto.external_input:
for i in op.input:
if i not in blob_versions:
blob_versions[i] = 0
inputs = [(str(i), blob_versions.get(str(i), 0)) for i in op.input]
for o in op.output:
blob_versions[str(o)] = blob_versions.get(str(o), 0) + 1
outputs = [(str(o), blob_versions[str(o)]) for o in op.output]
ssa.append((inputs, outputs))
return ssa, blob_versions
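# Illustrative sketch: the version numbering produced by get_ssa(). Writing
# a blob bumps its version, so the in-place Relu below reads version 1 of
# 'x' and writes version 2. The net and blob names are assumptions.
def _example_get_ssa():
    net = Net('ssa_example')
    net.ConstantFill([], ['x'], shape=[1], value=1.0)
    net.Relu(['x'], ['x'])
    ssa, blob_versions = get_ssa(net)
    # blob_versions == {'x': 2}; ssa[1] == ([('x', 1)], [('x', 2)])
    return ssa, blob_versions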
def get_undefined_blobs(ssa):
"""
Given a ssa in the format produced by get_ssa(), return a set of blobs that
are used before they are defined, which corresponds to inputs at version 0.
"""
undef_blobs = set()
for inputs, _outputs in ssa:
undef_blobs |= set(name for (name, ver) in inputs if ver == 0)
return undef_blobs
def get_output_producers(ssa):
"""
Given a ssa in the format produced by get_ssa(), returns a map from
versioned blob into the operator index that produces that version of
the blob. A versioned blob is a tuple (blob_name, version).
"""
producers = {}
for i, (_inputs, outputs) in enumerate(ssa):
for o in outputs:
producers[o] = i
return producers
def get_op_ids_in_path(ssa, blob_versions, inputs, outputs):
"""
Given a ssa and blob_versions as produced by get_ssa(), returns the list
of op indices that are necessary in order to generate the blobs in
`outputs`, given blobs in `inputs`.
Consider that the `inputs` are given in their latest version.
"""
inputs_set = set((str(i), blob_versions[str(i)]) for i in inputs)
producers = get_output_producers(ssa)
queue = [(str(o), blob_versions[str(o)]) for o in outputs]
used_op_ids = set()
while len(queue) > 0:
o = queue.pop()
if (o not in inputs_set) and (o in producers):
op_id = producers[o]
if op_id not in used_op_ids:
used_op_ids |= {op_id}
inputs, _ = ssa[op_id]
queue.extend(inputs)
return sorted(used_op_ids)
def clone_and_bind_net(net, name, prefix, blob_remap=None, inputs=None,
keep_schema=True):
"""
Clone the given Net, binding its input schema to the given `inputs` record.
Blob names defined by the net are prepended with the given `prefix`.
Args:
net: the net to clone
name: the name of the new net
prefix: the prefix to prepend to local blob names
blob_remap: (optional) dict with additional blob name remapping.
inputs: (optional) input record that will provide actual input
values for the cloned net. Must be compatible with the
net's input schema or be a strict superset of it
keep_schema: by default (True), the original schema will be kept and
remapped accordingly. Otherwise, the schema will be set to
`inputs`, or left empty if inputs is not given.
Returns:
Tuple (cloned_net, blob_remap)
clone_net: the cloned Net
blob_remap: a map from original blob names into remapped blob names
"""
from caffe2.python import schema
assert isinstance(net, Net)
if blob_remap is None:
blob_remap = {}
if inputs is not None:
assert isinstance(inputs, schema.Field)
original = net.input_record()
assert original is not None
# TODO(azzolini): improve schema type checking
diff = set(original.field_names()) - set(inputs.field_names())
assert len(diff) == 0, (
"Schemas don't match, extra fields {diff} found in the net {name}. "
"original: {original}; inputs: {inputs}"
.format(
diff=diff, name=net.Name(), original=original.field_names(),
inputs=inputs.field_names()
)
)
original_mapping = dict(zip(original.field_names(),
original.field_blobs()))
for fn, fb in zip(inputs.field_names(), inputs.field_blobs()):
if fn in original_mapping:
blob_remap[str(original_mapping[fn])] = str(fb)
proto = net.Proto()
ssa, blob_versions = get_ssa(proto)
undef_blobs = get_undefined_blobs(ssa)
for blob in blob_versions.keys():
if blob in blob_remap:
continue
elif blob in undef_blobs:
blob_remap[blob] = blob
else:
blob_remap[blob] = prefix + blob
cloned_net = net.Clone(name, blob_remap, keep_schema=keep_schema)
if not keep_schema and inputs:
cloned_net.set_input_record(inputs)
return cloned_net, blob_remap
def _get_blob_ref(blob_name_or_ref):
return (
blob_name_or_ref if isinstance(blob_name_or_ref, BlobReference)
else BlobReference(blob_name_or_ref)
)
class Net(object):
_net_names_used = set()
operator_registry_ = {}
@staticmethod
def current_prefix():
from caffe2.python.net_builder import NetBuilder
builder = NetBuilder.current(required=False)
return builder.name if builder else ''
@staticmethod
def _get_next_net_name(basename):
name = basename = '/'.join(filter(
lambda x: x, (Net.current_prefix(), basename)))
next_idx = 1
while name in Net._net_names_used:
name = basename + '_' + str(next_idx)
next_idx += 1
Net._net_names_used |= set([name])
return name
def __init__(self, name_or_proto):
"""
Create a Net.
Args:
name_or_proto: If a NetDef is provided, clone it. Otherwise,
create an empty net with the given name.
"""
self._input_record = None
self._output_record = None
# Register blobs so that it's guaranteed that different calls to
# NextBlob/NextScopedBlob always return blobs with different names
self._registered_blob_names = set()
self._recreate_lookup_tables = False
self._op_outputs = set()
self._external_input_map = set()
self._attr_dict = defaultdict(list)
if type(name_or_proto) is caffe2_pb2.NetDef:
proto = name_or_proto
# We are initializing a network from a NetDef. In this case, we will
# initialize our network with a copy of the given NetDef.
self._net = caffe2_pb2.NetDef()
self._net.CopyFrom(proto)
existing_outputs = [list(op.output) for op in self._net.op]
self._external_input_map.update(list(self._net.external_input))
# Set the next name index properly.
existing_names = set(
sum(
[list(op.input) for op in self._net.op], []
) + sum(
existing_outputs, []
)
)
for outs in existing_outputs:
self._op_outputs.update(outs)
prefix_len = len(self._net.name + '_blob_')
autogen_indices = []
for s in existing_names:
if s.startswith(self._net.name + '_blob_'):
try:
autogen_indices.append(int(s[prefix_len:]))
except ValueError:
pass
if len(autogen_indices):
self._next_name_index = max(autogen_indices) + 1
else:
self._next_name_index = 0
name = self._net.name
else:
name = name_or_proto
self._net = caffe2_pb2.NetDef()
self._next_name_index = 0
# make sure that this net name hasn't been used before
self._net.name = Net._get_next_net_name(name)
def AppendNet(self, net):
assert isinstance(net, Net)
for i in net.Proto().external_input:
if (
i not in self.Proto().external_input and
i not in self._op_outputs
):
self.Proto().external_input.append(i)
self.Proto().external_output.extend(
[
o for o in net.Proto().external_output
if o not in self.Proto().external_output
]
)
self._ExtendOps(net.Proto().op)
return self
def LogInfo(self, *msg_or_blobs):
for msg_or_blob in msg_or_blobs:
if not isinstance(msg_or_blob, BlobReference):
blob = self.GivenTensorStringFill(
[], self.NextName('log'),
shape=[], values=[msg_or_blob])
else:
blob = msg_or_blob
self.Print(blob, [])
def add_attribute(self, name, obj):
"""
Add `obj` to the list of attributes in this net under the given `name`.
Attributes are user-defined objects and have no pre-defined semantics.
"""
self._attr_dict[name].append(obj)
def get_attributes(self, name):
"""
Returns the list of attributes in this net for a given `name`.
Attributes are user-defined objects added with `add_attribute'.
"""
return self._attr_dict.get(name, [])
def set_rand_seed(self, seed=100, sequence_seed=True, seed_on_op_def=False):
"""
Adds a random seed to each op in the net.
If sequence_seed is set, the i-th op gets rand_seed=`seed + i`.
If seed_on_op_def is set, the op seed is hash(str(op) + str(seed)).
sequence_seed and seed_on_op_def cannot both be set to True.
"""
assert not (sequence_seed and seed_on_op_def), (
'sequence_seed and seed_on_op_def cannot be both set to True.')
for i, op in enumerate(self.Proto().op):
if sequence_seed:
curr_seed = seed + i
elif seed_on_op_def:
curr_seed = hash(str(op) + str(seed)) % np.iinfo(np.uint32).max
else:
curr_seed = seed
op.device_option.random_seed = curr_seed
def Name(self):
return self._net.name
def __str__(self):
return self.Name()
def Const(self, array, blob_out=None, dtype=None):
if isinstance(array, bool):
return self.ConstantFill(
[],
blob_out or 1,
dtype=DataType.BOOL,
value=array)
if dtype is None:
array = np.array(array)
else:
array = np.array(array, dtype=dtype)
def do_set(operator):
return operator(
[],
blob_out or 1,
shape=array.shape,
values=array.flatten().tolist())
if array.dtype == np.int32:
return do_set(self.GivenTensorIntFill)
elif array.dtype == np.int64:
return do_set(self.GivenTensorInt64Fill)
elif array.dtype == np.str:
return do_set(self.GivenTensorStringFill)
else:
return do_set(self.GivenTensorFill)
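# Brief usage sketch (an illustrative assumption, not part of the source):
#
#   w = net.Const(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32), 'w')
#
# emits a GivenTensorFill op with shape (2, 2) and the flattened values;
# integer arrays are routed to GivenTensorIntFill / GivenTensorInt64Fill,
# and a bool scalar becomes a ConstantFill with dtype=BOOL, per the
# branches above.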
def BlobIsDefined(self, blob):
"""
Returns true if the given BlobReference is produced as output of
an operator in this net, or if it is provided as an external input.
"""
if self._recreate_lookup_tables:
self._RecreateLookupTables()
name = str(blob)
return (name in self._op_outputs) or (name in self._external_input_map)
def UsesBlob(self, blob):
"""
Returns true iff the given BlobReference is used by any operator
of this net, or if it is one of the external inputs of the net.
"""
blob_name = str(blob)
for op in self._net.op:
for input in op.input:
if input == blob_name:
return True
return blob_name in self._external_input_map
def GetBlobRef(self, blob_name):
"""
Given the name of a blob produced by this net, return a BlobReference
to it. If the blob is not produced by any op in this net,
raises KeyError.
"""
blob_name = str(blob_name)
if not self.BlobIsDefined(blob_name):
raise KeyError('Net does not define blob %s' % blob_name)
return BlobReference(blob_name, self)
def Clone(
self,
name,
blob_remap=None,
op_id_mask=None,
remap_funcs=None,
keep_schema=True
):
"""
Clone this net.
Args:
name: name of the cloned net
blob_remap: optional map with list of blob names to replace
op_id_mask: optional list of operator indices to include in
the cloned net. If not provided, all ops are included.
"""
if remap_funcs is None:
remap_funcs = {}
proto = self._net
new_proto = caffe2_pb2.NetDef()
new_proto.CopyFrom(proto)
new_proto.name = name
if blob_remap is None:
blob_remap = {}
if op_id_mask is None:
op_id_mask = range(0, len(proto.op))
def get_remapped_str(blob):
blob_str = str(blob)
return str(blob_remap.get(blob_str, blob_str))
def remap_list(proto_list):
new_list = [get_remapped_str(b) for b in proto_list]
del proto_list[:]
proto_list.extend(new_list)
def remap_op(op):
new_op = caffe2_pb2.OperatorDef()
new_op.CopyFrom(op)
remap_list(new_op.input)
remap_list(new_op.output)
if new_op.type in remap_funcs:
remap_funcs[new_op.type](new_op, (name + '/') if name else '')
return new_op
del new_proto.op[:]
new_proto.op.extend([remap_op(proto.op[op_id]) for op_id in op_id_mask])
remap_list(new_proto.external_input)
remap_list(new_proto.external_output)
new_net = Net(new_proto)
if keep_schema:
from caffe2.python import schema
if self._input_record:
new_net._input_record = schema.from_blob_list(
self._input_record,
[
BlobReference(get_remapped_str(blob), net=new_net)
for blob in self._input_record.field_blobs()
],
)
if self._output_record:
new_net._output_record = schema.from_blob_list(
self._output_record,
[
BlobReference(get_remapped_str(blob), net=new_net)
for blob in self._output_record.field_blobs()
],
)
new_net._attr_dict.update(self._attr_dict)
return new_net
def ClonePartial(self, name, inputs, outputs, remap_funcs=None):
"""
Clone this net, including only ops that are necessary in order to
compute `outputs` given `inputs`. Return references to the cloned
outputs. Internal blobs (blobs that are produced and consumed inside
the net but not used as outputs) will be remapped to avoid name
conflict.
Args:
name: the name of the cloned net
inputs: map where the keys correspond to BlobReferences in the
original net, and the values correspond to external inputs
in the partially cloned net. If `inputs` is a list, don't
remap input names.
outputs: outputs to be produced by the cloned net.
Returns:
Tuple (new_net, new_outputs)
new_net: a new Net object.
new_outputs: list of BlobReferences corresponding to the
outputs produced by new_net.
"""
input_is_pair_list = isinstance(inputs, list) and all(
isinstance(i, tuple) and len(i) == 2 for i in inputs)
inputs = (
inputs if isinstance(inputs, (dict, OrderedDict)) else
OrderedDict(inputs) if input_is_pair_list else
OrderedDict(zip(inputs, inputs)))
for output in outputs:
assert self.BlobIsDefined(output)
input_names = {str(k): str(v) for k, v in inputs.items()}
output_names = [str(o) for o in outputs]
proto = self._net
ssa, blob_versions = get_ssa(proto)
used_op_ids = get_op_ids_in_path(ssa, blob_versions, inputs, outputs)
disallowed_op_ids = get_op_ids_in_path(ssa, blob_versions, [], inputs)
assert len(set(used_op_ids) & set(disallowed_op_ids)) == 0, (
'Cannot partially clone net: some of the ops required would ' +
'generate the given input.')
sub_ssa = [op for i, op in enumerate(ssa) if i in used_op_ids]
undef_blobs = get_undefined_blobs(sub_ssa) - set(input_names.keys())
prefix = (name + '/') if name else ''
def remap(blob_name):
if blob_name in input_names:
return input_names[blob_name]
elif blob_name in undef_blobs:
return blob_name
else:
return prefix + blob_name
blob_mapping = {b: remap(b) for b in blob_versions.keys()}
new_net = self.Clone(name, blob_mapping, used_op_ids, remap_funcs)
new_in = [
blob_mapping[i] for i in input_names.keys()] + list(undef_blobs)
new_out = [blob_mapping[o] for o in output_names]
del new_net.Proto().external_input[:]
new_net.Proto().external_input.extend(new_in)
new_net._external_input_map = set(list(new_in))
del new_net.Proto().external_output[:]
new_net.Proto().external_output.extend(new_out)
return new_net, [new_net.GetBlobRef(o) for o in new_out]
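# Sketch of typical use (an assumed example, not from the original file):
# given a net where op0 computes 'b' from 'a' and op1 computes 'c' from 'b',
#
#   sub_net, (c_ref,) = net.ClonePartial('sub', {'b': 'b_in'}, ['c'])
#
# keeps only op1, reads the external input 'b_in' in place of 'b', and
# prefixes blobs defined inside the net (here 'c' becomes 'sub/c') to
# avoid name conflicts.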
def Proto(self):
self._InvalidateLookupTables()
return self._net
def NextScopedBlob(self, prefix='unnamed'):
"""Return the blob that has not been defined or registered in the
current net. It returns `ScopedBlobReference(prefix)`, if it's valid,
otherwise `ScopedBlobReference(prefix) + '_auto_' + ?`. Different calls
is guaranteed to return blob with different names.
"""
output_blob_base = ScopedName(prefix)
return self.NextBlob(output_blob_base)
def NextBlob(self, prefix='unnamed'):
"""Return the blob that has not been defined or registered in the
current net. It returns `BlobReference(prefix)`, if it's valid,
otherwise `BlobReference(prefix) + '_auto_' + ?`. Different calls
is guaranteed to return blob with different names."""
output_blob_base = BlobReference(prefix)
output_blob = output_blob_base
index = 0
while str(output_blob) in self._registered_blob_names or (
self.BlobIsDefined(output_blob)):
output_blob = output_blob_base + '_auto_' + str(index)
index += 1
self._registered_blob_names.add(str(output_blob))
return output_blob
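# Example of the collision behavior (a hedged sketch, not original code):
# if 'score' is already defined in the net, successive calls return
#   net.NextBlob('score')  ->  'score_auto_0'
#   net.NextBlob('score')  ->  'score_auto_1'
# because every returned name is also recorded in _registered_blob_names.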
def NextName(self, prefix=None, output_id=None):
"""Returns the next name to be used, if you do not want to explicitly
name your blob. [Deprecated, use NextBlob, NextScopedBlob instead]"""
if prefix:
output_name_base = self._net.name + '/' + prefix
output_name = output_name_base
if output_id is not None:
output_name += ':' + str(output_id)
index = 2
while self.BlobIsDefined(str(ScopedBlobReference(output_name))):
output_name = output_name_base + '_' + str(index)
if output_id is not None:
output_name += ':' + str(output_id)
index += 1
else:
output_name = self._net.name + '_blob_' + str(self._next_name_index)
self._next_name_index += 1
return str(output_name)
def _ExtendOps(self, new_ops):
self._net.op.extend(new_ops)
for op in new_ops:
self._op_outputs.update([str(o) for o in op.output])
def _CheckLookupTables(self):
'''
Called from unit tests to validate the internal lookup tables
match the protobuf contents.
'''
test_op_outputs = set()
for op in self._net.op:
for o in op.output:
test_op_outputs.add(o)
test_external_inp = set()
for inp in self._net.external_input:
test_external_inp.add(inp)
assert test_op_outputs.difference(self._op_outputs) == set()
assert test_external_inp.difference(self._external_input_map) == set()
def _InvalidateLookupTables(self):
self._recreate_lookup_tables = True
def _RecreateLookupTables(self):
self._op_outputs = set()
for op in self._net.op:
for o in op.output:
self._op_outputs.add(o)
self._external_input_map = set()
for inp in self._net.external_input:
self._external_input_map.add(inp)
self._recreate_lookup_tables = False
def AddGradientOperators(self, ys, skip=0):
"""Add the gradient for operators in the net.
Inputs:
ys: a list or a dictionary specifying what blobs we want to compute
derivatives of. If the input is a list, we will automatically
generate their gradients with all-one values; if the input is a
dictionary, for any dictionary entries that are not None, we will
take the corresponding blobs as their gradients; for all those
that are None, we will auto-fill them with 1.
skip: skips the first n operators. This is provided mainly because a
lot of nets may use the first few operators for data generation or
similar tasks that do not need gradients.
Outputs:
returns a map from blob names in the input network to blobs
containing their gradient, or a GradientSlice in the case of a
sparse gradient.
Currently, this is hard-coded for float operators if there are branches
(i.e. a blob is used as input to multiple operators). This is because
the gradient accumulation (Sum) is float only right now.
"""
grad_ops, input_to_grad = GradientRegistry.GetBackwardPass(
self._net.op[skip:], ys)
# Check if in immediate mode: the grad_ops are actually being produced
# by C++ and bypasses the CreateOperator() call, so in immediate mode
# we will have to explicitly run them.
if workspace.IsImmediate():
for op in grad_ops:
workspace.RunOperatorImmediate(op)
self._ExtendOps(grad_ops)
return input_to_grad
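# Typical call pattern (illustrative, under the assumption that 'loss' is a
# scalar blob produced by this net):
#
#   grad_map = net.AddGradientOperators(['loss'])
#   w_grad = grad_map['w']   # BlobReference, or GradientSlice if sparse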
def AddExternalInput(self, *inputs):
assert len(inputs) > 0
refs = []
for input in inputs:
input_name = str(input)
assert str(input) not in self._external_input_map, (
'Net already contains an input named %s' % input_name)
for input in inputs:
input_name = str(input)
self._net.external_input.extend([input_name])
self._external_input_map.update([input_name])
refs.append(_get_blob_ref(input_name))
return refs[0] if len(refs) == 1 else refs
def AddExternalOutput(self, *outputs):
for output in outputs:
assert isinstance(output, BlobReference)
assert self.BlobIsDefined(output)
for output in outputs:
self.Proto().external_output.extend([str(output)])
def AddScopedExternalInputs(self, *inputs):
return self.AddExternalInput(
* [ScopedBlobReference(str(b)) for b in inputs]
)
def AddScopedExternalOutputs(self, *outputs):
return self.AddExternalOutput(
* [ScopedBlobReference(str(b)) for b in outputs]
)
@property
def external_inputs(self):
return map(_get_blob_ref, self._net.external_input)
@property
def external_outputs(self):
return map(_get_blob_ref, self._net.external_output)
def set_input_record(self, input_record):
from caffe2.python import schema
assert self._input_record is None, (
'Input schema cannot be reset')
if not input_record.has_blobs():
with NameScope(self.Name()):
self._input_record = schema.NewRecord(self, input_record)
else:
self._input_record = input_record
for blob in input_record.field_blobs():
if blob not in self.external_inputs:
self.AddExternalInput(blob)
return self._input_record
def set_output_record(self, record):
assert self._output_record is None, (
'Output record cannot be reset')
for blob in record.field_blobs():
assert self.BlobIsDefined(blob), "{} is not defined".format(blob)
for blob in record.field_blobs():
self.AddExternalOutput(blob)
self._output_record = record
def AppendOutputRecordField(self, field_name, record):
from caffe2.python import schema
assert self._output_record is not None, (
'Tried to append to missing output record'
)
for blob in record.field_blobs():
assert self.BlobIsDefined(blob)
for blob in record.field_blobs():
self.AddExternalOutput(blob)
self._output_record = self._output_record + schema.Struct(
(field_name, record)
)
def input_record(self):
return self._input_record
def output_record(self):
return self._output_record
def AddExternalInputs(self, *inputs):
return self.AddExternalInput(*inputs)
def AddExternalOutputs(self, *outputs):
self.AddExternalOutput(*outputs)
def DeduplicateGradientSlices(self, g, aggregator='sum'):
assert isinstance(g, GradientSlice)
unique, remapping = self.Unique([g.indices], 2, engine='SparseHash')
if aggregator.lower() == 'sum':
new_g = self.UnsortedSegmentSum([g.values, remapping], 1)
elif aggregator.lower() == 'mean':
new_g = self.UnsortedSegmentMean([g.values, remapping], 1)
else:
raise ValueError('{} is not supported'.format(aggregator))
return GradientSlice(indices=unique, values=new_g)
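# Worked example (assumed values for illustration only): for a GradientSlice
# with indices [3, 3, 5] and values [v0, v1, v2], the 'sum' aggregator
# returns indices [3, 5] with values [v0 + v1, v2], so each unique index
# contributes a single accumulated gradient row.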
def RunAllOnGPU(self, gpu_id=0, use_cudnn=False):
"""A convenient function to run everything on the GPU."""
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = gpu_id
self._net.device_option.CopyFrom(device_option)
if use_cudnn:
for op in self._net.op:
op.engine = "CUDNN"
def RunAllOnMKL(self):
"""A convenient function to run everything on the GPU."""
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.MKLDNN
self._net.device_option.CopyFrom(device_option)
def _CreateAndAddToSelf(self, op_type, inputs, outputs=None, **kwargs):
"""A helper function to create an operator and add it to self.
"""
inputs = _RectifyInputOutput(inputs)
for input in inputs:
if not self.BlobIsDefined(input):
assert input.Net() != self
self.AddExternalInput(input)
if outputs is None:
# If we do not specify an output, we will assume that this op
# produces a single output.
outputs = self.NextName(prefix=op_type)
elif type(outputs) is int:
# In this case, we will auto-fill the given number of outputs
# with auto-generated names.
outputs = [
self.NextName(prefix=op_type, output_id=i)
for i in range(outputs)]
outputs = _RectifyInputOutput(outputs, net=self)
op = CreateOperator(op_type, inputs, outputs, **kwargs)
self._ExtendOps([op])
if len(op.output) == 0:
return
elif len(op.output) == 1:
return BlobReference(str(op.output[0]), self)
else:
return tuple(BlobReference(str(o), self) for o in op.output)
def __getattr__(self, op_type):
if op_type.startswith('__'):
raise AttributeError('Attribute {} not found.'.format(op_type))
if not IsOperator(op_type) and not IsOperatorWithEngine(op_type, "CUDNN"):
raise RuntimeError(
'Method ' + op_type + ' is not a registered operator.' +
' Did you mean: [' +
",".join(workspace.C.nearby_opnames(op_type)) + ']'
)
return lambda *args, **kwargs: self._CreateAndAddToSelf(
op_type, *args, **kwargs)
def __dir__(self):
additional_methods = [
op
for op in _REGISTERED_OPERATORS
if '_ENGINE_' not in op]
return sorted(set(
dir(type(self)) +
self.__dict__.keys() +
additional_methods))
def Python(
self, f, grad_f=None, python_func_type=None, pass_workspace=False,
):
"""
Registers and returns a python operator.
`f` and `grad_f` can be one of the following:
- a function with signature (inputs, outputs), where inputs and
outputs are lists of CPUTensor objects. This function will be
called from C++ every time the operator is executed.
- a tuple (func, args, kwargs), where `func` is a callable, args is
an argument tuple, and kwargs is a dict. The call:
f = func(*args, **kwargs)
will be performed locally at node initialization time, on all of
the nodes of the job, returning `f`, a callable that will be used
as the python operator function during Net execution. This is
useful when using a python operator in a distributed context, as
it allows one to create and keep local python state across calls
to the operator.
`python_func_type` is a type that is constructed as python_func_type(f)
and provides implementations of the forward and backward functions.
It is useful when a user needs a stateful PythonOp (e.g. using
autograd to compute grad_f).
If `pass_workspace` is True, the signature is changed to
(inputs, outputs, workspace) where `workspace` is the workspace the op
is going to run on. This is potentially dangerous (as the op can
manipulate the workspace directly), so use at your own risk.
"""
assert(IsOperator('Python'))
if isinstance(f, tuple) or isinstance(grad_f, tuple):
# if we got a tuple, we will make sure this tuple will be
# registered to run at startup on each of the workers in a
# distributed run.
registry = worker_init_func(_RegisterPythonImpl)
else:
registry = _RegisterPythonImpl
token = registry(
f, grad_f, python_func_type, pass_workspace=pass_workspace,
name='%s:%d' % (str(self), len(self.Proto().op))
)
return lambda *args, **kwargs: self._CreateAndAddToSelf(
'Python', token=token, *args, **kwargs)
def get_net_name(netlike):
if isinstance(netlike, Net):
return netlike.Proto().name
elif isinstance(netlike, caffe2_pb2.NetDef):
return netlike.name
else:
return netlike
def output_to_list(op_output):
"""
Ensures that the output of an operator is a list.
Use when an operator has a variable number of outputs, but a list of
outputs is desired even when number of outputs is 1.
Args:
op_output: Either a BlobReference or an iterable of BlobReferences.
Returns:
A list of BlobReferences.
"""
assert type(op_output) in (list, tuple, BlobReference)
return (
[op_output]
if isinstance(op_output, BlobReference) else list(op_output))
def _add_net_to_dict(net_dict, net):
name = get_net_name(net)
if name in net_dict:
assert net_dict[name] is None or net == net_dict[name], (
'Different nets with same name: ' + name)
return False
else:
net_dict[name] = net if isinstance(net, Net) else None
return True
class ExecutionStep(object):
_step_names_used = set()
@staticmethod
def _get_next_step_name(basename):
name = basename
next_idx = 1
while name in ExecutionStep._step_names_used:
name = basename + '_' + str(next_idx)
next_idx += 1
ExecutionStep._step_names_used |= set([name])
return name
def __init__(self, name, nets=None, num_iter=None):
self._step = caffe2_pb2.ExecutionStep()
self._step.name = name or ExecutionStep._get_next_step_name('step')
self._net_dict = OrderedDict()
self._is_used = False
self._substeps = []
if nets is not None:
if type(nets) is Net:
nets = [nets]
for net in nets:
if _add_net_to_dict(self._net_dict, net):
self._step.network.extend([get_net_name(net)])
if num_iter is not None:
self._step.num_iter = num_iter
def get_net(self, name):
return self._net_dict[name]
def Name(self):
return self._step.name
def __str__(self):
return self._step.name
def _assert_can_mutate(self):
assert not self._is_used, (
'Cannot mutate a step that has already been added to a plan/step.')
def _notify_is_used(self):
self._is_used = True
def Proto(self):
return self._step
def HasNets(self):
return self._step.network is not None and (
len(self._step.network) > 0)
def HasSubsteps(self):
return self._step.substep is not None and (
len(self._step.substep) > 0)
def Nets(self):
return self._net_dict.values()
def Substeps(self):
return self._substeps
def SetIter(self, num_iter):
self._assert_can_mutate()
self._step.num_iter = num_iter
def SetOnlyOnce(self, only_once):
self._assert_can_mutate()
self._step.only_once = only_once
def SetShouldStopBlob(self, should_stop_blob):
assert isinstance(should_stop_blob, BlobReference), (
"expects BlobReference here, got {}".format(type(should_stop_blob)))
self._assert_can_mutate()
self._step.should_stop_blob = str(should_stop_blob)
def RunEveryMillis(self, interval):
"""
Run this step every `interval` milliseconds, as long as its
siblings are still running. It is guaranteed that, after all
siblings finish, this step will run at least once.
This property is ignored for top-level ExecutionSteps.
"""
self._step.run_every_ms = interval
def SetReportNet(self, report_net, report_interval):
""" DEPRECATED. Use RunEveryMillis instead. """
self._assert_can_mutate()
_add_net_to_dict(self._net_dict, report_net)
self._step.report_net = get_net_name(report_net)
self._step.report_interval = report_interval
def AddSubstep(self, substep):
self._assert_can_mutate()
assert not self.HasNets(), 'Cannot have both network and substeps.'
if isinstance(substep, ExecutionStep):
substep._notify_is_used()
if not substep.HasNets() and not substep.HasSubsteps():
return self
for net in substep.Nets():
_add_net_to_dict(self._net_dict, net)
self._substeps.append(substep)
proto = substep.Proto()
else:
proto = substep
self._step.substep.add().CopyFrom(proto)
return self
def SetConcurrentSubsteps(self, concurrent_substeps):
self._assert_can_mutate()
assert not self.HasNets(), 'Cannot have both network and substeps.'
self._step.concurrent_substeps = concurrent_substeps
def AddNet(self, net):
self._assert_can_mutate()
assert not self.HasSubsteps(), 'Cannot have both network and substeps.'
assert isinstance(net, Net)
_add_net_to_dict(self._net_dict, net)
self._step.network.extend([get_net_name(net)])
return self
def get_all_attributes(self, name):
"""
Return the list of all attributes under the given `name`, present in
all of the nets used in this execution step and its children.
"""
objs = []
for net in self._net_dict.values():
objs += net.get_attributes(name)
return objs
def add_nets_in_order(step, net_list):
proto = step.Proto()
for substep in step.Substeps():
add_nets_in_order(substep, net_list)
for net in proto.network:
if net not in net_list:
net_list.append(net)
# FIXME(azzolini): This is actually wrong. Report nets should be
# instantiated first since they may run before any substep is run.
# However, currently, Reporter depends on this behavior.
if proto.report_net and proto.report_net not in net_list:
net_list.append(proto.report_net)
class Plan(object):
def __init__(self, name_or_step):
self._plan = caffe2_pb2.PlanDef()
self._net_dict = OrderedDict()
if isinstance(name_or_step, ExecutionStep):
self._plan.name = name_or_step.Name()
self.AddStep(name_or_step)
elif isinstance(name_or_step, basestring):
self._plan.name = name_or_step
else:
raise ValueError('name_or_step must be a string or ExecutionStep')
def __str__(self):
return self._plan.name
def Proto(self):
return self._plan
def AddNets(self, nets):
for net in nets:
if _add_net_to_dict(self._net_dict, net):
assert isinstance(net, Net)
self._plan.network.add().CopyFrom(net.Proto())
def Nets(self):
return self._net_dict.values()
def AddStep(self, step):
assert isinstance(step, ExecutionStep)
step._notify_is_used()
if not step.HasNets() and not step.HasSubsteps():
return
self._plan.execution_step.add().CopyFrom(step.Proto())
# nets need to be added to the plan in order of usage
net_list = []
add_nets_in_order(step, net_list)
self.AddNets([step.get_net(n) for n in net_list])
def get_all_attributes(self, name):
"""
Return the list of all attributes under the given `name`, present in
all of the nets used in this plan.
"""
objs = []
for net in self._net_dict.values():
objs += net.get_attributes(name)
return objs
def to_execution_step(step_or_nets, default_name=None):
from caffe2.python.net_builder import NetBuilder
if isinstance(step_or_nets, ExecutionStep):
return step_or_nets
stop_blob = None
if not default_name and hasattr(step_or_nets, 'name'):
default_name = step_or_nets.name
if isinstance(step_or_nets, NetBuilder):
stop_blob = step_or_nets._stop_blob
step_or_nets = step_or_nets.get()
return execution_step(
default_name, step_or_nets, should_stop_blob=stop_blob)
def execution_step(default_name,
steps_or_nets,
num_iter=None,
report_net=None,
report_interval=None,
concurrent_substeps=None,
should_stop_blob=None,
only_once=None):
"""
Helper for creating an ExecutionStep.
- steps_or_nets can be:
- None
- Net
- ExecutionStep
- list<Net>
- list<ExecutionStep>
- should_stop_blob is either None or a scalar boolean blob.
- This blob is checked AFTER every substeps/subnets.
- If specified and true, then this step will return immediately.
- Be sure to handle race conditions if setting from concurrent threads.
- if no should_stop_blob or num_iter is provided, defaults to num_iter=1
"""
assert should_stop_blob is None or num_iter is None, (
'Cannot set both should_stop_blob and num_iter.')
if should_stop_blob is None and num_iter is None:
num_iter = 1
step = ExecutionStep(default_name)
if should_stop_blob is not None:
step.SetShouldStopBlob(should_stop_blob)
if num_iter is not None:
step.SetIter(num_iter)
if only_once is not None:
step.SetOnlyOnce(only_once)
if concurrent_substeps is not None:
step.SetConcurrentSubsteps(concurrent_substeps)
if report_net is not None:
assert report_interval is not None
step.SetReportNet(report_net, report_interval)
if isinstance(steps_or_nets, ExecutionStep):
step.AddSubstep(steps_or_nets)
elif isinstance(steps_or_nets, Net):
step.AddNet(steps_or_nets)
elif isinstance(steps_or_nets, list):
if all(isinstance(x, Net) for x in steps_or_nets):
map(step.AddNet, steps_or_nets)
else:
map(step.AddSubstep, map(to_execution_step, steps_or_nets))
elif steps_or_nets:
raise ValueError(
'steps_or_nets must be a step, a net, or a list of nets or steps.')
return step
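# Composition sketch (illustrative, not from the original module): nets can
# be wrapped into steps and nested into a single runnable plan, e.g.
#
#   init_step = execution_step('init', init_net, only_once=True)
#   train_step = execution_step('train', train_net, num_iter=100)
#   plan = Plan('example')
#   plan.AddStep(execution_step('main', [init_step, train_step]))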
def scoped_execution_step(name, *args, **kwargs):
"""Same as execution_step() except that the step name is scoped."""
default_name = ScopedName(name) if name else name
return execution_step(default_name, *args, **kwargs)
|
## @package queue_util
# Module caffe2.python.queue_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, dataio
from caffe2.python.task import TaskGroup
class _QueueReader(dataio.Reader):
def __init__(self, wrapper):
assert wrapper.schema is not None, (
'Queue needs a schema in order to be read from.')
dataio.Reader.__init__(self, wrapper.schema())
self._wrapper = wrapper
def setup_ex(self, init_net, exit_net):
exit_net.CloseBlobsQueue([self._wrapper.queue()], 0)
def read_ex(self, local_init_net, local_finish_net):
self._wrapper._new_reader(local_init_net)
dequeue_net = core.Net('dequeue')
fields, status_blob = dequeue(
dequeue_net,
self._wrapper.queue(),
len(self.schema().field_names()),
field_names=self.schema().field_names())
return [dequeue_net], status_blob, fields
class _QueueWriter(dataio.Writer):
def __init__(self, wrapper):
self._wrapper = wrapper
def setup_ex(self, init_net, exit_net):
exit_net.CloseBlobsQueue([self._wrapper.queue()], 0)
def write_ex(self, fields, local_init_net, local_finish_net, status):
self._wrapper._new_writer(self.schema(), local_init_net)
enqueue_net = core.Net('enqueue')
enqueue(enqueue_net, self._wrapper.queue(), fields, status)
return [enqueue_net]
class QueueWrapper(dataio.Pipe):
def __init__(self, handler, schema=None):
dataio.Pipe.__init__(self, schema, TaskGroup.LOCAL_SETUP)
self._queue = handler
def reader(self):
return _QueueReader(self)
def writer(self):
return _QueueWriter(self)
def queue(self):
return self._queue
class Queue(QueueWrapper):
def __init__(self, capacity, schema=None, name='queue'):
# find a unique blob name for the queue
net = core.Net(name)
queue_blob = net.AddExternalInput(net.NextName('handler'))
QueueWrapper.__init__(self, queue_blob, schema)
self.capacity = capacity
self._setup_done = False
def setup(self, global_init_net):
assert self._schema, 'This queue does not have a schema.'
self._setup_done = True
global_init_net.CreateBlobsQueue(
[],
[self._queue],
capacity=self.capacity,
num_blobs=len(self._schema.field_names()),
field_names=self._schema.field_names())
def enqueue(net, queue, data_blobs, status=None):
if status is None:
status = net.NextName('status')
results = net.SafeEnqueueBlobs([queue] + data_blobs, data_blobs + [status])
return results[-1]
def dequeue(net, queue, num_blobs, status=None, field_names=None):
if field_names is not None:
assert len(field_names) == num_blobs
data_names = [net.NextName(name) for name in field_names]
else:
data_names = [net.NextName('data', i) for i in range(num_blobs)]
if status is None:
status = net.NextName('status')
results = net.SafeDequeueBlobs(queue, data_names + [status])
results = list(results)
status_blob = results.pop(-1)
return results, status_blob
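# Minimal usage sketch (hedged; the blob names here are assumptions for
# illustration):
#
#   net = core.Net('io')
#   status = enqueue(net, queue_blob, [data_blob])
#   blobs, status = dequeue(net, queue_blob, 1)
#
# The trailing status blob returned by SafeEnqueueBlobs/SafeDequeueBlobs
# turns true once the queue has been closed, letting readers drain cleanly.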
def close_queue(step, *queues):
close_net = core.Net("close_queue_net")
for queue in queues:
close_net.CloseBlobsQueue([queue], 0)
close_step = core.execution_step("%s_step" % str(close_net), close_net)
return core.execution_step(
"%s_wraper_step" % str(close_net),
[step, close_step])
|
## @package predictor_constants
# Module caffe2.python.predictor_constants
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import caffe2.proto.predictor_consts_pb2 as predictor_consts
predictor_constants = predictor_consts.PredictorConsts()
|
## @package convnet_benchmarks
# Module caffe2.python.convnet_benchmarks
"""
Benchmark for common convnets.
Speed on Titan X, with 10 warmup steps and 10 main steps and with different
versions of cudnn, is as follows (the times reported below are per-batch,
forward / forward+backward):
CuDNN V3 CuDNN v4
AlexNet 32.5 / 108.0 27.4 / 90.1
OverFeat 113.0 / 342.3 91.7 / 276.5
Inception 134.5 / 485.8 125.7 / 450.6
VGG (batch 64) 200.8 / 650.0 164.1 / 551.7
Speed on Inception with varied batch sizes and CuDNN v4 is as follows:
Batch Size Speed per batch Speed per image
16 22.8 / 72.7 1.43 / 4.54
32 38.0 / 127.5 1.19 / 3.98
64 67.2 / 233.6 1.05 / 3.65
128 125.7 / 450.6 0.98 / 3.52
Speed on Tesla M40, with 10 warmup steps and 10 main steps and with cudnn
v4, is as follows:
AlexNet 68.4 / 218.1
OverFeat 210.5 / 630.3
Inception 300.2 / 1122.2
VGG (batch 64) 405.8 / 1327.7
(Note that these numbers involve a "full" backprop, i.e. the gradient
with respect to the input image is also computed.)
To get the numbers, simply run:
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL --forward_only True
done
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL
done
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA
for BS in 16 32 64 128; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception
done
Note that VGG needs to be run at batch 64 due to the memory limit on the
backward pass.
"""
import argparse
from caffe2.python import cnn, workspace
def MLP(order, cudnn_ws):
model = cnn.CNNModelHelper()
d = 256
depth = 20
width = 3
for i in range(depth):
for j in range(width):
current = "fc_{}_{}".format(i, j) if i > 0 else "data"
next_ = "fc_{}_{}".format(i + 1, j)
model.FC(
current, next_,
dim_in=d, dim_out=d,
weight_init=model.XavierInit,
bias_init=model.XavierInit)
model.Sum(["fc_{}_{}".format(depth, j) for j in range(width)], ["sum"])
model.FC("sum", "last",
dim_in=d, dim_out=1000,
weight_init=model.XavierInit,
bias_init=model.XavierInit)
xent = model.LabelCrossEntropy(["last", "label"], "xent")
model.AveragedLoss(xent, "loss")
return model, d
def AlexNet(order, cudnn_ws):
model = cnn.CNNModelHelper(
order, name="alexnet",
use_cudnn=True, cudnn_exhaustive_search=True,
ws_nbytes_limit=cudnn_ws)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4,
pad=2
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2)
conv2 = model.Conv(
pool1,
"conv2",
64,
192,
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
192,
384,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
384,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
conv5 = model.Conv(
relu4,
"conv5",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
pool5 = model.MaxPool(relu5, "pool5", kernel=3, stride=2)
fc6 = model.FC(
pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = model.Relu(fc6, "fc6")
fc7 = model.FC(
relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = model.Relu(fc7, "fc7")
fc8 = model.FC(
relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
loss = model.AveragedLoss(xent, "loss")
return model, 224
def OverFeat(order, cudnn_ws):
model = cnn.CNNModelHelper(
order, name="overfeat",
use_cudnn=True, cudnn_exhaustive_search=True,
ws_nbytes_limit=cudnn_ws)
conv1 = model.Conv(
"data",
"conv1",
3,
96,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
conv2 = model.Conv(
pool1, "conv2", 96, 256, 5, ('XavierFill', {}), ('ConstantFill', {})
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
512,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
conv5 = model.Conv(
relu4,
"conv5",
1024,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
pool5 = model.MaxPool(relu5, "pool5", kernel=2, stride=2)
fc6 = model.FC(
pool5, "fc6", 1024 * 6 * 6, 3072, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = model.Relu(fc6, "fc6")
fc7 = model.FC(
relu6, "fc7", 3072, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = model.Relu(fc7, "fc7")
fc8 = model.FC(
relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
loss = model.AveragedLoss(xent, "loss")
return model, 231
def VGGA(order, cudnn_ws):
model = cnn.CNNModelHelper(
order, name='vgg-a',
use_cudnn=True, cudnn_exhaustive_search=True,
ws_nbytes_limit=cudnn_ws)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
conv2 = model.Conv(
pool1,
"conv2",
64,
128,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=2, stride=2)
conv3 = model.Conv(
pool2,
"conv3",
128,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = model.Relu(conv3, "conv3")
conv4 = model.Conv(
relu3,
"conv4",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = model.Relu(conv4, "conv4")
pool4 = model.MaxPool(relu4, "pool4", kernel=2, stride=2)
conv5 = model.Conv(
pool4,
"conv5",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = model.Relu(conv5, "conv5")
conv6 = model.Conv(
relu5,
"conv6",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu6 = model.Relu(conv6, "conv6")
pool6 = model.MaxPool(relu6, "pool6", kernel=2, stride=2)
conv7 = model.Conv(
pool6,
"conv7",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu7 = model.Relu(conv7, "conv7")
conv8 = model.Conv(
relu7,
"conv8",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu8 = model.Relu(conv8, "conv8")
pool8 = model.MaxPool(relu8, "pool8", kernel=2, stride=2)
fcix = model.FC(
pool8, "fcix", 512 * 7 * 7, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
reluix = model.Relu(fcix, "fcix")
fcx = model.FC(
reluix, "fcx", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relux = model.Relu(fcx, "fcx")
fcxi = model.FC(
relux, "fcxi", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = model.Softmax(fcxi, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
loss = model.AveragedLoss(xent, "loss")
return model, 231
def _InceptionModule(
model, input_blob, input_depth, output_name, conv1_depth, conv3_depths,
conv5_depths, pool_depth
):
# path 1: 1x1 conv
conv1 = model.Conv(
input_blob, output_name + ":conv1", input_depth, conv1_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
conv1 = model.Relu(conv1, conv1)
# path 2: 1x1 conv + 3x3 conv
conv3_reduce = model.Conv(
input_blob, output_name + ":conv3_reduce", input_depth, conv3_depths[0],
1, ('XavierFill', {}), ('ConstantFill', {})
)
conv3_reduce = model.Relu(conv3_reduce, conv3_reduce)
conv3 = model.Conv(
conv3_reduce,
output_name + ":conv3",
conv3_depths[0],
conv3_depths[1],
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
conv3 = model.Relu(conv3, conv3)
# path 3: 1x1 conv + 5x5 conv
conv5_reduce = model.Conv(
input_blob, output_name + ":conv5_reduce", input_depth, conv5_depths[0],
1, ('XavierFill', {}), ('ConstantFill', {})
)
conv5_reduce = model.Relu(conv5_reduce, conv5_reduce)
conv5 = model.Conv(
conv5_reduce,
output_name + ":conv5",
conv5_depths[0],
conv5_depths[1],
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
conv5 = model.Relu(conv5, conv5)
# path 4: pool + 1x1 conv
pool = model.MaxPool(
input_blob,
output_name + ":pool",
kernel=3,
stride=1,
pad=1
)
pool_proj = model.Conv(
pool, output_name + ":pool_proj", input_depth, pool_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
pool_proj = model.Relu(pool_proj, pool_proj)
output = model.Concat([conv1, conv3, conv5, pool_proj], output_name)
return output
def Inception(order, cudnn_ws):
model = cnn.CNNModelHelper(
order, name="inception",
use_cudnn=True, cudnn_exhaustive_search=True,
ws_nbytes_limit=cudnn_ws)
conv1 = model.Conv(
"data",
"conv1",
3,
64,
7,
('XavierFill', {}),
('ConstantFill', {}),
stride=2,
pad=3
)
relu1 = model.Relu(conv1, "conv1")
pool1 = model.MaxPool(relu1, "pool1", kernel=3, stride=2, pad=1)
conv2a = model.Conv(
pool1, "conv2a", 64, 64, 1, ('XavierFill', {}), ('ConstantFill', {})
)
conv2a = model.Relu(conv2a, conv2a)
conv2 = model.Conv(
conv2a,
"conv2",
64,
192,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = model.Relu(conv2, "conv2")
pool2 = model.MaxPool(relu2, "pool2", kernel=3, stride=2, pad=1)
# Inception modules
inc3 = _InceptionModule(
model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32
)
inc4 = _InceptionModule(
model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64
)
pool5 = model.MaxPool(inc4, "pool5", kernel=3, stride=2, pad=1)
inc5 = _InceptionModule(
model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64
)
inc6 = _InceptionModule(
model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64
)
inc7 = _InceptionModule(
model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64
)
inc8 = _InceptionModule(
model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64
)
inc9 = _InceptionModule(
model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128
)
pool9 = model.MaxPool(inc9, "pool9", kernel=3, stride=2, pad=1)
inc10 = _InceptionModule(
model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128
)
inc11 = _InceptionModule(
model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128
)
pool11 = model.AveragePool(inc11, "pool11", kernel=7, stride=1)
fc = model.FC(
pool11, "fc", 1024, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
# It seems that Soumith's benchmark does not have softmax on top
# for Inception. We will add it anyway so we can have a proper
# backward pass.
pred = model.Softmax(fc, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
loss = model.AveragedLoss(xent, "loss")
return model, 224
def AddParameterUpdate(model):
""" Simple plain SGD update -- not tuned to actually train the models """
ITER = model.Iter("iter")
LR = model.LearningRate(
ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999)
ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
for param in model.params:
param_grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, param_grad, LR], param)
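# Note on the update rule: WeightedSum computes
#   param <- 1.0 * param + LR * param_grad
# and since base_lr above is negative, LR < 0 and this is plain gradient
# descent, param <- param - |LR| * grad, with a step-decayed learning rate.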
def Benchmark(model_gen, arg):
model, input_size = model_gen(arg.order, arg.cudnn_ws)
model.Proto().type = arg.net_type
model.Proto().num_workers = arg.num_workers
# In order to be able to run everything without feeding more stuff, let's
# add the data and label blobs to the parameter initialization net as well.
if arg.order == "NCHW":
input_shape = [arg.batch_size, 3, input_size, input_size]
else:
input_shape = [arg.batch_size, input_size, input_size, 3]
if arg.model == "MLP":
input_shape = [arg.batch_size, input_size]
model.param_init_net.GaussianFill(
[],
"data",
shape=input_shape,
mean=0.0,
std=1.0
)
model.param_init_net.UniformIntFill(
[],
"label",
shape=[arg.batch_size, ],
min=0,
max=999
)
if arg.forward_only:
print('{}: running forward only.'.format(arg.model))
else:
print('{}: running forward-backward.'.format(arg.model))
model.AddGradientOperators(["loss"])
AddParameterUpdate(model)
if arg.order == 'NHWC':
print(
'==WARNING==\n'
'NHWC order with CuDNN may not be supported yet, so I might\n'
'exit suddenly.'
)
if not arg.cpu:
model.param_init_net.RunAllOnGPU()
model.net.RunAllOnGPU()
if arg.engine:
for op in model.net.Proto().op:
op.engine = arg.engine
if arg.dump_model:
# Writes out the pbtxt for benchmarks on e.g. Android
with open(
"{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
) as fid:
fid.write(str(model.param_init_net.Proto()))
with open("{0}.pbtxt".format(arg.model, arg.batch_size), "w") as fid:
fid.write(str(model.net.Proto()))
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.BenchmarkNet(
model.net.Proto().name, arg.warmup_iterations, arg.iterations,
arg.layer_wise_benchmark)
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size."
)
parser.add_argument("--model", type=str, help="The model to benchmark.")
parser.add_argument(
"--order",
type=str,
default="NCHW",
help="The order to evaluate."
)
parser.add_argument(
"--cudnn_ws",
type=int,
help="The cudnn workspace size."
)
parser.add_argument(
"--iterations",
type=int,
default=10,
help="Number of iterations to run the network."
)
parser.add_argument(
"--warmup_iterations",
type=int,
default=10,
help="Number of warm-up iterations before benchmarking."
)
parser.add_argument(
"--forward_only",
action='store_true',
help="If set, only run the forward pass."
)
parser.add_argument(
"--layer_wise_benchmark",
action='store_true',
help="If True, run the layer-wise benchmark as well."
)
parser.add_argument(
"--cpu",
action='store_true',
help="If True, run testing on CPU instead of GPU."
)
parser.add_argument(
"--engine",
type=str,
default="",
help="If set, blindly prefer the given engine(s) for every op.")
parser.add_argument(
"--dump_model",
action='store_true',
help="If True, dump the model prototxts to disk."
)
parser.add_argument("--net_type", type=str, default="dag")
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--use-nvtx", default=False, action='store_true')
parser.add_argument("--htrace_span_log_path", type=str)
return parser
if __name__ == '__main__':
args = GetArgumentParser().parse_args()
if (
not args.batch_size or not args.model or not args.order
):
GetArgumentParser().print_help()
else:
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=0'] +
(['--caffe2_use_nvtx'] if args.use_nvtx else []) +
(['--caffe2_htrace_span_log_path=' + args.htrace_span_log_path]
if args.htrace_span_log_path else []))
model_map = {
'AlexNet': AlexNet,
'OverFeat': OverFeat,
'VGGA': VGGA,
'Inception': Inception,
'MLP': MLP,
}
Benchmark(model_map[args.model], args)
|
## @package gradient_checker
# Module caffe2.python.gradient_checker
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
class NetGradientChecker(object):
@staticmethod
def Check(net, outputs_with_grad, input_values,
input_to_check, step_size=0.0001,
threshold=0.05, print_net=True):
assert input_to_check in input_values.keys()
net_copy = net.Clone(net.Name() + "_copy")
grad_map = net_copy.AddGradientOperators(outputs_with_grad)
assert input_to_check in grad_map, (
'{} has no gradient, cannot check net gradient.'.format(
input_to_check))
for name, value in input_values.items():
workspace.blobs[name] = value
def GetLoss(new_value):
workspace.blobs[input_to_check] = new_value
workspace.RunNetOnce(net_copy)
return sum([
workspace.blobs[output]
for output in outputs_with_grad
]).sum()
def GetValue(dim, delta):
input_value = input_values[input_to_check].copy()
input_value.flat[dim] += delta
return input_value
workspace.RunNetOnce(net_copy)
grad_blob = grad_map[input_to_check]
def get_analytic_grad(grad_blob):
if isinstance(grad_blob, core.BlobReference):
return workspace.blobs[grad_blob]
# If grad_blob is not a single blob, it should be a gradient slice.
# To make it comparable with the estimated gradient, which is dense,
# we first convert grad_blob to a dense gradient.
assert isinstance(grad_blob, core.GradientSlice)
dense_grad = 'tmp_dense_grad'
sparse_to_dense_op = core.CreateOperator(
'SparseToDense',
[grad_blob.indices, grad_blob.values, input_to_check],
dense_grad,
)
workspace.RunOperatorOnce(sparse_to_dense_op)
return workspace.blobs[dense_grad]
analytic_grad = get_analytic_grad(grad_blob)
grad_estimate = np.zeros_like(input_values[input_to_check])
for dim in range(input_values[input_to_check].size):
pos_loss = GetLoss(GetValue(dim, step_size))
neg_loss = GetLoss(GetValue(dim, -step_size))
grad_estimate.flat[dim] = (pos_loss - neg_loss) / step_size / 2
err_msg = "Error in gradient check for net_copy {}".format(
net.Name())
if print_net:
err_msg += ": {}".format(net.Proto())
np.testing.assert_allclose(
analytic_grad, grad_estimate,
atol=threshold, rtol=threshold,
err_msg=err_msg,
)
delta = np.abs(grad_estimate - analytic_grad).flatten()
return np.mean(delta), max(delta)
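# The numeric estimate above is the central difference
#   dL/dx_i  ~=  (L(x + h * e_i) - L(x - h * e_i)) / (2 * h)
# with h = step_size, compared elementwise against the analytic gradient
# within the given absolute/relative threshold.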
class GradientChecker:
"""A gradient checker in Python.
This is not the most efficient way to check gradients, as the Python
interface will involve a lot of copy back and forth operations. Use at your
own risk.
"""
def __init__(
self,
stepsize,
threshold,
device_option=caffe2_pb2.DeviceOption(),
workspace_name="gradient_check"
):
self._stepsize = stepsize
self._threshold = threshold
self._device_option = device_option
self._workspace_name = workspace_name
def GetLossAndGrad(
self, op, grad_ops, x, input_name, grad_name, outputs_with_grads
):
# First, feed in the current input. Note that we are not changing
# anything else, so we don't need to feed in others.
workspace.FeedBlob(input_name, x, self._device_option)
# Run.
workspace.RunOperatorOnce(op)
loss = 0.
# Get Loss and feed in the gradients, run gradient ops.
for idx in outputs_with_grads:
name = op.output[idx]
arr = workspace.FetchBlob(name)
loss += (arr**2).sum()
workspace.FeedBlob(name + '_grad', arr, self._device_option)
loss /= 2.
# Run gradient ops
workspace.RunOperatorsOnce(grad_ops)
# Get gradients
if isinstance(grad_name, core.GradientSlice):
workspace.FeedBlob('zeros', np.zeros_like(x, dtype=np.float32))
workspace.FeedBlob('ones', np.ones(1, dtype=np.float32))
gv_cpu_op = core.CreateOperator(
'EnsureCPUOutput', grad_name.values, grad_name.values + '_cpu',
device_option=self._device_option
)
gi_cpu_op = core.CreateOperator(
'EnsureCPUOutput', grad_name.indices, grad_name.indices + '_cpu',
device_option=self._device_option
)
sparse_to_dense_op = core.CreateOperator(
'ScatterWeightedSum',
[
'zeros', 'ones', grad_name.indices + '_cpu',
grad_name.values + '_cpu', 'ones'
],
'zeros',
)
workspace.RunOperatorOnce(gv_cpu_op)
workspace.RunOperatorOnce(gi_cpu_op)
workspace.RunOperatorOnce(sparse_to_dense_op)
grad = workspace.FetchBlob('zeros')
else:
grad = workspace.FetchBlob(grad_name)
return loss, grad
def CheckSimple(
self,
op,
inputs,
input_to_check,
outputs_with_grads,
grad_ops=None,
input_device_options=None
):
"""Checks the operator in a very simple fashion by stacking a sum of
squares on the top.
Inputs:
op: the operator to be checked.
inputs: the input data in numpy arrays.
input_to_check: an index specifying which input blob we should
check.
outputs_with_grads: indices specifying which output blobs we will
need to check gradients with. For these outputs, we will collect a
squared sum and also feed in their gradients.
grad_ops: the gradient operators. If not given, we will get them
from the gradient registry.
input_device_options: an optional mapping from input names to
DeviceOptions (to override the default DeviceOption)
Outputs:
boolean: True if it passes, False if it does not pass.
"""
if input_device_options is None:
input_device_options = {}
# Entering the checker workspace
old_ws_name = workspace.CurrentWorkspace()
if self._workspace_name != old_ws_name:
workspace.SwitchWorkspace(self._workspace_name, True)
op.device_option.CopyFrom(self._device_option)
if grad_ops is None:
# TODO(jiayq): use the gradient registration instead of the old
# hack.
grad_ops, g_input = core.GradientRegistry.GetGradientForOp(
op, [s + '_grad' for s in op.output])
dims_to_check = inputs[input_to_check].size
# First, feed in the input.
for i, arr in enumerate(inputs):
workspace.FeedBlob(
op.input[i], arr,
input_device_options.get(
op.input[i], self._device_option))
# Get the loss and gradient for the original.
input_name = op.input[input_to_check]
grad_name = g_input[input_to_check]
loss, grad = self.GetLossAndGrad(
op, grad_ops, inputs[input_to_check], input_name, grad_name,
outputs_with_grads
)
grad_estimate = np.zeros_like(inputs[input_to_check])
if grad_estimate.shape != grad.shape:
raise Exception(
"Mismatched gradient shapes: estimated ({}), grad ({})".format(
grad_estimate.shape, grad.shape))
for current_dim in range(dims_to_check):
# Positive gradient
inputs[input_to_check].flat[current_dim] += self._stepsize
pos_loss, _ = self.GetLossAndGrad(
op, grad_ops, inputs[input_to_check], input_name,
grad_name, outputs_with_grads
)
# Negative gradient
inputs[input_to_check].flat[current_dim] -= self._stepsize * 2
neg_loss, _ = self.GetLossAndGrad(
op, grad_ops, inputs[input_to_check], input_name,
grad_name, outputs_with_grads
)
# Recover the value
inputs[input_to_check].flat[current_dim] += self._stepsize
grad_estimate.flat[current_dim] = (
pos_loss - neg_loss) / self._stepsize / 2
# Now, check correctness
fail_mat = ~np.isclose(
grad, grad_estimate, atol=self._threshold, rtol=self._threshold)
if np.any(fail_mat):
idx = np.flatnonzero(fail_mat)
print('Failed. [idx, grad, grad_estimate] are:')
print(np.vstack([idx, grad.flat[idx], grad_estimate.flat[idx]]).T)
ret = False
else:
ret = True
# After finishing, cleaning up things.
if self._workspace_name != old_ws_name:
# We reset the workspace to make sure everything intermediate is
# cleaned up. Note that there is no need to delete a workspace -
# when empty it takes a very limited amount of memory.
workspace.ResetWorkspace()
workspace.SwitchWorkspace(old_ws_name)
return ret, grad, grad_estimate
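# Illustrative sketch (not part of the original file): the central-difference
# rule that CheckSimple applies per coordinate, shown standalone on a plain
# numpy function. `f`, `x`, and `stepsize` are hypothetical names.
def _central_difference_sketch(f, x, stepsize=0.05):
    """Estimate df/dx elementwise via (f(x + h) - f(x - h)) / (2 h)."""
    grad = np.zeros_like(x)
    for i in range(x.size):
        orig = x.flat[i]
        x.flat[i] = orig + stepsize
        pos = f(x)
        x.flat[i] = orig - stepsize
        neg = f(x)
        x.flat[i] = orig  # restore the perturbed coordinate
        grad.flat[i] = (pos - neg) / (2.0 * stepsize)
    return grad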
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import copy
from functools import partial, reduce
from hypothesis import assume, given, settings
import hypothesis.strategies as st
import unittest
from caffe2.python import core, workspace, tt_core, dyndep
import caffe2.python.hypothesis_test_util as hu
from caffe2.proto.caffe2_pb2 import TensorProto
dyndep.InitOpsLibrary('@/caffe2/caffe2/fb/optimizers:sgd_simd_ops')
def sigmoid(x):
return 1.0 / (1.0 + np.exp(-x))
@st.composite
def _tensor_and_prefix(draw, dtype, elements, min_dim=1, max_dim=4, **kwargs):
dims_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
extra_ = draw(
st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim))
return (draw(hu.arrays(dims_ + extra_, dtype, elements)),
draw(hu.arrays(extra_, dtype, elements)))
def _tensor_and_indices(min_dim=1, max_dim=4, dtype=np.float32,
elements=None, **kwargs):
""" generates a tensor and a list of indices of larger tensor of same dim"""
data_dims_ = st.lists(hu.dims(**kwargs), min_size=min_dim, max_size=max_dim)
original_dim = st.integers(min_value=2, max_value=10)
return st.tuples(data_dims_, original_dim).flatmap(lambda pair: st.tuples(
st.just(pair[1]), # original dimension
hu.arrays(pair[0], dtype, elements), # data tensor
hu.arrays(pair[0][0], dtype=np.int64, elements=st.integers(
min_value=0, max_value=pair[1] - 1)),
))
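# Illustrative draw from _tensor_and_indices (hypothetical values): with data
# dims [3, 2] and original_dim 5 it yields roughly
# (5, <3x2 float32 tensor>, <int64 indices in [0, 4]>), i.e. indices into
# the first axis of a larger tensor of the same rank.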
_NUMPY_TYPE_TO_ENUM = {
np.float32: core.DataType.FLOAT,
np.int32: core.DataType.INT32,
np.bool: core.DataType.BOOL,
np.uint8: core.DataType.UINT8,
np.int8: core.DataType.INT8,
np.uint16: core.DataType.UINT16,
np.int16: core.DataType.INT16,
np.int64: core.DataType.INT64,
np.float64: core.DataType.DOUBLE,
}
def _dtypes(dtypes=None):
dtypes = dtypes if dtypes else [np.int32, np.int64, np.float32]
return st.sampled_from(dtypes)
def _test_binary(name, ref, filter_=None, gcs=hu.gcs,
test_gradient=False, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(
lambda dtype: hu.tensors(
n=2, dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
out=st.sampled_from(('Y', 'X1', 'X2') if allow_inplace else ('Y',)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary(self, inputs, out, gc, dc):
op = core.CreateOperator(name, ["X1", "X2"], [out])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
# We only do gradient check with float32 types.
if test_gradient and X1.dtype == np.float32:
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
return test_binary
def _test_binary_broadcast(name, ref, filter_=None,
gcs=hu.gcs, allow_inplace=False, dtypes=_dtypes):
@given(
inputs=dtypes().flatmap(lambda dtype: _tensor_and_prefix(
dtype=dtype,
elements=hu.elements_of_type(dtype, filter_=filter_))),
in_place=(st.booleans() if allow_inplace else st.just(False)),
**gcs)
@settings(max_examples=3, timeout=100)
def test_binary_broadcast(self, inputs, in_place, gc, dc):
op = core.CreateOperator(
name, ["X1", "X2"], ["X1" if in_place else "Y"], broadcast=1)
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
def cast_ref(x, y):
return (np.array(ref(x, y)[0], dtype=x.dtype), )
# gradient not implemented yet
# self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
self.assertReferenceChecks(gc, op, [X1, X2], cast_ref)
return test_binary_broadcast
class TestOperators(hu.HypothesisTestCase):
def test_comparison_ops(self):
ops = {"LT": lambda x1, x2: [x1 < x2],
"LE": lambda x1, x2: [x1 <= x2],
"GT": lambda x1, x2: [x1 > x2],
"GE": lambda x1, x2: [x1 >= x2]}
for name, ref in ops.items():
_test_binary(name, ref, gcs=hu.gcs_cpu_only)(self)
_test_binary_broadcast(name, ref, gcs=hu.gcs_cpu_only)(self)
@given(inputs=hu.tensors(n=2), in_place=st.booleans(), **hu.gcs)
def test_sum(self, inputs, in_place, gc, dc):
op = core.CreateOperator("Sum", ["X1", "X2"],
["Y" if not in_place else "X1"])
X1, X2 = inputs
self.assertDeviceChecks(dc, op, [X1, X2], [0])
self.assertGradientChecks(gc, op, [X1, X2], 0, [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_row_mul(self, inputs, gc, dc):
op = core.CreateOperator("RowMul", ["X1", "X2"], ["Y"])
X1, Xtmp = inputs
X2 = Xtmp[:, 0]
def ref(x, y):
ret = np.zeros(shape=x.shape, dtype=x.dtype)
for i in range(y.size):
ret[i, ] = x[i, ] * y[i]
return [ret]
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
self.assertReferenceChecks(gc, op, [X1, X2], ref)
@given(inputs=hu.tensors(n=2), **hu.gcs_cpu_only)
def test_max(self, inputs, gc, dc):
op = core.CreateOperator("Max", ["X1", "X2"], ["Y"])
X1, X2 = inputs
# Make X1 and X2 far from each other, since X1=X2 is not differentiable
# and the step size of gradient checker is 0.05
X1[np.logical_and(X1 >= X2 - 0.05, X1 <= X2)] -= 0.05
X1[np.logical_and(X1 <= X2 + 0.05, X1 >= X2)] += 0.05
self.assertDeviceChecks(dc, op, [X1, X2], [0])
for i in range(2):
self.assertGradientChecks(gc, op, [X1, X2], i, [0])
def elementwise_max(X, Y):
return [np.maximum(X, Y)]
self.assertReferenceChecks(gc, op, [X1, X2], elementwise_max)
def test_add(self):
def ref(x, y):
return (x + y, )
_test_binary("Add", ref, test_gradient=True)(self)
_test_binary_broadcast("Add", ref)(self)
def test_sub(self):
def ref(x, y):
return (x - y, )
        _test_binary("Sub", ref, test_gradient=True)(self)
_test_binary_broadcast("Sub", ref)(self)
def test_mul(self):
def ref(x, y):
return (x * y, )
_test_binary("Mul", ref, test_gradient=True)(self)
_test_binary_broadcast("Mul", ref)(self)
def test_div(self):
def ref(x, y):
return (x / y, )
def non_zero(x):
return abs(x) > 10e-5
def div_dtypes():
return st.sampled_from([np.float32, np.float64])
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=True,
dtypes=div_dtypes, gcs=hu.gcs_cpu_only
)(self)
_test_binary(
"Div", ref, filter_=non_zero, test_gradient=False,
dtypes=div_dtypes
)(self)
_test_binary_broadcast(
"Div", ref, filter_=non_zero, dtypes=div_dtypes)(self)
@given(X=hu.tensor(), in_place=st.booleans(), **hu.gcs)
def test_negative(self, X, in_place, gc, dc):
op = core.CreateOperator("Negative", ["X"],
["Y" if not in_place else "X"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_tanh(self, X, gc, dc):
op = core.CreateOperator("Tanh", "X", "Y")
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), **hu.gcs)
def test_averaged_loss(self, X, gc, dc):
op = core.CreateOperator("AveragedLoss", ["X"], ["loss"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(), inplace=st.booleans(), **hu.gcs)
def test_softsign(self, X, inplace, gc, dc):
op = core.CreateOperator("Softsign", ["X"], ["X" if inplace else "Y"])
def softsign(X):
return (X / (1 + np.abs(X)),)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertReferenceChecks(gc, op, [X], softsign)
if inplace:
with self.assertRaises(Exception):
self.assertGradientChecks(gc, op, [X], 0, [0])
else:
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(
device_options=st.lists(
min_size=2,
max_size=4,
elements=st.sampled_from(hu.expanded_device_options)),
set_seed=st.booleans())
def test_random_seed_behaviour(self, device_options, set_seed):
        # Assume all sampled device options share one device type (all CUDA
        # or all CPU), since RNG output differs between CPU and GPU.
device_options = copy.deepcopy(device_options)
assume(len({do.device_type for do in device_options}) == 1)
if set_seed:
for do in device_options:
do.random_seed = 1000
def run(do):
# Reset each time because 'Y' may already exist in the workspace
# on a different device
workspace.ResetWorkspace()
ws = workspace.C.Workspace()
op = core.CreateOperator(
"XavierFill", [], ["Y"],
device_option=do,
shape=[2])
ws.run(op)
return ws.blobs["Y"].fetch()
ys = [run(do) for do in device_options]
for y in ys[1:]:
if set_seed:
np.testing.assert_array_equal(ys[0], y)
else:
with self.assertRaises(AssertionError):
np.testing.assert_array_equal(ys[0], y)
@given(axis=st.integers(min_value=1, max_value=4),
num_output=st.integers(min_value=4, max_value=8),
engine=st.sampled_from(["", "PACKED"]),
**hu.gcs)
def test_fully_connected_axis(self, axis, num_output, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
def prod(xs):
p = 1
for x in xs:
p *= x
return p
K = prod(list(X.shape)[axis:])
N = num_output
W = np.random.randn(N, K).astype(np.float32)
b = np.random.randn(N).astype(np.float32)
op = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
engine=engine,
axis=axis)
for name, param in [("X", X), ("W", W), ("b", b)]:
self.ws.create_blob(name).feed(param)
self.ws.run(op)
Y = self.ws.blobs["Y"].fetch()
self.assertEqual(list(Y.shape), list(X.shape)[:axis] + [N])
inputs = [X, W, b]
self.assertDeviceChecks(dc, op, inputs, [0])
for param, _ in enumerate(inputs):
self.assertGradientChecks(gc, op, inputs, param, [0])
@unittest.skipIf(not workspace.has_gpu_support,
"Skipping test due to no gpu present.")
@given(hidden_size=st.integers(min_value=1, max_value=3),
num_layers=st.integers(min_value=1, max_value=3),
bidirectional=st.booleans(),
rnn_mode=st.sampled_from(["lstm"]), # TODO: "gru"
input_mode=st.sampled_from(["linear"]),
dropout=st.floats(min_value=1.0, max_value=1.0),
T=st.integers(min_value=2, max_value=6),
N=st.integers(min_value=1, max_value=4),
D=st.integers(min_value=1, max_value=4))
def test_recurrent(self, hidden_size, num_layers, bidirectional, rnn_mode,
input_mode, dropout, T, N, D):
# Random seed, this one happens to pass
seed = 1234
np.random.seed(seed)
input_weight_size = hidden_size * D
recurrent_weight_size = hidden_size * hidden_size
input_bias_size = hidden_size
recurrent_bias_size = hidden_size
num_directions = 2 if bidirectional else 1
total_sz = 4 * (input_weight_size + recurrent_weight_size +
input_bias_size + recurrent_bias_size) * num_layers
total_sz *= num_directions
W = np.random.rand(total_sz).astype(np.float32)
self.ws.create_blob("WEIGHT").feed(W, device_option=hu.gpu_do)
op = core.CreateOperator(
"Recurrent",
["INPUT", "HIDDEN_INPUT", "CELL_INPUT", "WEIGHT"],
["OUTPUT", "HIDDEN_OUTPUT", "CELL_OUTPUT",
"RNN_SCRATCH", "DROPOUT_STATES"],
hidden_size=hidden_size,
bidirectional=bidirectional,
rnn_mode=rnn_mode,
dropout=dropout,
input_mode=input_mode,
num_layers=num_layers,
seed=seed,
engine="CUDNN")
X = np.random.randn(T, N, D).astype(np.float32)
self.ws.create_blob("INPUT").feed(X, device_option=hu.gpu_do)
W = self.ws.blobs["WEIGHT"].fetch()
H = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32)
C = np.random.randn(
num_layers, N, hidden_size * num_directions).astype(
np.float32) if rnn_mode == "lstm" else \
np.empty((1,)).astype(np.float32) # unused in GRU
inputs = [X, H, C, W]
input_idxs = [i for (i, _) in enumerate(inputs)] \
if rnn_mode == "lstm" else [0, 1, 3] # ignore C
for input_idx in input_idxs:
self.assertGradientChecks(
hu.gpu_do, op, inputs, input_idx, [0])
@given(ndim=st.integers(1, 4),
axis=st.integers(0, 3),
num_inputs=st.integers(2, 4), **hu.gcs)
def test_depth_concat(self, ndim, axis, num_inputs, gc, dc):
assume(axis < ndim)
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7][:ndim]
individual_dims = [1, 2, 3, 4, 5][:num_inputs]
inputs = []
for i in range(num_inputs):
            # Set a unique dim and create the input.
shape[axis] = individual_dims[i]
inputs.append(np.random.randn(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
axis=axis)
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
@given(num_inputs=st.integers(2, 4),
order=st.sampled_from([("NCHW", 1), ("NHWC", 3)]),
**hu.gcs)
def test_depth_concat_with_order(self, num_inputs, order, gc, dc):
input_names = ['X0', 'X1', 'X2', 'X3'][:num_inputs]
shape = [2, 3, 5, 7]
individual_dims = [1, 2, 3, 4][:num_inputs]
inputs = []
for i in range(num_inputs):
            # Set a unique dim and create the input.
shape[order[1]] = individual_dims[i]
inputs.append(np.random.rand(*shape).astype(np.float32))
op = core.CreateOperator("Concat", input_names, ["Y", "Y_dims"],
order=order[0])
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(num_inputs):
self.assertGradientChecks(gc, op, inputs, i, [0])
@given(X=hu.arrays(dims=[5, 2],
elements=st.floats(min_value=0.0, max_value=10.0)),
**hu.gcs_cpu_only)
def test_last_n_windows(self, X, gc, dc):
workspace.FeedBlob('input', X)
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
workspace.CreateBlob('output')
collect_net = core.Net('collect_net')
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_data',
[collect_net], num_iter=2))
workspace.RunPlan(plan)
output = workspace.FetchBlob('output')
inputs = workspace.FetchBlob('input')
new_output = np.zeros([7, inputs.shape[1]])
for i in range(inputs.shape[0] * 2):
new_output[i % 7] = inputs[i % inputs.shape[0]]
import numpy.testing as npt
npt.assert_almost_equal(output, new_output, decimal=5)
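    # LastNWindowCollector keeps the most recent num_to_collect rows in
    # ring-buffer order; the expected result above replays every push
    # (two passes over the input rows) into new_output[i % 7] accordingly.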
@given(batch_size=st.integers(1, 3),
stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
channels=st.integers(1, 8),
**hu.gcs)
def test_im2col_layout(self, batch_size, stride, pad, kernel, dilation,
size, channels, gc, dc):
dkernel = (dilation * (kernel - 1) + 1)
assume(size >= dkernel)
NCHW_TO_NHWC = (0, 2, 3, 1)
NHWC_TO_NCHW = (0, 3, 1, 2)
COL_NHWC_TO_NCHW = (4, 2, 3, 0, 1)
N = batch_size
C = channels
H = size
W = size
out_h = int((H + (2 * pad) - dkernel) / stride + 1)
out_w = int((W + (2 * pad) - dkernel) / stride + 1)
im_nchw = np.random.rand(N, C, H, W).astype(np.float32) - 0.5
im_nhwc = im_nchw.transpose(NCHW_TO_NHWC)
op_im2col_nchw = core.CreateOperator(
"Im2Col",
["im_nchw"], ["col_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_im2col_nhwc = core.CreateOperator(
"Im2Col",
["im_nhwc"], ["col_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.create_blob("im_nchw").feed(im_nchw, device_option=gc)
self.ws.create_blob("im_nhwc").feed(im_nhwc, device_option=gc)
self.ws.run(op_im2col_nchw)
self.ws.run(op_im2col_nhwc)
# there is probably a clever way to spell this in np
col_nchw = self.ws.blobs["col_nchw"].fetch()
col_nhwc = self.ws.blobs["col_nhwc"].fetch()
col_nchw_ = col_nchw.reshape(N, C, kernel, kernel, out_h, out_w)
col_nhwc_ = col_nhwc.reshape(N, out_h, out_w, kernel, kernel, C)
for i in range(0, N):
np.testing.assert_allclose(
col_nchw_[i],
col_nhwc_[i].transpose(COL_NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
op_col2im_nchw = core.CreateOperator(
"Col2Im",
["col_nchw", "im_nchw"],
["out_nchw"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NCHW",
device_option=gc)
op_col2im_nhwc = core.CreateOperator(
"Col2Im",
["col_nhwc", "im_nhwc"],
["out_nhwc"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order="NHWC",
device_option=gc)
self.ws.run(op_col2im_nchw)
self.ws.run(op_col2im_nhwc)
out_nchw = self.ws.blobs["out_nchw"].fetch()
out_nhwc = self.ws.blobs["out_nhwc"].fetch()
np.testing.assert_allclose(
out_nchw,
out_nhwc.transpose(NHWC_TO_NCHW),
atol=1e-4,
rtol=1e-4)
@given(dtype=st.sampled_from([np.float32, np.float64, np.int32, np.bool]))
def test_print(self, dtype):
data = np.random.permutation(6).astype(dtype)
self.ws.create_blob("data").feed(data)
op = core.CreateOperator("Print", "data", [])
self.ws.run(op)
@given(inputs=hu.tensors(n=2),
in_place=st.booleans(),
momentum=st.floats(min_value=0.1, max_value=0.9),
nesterov=st.booleans(),
lr=st.floats(min_value=0.1, max_value=0.9),
**hu.gcs)
def test_momentum_sgd(
self, inputs, in_place, momentum, nesterov, lr, gc, dc):
grad, m = inputs
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"MomentumSGD",
["grad", "m", "lr"],
["grad" if in_place else "grad_o",
"m" if in_place else "m_o"],
momentum=momentum,
nesterov=int(nesterov),
device_option=gc)
self.assertDeviceChecks(
dc, op, [grad, m, lr], [0])
# Reference
def momentum_sgd(grad, m, lr):
lr = lr[0]
if not nesterov:
adjusted_gradient = lr * grad + momentum * m
return (adjusted_gradient, adjusted_gradient)
else:
m_new = momentum * m + lr * grad
return ((1 + momentum) * m_new - momentum * m, m_new)
self.assertReferenceChecks(gc, op, [grad, m, lr], momentum_sgd)
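    # Worked example for the reference above (illustrative numbers): with
    # lr=0.1, momentum=0.9, grad=1.0, m=0.5, the plain update returns
    # 0.1 * 1.0 + 0.9 * 0.5 = 0.55, while Nesterov computes
    # m_new = 0.9 * 0.5 + 0.1 * 1.0 = 0.55 and returns
    # (1 + 0.9) * 0.55 - 0.9 * 0.5 = 0.595.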
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
decay=st.floats(min_value=0.1, max_value=0.9),
momentum=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
def test_rmsprop_sgd(self, inputs, in_place, decay, momentum, lr, epsilon,
gc, dc):
grad, ms, mom = inputs
ms = np.abs(ms) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"RmsProp",
["grad", "ms", "mom", "lr"],
["grad" if in_place else "grad_o",
"ms" if in_place else "ms_o",
"mom" if in_place else "mom_o"],
momentum=momentum, decay=decay, epsilon=epsilon, device_option=gc)
self.assertDeviceChecks(dc, op, [grad, ms, mom, lr], [0])
def rmsprop(grad, ms, mom, lr):
lr = lr[0]
ms_o = ms + (1. - decay) * (np.square(grad) - ms)
mom_o = momentum * mom + lr * grad / np.sqrt(epsilon + ms_o)
grad_o = mom_o
return (grad_o, ms_o, mom_o)
self.assertReferenceChecks(gc, op, [grad, ms, mom, lr], rmsprop)
# Reference
@staticmethod
def _dense_adagrad(epsilon, w, h, grad, lr):
lr = lr[0]
h_o = h + np.square(grad)
grad_o = lr * grad / (np.sqrt(h_o) + epsilon)
w_o = w + grad_o
return (w_o, h_o)
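    # Worked example for _dense_adagrad (illustrative numbers): with lr=0.1,
    # epsilon=0, w=1.0, h=0.0, grad=2.0, the history becomes h_o = 0 + 4 = 4
    # and the step is 0.1 * 2 / sqrt(4) = 0.1, so w_o = 1.1. The accumulated
    # squared gradients shrink the effective step size over time.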
# Reference
@staticmethod
def _dense_adam(epsilon, beta1, beta2, w, m1, m2, grad, lr, iters):
lr = lr[0]
iters = iters[0]
t = iters + 1
corrected_local_rate = lr * np.sqrt(1. - np.power(beta2, t)) / \
(1. - np.power(beta1, t))
m1_o = (beta1 * m1) + (1. - beta1) * grad
m2_o = (beta2 * m2) + (1. - beta2) * np.square(grad)
grad_o = corrected_local_rate * m1_o / \
(np.sqrt(m2_o) + epsilon)
w_o = w + grad_o
return (w_o, m1_o, m2_o)
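    # Note on the bias correction in _dense_adam: at t = 1 with beta1=0.9 and
    # beta2=0.999, corrected_local_rate = lr * sqrt(1 - 0.999) / (1 - 0.9)
    # = lr * sqrt(0.001) / 0.1, roughly 0.316 * lr; it approaches lr as the
    # moment estimates warm up over later iterations.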
@given(inputs=hu.tensors(n=3),
in_place=st.booleans(),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_adagrad_sgd(self, inputs, in_place, lr, epsilon, engine,
gc, dc):
w, grad, h = inputs
h = np.abs(h) + 0.01
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"Adagrad",
["w", "h", "grad", "lr"],
["w" if in_place else "grad_o",
"h" if in_place else "h_o"],
epsilon=epsilon, engine=engine, device_option=gc)
self.assertDeviceChecks(dc, op, [w, h, grad, lr], [0])
self.assertReferenceChecks(gc, op, [w, h, grad, lr],
partial(self._dense_adagrad, epsilon))
@given(inputs=hu.tensors(n=3),
lr=st.floats(min_value=0.1, max_value=0.9),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_adagrad_sgd(self, inputs, lr, epsilon,
engine, gc, dc):
w, grad, h = inputs
indices = np.arange(h.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
h = np.abs(h)
lr = np.asarray([lr], dtype=np.float32)
op = core.CreateOperator(
"SparseAdagrad",
["param", "h", "indices", "grad", "lr"],
["param", "h"],
epsilon=epsilon,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [w, h, indices, grad, lr], [0])
def adagrad(param, h, i, grad, lr):
sw, sh = self._dense_adagrad(epsilon, param[i], h[i], grad, lr)
h[i] = sh
param[i] = sw
return (param, h)
self.assertReferenceChecks(gc, op, [w, h, indices, grad, lr], adagrad)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
beta1=st.floats(min_value=0.1, max_value=0.9),
beta2=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
iters=st.integers(min_value=1, max_value=10000),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs_cpu_only)
def test_adam_sgd(self, inputs, in_place, beta1, beta2, lr, iters, epsilon,
gc, dc):
w, grad, m1, m2 = inputs
m2 += np.abs(m2) + 0.01
lr = np.asarray([lr], dtype=np.float32)
iters = np.asarray([iters], dtype=np.int64)
op = core.CreateOperator(
"Adam",
["w", "m1", "m2", "grad", "lr", "iters"],
["w" if in_place else "w_o",
"m1" if in_place else "m1_o",
"m2" if in_place else "m2_o"],
beta1=beta1, beta2=beta2, epsilon=epsilon,
device_option=gc)
input_device_options = {"iters": hu.cpu_do}
inputs = [w, m1, m2, grad, lr, iters]
self.assertDeviceChecks(
dc, op, inputs, [0], input_device_options=input_device_options)
self.assertReferenceChecks(gc, op, inputs, partial(self._dense_adam,
epsilon, beta1, beta2),
input_device_options=input_device_options)
@given(inputs=hu.tensors(n=4),
beta1=st.floats(min_value=0.1, max_value=0.9),
beta2=st.floats(min_value=0.1, max_value=0.9),
lr=st.floats(min_value=0.1, max_value=0.9),
iters=st.integers(min_value=1, max_value=10000),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs_cpu_only)
def test_sparse_adam_sgd(self, inputs, beta1, beta2, lr, iters,
epsilon, gc, dc):
w, grad, m1, m2 = inputs
indices = np.arange(m1.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
m2 += np.abs(m2) + 0.01
lr = np.asarray([lr], dtype=np.float32)
iters = np.asarray([iters], dtype=np.int64)
op = core.CreateOperator(
"SparseAdam",
["w", "m1", "m2", "indices", "grad", "lr", "iters"],
["w", "m1", "m2"],
beta1=beta1, beta2=beta2, epsilon=epsilon,
device_option=gc)
input_device_options = {"iters": hu.cpu_do}
inputs = [w, m1, m2, indices, grad, lr, iters]
self.assertDeviceChecks(
dc, op, inputs, [0], input_device_options=input_device_options)
def adam(w, m1, m2, i, grad, lr, iters):
nw, nm1, nm2 = self._dense_adam(epsilon, beta1, beta2, w[i],
m1[i], m2[i], grad, lr, iters)
w[i] = nw
m1[i] = nm1
m2[i] = nm2
return (w, m1, m2)
self.assertReferenceChecks(gc, op, inputs, adam)
# Reference
@staticmethod
def _dense_ftrl(alpha, beta, lambda1, lambda2, w, nz, g):
if isinstance(alpha, np.ndarray):
alpha = np.asscalar(alpha)
n = np.take(nz, 0, axis=-1)
z = np.take(nz, 1, axis=-1)
# python port of Sigrid's implementation
g2 = g * g
sigma = (np.sqrt(n + g2) - np.sqrt(n)) / alpha
z += g - sigma * w
n += g2
w = (np.sign(z) * lambda1 - z) / (
(beta + np.sqrt(n)) / alpha + lambda2)
w[np.abs(z) <= lambda1] = 0
return (w, np.stack([n, z], axis=-1))
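    # In the FTRL-Proximal reference above, nz stacks two accumulators per
    # weight: n (the running sum of squared gradients) and z (the adjusted
    # linear term). Weights whose |z| stays within lambda1 are clamped to
    # exactly zero, which is how the optimizer produces sparse models.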
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd(self, inputs, in_place, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad],
partial(self._dense_ftrl, alpha, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd(self, inputs, alpha, beta, lambda1, lambda2,
engine, gc, dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad"],
["var", "nz"],
alpha=alpha, beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad], [0])
# Reference
def ftrl(w, nz, i, g):
sw, snz = self._dense_ftrl(alpha, beta, lambda1, lambda2,
w[i], nz[i], g)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad], ftrl)
# Reference
@staticmethod
def _dense_ftrl_send_alpha_by_input(beta, lambda1, lambda2, w, nz, g, alpha):
return TestOperators._dense_ftrl(alpha, beta, lambda1, lambda2, w, nz,
g)
@given(inputs=hu.tensors(n=4),
in_place=st.booleans(),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_ftrl_sgd_send_alpha_by_input(self, inputs, in_place, alpha, beta,
lambda1, lambda2, engine, gc, dc):
var, n, z, grad = inputs
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"Ftrl",
["var", "nz", "grad", "alpha"],
["var" if in_place else "var_o",
"nz" if in_place else "nz_o"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, grad, alpha], [0])
self.assertReferenceChecks(
gc, op, [var, nz, grad, alpha],
partial(self._dense_ftrl_send_alpha_by_input, beta, lambda1, lambda2))
@given(inputs=hu.tensors(n=4),
alpha=st.floats(min_value=0.01, max_value=0.1),
beta=st.floats(min_value=0.1, max_value=0.9),
lambda1=st.floats(min_value=0.001, max_value=0.1),
lambda2=st.floats(min_value=0.001, max_value=0.1),
engine=st.sampled_from([None, "SIMD"]),
**hu.gcs_cpu_only)
def test_sparse_ftrl_sgd_send_alpha_by_input(self, inputs, alpha, beta,
lambda1, lambda2, engine, gc,
dc):
var, n, z, grad = inputs
# generate fake subset manually because hypothesis is too complicated :)
indices = np.arange(var.shape[0])
indices = indices[indices % 2 == 0]
grad = grad[indices]
n = np.abs(n)
nz = np.stack([n, z], axis=-1)
alpha = np.array(alpha).astype(np.float32)
op = core.CreateOperator(
"SparseFtrl",
["var", "nz", "indices", "grad", "alpha"],
["var", "nz"],
beta=beta, lambda1=lambda1, lambda2=lambda2,
engine=engine,
device_option=gc)
self.assertDeviceChecks(
dc, op, [var, nz, indices, grad, alpha], [0])
# Reference
def ftrl(w, nz, i, g, alpha):
sw, snz = self._dense_ftrl_send_alpha_by_input(beta, lambda1,
lambda2, w[i], nz[i],
g, alpha)
w[i] = sw
nz[i] = snz
return (w, nz)
self.assertReferenceChecks(gc, op, [var, nz, indices, grad, alpha],
ftrl)
@given(input=hu.tensor(max_value=20,
max_dim=1,
dtype=np.int32,
elements=st.integers(min_value=0, max_value=10)),
with_remapping=st.booleans(),
**hu.gcs)
def test_unique(self, input, with_remapping, gc, dc):
op = core.CreateOperator(
"Unique",
["input"],
["unique"] + (["remapping"] if with_remapping else []),
device_option=gc)
self.assertDeviceChecks(dc, op, [input], [0])
# Validator
def unique_valid(input, unique, remapping=None):
self.assertEqual(unique.size, len(set(input)))
self.assertEqual(sorted(unique), sorted(set(input)))
if with_remapping:
self.assertEqual(remapping.shape, input.shape)
remapped = [unique[remapping[i]] for i in range(len(input))]
np.testing.assert_array_equal(remapped, input)
self.assertValidationChecks(gc, op, [input], unique_valid)
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
top_k=st.integers(min_value=1, max_value=3),
**hu.gcs)
def test_accuracy(self, prediction, labels, top_k, gc, dc):
        if top_k > 1:
            gc = hu.cpu_do
op = core.CreateOperator(
"Accuracy",
["prediction", "labels"],
["accuracy"],
top_k=top_k,
device_option=gc
)
def op_ref(prediction, labels, top_k):
N = prediction.shape[0]
correct = 0
for i in range(0, len(prediction)):
                # sort descending by score; python 3 dropped sorted()'s
                # cmp= keyword, so use key= with reverse=True instead
                pred_sorted = sorted(
                    [[item, j] for j, item in enumerate(prediction[i])],
                    key=lambda x: x[0], reverse=True)
max_ids = [x[1] for x in pred_sorted[0:top_k]]
for m in max_ids:
if m == labels[i]:
correct += 1
accuracy = correct / N
return (accuracy,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels, top_k],
reference=op_ref)
@given(target_probabilities=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0.01,
max_value=1)),
**hu.gcs)
def test_perplexity(self, target_probabilities, gc, dc):
op = core.CreateOperator(
"Perplexity",
["target_probabilities"],
["perplexity"]
)
def op_ref(target_probabilities):
N = target_probabilities.shape[0]
perplexities = np.power(target_probabilities, -1.0 / N)
perplexity = reduce(lambda x, y: x * y, perplexities)
return (perplexity,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[target_probabilities],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_segment_ids(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToSegmentIds",
["lengths"],
["segment_ids"])
def op_ref(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
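    # Example: lengths [2, 0, 3] expand to segment ids [0, 0, 2, 2, 2];
    # empty segments contribute nothing but still consume a segment id.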
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_range_fill(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsRangeFill",
["lengths"],
["increasing_seq"])
def op_ref(lengths):
sids = []
for _, l in enumerate(lengths):
sids.extend(range(l))
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
@given(**hu.gcs_cpu_only)
def test_segment_ids_to_ranges(self, gc, dc):
lengths = [4, 6, 3, 2, 0, 4]
op = core.CreateOperator(
"SegmentIdsToRanges",
["segment_ids"],
["ranges"])
def op_ref(segment_ids):
ranges = [np.array([0, 0], dtype=np.int32)]
prev = 0
for i, sid in enumerate(segment_ids):
while sid != prev:
prev += 1
ranges.append(np.array([i, 0], dtype=np.int32))
ranges[-1][1] += 1
return (np.array(ranges, dtype=np.int32), )
def lengths_to_segment_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return (np.array(sids, dtype=np.int32), )
self.assertReferenceChecks(
device_option=gc,
op=op,
            inputs=list(lengths_to_segment_ids(lengths)),
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_lengths_to_ranges(self, lengths, gc, dc):
op = core.CreateOperator(
"LengthsToRanges",
["lengths"],
["ranges"])
def op_ref(x):
if not x.size:
return (x.reshape((0, 2)), )
return (np.column_stack((np.concatenate(([0], np.cumsum(x)[:-1])),
x)), )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=op_ref)
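    # Example: lengths [2, 3, 1] map to ranges [[0, 2], [2, 3], [5, 1]],
    # i.e. each row is (start offset, length) with offsets from a cumsum.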
@given(prediction=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0,
max_value=1)),
labels=hu.arrays(dims=[10],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=3 - 1)),
**hu.gcs)
def test_multi_class_accuracy(self, prediction, labels, gc, dc):
op = core.CreateOperator(
"MultiClassAccuracy",
["prediction", "labels"],
["accuracies", "amounts"]
)
def op_ref(prediction, labels):
N = prediction.shape[0]
D = prediction.shape[1]
accuracies = np.empty(D, dtype=float)
accuracies.fill(0)
amounts = np.empty(D, dtype=int)
amounts.fill(0)
max_ids = np.argmax(prediction, axis=1)
for i in range(0, N):
max_id = max_ids[i]
label_id = labels[i]
if max_id == label_id:
accuracies[label_id] += 1
amounts[label_id] += 1
for i in range(0, D):
amount = amounts[i]
if amount:
accuracies[i] /= amount
return (accuracies, amounts,)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[prediction, labels],
reference=op_ref)
@given(lengths=st.lists(st.integers(min_value=0, max_value=10),
min_size=0,
max_size=10),
**hu.gcs_cpu_only)
def test_segment_ids_to_lengths(self, lengths, gc, dc):
op = core.CreateOperator(
"SegmentIdsToLengths",
["segment_ids"],
["lengths"])
def lengths_to_ids(lengths):
sids = []
for i, l in enumerate(lengths):
sids.extend(l * [i])
return sids
segment_ids = lengths_to_ids(lengths)
def ids_to_lengths(ids):
ids_length = len(ids)
if ids_length == 0:
return (np.array([], dtype=np.int32),)
lengths = []
# segment id starts with 0
prev_id = -1
tmp_length = 0
for idx in range(ids_length):
cur_id = ids[idx]
if cur_id != prev_id:
if idx != 0:
lengths.append(tmp_length)
while prev_id + 1 != cur_id:
lengths.append(0)
prev_id += 1
prev_id = cur_id
tmp_length = 0
tmp_length += 1
lengths.append(tmp_length)
return (np.array(lengths, dtype=np.int32),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(segment_ids, dtype=np.int32)],
reference=ids_to_lengths)
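    # Example: segment ids [0, 0, 2, 2, 2] map back to lengths [2, 0, 3];
    # the reference inserts zero-length entries for skipped segment ids.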
@given(lengths=st.lists(st.integers(min_value=1, max_value=10),
min_size=0,
max_size=10),
power=st.sampled_from([0.5, 1.0, 1.5, 2.0]),
**hu.gcs_cpu_only)
def test_lengths_to_weights(self, lengths, power, gc, dc):
op = core.CreateOperator(
"LengthsToWeights",
["lengths"],
["weights"],
power=power)
def lengths_to_weights(lengths):
weighted_length = []
for l in lengths:
weighted_length.extend(l * [1 / pow(l, power)])
return (np.array(weighted_length, dtype=float),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[np.array(lengths, dtype=np.int32)],
reference=lengths_to_weights)
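    # Example: lengths [2] with power=1.0 yield weights [0.5, 0.5], and with
    # power=0.5 yield [1/sqrt(2), 1/sqrt(2)]; each element of a segment gets
    # weight 1 / len(segment)**power.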
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(allow_nan=False,
allow_infinity=False)),
**hu.gcs)
def test_exp(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Exp",
["input"],
["output"]
)
def exp_ref(input_tensor):
return (np.exp(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=exp_ref)
@given(input_tensor=hu.arrays(
dims=[10], elements=st.floats(min_value=1,
max_value=10000)),
**hu.gcs_cpu_only)
def test_log(self, input_tensor, gc, dc):
op = core.CreateOperator(
"Log",
["input"],
["output"]
)
def log_ref(input_tensor):
return (np.log(input_tensor),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[input_tensor],
reference=log_ref)
self.assertGradientChecks(gc, op, [input_tensor], 0, [0])
@given(num_threads=st.integers(1, 10), # noqa
num_elements=st.integers(1, 100),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_blobs_queue_threading(self, num_threads, num_elements,
capacity, num_blobs, do):
"""
- Construct matrices of size N x D
- Start K threads
- Push all N rows into the queue of capacity C
- Pull all N rows out of the queue.
- Verify that the output matrices are permutation of the rows of the
original matrices.
"""
        import threading
        try:
            import Queue
        except ImportError:  # renamed to queue in Python 3
            import queue as Queue
op = core.CreateOperator(
"CreateBlobsQueue",
[],
["queue"],
capacity=capacity,
num_blobs=num_blobs,
device_option=do)
self.ws.run(op)
xs = [np.random.randn(num_elements, 5).astype(np.float32)
for _ in range(num_blobs)]
q = Queue.Queue()
for i in range(num_elements):
q.put([x[i] for x in xs])
def enqueue(t):
while True:
feed_blobs = ["x_{}_{}".format(i, t) for i in range(num_blobs)]
op = core.CreateOperator(
"EnqueueBlobs",
["queue"] + feed_blobs,
feed_blobs,
device_option=do)
try:
elems = q.get_nowait()
for elem, feed_blob in zip(elems, feed_blobs):
self.ws.create_blob(feed_blob).feed(
elem, device_option=do)
self.ws.run(op)
except Queue.Empty:
return
# Create all blobs before racing on multiple threads
# (blob creation is not threadsafe)
for t in range(num_threads):
for i in range(num_blobs):
self.ws.create_blob("x_{}_{}".format(i, t))
threads = [threading.Thread(target=enqueue, args=(t,))
for t in range(num_threads)]
for thread in threads:
thread.start()
for n in range(num_elements):
dequeue_blobs = ["y_{}_{}".format(i, n) for i in range(num_blobs)]
op = core.CreateOperator(
"DequeueBlobs",
["queue"],
dequeue_blobs,
device_option=do)
self.ws.run(op)
for thread in threads:
thread.join()
op = core.CreateOperator("CloseBlobsQueue", ["queue"], [])
self.ws.run(op)
ys = [np.vstack([self.ws.blobs["y_{}_{}".format(i, n)].fetch()
for n in range(num_elements)])
for i in range(num_blobs)]
for i in range(num_blobs):
self.assertEqual(ys[i].shape, xs[i].shape)
for j in range(num_elements):
# Verify that the rows of the returned blob are a
# permutation. The order may be different due to
# different threads racing.
self.assertTrue(
any(np.array_equal(xs[i][j], ys[i][k])
for k in range(num_elements)))
@given(num_producers=st.integers(1, 10),
num_consumers=st.integers(1, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3),
do=st.sampled_from(hu.device_options))
def test_safe_blobs_queue(self, num_producers, num_consumers,
capacity, num_blobs, do):
init_net = core.Net('init_net')
queue = init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs)
producer_steps = []
truth = 0
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for times in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queue] + blobs, blobs + [status])
count = (i + 1) * 10
step = core.execution_step(name, net, num_iter=count)
truth += count
producer_steps.append(step)
producer_exit_net = core.Net('producer_exit_net')
producer_exit_net.CloseBlobsQueue([queue], 0)
producer_step = core.execution_step('producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True),
core.execution_step('producer_exit', producer_exit_net)]
)
consumer_steps = []
counters = []
const_1 = init_net.ConstantFill([], 1, value=1.0)
for i in range(num_consumers):
name = 'consumer_%d' % i
net1 = core.Net(name)
blobs = net1.SafeDequeueBlobs([queue], num_blobs + 1)
status = blobs[-1]
net2 = core.Net(name + '_counter')
counter = init_net.ConstantFill([], 1, value=0.0)
counters.append(counter)
net2.Add([counter, const_1], counter)
consumer_steps.append(core.execution_step(
name, [net1, net2], should_stop_blob=status))
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
v = 0
for counter in counters:
v += self.ws.blobs[str(counter)].fetch().tolist()
self.assertEqual(v, truth)
@given(num_queues=st.integers(1, 5),
num_iter=st.integers(5, 10),
capacity=st.integers(1, 5),
num_blobs=st.integers(1, 3))
def test_weighted_sample_blobs_queue(
self, num_queues, num_iter, capacity, num_blobs
):
# Create BlobsQueue for each input queue
print("num_queues", num_queues)
init_net = core.Net('init_net')
queues = [
init_net.CreateBlobsQueue(
[], 1, capacity=capacity, num_blobs=num_blobs
) for _ in range(num_queues)
]
        # Create multiple producer nets and one producer exit net
producer_steps = []
producer_exit_nets = []
for i in range(num_queues):
name = 'producer_%d' % i
net = core.Net(name)
blobs = [net.ConstantFill([], 1, value=1.0, run_once=False)
for _ in range(num_blobs)]
status = net.NextName()
net.SafeEnqueueBlobs([queues[i]] + blobs, blobs + [status])
exit_net = core.Net('producer_exit_%d' % i)
exit_net.CloseBlobsQueue(queues[i], 0)
producer_exit_nets.append(exit_net)
step = core.execution_step(
name, [
core.execution_step(
'producer_%d' % i, [net], num_iter=num_iter
),
core.execution_step('producer_exit_%d' % i, [exit_net]),
]
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers',
producer_steps,
concurrent_substeps=True,
),
]
)
status_lst = []
def append(ins, outs):
status_lst.append(ins)
        # Create one consumer dequeue net and one consumer exit net
consumer_net = core.Net('weight_sample_dequeue_net')
blobs = consumer_net.WeightedSampleDequeueBlobs(
queues,
num_blobs + 1,
weights=np.random.uniform(low=0.0, high=1.0, size=(num_queues,))
)
status = blobs[-1]
consumer_net.Python(append)(status)
consumer_step = core.execution_step(
'consumer',
[
core.execution_step(
'consumer', [consumer_net], should_stop_blob=status
),
core.execution_step('producer_exit', producer_exit_nets)
]
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [producer_step, consumer_step], concurrent_substeps=True)
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
assert len(status_lst) >= num_iter + 1
assert len(status_lst) <= num_iter * num_queues + 1
@given(
data=hu.tensor(),
**hu.gcs_cpu_only)
def test_squeeze_expand_dims(self, data, gc, dc):
dims = [0, 0]
if len(data.shape) > 2:
dims.append(2)
op = core.CreateOperator(
"ExpandDims",
["data"],
["expanded"],
dims=dims)
def expand_dims_ref(data, *args, **kw):
inc_dims = list(set(dims))
inc_dims.sort()
r = data
for dim in inc_dims:
r = np.expand_dims(r, axis=dim)
return (r, )
def squeeze_ref(data, *args, **kw):
dec_dims = list(set(dims))
dec_dims.sort(reverse=True)
r = data
for dim in dec_dims:
r = np.squeeze(r, axis=dim)
return (r, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data],
reference=expand_dims_ref,
output_to_grad='expanded',
grad_reference=squeeze_ref)
@given(**hu.gcs_cpu_only)
def test_tt_layer(self, gc, dc):
seed = 1234
np.random.seed(seed)
inp_sizes = [2, 2, 2, 2]
out_sizes = [2, 2, 2, 2]
tt_ranks = [1, 3, 3, 3, 1]
op = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=tt_ranks,
)
X = np.expand_dims(
np.random.rand(16).astype(np.float32), axis=0)
b = np.array([0] * 16).astype(np.float32)
cores = tt_core.init_tt_cores(inp_sizes, out_sizes, tt_ranks)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("b").feed(b)
self.ws.create_blob("cores").feed(cores)
self.ws.run(op)
Y = self.ws.blobs[("Y")].fetch()
Y = Y.reshape([16])
golden = np.array([-9.51763490e-07, -1.28442286e-06,
-2.86281141e-07, 2.28865644e-07,
-1.96180017e-06, -1.78920531e-06,
9.31094666e-07, -2.04273989e-07,
1.70017107e-06, 1.64845711e-06,
-1.06099132e-06, -4.69111137e-07,
6.57552358e-08, -1.28942040e-08,
-2.29114004e-07, -1.04262714e-06])
# This golden array is dependent on the specified inp_sizes, out_sizes,
# tt_ranks, and seed. Changing these will cause the test to fail.
self.assertAlmostEqual(np.linalg.norm(golden - Y), 0, delta=1e-10)
@given(num_workers=st.integers(1, 10),
net_type=st.sampled_from(
["simple", "dag"] +
(["async_dag"] if workspace.has_gpu_support else [])),
do=st.sampled_from(hu.device_options))
def test_dag_net_forking(self, net_type, num_workers, do):
from caffe2.python.cnn import CNNModelHelper
m = CNNModelHelper()
n = 10
d = 2
depth = 2
iters = 5
np.random.seed(1701)
# Build a binary tree of FC layers, summing at each node.
for i in reversed(range(depth)):
for j in range(2 ** i):
bottom_1 = "{}_{}".format(i + 1, 2 * j)
bottom_2 = "{}_{}".format(i + 1, 2 * j + 1)
mid_1 = "{}_{}_m".format(i + 1, 2 * j)
mid_2 = "{}_{}_m".format(i + 1, 2 * j + 1)
top = "{}_{}".format(i, j)
m.FC(
bottom_1, mid_1,
dim_in=d, dim_out=d,
weight_init=m.ConstantInit(np.random.randn()),
bias_init=m.ConstantInit(np.random.randn()))
m.FC(
bottom_2, mid_2,
dim_in=d, dim_out=d,
weight_init=m.ConstantInit(np.random.randn()),
bias_init=m.ConstantInit(np.random.randn()))
m.net.Sum([mid_1, mid_2], top)
m.net.SquaredL2Distance(["0_0", "label"], "xent")
m.net.AveragedLoss("xent", "loss")
input_to_grad = m.AddGradientOperators(["loss"])
m.Proto().device_option.CopyFrom(do)
m.param_init_net.Proto().device_option.CopyFrom(do)
m.Proto().type = net_type
m.Proto().num_workers = num_workers
self.ws.run(m.param_init_net)
print(str(m.Proto()))
def run():
import numpy as np
np.random.seed(1701)
input_blobs = ["{}_{}".format(depth, j) for j in range(2 ** depth)]
for input_blob in input_blobs:
self.ws.create_blob(input_blob).feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.create_blob("label").feed(
np.random.randn(n, d).astype(np.float32),
device_option=do)
self.ws.run(m.net)
gradients = [
self.ws.blobs[str(input_to_grad[input_blob])].fetch()
for input_blob in input_blobs]
return gradients
outputs = [run() for _ in range(iters)]
for output in outputs[1:]:
np.testing.assert_array_equal(outputs[0], output)
self.assertAlmostEqual(np.sum(np.square(output)), 91.81752,
delta=1e-2)
@given(input=hu.tensor(min_dim=2, max_dim=6, dtype=np.int32,
elements=st.integers(min_value=0,
max_value=2**32 - 1)),
slice_dim=st.integers(),
a=st.integers(),
b=st.integers(),
is_empty=st.booleans(),
**hu.gcs_cpu_only)
def test_slice(self, input, slice_dim, a, b, is_empty, gc, dc):
slice_dim = slice_dim % len(input.shape)
        if is_empty:
            input = np.random.rand(*([0] + list(input.shape))).astype(np.int32)
            slice_dim += 1
a = a % input.shape[slice_dim]
b = b % input.shape[slice_dim] + 1
start_vec = np.zeros(len(input.shape), dtype=np.int32)
end_vec = np.ones(len(input.shape), dtype=np.int32) * -1
start_vec[slice_dim] = min(a, b)
end_vec[slice_dim] = max(a, b)
op = core.CreateOperator(
"Slice",
["input", "start", "end"],
["output"])
        def slice_ref(x, s, e):
            if len(s.shape) == 0:
                return (x, )
            slc = tuple(
                slice(si, None if ei == -1 else ei) for si, ei in zip(s, e))
            return (x[slc], )
self.assertReferenceChecks(gc, op, [input, start_vec, end_vec],
slice_ref)
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_shape(self, data, gc, dc):
op = core.CreateOperator("Shape", ["data"], ["shape"])
self.assertReferenceChecks(gc, op, [data], lambda x: (x.shape, ))
@given(data=hu.tensor(), **hu.gcs_cpu_only)
def test_has_elements(self, data, gc, dc):
op = core.CreateOperator("HasElements", ["data"], ["has_elements"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) > 0, ))
op = core.CreateOperator("IsEmpty", ["data"], ["is_empty"])
self.assertReferenceChecks(gc, op, [data], lambda x: (len(x) == 0, ))
@given(initial_iters=st.integers(0, 100),
max_iters=st.integers(0, 100))
def test_should_stop_as_criteria_net_execution_step(
self, initial_iters, max_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
self.ws.create_blob("num_iters").feed(
np.asarray([max_iters]).astype(np.int64))
criteria_net = core.Net("criteria")
criteria_net.GE(["iter", "num_iters"], ["stop"])
criteria_net.Proto().external_output.extend(["stop"])
plan = core.Plan('plan')
plan.AddStep(core.execution_step(
'step', [criteria_net, net],
should_stop_blob=core.BlobReference("stop")))
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], max(initial_iters, max_iters))
def test_disabled_execution_step(self):
def createNets(i, disabled):
should_stop = 'should_stop_{}'.format(i)
output = 'output_{}'.format(i)
# init content and stop signal
init = core.Net("init_{}".format(i))
init.ConstantFill(
[],
[output],
shape=[1],
value=0.0
)
init.Cast([output], [should_stop], to='bool')
# decide if disabled or not
criterion = core.Net("criterion_{}".format(i))
tmp = criterion.ConstantFill(
[],
shape=[1],
value=1.0 if disabled else 0.0
)
criterion.Cast([tmp], [should_stop], to='bool')
criterion.Proto().external_output.extend([should_stop])
# the body net is just to turn a 0 blob to 1
net = core.Net("net_{}".format(i))
net.ConstantFill(
[],
[output],
shape=[1],
value=1.0
)
# always end the loop
ender = core.Net("ender_{}".format(i))
tmp = ender.ConstantFill(
[],
shape=[1],
value=1.0
)
ender.Cast([tmp], [should_stop], to='bool')
ender.Proto().external_output.extend([should_stop])
return [init, criterion, net, ender]
nets = [createNets(1, False),
createNets(2, True),
createNets(3, False)]
steps = [
core.execution_step(
'step_1', nets[0],
should_stop_blob=core.BlobReference('should_stop_1')),
core.execution_step(
'step_2', nets[1],
should_stop_blob=core.BlobReference('should_stop_2')),
core.execution_step('step_3', nets[2])
]
expected = [1.0, 0.0, 1.0]
plan = core.Plan('plan')
plan.AddStep(core.execution_step('all_steps', steps, num_iter=3))
self.ws.run(plan)
for i, _ in enumerate(nets):
self.assertEqual(
self.ws.blobs['output_{}'.format(i + 1)].fetch()[0],
expected[i])
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100))
def test_iter_count_with_execution_step(self, initial_iters, num_iters):
net = core.Net("net")
net.Iter(["iter"], ["iter"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
step = core.ExecutionStep("step", [net])
step.SetIter(num_iters)
plan = core.Plan("plan")
plan.AddStep(step)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters)
@given(initial_iters=st.integers(0, 100),
num_iters=st.integers(0, 100),
num_nets=st.integers(0, 5))
def test_atomic_iter_with_concurrent_steps(self, initial_iters, num_iters,
num_nets):
init_net = core.Net("init_net")
iter_mutex = init_net.CreateMutex([], ["iter_mutex"])
self.ws.create_blob("iter").feed(
np.asarray([initial_iters]).astype(np.int64))
concurrent_steps = core.ExecutionStep("concurrent_steps",
num_iter=num_iters)
for i in range(num_nets):
net = core.Net("net_{}".format(i))
net.AtomicIter([iter_mutex, "iter"], ["iter"])
step = core.ExecutionStep("step", [net])
concurrent_steps.AddSubstep(step)
concurrent_steps.SetConcurrentSubsteps(True)
plan = core.Plan("plan")
plan.AddStep(concurrent_steps)
self.ws.run(init_net)
self.ws.run(plan)
iters = self.ws.blobs[("iter")].fetch()
self.assertEqual(iters.dtype, np.int64)
self.assertEqual(iters[0], initial_iters + num_iters * num_nets)
@given(a=hu.tensor(),
           src=st.sampled_from(list(_NUMPY_TYPE_TO_ENUM.keys())),
           dst=st.sampled_from(list(_NUMPY_TYPE_TO_ENUM.keys())),
use_name=st.booleans(),
**hu.gcs)
def test_cast(self, a, src, dst, use_name, gc, dc):
a = a.astype(src)
# Casting from a float type outside the range of the integral
# type is UB.
ftypes = [np.float32, np.float64]
if src in ftypes and dst not in ftypes and dst is not np.bool:
info = np.iinfo(dst)
a = np.clip(a, info.min, info.max)
def ref(data):
return [data.astype(dst)]
to = _NUMPY_TYPE_TO_ENUM[dst]
if use_name:
to = TensorProto.DataType.Name(to).lower()
op = core.CreateOperator('Cast', ["X"], ["Y"], to=to)
self.assertDeviceChecks(dc, op, [a], [0])
out, = self.assertReferenceChecks(gc, op, [a], ref)
self.assertEqual(dst, out.dtype)
@given(a=hu.tensor(),
eps=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_logit(self, a, eps, gc, dc):
def ref(data):
data = np.clip(data, eps, 1.0 - eps)
return (np.log(data / (1 - data)), )
op = core.CreateOperator('Logit', ["X"], ["Y"], eps=eps)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(a=hu.tensor(elements=st.floats(allow_nan=True)),
value=st.floats(min_value=-10, max_value=10),
**hu.gcs_cpu_only)
def test_replace_nan(self, a, value, gc, dc):
def ref(data):
out = np.copy(data)
out[np.isnan(data)] = value
return (out, )
op = core.CreateOperator('ReplaceNaN', ["X"], ["Y"], value=value)
self.assertDeviceChecks(dc, op, [a], [0])
self.assertReferenceChecks(gc, op, [a], ref)
@given(data=_dtypes(dtypes=[np.int32, np.int64, np.float32, np.bool]).
flatmap(lambda dtype: hu.tensor(
min_dim=1, dtype=dtype, elements=hu.elements_of_type(dtype))),
has_input=st.booleans(),
has_extra_shape=st.booleans(),
extra_shape=st.lists(
min_size=1, max_size=5, elements=st.integers(1, 5)),
**hu.gcs)
def test_constant_fill(self, data, has_input, has_extra_shape, extra_shape,
gc, dc):
dtype = data.dtype.type
# in opt mode, np.bool is converted into np.bool_
if data.dtype == np.dtype(np.bool):
dtype = np.bool
value = data.item(0)
gt_shape = data.shape
inputs = [data]
enum_type = _NUMPY_TYPE_TO_ENUM[dtype]
if has_input:
if has_extra_shape:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
extra_shape=extra_shape,
value=value)
gt_shape += tuple(extra_shape)
else:
op = core.CreateOperator('ConstantFill', ["X"], ["Y"],
dtype=enum_type,
value=value)
else:
op = core.CreateOperator('ConstantFill', [], ["Y"],
dtype=enum_type,
value=value,
shape=list(gt_shape))
inputs = []
def ref(inputs=None):
outputs = np.full(shape=gt_shape, fill_value=value, dtype=dtype)
return [outputs]
self.assertDeviceChecks(dc, op, inputs, [0])
out, = self.assertReferenceChecks(gc, op, inputs, ref)
self.assertEqual(dtype, out.dtype)
@given(t=st.integers(1, 5),
n=st.integers(1, 5),
d=st.integers(1, 5))
def test_elman_recurrent_network(self, t, n, d):
from caffe2.python import cnn
np.random.seed(1701)
step_net = cnn.CNNModelHelper(name="Elman")
# TODO: name scope external inputs and outputs
step_net.Proto().external_input.extend(
["input_t", "seq_lengths", "timestep",
"hidden_t_prev", "gates_t_w", "gates_t_b"])
step_net.Proto().type = "simple"
step_net.Proto().external_output.extend(["hidden_t", "gates_t"])
step_net.FC("hidden_t_prev", "gates_t", dim_in=d, dim_out=d, axis=2)
step_net.net.Sum(["gates_t", "input_t"], ["gates_t"])
step_net.net.Sigmoid(["gates_t"], ["hidden_t"])
# Initialize params for step net in the parent net
for op in step_net.param_init_net.Proto().op:
workspace.RunOperatorOnce(op)
backward_ops, backward_mapping = core.GradientRegistry.GetBackwardPass(
step_net.Proto().op, {"hidden_t": "hidden_t_grad"})
backward_mapping = {str(k): str(v) for k, v
in backward_mapping.items()}
backward_step_net = core.Net("ElmanBackward")
del backward_step_net.Proto().op[:]
backward_step_net.Proto().op.extend(backward_ops)
assert backward_mapping["input_t"] == "gates_t_grad"
links = [
("hidden_t_prev", "hidden", 0),
("hidden_t", "hidden", 1),
("input_t", "input", 0),
]
link_internal, link_external, link_offset = zip(*links)
backward_links = [
("hidden_t_prev_grad", "hidden_grad", 0),
("hidden_t_grad", "hidden_grad", 1),
("gates_t_grad", "input_grad", 0),
]
backward_link_internal, backward_link_external, backward_link_offset = \
zip(*backward_links)
backward_step_net.Proto().external_input.extend(["hidden_t_grad"])
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_input)
backward_step_net.Proto().external_input.extend(
step_net.Proto().external_output)
inputs = ["input", "seq_lengths", "gates_t_w", "gates_t_b", "hidden_input"]
recurrent_inputs = ["hidden_input"]
op = core.CreateOperator(
"RecurrentNetwork",
inputs,
["output", "hidden", "hidden_output", "step_workspaces"],
alias_src=["hidden", "hidden"],
alias_dst=["output", "hidden_output"],
alias_offset=[1, -1],
recurrent_states=["hidden"],
            initial_recurrent_state_ids=list(map(inputs.index, recurrent_inputs)),
link_internal=link_internal,
link_external=link_external,
link_offset=link_offset,
backward_link_internal=backward_link_internal,
backward_link_external=backward_link_external,
backward_link_offset=backward_link_offset,
            param=list(map(inputs.index, step_net.params)),
step_net=str(step_net.Proto()),
backward_step_net=str(backward_step_net.Proto()),
outputs_with_grads=[0],
)
workspace.FeedBlob(
"input", np.random.randn(t, n, d).astype(np.float32))
workspace.FeedBlob(
"hidden_input", np.random.randn(1, n, d).astype(np.float32))
workspace.FeedBlob(
"seq_lengths", np.random.randint(0, t, size=(n,)).astype(np.int32))
def reference(input, seq_lengths, gates_w, gates_b, hidden_input):
T = input.shape[0]
N = input.shape[1]
D = input.shape[2]
hidden = np.zeros(shape=(T + 1, N, D))
assert hidden.shape[0] == T + 1
assert hidden.shape[1] == N
assert hidden.shape[2] == D
hidden[0, :, :] = hidden_input
for t in range(T):
input_t = input[t].reshape(1, N, D)
hidden_t_prev = hidden[t].reshape(1, N, D)
gates = np.dot(hidden_t_prev, gates_w.T)
gates = gates.reshape(1, N, D) + input_t.reshape(1, N, D)
hidden[t + 1] = sigmoid(gates)
return hidden[1:], hidden, hidden[-1].reshape(1, N, D)
self.assertReferenceChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
reference,
outputs_to_check=[0, 1, 2])
for param in [0, 2, 3]:
self.assertGradientChecks(
hu.cpu_do,
op,
[workspace.FetchBlob(name)
for name in ["input", "seq_lengths", "gates_t_w", "gates_t_b",
"hidden_input"]],
param,
[0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_space_to_batch(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
X = np.random.randn(n, c, h, w).astype(np.float32)
op = core.CreateOperator("SpaceToBatch", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(n=st.integers(1, 5),
c=st.integers(1, 5),
h=st.integers(1, 5),
w=st.integers(1, 5),
pad=st.integers(0, 2),
block_size=st.integers(2, 3),
**hu.gcs)
def test_batch_to_space(self, n, c, h, w, pad, block_size, gc, dc):
assume((h + 2 * pad) % block_size == 0)
assume((w + 2 * pad) % block_size == 0)
        # integer division keeps the shape entries integral
        X = np.random.randn(
            n * block_size * block_size,
            c,
            (h + 2 * pad) // block_size,
            (w + 2 * pad) // block_size).astype(np.float32)
op = core.CreateOperator("BatchToSpace", ["X"], ["Y"],
pad=pad, block_size=block_size)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(),
in_place=st.booleans(),
scale=st.floats(min_value=-2.0, max_value=2.0),
**hu.gcs)
def test_scale(self, X, in_place, scale, gc, dc):
op = core.CreateOperator(
"Scale", ["X"], ["Y" if not in_place else "X"],
scale=scale)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(s=st.text())
def test_string_serde(self, s):
s = s.encode('ascii', 'ignore')
self.ws.create_blob("a").feed(s)
serialized = self.ws.blobs["a"].serialize("a")
self.ws.create_blob("b").deserialize(serialized)
self.assertEqual(s, self.ws.blobs[("a")].fetch())
self.assertEqual(s, self.ws.blobs[("b")].fetch())
@given(n=st.integers(1, 3),
dim=st.integers(4, 16),
**hu.gcs)
def test_distances(self, n, dim, gc, dc):
X = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
Y = np.random.uniform(-1, 1, (n, dim)).astype(np.float32)
self.ws.create_blob("X").feed(X)
self.ws.create_blob("Y").feed(Y)
def check_grad(op):
self.assertGradientChecks(gc, op, [X, Y], 0, [0],
stepsize=1e-2, threshold=1e-2)
self.assertGradientChecks(gc, op, [X, Y], 1, [0],
stepsize=1e-2, threshold=1e-2)
l2_op = core.CreateOperator("SquaredL2Distance",
["X", "Y"], ["l2_dist"])
self.ws.run(l2_op)
        np.testing.assert_allclose(self.ws.blobs["l2_dist"].fetch(),
                                   np.square(X - Y).sum(axis=1) * 0.5,
                                   rtol=1e-4, atol=1e-4)
check_grad(l2_op)
if gc.device_type == 1:
# Only SquaredL2Distance has CUDA implementation
return
dot_op = core.CreateOperator("DotProduct", ["X", "Y"], ["dot"])
self.ws.run(dot_op)
        np.testing.assert_allclose(self.ws.blobs["dot"].fetch(),
                                   np.multiply(X, Y).sum(axis=1),
                                   rtol=1e-4, atol=1e-4)
check_grad(dot_op)
kEps = 1e-12
cos_op = core.CreateOperator("CosineSimilarity", ["X", "Y"], ["cos"])
self.ws.run(cos_op)
cos = np.divide(np.multiply(X, Y).sum(axis=1),
np.multiply(np.linalg.norm(X, axis=1) + kEps,
np.linalg.norm(Y, axis=1) + kEps))
        np.testing.assert_allclose(self.ws.blobs["cos"].fetch(), cos,
                                   rtol=1e-4, atol=1e-4)
check_grad(cos_op)
@given(pad=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_same_pad_image(self, pad, size, input_channels, batch_size, order,
mode, gc, dc):
assume(size > pad)
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad=pad,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad, pad), (pad, pad), (0, 0)), mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad, pad), (pad, pad)), mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(pad_t=st.integers(0, 3),
pad_l=st.integers(0, 3),
pad_b=st.integers(0, 3),
pad_r=st.integers(0, 3),
size=st.integers(1, 10),
input_channels=st.integers(1, 5),
batch_size=st.integers(1, 5),
order=st.sampled_from(["NCHW", "NHWC"]),
mode=st.sampled_from(["constant", "reflect", "edge"]),
**hu.gcs)
def test_pad_image(self, pad_t, pad_l, pad_b, pad_r, size, input_channels,
batch_size, order, mode, gc, dc):
assume(size > max(pad_b, pad_r, pad_t, pad_l))
op = core.CreateOperator(
"PadImage",
["X"],
["Y"],
pad_t=pad_t,
pad_l=pad_l,
pad_b=pad_b,
pad_r=pad_r,
mode=mode,
order=order,
)
if order == "NHWC":
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (pad_t, pad_b), (pad_l, pad_r), (0, 0)),
mode),)
else:
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
def numpy_pad_ref(x):
return (np.pad(
x, ((0, 0), (0, 0), (pad_t, pad_b), (pad_l, pad_r)),
mode),)
self.assertReferenceChecks(gc, op, [X], numpy_pad_ref)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-4, max_value=1e-2),
**hu.gcs_cpu_only)
def test_instance_norm(self, size, input_channels, batch_size, order,
epsilon, gc, dc):
op = core.CreateOperator(
"InstanceNorm",
["X", "scale", "bias"],
["Y"],
order=order,
epsilon=epsilon,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
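        # InstanceNorm normalizes each (sample, channel) spatial slice
        # independently: y = (x - mean) / sqrt(var + epsilon) * scale + bias,
        # as the reference implementations below spell out.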
def ref_nchw(x, scale, bias):
x = x.reshape(batch_size * input_channels, size * size)
y = (x - x.mean(1)[:, np.newaxis])
y /= np.sqrt(x.var(1) + epsilon)[:, np.newaxis]
y = y.reshape(batch_size, input_channels, size, size)
y = y * scale.reshape(1, input_channels, 1, 1)
y = y + bias.reshape(1, input_channels, 1, 1)
return (y, )
def ref_nhwc(x, scale, bias):
x = x.swapaxes(2, 3).swapaxes(1, 2)
y = ref_nchw(x, scale, bias)[0]
return (y.swapaxes(1, 2).swapaxes(2, 3), )
self.assertReferenceChecks(
gc, op, [X, scale, bias],
ref_nchw if order == "NCHW" else ref_nhwc)
# TODO(jiayq): when there are backward and GPU implementations, enable
# these two.
# self.assertDeviceChecks(dc, op, [X, scale, bias], [0])
# self.assertGradientChecks(gc, op, [X, scale, bias], 0, [0])
ws = workspace.C.Workspace()
feeds = [("X", X), ("scale", scale), ("bias", bias)]
for blob, arr in feeds:
ws.create_blob(blob).feed(arr)
for _ in range(100):
ws.run(op)
for blob, arr in feeds:
np.testing.assert_array_equal(ws.blobs[blob].fetch(), arr)
@given(sizes=st.lists(st.integers(1, 100), min_size=1),
in_place=st.booleans(),
**hu.gcs)
def test_unsafe_coalesce(self, sizes, in_place, gc, dc):
gAlignment = 32
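        # UnsafeCoalesce packs the inputs into one contiguous byte blob,
        # padding each tensor's storage up to the next 32-byte alignment
        # boundary; the reference implementation below mirrors that layout.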
Xs = [np.random.randn(size)
.astype(np.random.choice([np.float32, np.float64, np.uint8]))
for size in sizes]
op = core.CreateOperator(
"UnsafeCoalesce",
["X_{}".format(i) for i, _ in enumerate(sizes)],
[("X_{}" if in_place else "Y_{}").format(i)
for i, _ in enumerate(sizes)] + ["coalesced"])
self.assertDeviceChecks(dc, op, Xs, list(range(len(sizes) + 1)))
def unsafe_coalesce(*xs):
def to_uint8(x):
x_aligned_bytes = ((x.nbytes + gAlignment - 1) // gAlignment) \
* gAlignment
x_aligned = np.zeros(
shape=(x_aligned_bytes // x.dtype.itemsize, ),
dtype=x.dtype)
x_aligned[:x.size] = x
                # np.frombuffer is the non-deprecated equivalent of
                # np.fromstring for this byte-reinterpretation
                x_cast = np.frombuffer(x_aligned.tobytes(), dtype='<u1')
return x_cast
flat = [to_uint8(x) for x in xs]
coalesced = np.concatenate(flat)
return list(xs) + [coalesced]
self.assertReferenceChecks(gc, op, Xs, unsafe_coalesce)
@given(inp=_dtypes().flatmap(lambda dt: _tensor_and_indices(
elements=st.floats(min_value=0.5, max_value=10), dtype=dt)),
**hu.gcs_cpu_only)
def test_sparse_to_dense(self, inp, gc, dc):
first_dim, X, I = inp
# values don't matter
D = np.random.uniform(0, 1, size=(first_dim,) + X.shape[1:])
op = core.CreateOperator("SparseToDense", ["I", "X", "D"], ["Y"])
def sparse_to_dense(I, X, D):
O = np.zeros(D.shape)
for i, p in enumerate(I):
O[p] += X[i]
return [O]
self.assertReferenceChecks(gc, op, [I, X, D], sparse_to_dense)
self.assertDeviceChecks(dc, op, [I, X, D], [0])
@given(inputs=hu.tensors(n=2, min_dim=2, max_dim=2), **hu.gcs_cpu_only)
def test_dot_product(self, inputs, gc, dc):
X, Y = inputs
op = core.CreateOperator("DotProduct", ["X", "Y"], 'out')
def dotproduct(X, Y):
return (np.sum(X * Y, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
K=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_padding(self, N, M, K, pad_value, gc, dc):
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
pad_value=pad_value)
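        # The narrower of X and Y is conceptually padded with pad_value up to
        # the wider one's column count before taking row-wise dot products,
        # as the reference below spells out.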
def dotproduct(X, Y):
Z = np.ones((N, max(M, K))).astype(np.float32) * pad_value
if M < K:
Z[:, :M] = X
return (np.sum(Z * Y, axis=1), )
else:
Z[:, :K] = Y
return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10),
pad_value=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_dot_product_with_rep_padding(self, N, M, pad_value, gc, dc):
K = 2 * M
X = np.random.rand(N, M).astype(np.float32) - 0.5
Y = np.random.rand(N, K).astype(np.float32) - 0.5
op = core.CreateOperator("DotProductWithPadding", ["X", "Y"], 'out',
replicate=True,
pad_value=pad_value)
        def dotproduct(X, Y):
            # np.tile replaces the deprecated numpy.matlib.repmat
            if M < K:
                Z = np.tile(X, (1, K // M))
                return (np.sum(Z * Y, axis=1), )
            else:
                Z = np.tile(Y, (1, M // K))
                return (np.sum(Z * X, axis=1), )
self.assertReferenceChecks(gc, op, [X, Y], dotproduct)
self.assertDeviceChecks(dc, op, [X, Y], [0])
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(N=st.integers(min_value=2, max_value=10),
M=st.integers(min_value=2, max_value=10), **hu.gcs_cpu_only)
def test_ensure_dense(self, N, M, gc, dc):
# in place
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "X")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
# or not
X = np.random.rand(N, M).astype(np.float32) - 0.5
op = core.CreateOperator("EnsureDense", ["X"], "out")
self.assertReferenceChecks(gc, op, [X], lambda x: [x])
self.assertDeviceChecks(dc, op, [X], [0])
@given(N=st.integers(min_value=10, max_value=100),
M=st.integers(min_value=2, max_value=10),
num_buckets=st.integers(min_value=1, max_value=5),
**hu.gcs_cpu_only)
def test_accumulate_histogram_op(self, N, M, num_buckets, gc, dc):
X = np.random.rand(N, M).astype(np.float32)
lower_bound, upper_bound = 0.1, 0.9
op = core.CreateOperator("AccumulateHistogram", ["X"],
['cur_hist', 'acc_hist'],
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
def histogram(X):
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
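            # The reference is evaluated on a single run of the op, so the
            # accumulated histogram coincides with the current one here.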
cur_hist, acc_hist = hist, hist
return [cur_hist, acc_hist]
self.assertDeviceChecks(dc, op, [X], [0, 1])
self.assertReferenceChecks(gc, op, [X], histogram)
if __name__ == "__main__":
unittest.main()
|
## @package visualize
# Module caffe2.python.visualize
"""Functions that could be used to visualize Tensors.
This is adapted from the old-time iceberk package that Yangqing wrote... Oh,
golden memories. Before decaf and caffe. Why iceberk? Because I was at Berkeley,
bears are vegetarian, and iceberg lettuce has layers of leaves.
(This joke is so lame.)
"""
import numpy as np
from matplotlib import cm, pyplot
def ChannelFirst(arr):
"""Convert a HWC array to CHW."""
ndim = arr.ndim
return arr.swapaxes(ndim - 1, ndim - 2).swapaxes(ndim - 2, ndim - 3)
def ChannelLast(arr):
"""Convert a CHW array to HWC."""
ndim = arr.ndim
return arr.swapaxes(ndim - 3, ndim - 2).swapaxes(ndim - 2, ndim - 1)
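# Illustrative round trip through the two helpers above (a sketch, using a
# hypothetical 32x32 RGB image in HWC layout):
#
#   img_hwc = np.zeros((32, 32, 3))    # H x W x C
#   img_chw = ChannelFirst(img_hwc)    # -> shape (3, 32, 32)
#   assert ChannelLast(img_chw).shape == (32, 32, 3)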
class PatchVisualizer(object):
"""PatchVisualizer visualizes patches.
"""
def __init__(self, gap=1):
self.gap = gap
def ShowSingle(self, patch, cmap=None):
"""Visualizes one single patch.
The input patch could be a vector (in which case we try to infer the shape
of the patch), a 2-D matrix, or a 3-D matrix whose 3rd dimension has 3
channels.
"""
if len(patch.shape) == 1:
patch = patch.reshape(self.get_patch_shape(patch))
elif len(patch.shape) > 2 and patch.shape[2] != 3:
raise ValueError("The input patch shape isn't correct.")
# determine color
if len(patch.shape) == 2 and cmap is None:
cmap = cm.gray
pyplot.imshow(patch, cmap=cmap)
return patch
def ShowMultiple(self, patches, ncols=None, cmap=None, bg_func=np.mean):
"""Visualize multiple patches.
In the passed in patches matrix, each row is a patch, in the shape of either
n*n, n*n*1 or n*n*3, either in a flattened format (so patches would be a
2-D array), or a multi-dimensional tensor. We will try our best to figure
out automatically the patch size.
"""
num_patches = patches.shape[0]
if ncols is None:
ncols = int(np.ceil(np.sqrt(num_patches)))
nrows = int(np.ceil(num_patches / float(ncols)))
if len(patches.shape) == 2:
patches = patches.reshape(
(patches.shape[0], ) + self.get_patch_shape(patches[0])
)
patch_size_expand = np.array(patches.shape[1:3]) + self.gap
image_size = patch_size_expand * np.array([nrows, ncols]) - self.gap
if len(patches.shape) == 4:
if patches.shape[3] == 1:
# gray patches
patches = patches.reshape(patches.shape[:-1])
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
elif patches.shape[3] == 3:
# color patches
image_shape = tuple(image_size) + (3, )
else:
raise ValueError("The input patch shape isn't expected.")
else:
image_shape = tuple(image_size)
if cmap is None:
cmap = cm.gray
image = np.ones(image_shape) * bg_func(patches)
for pid in range(num_patches):
            row = pid // ncols * patch_size_expand[0]
col = pid % ncols * patch_size_expand[1]
image[row:row+patches.shape[1], col:col+patches.shape[2]] = \
patches[pid]
pyplot.imshow(image, cmap=cmap, interpolation='nearest')
pyplot.axis('off')
return image
def ShowImages(self, patches, *args, **kwargs):
"""Similar to ShowMultiple, but always normalize the values between 0 and 1
for better visualization of image-type data.
"""
patches = patches - np.min(patches)
patches /= np.max(patches) + np.finfo(np.float64).eps
return self.ShowMultiple(patches, *args, **kwargs)
def ShowChannels(self, patch, cmap=None, bg_func=np.mean):
""" This function shows the channels of a patch.
The incoming patch should have shape [w, h, num_channels], and each channel
will be visualized as a separate gray patch.
"""
if len(patch.shape) != 3:
raise ValueError("The input patch shape isn't correct.")
patch_reordered = np.swapaxes(patch.T, 1, 2)
return self.ShowMultiple(patch_reordered, cmap=cmap, bg_func=bg_func)
def get_patch_shape(self, patch):
"""Gets the shape of a single patch.
Basically it tries to interprete the patch as a square, and also check if it
is in color (3 channels)
"""
edgeLen = np.sqrt(patch.size)
if edgeLen != np.floor(edgeLen):
# we are given color patches
edgeLen = np.sqrt(patch.size / 3.)
if edgeLen != np.floor(edgeLen):
raise ValueError("I can't figure out the patch shape.")
return (edgeLen, edgeLen, 3)
else:
edgeLen = int(edgeLen)
return (edgeLen, edgeLen)
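# Illustrative usage of PatchVisualizer (a sketch with hypothetical data;
# requires a working matplotlib backend):
#
#   viz = PatchVisualizer(gap=2)
#   patches = np.random.rand(100, 64)   # 100 flattened 8x8 gray patches
#   viz.ShowMultiple(patches)           # patch shape inferred as (8, 8)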
_default_visualizer = PatchVisualizer()
"""Utility functions that directly point to functions in the default visualizer.
These functions don't return anything, so you won't see annoying printouts of
the visualized images. If you want to save the images for example, you should
explicitly instantiate a patch visualizer, and call those functions.
"""
class NHWC(object):
@staticmethod
def ShowSingle(*args, **kwargs):
_default_visualizer.ShowSingle(*args, **kwargs)
@staticmethod
def ShowMultiple(*args, **kwargs):
_default_visualizer.ShowMultiple(*args, **kwargs)
@staticmethod
def ShowImages(*args, **kwargs):
_default_visualizer.ShowImages(*args, **kwargs)
@staticmethod
def ShowChannels(*args, **kwargs):
_default_visualizer.ShowChannels(*args, **kwargs)
class NCHW(object):
@staticmethod
def ShowSingle(patch, *args, **kwargs):
_default_visualizer.ShowSingle(ChannelLast(patch), *args, **kwargs)
@staticmethod
def ShowMultiple(patch, *args, **kwargs):
_default_visualizer.ShowMultiple(ChannelLast(patch), *args, **kwargs)
@staticmethod
def ShowImages(patch, *args, **kwargs):
_default_visualizer.ShowImages(ChannelLast(patch), *args, **kwargs)
@staticmethod
def ShowChannels(patch, *args, **kwargs):
_default_visualizer.ShowChannels(ChannelLast(patch), *args, **kwargs)
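# Illustrative use of the wrappers above (a sketch with hypothetical data):
# the NCHW variants convert to HWC before display, so a batch of CHW color
# patches can be shown directly:
#
#   batch = np.random.rand(10, 3, 8, 8)   # N x C x H x W
#   NCHW.ShowMultiple(batch)              # applies ChannelLast, then displays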
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.schema import Struct, ConstRecord
from caffe2.python import core, workspace
from caffe2.python.session import LocalSession
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.checkpoint import (
CheckpointManager, MultiNodeCheckpointManager, Job, JobRunner)
from caffe2.python.net_builder import ops
from caffe2.python.task import Task, Node
from caffe2.python.test_util import TestCase
from caffe2.python.dataio import ReaderWithLimit
import tempfile
import numpy as np
import shutil
def build_pipeline(node_id):
with Node('reader:%d' % node_id):
with Job.current().init_group, Task():
data_arr = Struct(('val', np.array(range(10))))
data = ConstRecord(ops, data_arr)
ds = Dataset(data, name='dataset:%d' % node_id)
full_reader = ds.reader(ops)
total = ops.Const([100])
def inc_total(rec):
ops.Add([total, rec.val()], [total])
epoch_reader = ReaderWithLimit(full_reader, num_iter=3)
pipe(epoch_reader, processor=inc_total)
Job.current().add_stop_signal(epoch_reader.data_finished())
return [total]
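# How the totals below arise: the counter starts at 100 and each epoch reads
# up to 3 of the values 0..9 in order, so the running totals are
# 100 + 0 + 1 + 2 = 103, + 3 + 4 + 5 = 115, + 6 + 7 + 8 = 136, and + 9 = 145.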
EXPECTED_TOTALS = [103, 115, 136, 145]
class TestCheckpoint(TestCase):
def run_with(self, builder):
with Job() as job:
outputs = build_pipeline(node_id=0)
output_fetcher = Task(step=core.Net('empty'), outputs=outputs)
def fetch_total(session):
session.run(output_fetcher)
return output_fetcher.outputs()[0].fetch()
session, checkpoint = builder()
compiled_job = job.compile(LocalSession)
num_epochs = JobRunner(compiled_job, checkpoint)(session)
        self.assertEqual(num_epochs, len(EXPECTED_TOTALS))
        self.assertEqual(fetch_total(session), EXPECTED_TOTALS[-1])
for initial_epoch in range(1, num_epochs + 1):
session, checkpoint = builder()
JobRunner(
compiled_job,
checkpoint, resume_from_epoch=initial_epoch)(session)
            self.assertEqual(fetch_total(session), EXPECTED_TOTALS[-1])
for epoch in range(1, num_epochs + 1):
session.run(checkpoint.load(epoch))
            self.assertEqual(fetch_total(session), EXPECTED_TOTALS[epoch - 1])
def test_single_checkpoint(self):
# test single node
with tempfile.NamedTemporaryFile() as tmp:
def builder():
ws = workspace.C.Workspace()
session = LocalSession(ws)
checkpoint = CheckpointManager(tmp.name, 'minidb')
return session, checkpoint
self.run_with(builder)
# test multi-node
try:
tmpdir = tempfile.mkdtemp()
def builder():
ws = workspace.C.Workspace()
session = LocalSession(ws)
checkpoint = MultiNodeCheckpointManager(tmpdir, 'minidb')
return session, checkpoint
self.run_with(builder)
finally:
shutil.rmtree(tmpdir)
def test_load_model_from_checkpoints(self):
try:
tmpdir = tempfile.mkdtemp()
for node_id in range(3):
ws = workspace.C.Workspace()
session = LocalSession(ws)
checkpoint = MultiNodeCheckpointManager(tmpdir, 'minidb')
with Job() as job:
build_pipeline(node_id)
compiled_job = job.compile(LocalSession)
job_runner = JobRunner(compiled_job, checkpoint)
num_epochs = job_runner(session)
                self.assertEqual(num_epochs, len(EXPECTED_TOTALS))
# There are 16 blobs after finishing up the job runner.
                self.assertEqual(len(ws.blobs), 16)
ws = workspace.C.Workspace()
session = LocalSession(ws)
            self.assertEqual(len(ws.blobs), 0)
model_blob_names = ['reader:1/task/GivenTensorInt64Fill:0',
'reader:2/task/GivenTensorInt64Fill:0']
checkpoint = MultiNodeCheckpointManager(tmpdir, 'minidb')
with Job() as job:
for node_id in range(3):
build_pipeline(node_id)
compiled_job = job.compile(LocalSession)
job_runner = JobRunner(compiled_job, checkpoint)
job_runner.load_blobs_from_checkpoints(blob_names=model_blob_names,
epoch=1, session=session)
# Check that we can successfully load from checkpoints of epochs
# 1 to 4, but not epoch 5.
for epoch in range(1, 5):
self.assertTrue(
job_runner.load_blobs_from_checkpoints(
blob_names=model_blob_names, epoch=epoch,
session=session))
# Check that all the model blobs are loaded.
for blob_name in model_blob_names:
self.assertTrue(ws.has_blob(blob_name))
                    self.assertEqual(ws.fetch_blob(blob_name),
                                     np.array([EXPECTED_TOTALS[epoch - 1]]))
self.assertFalse(
job_runner.load_blobs_from_checkpoints(
blob_names=model_blob_names, epoch=5, session=session))
finally:
shutil.rmtree(tmpdir)
|
## @package cnn
# Module caffe2.python.cnn
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import logging
class CNNModelHelper(ModelHelper):
"""A helper model so we can write CNN models more easily, without having to
manually define parameter initializations and operators separately.
"""
def __init__(self, order="NCHW", name=None,
use_cudnn=True, cudnn_exhaustive_search=False,
ws_nbytes_limit=None, init_params=True,
skip_sparse_optim=False,
param_model=None):
        logging.warning(
            "[====DEPRECATE WARNING====]: you are creating an "
            "object from the CNNModelHelper class, which will be deprecated "
            "soon. Please use a ModelHelper object with the brew module "
            "instead. For more information, please refer to caffe2.ai, "
            "python/brew.py, and python/brew_test.py."
        )
cnn_arg_scope = {
'order': order,
'use_cudnn': use_cudnn,
'cudnn_exhaustive_search': cudnn_exhaustive_search,
}
if ws_nbytes_limit:
cnn_arg_scope['ws_nbytes_limit'] = ws_nbytes_limit
super(CNNModelHelper, self).__init__(
skip_sparse_optim=skip_sparse_optim,
name="CNN" if name is None else name,
init_params=init_params,
param_model=param_model,
arg_scope=cnn_arg_scope,
)
self.order = order
self.use_cudnn = use_cudnn
self.cudnn_exhaustive_search = cudnn_exhaustive_search
self.ws_nbytes_limit = ws_nbytes_limit
if self.order != "NHWC" and self.order != "NCHW":
raise ValueError(
"Cannot understand the CNN storage order %s." % self.order
)
def ImageInput(self, blob_in, blob_out, use_gpu_transform=False, **kwargs):
return brew.image_input(
self,
blob_in,
blob_out,
order=self.order,
use_gpu_transform=use_gpu_transform,
**kwargs
)
def VideoInput(self, blob_in, blob_out, **kwargs):
return brew.video_input(
self,
blob_in,
blob_out,
**kwargs
)
def PadImage(self, blob_in, blob_out, **kwargs):
# TODO(wyiming): remove this dummy helper later
self.net.PadImage(blob_in, blob_out, **kwargs)
def ConvNd(self, *args, **kwargs):
return brew.conv_nd(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def Conv(self, *args, **kwargs):
return brew.conv(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def ConvTranspose(self, *args, **kwargs):
return brew.conv_transpose(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def GroupConv(self, *args, **kwargs):
return brew.group_conv(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def GroupConv_Deprecated(self, *args, **kwargs):
return brew.group_conv_deprecated(
self,
*args,
use_cudnn=self.use_cudnn,
order=self.order,
cudnn_exhaustive_search=self.cudnn_exhaustive_search,
ws_nbytes_limit=self.ws_nbytes_limit,
**kwargs
)
def FC(self, *args, **kwargs):
return brew.fc(self, *args, **kwargs)
def PackedFC(self, *args, **kwargs):
return brew.packed_fc(self, *args, **kwargs)
def FC_Prune(self, *args, **kwargs):
return brew.fc_prune(self, *args, **kwargs)
def FC_Decomp(self, *args, **kwargs):
return brew.fc_decomp(self, *args, **kwargs)
def FC_Sparse(self, *args, **kwargs):
return brew.fc_sparse(self, *args, **kwargs)
def Dropout(self, *args, **kwargs):
return brew.dropout(
self, *args, order=self.order, use_cudnn=self.use_cudnn, **kwargs
)
def LRN(self, *args, **kwargs):
return brew.lrn(
self, *args, order=self.order, use_cudnn=self.use_cudnn, **kwargs
)
def Softmax(self, *args, **kwargs):
return brew.softmax(self, *args, use_cudnn=self.use_cudnn, **kwargs)
def SpatialBN(self, *args, **kwargs):
return brew.spatial_bn(self, *args, order=self.order, **kwargs)
def InstanceNorm(self, *args, **kwargs):
return brew.instance_norm(self, *args, order=self.order, **kwargs)
def Relu(self, *args, **kwargs):
return brew.relu(
self, *args, order=self.order, use_cudnn=self.use_cudnn, **kwargs
)
def PRelu(self, *args, **kwargs):
return brew.prelu(self, *args, **kwargs)
def Concat(self, *args, **kwargs):
return brew.concat(self, *args, order=self.order, **kwargs)
def DepthConcat(self, *args, **kwargs):
"""The old depth concat function - we should move to use concat."""
print("DepthConcat is deprecated. use Concat instead.")
return self.Concat(*args, **kwargs)
def Sum(self, *args, **kwargs):
return brew.sum(self, *args, **kwargs)
def Transpose(self, *args, **kwargs):
return brew.transpose(self, *args, use_cudnn=self.use_cudnn, **kwargs)
def Iter(self, *args, **kwargs):
return brew.iter(self, *args, **kwargs)
def Accuracy(self, *args, **kwargs):
return brew.accuracy(self, *args, **kwargs)
def MaxPool(self, *args, **kwargs):
return brew.max_pool(
self, *args, use_cudnn=self.use_cudnn, order=self.order, **kwargs
)
def MaxPoolWithIndex(self, *args, **kwargs):
return brew.max_pool_with_index(self, *args, order=self.order, **kwargs)
def AveragePool(self, *args, **kwargs):
return brew.average_pool(
self, *args, use_cudnn=self.use_cudnn, order=self.order, **kwargs
)
@property
def XavierInit(self):
return ('XavierFill', {})
def ConstantInit(self, value):
return ('ConstantFill', dict(value=value))
@property
def MSRAInit(self):
return ('MSRAFill', {})
@property
def ZeroInit(self):
return ('ConstantFill', {})
def AddWeightDecay(self, weight_decay):
return brew.add_weight_decay(self, weight_decay)
@property
def CPU(self):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
return device_option
    @property
    def GPU(self, gpu_id=0):
        # NOTE: since this is a property it is always accessed without
        # arguments, so gpu_id effectively always takes its default of 0.
        device_option = caffe2_pb2.DeviceOption()
        device_option.device_type = caffe2_pb2.CUDA
        device_option.cuda_gpu_id = gpu_id
        return device_option
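# Illustrative sketch of building a tiny model with this (deprecated) helper;
# the blob names and shapes below are assumptions for a hypothetical 32x32
# RGB input:
#
#   model = CNNModelHelper(order="NCHW", name="toy")
#   conv1 = model.Conv('data', 'conv1', dim_in=3, dim_out=16, kernel=5)
#   relu1 = model.Relu(conv1, conv1)
#   pool1 = model.MaxPool(relu1, 'pool1', kernel=2, stride=2)
#   # 32x32 -> 28x28 after the 5x5 conv -> 14x14 after 2x2 pooling
#   pred = model.FC(pool1, 'pred', dim_in=16 * 14 * 14, dim_out=10)
#   softmax = model.Softmax(pred, 'softmax')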
|
## @package test_util
# Module caffe2.python.test_util
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import workspace
import unittest
def rand_array(*dims):
    # With no arguments, np.random.rand() returns a plain float rather than a
    # 0-dim array, which is why we wrap the result in np.array().
return np.array(np.random.rand(*dims) - 0.5).astype(np.float32)
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
])
def setUp(self):
self.ws = workspace.C.Workspace()
workspace.ResetWorkspace()
def tearDown(self):
workspace.ResetWorkspace()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import control, core, test_util, workspace
import logging
logger = logging.getLogger(__name__)
class TestControl(test_util.TestCase):
def setUp(self):
super(TestControl, self).setUp()
self.N_ = 10
self.init_net_ = core.Net("init-net")
cnt = self.init_net_.CreateCounter([], init_count=0)
const_n = self.init_net_.ConstantFill(
[], shape=[], value=self.N_, dtype=core.DataType.INT64)
const_0 = self.init_net_.ConstantFill(
[], shape=[], value=0, dtype=core.DataType.INT64)
self.cnt_net_ = core.Net("cnt-net")
self.cnt_net_.CountUp([cnt])
curr_cnt = self.cnt_net_.RetrieveCount([cnt])
self.init_net_.ConstantFill(
[], [curr_cnt], shape=[], value=0, dtype=core.DataType.INT64)
self.cnt_net_.AddExternalOutput(curr_cnt)
self.cnt_2_net_ = core.Net("cnt-2-net")
self.cnt_2_net_.CountUp([cnt])
self.cnt_2_net_.CountUp([cnt])
curr_cnt_2 = self.cnt_2_net_.RetrieveCount([cnt])
self.init_net_.ConstantFill(
[], [curr_cnt_2], shape=[], value=0, dtype=core.DataType.INT64)
self.cnt_2_net_.AddExternalOutput(curr_cnt_2)
self.cond_net_ = core.Net("cond-net")
cond_blob = self.cond_net_.LT([curr_cnt, const_n])
self.cond_net_.AddExternalOutput(cond_blob)
self.not_cond_net_ = core.Net("not-cond-net")
cond_blob = self.not_cond_net_.GE([curr_cnt, const_n])
self.not_cond_net_.AddExternalOutput(cond_blob)
self.true_cond_net_ = core.Net("true-cond-net")
true_blob = self.true_cond_net_.LT([const_0, const_n])
self.true_cond_net_.AddExternalOutput(true_blob)
self.false_cond_net_ = core.Net("false-cond-net")
false_blob = self.false_cond_net_.GT([const_0, const_n])
self.false_cond_net_.AddExternalOutput(false_blob)
self.idle_net_ = core.Net("idle-net")
self.idle_net_.ConstantFill(
[], shape=[], value=0, dtype=core.DataType.INT64)
def CheckNetOutput(self, nets_and_expects):
"""
        Check that the net output is as expected.
        nets_and_expects is a list of (net, expect) tuples.
"""
for net, expect in nets_and_expects:
output = workspace.FetchBlob(
net.Proto().external_output[-1])
self.assertEqual(output, expect)
def CheckNetAllOutput(self, net, expects):
"""
        Check that every external output of the net is as expected.
expects is a list of bools.
"""
self.assertEqual(len(net.Proto().external_output), len(expects))
for i in range(len(expects)):
output = workspace.FetchBlob(
net.Proto().external_output[i])
self.assertEqual(output, expects[i])
def BuildAndRunPlan(self, step):
plan = core.Plan("test")
plan.AddStep(control.Do('init', self.init_net_))
plan.AddStep(step)
self.assertEqual(workspace.RunPlan(plan), True)
def ForLoopTest(self, nets_or_steps):
step = control.For('myFor', nets_or_steps, self.N_)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, self.N_)])
def testForLoopWithNets(self):
self.ForLoopTest(self.cnt_net_)
self.ForLoopTest([self.cnt_net_, self.idle_net_])
def testForLoopWithStep(self):
step = control.Do('count', self.cnt_net_)
self.ForLoopTest(step)
self.ForLoopTest([step, self.idle_net_])
def WhileLoopTest(self, nets_or_steps):
step = control.While('myWhile', self.cond_net_, nets_or_steps)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, self.N_)])
def testWhileLoopWithNet(self):
self.WhileLoopTest(self.cnt_net_)
self.WhileLoopTest([self.cnt_net_, self.idle_net_])
def testWhileLoopWithStep(self):
step = control.Do('count', self.cnt_net_)
self.WhileLoopTest(step)
self.WhileLoopTest([step, self.idle_net_])
def UntilLoopTest(self, nets_or_steps):
step = control.Until('myUntil', self.not_cond_net_, nets_or_steps)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, self.N_)])
def testUntilLoopWithNet(self):
self.UntilLoopTest(self.cnt_net_)
self.UntilLoopTest([self.cnt_net_, self.idle_net_])
def testUntilLoopWithStep(self):
step = control.Do('count', self.cnt_net_)
self.UntilLoopTest(step)
self.UntilLoopTest([step, self.idle_net_])
def DoWhileLoopTest(self, nets_or_steps):
step = control.DoWhile('myDoWhile', self.cond_net_, nets_or_steps)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, self.N_)])
def testDoWhileLoopWithNet(self):
self.DoWhileLoopTest(self.cnt_net_)
self.DoWhileLoopTest([self.idle_net_, self.cnt_net_])
def testDoWhileLoopWithStep(self):
step = control.Do('count', self.cnt_net_)
self.DoWhileLoopTest(step)
self.DoWhileLoopTest([self.idle_net_, step])
def DoUntilLoopTest(self, nets_or_steps):
step = control.DoUntil('myDoUntil', self.not_cond_net_, nets_or_steps)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, self.N_)])
def testDoUntilLoopWithNet(self):
self.DoUntilLoopTest(self.cnt_net_)
self.DoUntilLoopTest([self.cnt_net_, self.idle_net_])
def testDoUntilLoopWithStep(self):
step = control.Do('count', self.cnt_net_)
self.DoUntilLoopTest(step)
self.DoUntilLoopTest([self.idle_net_, step])
def IfCondTest(self, cond_net, expect, cond_on_blob):
if cond_on_blob:
step = control.Do(
'if-all',
control.Do('count', cond_net),
control.If('myIf', cond_net.Proto().external_output[-1],
self.cnt_net_))
else:
step = control.If('myIf', cond_net, self.cnt_net_)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, expect)])
def testIfCondTrueOnNet(self):
self.IfCondTest(self.true_cond_net_, 1, False)
def testIfCondTrueOnBlob(self):
self.IfCondTest(self.true_cond_net_, 1, True)
def testIfCondFalseOnNet(self):
self.IfCondTest(self.false_cond_net_, 0, False)
def testIfCondFalseOnBlob(self):
self.IfCondTest(self.false_cond_net_, 0, True)
def IfElseCondTest(self, cond_net, cond_value, expect, cond_on_blob):
if cond_value:
run_net = self.cnt_net_
else:
run_net = self.cnt_2_net_
if cond_on_blob:
step = control.Do(
'if-else-all',
control.Do('count', cond_net),
control.If('myIfElse', cond_net.Proto().external_output[-1],
self.cnt_net_, self.cnt_2_net_))
else:
step = control.If('myIfElse', cond_net,
self.cnt_net_, self.cnt_2_net_)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(run_net, expect)])
def testIfElseCondTrueOnNet(self):
self.IfElseCondTest(self.true_cond_net_, True, 1, False)
def testIfElseCondTrueOnBlob(self):
self.IfElseCondTest(self.true_cond_net_, True, 1, True)
def testIfElseCondFalseOnNet(self):
self.IfElseCondTest(self.false_cond_net_, False, 2, False)
def testIfElseCondFalseOnBlob(self):
self.IfElseCondTest(self.false_cond_net_, False, 2, True)
def IfNotCondTest(self, cond_net, expect, cond_on_blob):
if cond_on_blob:
step = control.Do(
'if-not',
control.Do('count', cond_net),
control.IfNot('myIfNot', cond_net.Proto().external_output[-1],
self.cnt_net_))
else:
step = control.IfNot('myIfNot', cond_net, self.cnt_net_)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, expect)])
def testIfNotCondTrueOnNet(self):
self.IfNotCondTest(self.true_cond_net_, 0, False)
def testIfNotCondTrueOnBlob(self):
self.IfNotCondTest(self.true_cond_net_, 0, True)
def testIfNotCondFalseOnNet(self):
self.IfNotCondTest(self.false_cond_net_, 1, False)
def testIfNotCondFalseOnBlob(self):
self.IfNotCondTest(self.false_cond_net_, 1, True)
def IfNotElseCondTest(self, cond_net, cond_value, expect, cond_on_blob):
if cond_value:
run_net = self.cnt_2_net_
else:
run_net = self.cnt_net_
if cond_on_blob:
step = control.Do(
'if-not-else',
control.Do('count', cond_net),
control.IfNot('myIfNotElse',
cond_net.Proto().external_output[-1],
self.cnt_net_, self.cnt_2_net_))
else:
step = control.IfNot('myIfNotElse', cond_net,
self.cnt_net_, self.cnt_2_net_)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(run_net, expect)])
def testIfNotElseCondTrueOnNet(self):
self.IfNotElseCondTest(self.true_cond_net_, True, 2, False)
def testIfNotElseCondTrueOnBlob(self):
self.IfNotElseCondTest(self.true_cond_net_, True, 2, True)
def testIfNotElseCondFalseOnNet(self):
self.IfNotElseCondTest(self.false_cond_net_, False, 1, False)
def testIfNotElseCondFalseOnBlob(self):
self.IfNotElseCondTest(self.false_cond_net_, False, 1, True)
def testSwitch(self):
step = control.Switch(
'mySwitch',
(self.false_cond_net_, self.cnt_net_),
(self.true_cond_net_, self.cnt_2_net_)
)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, 0), (self.cnt_2_net_, 2)])
def testSwitchNot(self):
step = control.SwitchNot(
'mySwitchNot',
(self.false_cond_net_, self.cnt_net_),
(self.true_cond_net_, self.cnt_2_net_)
)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(self.cnt_net_, 1), (self.cnt_2_net_, 0)])
def testBoolNet(self):
bool_net = control.BoolNet(('a', True))
step = control.Do('bool', bool_net)
self.BuildAndRunPlan(step)
self.CheckNetAllOutput(bool_net, [True])
bool_net = control.BoolNet(('a', True), ('b', False))
step = control.Do('bool', bool_net)
self.BuildAndRunPlan(step)
self.CheckNetAllOutput(bool_net, [True, False])
bool_net = control.BoolNet([('a', True), ('b', False)])
step = control.Do('bool', bool_net)
self.BuildAndRunPlan(step)
self.CheckNetAllOutput(bool_net, [True, False])
def testCombineConditions(self):
# combined by 'Or'
combine_net = control.CombineConditions(
'test', [self.true_cond_net_, self.false_cond_net_], 'Or')
step = control.Do('combine',
self.true_cond_net_,
self.false_cond_net_,
combine_net)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(combine_net, True)])
# combined by 'And'
combine_net = control.CombineConditions(
'test', [self.true_cond_net_, self.false_cond_net_], 'And')
step = control.Do('combine',
self.true_cond_net_,
self.false_cond_net_,
combine_net)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(combine_net, False)])
def testMergeConditionNets(self):
# merged by 'Or'
merge_net = control.MergeConditionNets(
'test', [self.true_cond_net_, self.false_cond_net_], 'Or')
step = control.Do('merge', merge_net)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(merge_net, True)])
# merged by 'And'
merge_net = control.MergeConditionNets(
'test', [self.true_cond_net_, self.false_cond_net_], 'And')
step = control.Do('merge', merge_net)
self.BuildAndRunPlan(step)
self.CheckNetOutput([(merge_net, False)])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
import os
import tempfile
import unittest
class TestDB(unittest.TestCase):
def setUp(self):
handle, self.file_name = tempfile.mkstemp()
os.close(handle)
self.data = [("key{}".format(i), "value{}".format(i))
for i in range(1, 10)]
def testSimple(self):
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.write)
for key, value in self.data:
transaction = db.new_transaction()
transaction.put(key, value)
del transaction
del db # should close DB
db = workspace.C.create_db(
"minidb", self.file_name, workspace.C.Mode.read)
cursor = db.new_cursor()
data = []
while cursor.valid():
data.append((cursor.key(), cursor.value()))
cursor.next()
del cursor
db.close() # test explicit db closer
self.assertEqual(data, self.data)
|
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util
class TestScopes(test_util.TestCase):
def testBlobReferenceIsIndependentFromNameScope(self):
blob_v = core.BlobReference("v")
with core.NameScope("foo"):
blob_w = core.BlobReference("w")
with core.NameScope("bar"):
blob_x = core.BlobReference("x")
self.assertEqual(str(blob_v), "v")
self.assertEqual(str(blob_w), "w")
self.assertEqual(str(blob_x), "x")
def testNameScopeWithOp(self):
global_x = core.BlobReference("x")
global_y = core.BlobReference("y")
with core.NameScope("foo"):
# Raw strings should have namescope prepended.
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
# BlobReferences should not.
op = core.CreateOperator("Relu", global_x, global_y)
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "y")
def testNameScopeWithReset(self):
with core.NameScope("foo"):
# foo/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
with core.NameScope("bar"):
# foo/bar/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/bar/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/bar/y")
# Back to foo/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
with core.NameScope("bar", reset=True):
# bar/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "bar/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "bar/y")
# Back to foo/
op = core.CreateOperator("Relu", "x", "y")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
def testDeviceScope(self):
# No device
op = core.CreateOperator("Relu", "x", "y")
self.assertFalse(op.HasField('device_option'))
# explicitly setting a device
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
op = core.CreateOperator("Relu", "x", "y", device_option=device_option)
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
with core.DeviceScope(device_option):
# from device scope
op = core.CreateOperator("Relu", "x", "y")
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
# from an overridden device option
override_device = caffe2_pb2.DeviceOption()
override_device.device_type = caffe2_pb2.CPU
op = core.CreateOperator(
"Relu", "x", "y", device_option=override_device)
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CPU)
# back from normal: no device
op = core.CreateOperator("Relu", "x", "y")
self.assertFalse(op.HasField('device_option'))
device_option = caffe2_pb2.DeviceOption()
def testNameAndDeviceScopeTogether(self):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
with core.DeviceScope(device_option):
with core.NameScope("foo"):
op = core.CreateOperator("Relu", "x", "y")
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "foo/x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "foo/y")
class TestCloneNet(test_util.TestCase):
def testPartialClone(self):
params = core.Net('params')
p1 = params.ConstantFill([], ['p1'])
workspace.CreateNet(params)
workspace.RunNetOnce(params)
n = core.Net('original')
a1 = n.AddExternalInput('a1')
a2 = n.AddExternalInput('a2')
b1, b2 = n.Concat([a1, a2], ['b1', 'b2'], axis=0)
c1 = n.Sum([b1, p1], ['c1'])
c2 = n.Sum([b2], ['c2'])
d = n.Sum([c1, c2], ['d'])
# test that gradient ops are ignored when partial-cloning
n.AddGradientOperators([d])
# test some in-place ops
k = n.Sum([p1], ['k'])
e = n.Sum([d], ['e'])
e = n.Sum([e, k], [e])
e = n.Sum([e], [e])
f = n.Sum(e, ['f'])
def net_assert(net, num_ops, inputs, outputs, internals):
self.assertEqual(len(net.Proto().op), num_ops)
self.assertEqual(set(net.Proto().external_input), inputs)
self.assertEqual(set(net.Proto().external_output), outputs)
all_blobs = set(net.Proto().external_input)
all_blobs |= set(net.Proto().external_output)
for op in net.Proto().op:
all_blobs |= set(op.input) | set(op.output)
self.assertEqual(all_blobs, inputs | outputs | internals)
            # create the net to make sure it's valid
for input in inputs:
workspace.FeedBlob(input, np.array([]))
workspace.CreateNet(net)
n2, (d22, ) = n.ClonePartial('f1', {a1: 'a11', a2: 'a22'}, [d])
net_assert(
n2, 4, {'p1', 'a11', 'a22'}, {'f1/d'},
{'f1/b1', 'f1/b2', 'f1/c1', 'f1/c2', 'p1'})
self.assertTrue(isinstance(d22, core.BlobReference))
self.assertEqual(d22.Net(), n2)
self.assertEqual(str(d22), 'f1/d')
n3, (d22, ) = n.ClonePartial('f2', [b1, b2], [d])
net_assert(
n3, 3, {'p1', 'b1', 'b2'}, {'f2/d'}, {'f2/c1', 'f2/c2', 'p1'})
self.assertEqual(str(d22), 'f2/d')
n4, (c22, ) = n.ClonePartial('f3', [b1], [c1])
net_assert(n4, 1, {'p1', 'b1'}, {'f3/c1'}, {'p1'})
self.assertEqual(str(c22), 'f3/c1')
n5, (c11, c22) = n.ClonePartial('f4', [b1, b2], [c1, c2])
net_assert(n5, 2, {'p1', 'b1', 'b2'}, {'f4/c1', 'f4/c2'}, {'p1'})
self.assertEqual(str(c11), 'f4/c1')
self.assertEqual(str(c22), 'f4/c2')
with self.assertRaises(AssertionError):
n.ClonePartial('f4', [a1, a2, c2], [d])
n6, (e22, ) = n.ClonePartial('f5', [d], [e])
net_assert(n6, 4, {'p1', 'd'}, {'f5/e'}, {'f5/k', 'p1'})
self.assertEqual(str(e22), 'f5/e')
n8, (e22, f22) = n.ClonePartial('f7', [d], [e, f])
net_assert(n8, 5, {'p1', 'd'}, {'f7/e', 'f7/f'}, {'p1', 'f7/k'})
self.assertEqual(str(e22), 'f7/e')
self.assertEqual(str(f22), 'f7/f')
params._CheckLookupTables()
n._CheckLookupTables()
class TestCreateOperator(test_util.TestCase):
def testCreate(self):
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CUDA
device_option.cuda_gpu_id = 1
op = core.CreateOperator(
"Ludicrous", "x", "y", name="ludicrous",
control_input="z", device_option=device_option,
engine="WARP", arg1=1, arg2="2", arg3=[1, 2, 3])
self.assertEqual(op.type, "Ludicrous")
self.assertEqual(op.name, "ludicrous")
self.assertEqual(op.engine, "WARP")
self.assertEqual(len(op.input), 1)
self.assertEqual(op.input[0], "x")
self.assertEqual(len(op.output), 1)
self.assertEqual(op.output[0], "y")
self.assertEqual(len(op.control_input), 1)
self.assertEqual(op.control_input[0], "z")
self.assertTrue(op.HasField('device_option'))
self.assertEqual(op.device_option.device_type, caffe2_pb2.CUDA)
self.assertEqual(op.device_option.cuda_gpu_id, 1)
self.assertTrue(len(op.arg), 3)
self.assertEqual(op.arg[0].name, "arg1")
self.assertEqual(op.arg[1].name, "arg2")
self.assertEqual(op.arg[2].name, "arg3")
self.assertEqual(op.arg[0].i, 1)
self.assertEqual(op.arg[1].s, "2")
self.assertEqual(list(op.arg[2].ints), [1, 2, 3])
def testCreateWithNoneKwarg(self):
with self.assertRaises(ValueError):
core.CreateOperator("Ludicrous", "x", "y", arg1=None)
class TestAutoNaming(test_util.TestCase):
"""
    Test that operators are given distinct names, and that automatically
    generated blob names don't clash within or across networks.
"""
def test_next_blob(self):
def create_net():
net = core.Net('net')
with core.NameScope('foo'):
net.Add(['a', 'b'], net.NextScopedBlob('ab'))
net.Add(['c', 'd'], net.NextBlob('cd'))
return net
net_a = create_net()
net_b = create_net()
        # The created net proto is predictable.
self.assertEqual(net_a.Proto().op, net_b.Proto().op)
self.assertEqual(net_a.Proto().op[0].output[0], 'foo/ab')
self.assertEqual(net_a.Proto().op[1].output[0], 'cd')
net_c = core.Net('net')
# different calls return different blob names
self.assertNotEqual(str(net_c.NextBlob('b')), str(net_c.NextBlob('b')))
def test_auto_naming(self):
a = core.Net('net')
b = core.Net('net')
self.assertNotEqual(a.Proto().name, b.Proto().name)
a_in1 = a.AddExternalInput('a')
b_in1 = b.AddExternalInput('b')
all_outputs_single = []
all_outputs_list = []
def add_ops():
all_outputs_single.append(a.Sum([a_in1, a_in1]))
all_outputs_single.append(a.Sum([a_in1, a_in1]))
all_outputs_single.append(b.Sum([b_in1, b_in1]))
all_outputs_single.append(b.Sum([b_in1, b_in1]))
all_outputs_list.append(a.Sum([a_in1, a_in1], outputs=2))
all_outputs_list.append(a.Sum([a_in1, a_in1], outputs=2))
all_outputs_list.append(b.Sum([b_in1, b_in1], outputs=2))
all_outputs_list.append(b.Sum([b_in1, b_in1], outputs=2))
add_ops()
with core.NameScope('n1'):
add_ops()
# Force reset of lookup tables
a.Proto().name
with core.NameScope('n2'):
add_ops()
all_outputs = []
for s in all_outputs_single:
all_outputs.append(str(s))
for l in all_outputs_list:
for o in l:
all_outputs.append(str(o))
for i, o1 in enumerate(all_outputs):
for j, o2 in enumerate(all_outputs):
if i != j:
self.assertNotEqual(str(o1), str(o2))
a._CheckLookupTables()
b._CheckLookupTables()
class TestAppendNet(test_util.TestCase):
def test_external_inputs_merged_correctly(self):
netA = core.Net("A")
netA.Sum(["in1", "in2"], ["sum1"])
self.assertTrue("in1" in netA.external_inputs)
netB = core.Net("B")
netB.Sum(["in3", "in4"], ["in1"])
netB.AppendNet(netA)
self.assertFalse("in1" in netB.external_inputs)
def test_external_inputs_merged_correctlyB(self):
netA = core.Net("A")
netA.Sum(["in1", "in2"], ["sum1"])
self.assertTrue("in1" in netA.external_inputs)
netB = core.Net("B")
netB.Sum(["in3", "in4"], ["in1"])
netA.AppendNet(netB) # note different order than in prev test
self.assertTrue("in1" in netA.external_inputs)
if __name__ == '__main__':
unittest.main()
|
## @package dataset
# Module caffe2.python.dataset
"""
Implementation of an in-memory dataset with structured schema.
Use this to store and iterate through datasets with complex schema that
fit in memory.
Iterating through entries of this dataset is very fast since the dataset
is stored as a set of native Caffe2 tensors, thus no type conversion or
deserialization is necessary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.dataio import Reader, Writer
from caffe2.python.schema import (
Struct, from_blob_list, Field, from_column_list, InitEmptyRecord)
import numpy as np
class _DatasetReader(Reader):
def __init__(self, dataset, name, batch_size=1):
"""Don't call this directly. Instead, use dataset.reader()"""
Reader.__init__(self, dataset.content())
self.dataset = dataset
self.name = name or (dataset.name + '_cursor')
self.batch_size = batch_size
self.cursor = None
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
[self.name],
fields=self.dataset.fields)
def read(self, read_net):
assert self.cursor, 'setup not called.'
content = self.dataset.content()
with core.NameScope(read_net.NextName(self.name)):
fields = read_net.ReadNextBatch(
[self.cursor] + content.field_blobs(),
content.field_names(),
batch_size=self.batch_size)
if type(fields) is core.BlobReference:
fields = [fields]
return (read_net.IsEmpty([fields[0]]), fields)
def reset(self, net):
net.ResetCursor([self.cursor], [])
class _DatasetRandomReader(Reader):
def __init__(self, dataset, name, indices, batch_size=1):
"""Don't call this directly. Instead, use dataset.random_reader()"""
Reader.__init__(self, dataset.content())
self.dataset = dataset
self.cursor = None
self.name = name or (dataset.name + '_cursor')
self.indices = indices
self.batch_size = batch_size
def setup_ex(self, init_net, exit_net):
if self.cursor is None:
self.cursor = init_net.CreateTreeCursor(
[],
[self.name],
fields=self.dataset.fields)
def reset(self, net):
net.ResetCursor([self.cursor], [])
def computeoffset(self, net):
self.reset(net)
offsets = net.ComputeOffset(
[self.cursor] + self.dataset.content().field_blobs(),
'offsets')
self.offsets = offsets
def sort_and_shuffle(self, net, sort_by_field=None,
shuffle_size=1, batch_size=1):
# no sorting by default
content = self.dataset.content()
sort_by_field_idx = -1
if sort_by_field:
assert sort_by_field in content.field_names(), (
'Must be valid field.')
sort_by_field_idx = content.field_names().index(sort_by_field)
self.reset(net)
indices = net.SortAndShuffle(
[self.cursor] + content.field_blobs(),
'indices',
sort_by_field_idx=sort_by_field_idx,
shuffle_size=shuffle_size,
batch_size=batch_size)
self.indices = indices
def read(self, read_net):
with core.NameScope(read_net.NextName(self.name)):
fields = read_net.ReadRandomBatch(
[self.cursor, self.indices, self.offsets] + (
self.dataset.content().field_blobs()),
self.dataset.content().field_names(),
batch_size=self.batch_size)
return (read_net.IsEmpty([fields[0]]), fields)
class _DatasetWriter(Writer):
def __init__(self, content):
"""Don't call this directly. Use dataset.writer() instead."""
self._content = content
self.mutex = None
def setup_ex(self, init_net, exit_net):
if self.mutex is None:
self.mutex = init_net.CreateMutex([])
def write(self, writer_net, fields):
"""
        Add operations to `writer_net` that append the blobs in `fields` to
        the end of the dataset. An additional operator will also be added
        that checks the consistency of the data in `fields` against the
        dataset schema.
        Args:
            writer_net: The net that will contain the Append operators.
            fields: A list of BlobReferences to be appended to this dataset.
"""
assert self.mutex is not None, 'setup not called.'
field_blobs = self._content.field_blobs()
assert len(fields) == len(field_blobs), (
'Expected %s fields, got %s.' % (len(field_blobs), len(fields)))
writer_net.CheckDatasetConsistency(
fields, [], fields=self._content.field_names())
writer_net.AtomicAppend(
[self.mutex] + field_blobs + list(fields),
field_blobs)
def commit(self, finish_net):
"""Commit is a no-op for an in-memory dataset."""
pass
def Const(net, value, dtype=None, name=None):
"""
Create a 'constant' by first creating an external input in the given
net, and then feeding the corresponding blob with its provided value
in the current workspace. The name is automatically generated in order
to avoid clashes with existing blob names.
"""
assert isinstance(net, core.Net), 'net must be a core.Net instance.'
value = np.array(value, dtype=dtype)
blob = net.AddExternalInput(net.NextName(prefix=name))
workspace.FeedBlob(str(blob), value)
return blob
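# Illustrative usage of Const (a sketch; the blob name is auto-generated):
#
#   init_net = core.Net('init')
#   lr = Const(init_net, 0.1, dtype=np.float32, name='lr')
#   # The value is fed into the workspace immediately, so it can be fetched
#   # right away: workspace.FetchBlob(str(lr)) -> array(0.1, dtype=float32)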
def execution_step_with_progress(name, init_net, substeps, rows_read):
# progress reporter
report_net = core.Net('report_net')
report_net.Print([rows_read], [])
return core.execution_step(
name,
substeps,
report_net=report_net,
concurrent_substeps=True,
report_interval=5)
class Dataset(object):
"""Represents an in-memory dataset with fixed schema.
Use this to store and iterate through datasets with complex schema that
fit in memory.
Iterating through entries of this dataset is very fast since the dataset
is stored as a set of native Caffe2 tensors, thus no type conversion or
deserialization is necessary.
"""
def __init__(self, fields, name=None):
"""Create an un-initialized dataset with schema provided by `fields`.
Before this dataset can be used, it must be initialized, either by
`init_empty` or `init_from_dataframe`.
Args:
fields: either a schema.Struct or a list of field names in a format
compatible with the one described in schema.py.
name: optional name to prepend to blobs that will store the data.
"""
assert isinstance(fields, list) or isinstance(fields, Struct), (
'fields must be either a Struct or a list of raw field names.')
if isinstance(fields, list):
fields = from_column_list(fields)
self.schema = fields
self.fields = fields.field_names()
self.field_types = fields.field_types()
self.name = name or 'dataset'
self.field_blobs = fields.field_blobs() if fields.has_blobs() else None
def init_empty(self, init_net):
"""Initialize the blobs for this dataset with empty values.
Empty arrays will be immediately fed into the current workspace,
and `init_net` will take those blobs as external inputs.
"""
self.field_blobs = InitEmptyRecord(
init_net, self.schema.clone_schema()).field_blobs()
def init_from_dataframe(self, net, dataframe):
"""Initialize the blobs for this dataset from a Pandas dataframe.
Each column of the dataframe will be immediately fed into the current
        workspace, and the `net` will take these blobs as external inputs.
"""
assert len(self.fields) == len(dataframe.columns)
self.field_blobs = [
Const(net, dataframe.as_matrix([col]).flatten(), name=field)
for col, field in enumerate(self.fields)]
def get_blobs(self):
"""
Return the list of BlobReference pointing to the blobs that contain
the data for this dataset.
"""
        assert self.field_blobs, 'Dataset not initialized.'
return self.field_blobs
def content(self):
"""
Return a Record of BlobReferences pointing to the full content of
this dataset.
"""
return from_blob_list(self.schema, self.field_blobs)
def field_names(self):
"""Return the list of field names for this dataset."""
return self.fields
def field_types(self):
"""
Return the list of field dtypes for this dataset.
If a list of strings, not a schema.Struct, was passed to the
        constructor, this will return a list of dtype(np.void).
        NOTE: the `field_types` attribute assigned in __init__ shadows this
        method on instances, so the method is effectively unreachable there.
        """
        return self._field_types
def reader(self, init_net=None, cursor_name=None, batch_size=1):
"""Create a Reader object that is used to iterate through the dataset.
This will append operations to `init_net` that create a TreeCursor,
used to iterate through the data.
NOTE: Currently, it is not safe to append to a dataset while reading.
Args:
init_net: net that will be run once to create the cursor.
cursor_name: optional name for the blob containing a pointer
to the cursor.
batch_size: how many samples to read per iteration.
Returns:
A _DatasetReader that can be used to create operators that will
iterate through the dataset.
"""
assert self.field_blobs, 'Dataset not initialized.'
reader = _DatasetReader(self, cursor_name, batch_size)
if init_net is not None:
reader.setup_ex(init_net, None)
return reader
def random_reader(self, init_net=None, indices=None, cursor_name=None,
batch_size=1):
"""Create a Reader object that is used to iterate through the dataset.
NOTE: The reader order depends on the order in indices.
Args:
init_net: net that will be run once to create the cursor.
indices: blob of reading order
cursor_name: optional name for the blob containing a pointer
to the cursor.
batch_size: how many samples to read per iteration.
Returns:
A DatasetReader that can be used to create operators that will
iterate through the dataset according to indices.
"""
assert self.field_blobs, 'Dataset not initialized.'
reader = _DatasetRandomReader(self, cursor_name, indices, batch_size)
if init_net is not None:
reader.setup_ex(init_net, None)
return reader
def writer(self, init_net=None):
"""Create a Writer that can be used to append entries into the dataset.
NOTE: Currently, it is not safe to append to a dataset
while reading from it.
        NOTE: The current implementation of the writer is not thread safe.
TODO: fixme
Args:
init_net: net that will be run once in order to create the writer.
(currently not used)
"""
assert self.field_blobs, 'Dataset not initialized.'
writer = _DatasetWriter(self.content())
if init_net is not None:
writer.setup_ex(init_net, None)
return writer
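# Illustrative usage sketch (not part of the original module): assemble an
# empty two-column dataset with a reader and a writer. Only graph
# construction is shown; draining the reader is normally done through
# caffe2.python.pipeline or a session. Field names are hypothetical.
def _example_dataset():
    init_net = core.Net('example_init')
    ds = Dataset(['float_col', 'label'], name='example_ds')
    ds.init_empty(init_net)
    reader = ds.reader(init_net, batch_size=16)
    writer = ds.writer(init_net)
    return init_net, reader, writer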
|
## @package text_file_reader
# Module caffe2.python.text_file_reader
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.dataio import Reader
from caffe2.python.schema import Scalar, Struct, data_type_for_dtype
class TextFileReader(Reader):
"""
Wrapper around operators for reading from text files.
"""
def __init__(self, init_net, filename, schema, num_passes=1, batch_size=1):
"""
Create op for building a TextFileReader instance in the workspace.
Args:
init_net : Net that will be run only once at startup.
filename : Path to file to read from.
schema : schema.Struct representing the schema of the data.
                     Currently, only Structs of scalar fields are supported.
num_passes : Number of passes over the data.
batch_size : Number of rows to read at a time.
"""
assert isinstance(schema, Struct), 'Schema must be a schema.Struct'
for name, child in schema.get_children():
assert isinstance(child, Scalar), (
'Only scalar fields are supported in TextFileReader.')
field_types = [
data_type_for_dtype(dtype) for dtype in schema.field_types()]
Reader.__init__(self, schema)
self._reader = init_net.CreateTextFileReader(
[],
filename=filename,
num_passes=num_passes,
field_types=field_types)
self._batch_size = batch_size
def read(self, net):
"""
Create op for reading a batch of rows.
"""
blobs = net.TextFileReaderRead(
[self._reader],
len(self.schema().field_names()),
batch_size=self._batch_size)
if type(blobs) is core.BlobReference:
blobs = [blobs]
is_empty = net.IsEmpty(
[blobs[0]],
core.ScopedBlobReference(net.NextName('should_stop'))
)
return (is_empty, blobs)
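# Illustrative usage sketch (not part of the original module): declare a
# reader over a hypothetical two-column TSV file. Only graph construction is
# shown; nothing is run here, and the path is a placeholder.
def _example_text_file_reader():
    import numpy as np
    init_net = core.Net('init')
    reader = TextFileReader(
        init_net,
        filename='/path/to/data.txt',  # hypothetical path
        schema=Struct(('id', Scalar(np.int32)), ('score', Scalar(np.float32))),
        batch_size=32)
    read_net = core.Net('read')
    should_stop, record = reader.read_record(read_net)
    return should_stop, record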
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
import time
from caffe2.python import workspace, cnn
from caffe2.python import timeout_guard
import caffe2.python.data_workers as data_workers
def dummy_fetcher(fetcher_id, batch_size):
# Create random amount of values
n = np.random.randint(64) + 1
data = np.zeros((n, 3))
labels = []
for j in range(n):
        data[j, :] = (j + fetcher_id)
labels.append(data[j, 0])
return [np.array(data), np.array(labels)]
def dummy_fetcher_rnn(fetcher_id, batch_size):
# Hardcoding some input blobs
T = 20
N = batch_size
D = 33
data = np.random.rand(T, N, D)
label = np.random.randint(N, size=(T, N))
seq_lengths = np.random.randint(N, size=(N))
return [data, label, seq_lengths]
class DataWorkersTest(unittest.TestCase):
def testNonParallelModel(self):
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(name="test")
old_seq_id = data_workers.global_coordinator._fetcher_id_seq
coordinator = data_workers.init_data_input_workers(
model,
["data", "label"],
dummy_fetcher,
32,
2,
input_source_name="unittest"
)
new_seq_id = data_workers.global_coordinator._fetcher_id_seq
self.assertEqual(new_seq_id, old_seq_id + 2)
coordinator.start()
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
for _i in range(500):
with timeout_guard.CompleteInTimeOrDie(5):
workspace.RunNet(model.net.Proto().name)
data = workspace.FetchBlob("data")
labels = workspace.FetchBlob("label")
self.assertEqual(data.shape[0], labels.shape[0])
self.assertEqual(data.shape[0], 32)
for j in range(32):
self.assertEqual(labels[j], data[j, 0])
self.assertEqual(labels[j], data[j, 1])
self.assertEqual(labels[j], data[j, 2])
coordinator.stop_coordinator("unittest")
self.assertEqual(coordinator._coordinators, [])
def testRNNInput(self):
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(name="rnn_test")
old_seq_id = data_workers.global_coordinator._fetcher_id_seq
coordinator = data_workers.init_data_input_workers(
model,
["data1", "label1", "seq_lengths1"],
dummy_fetcher_rnn,
32,
2,
dont_rebatch=False,
batch_columns=[1, 1, 0],
)
new_seq_id = data_workers.global_coordinator._fetcher_id_seq
self.assertEqual(new_seq_id, old_seq_id + 2)
coordinator.start()
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
while coordinator._coordinators[0]._inputs < 100:
time.sleep(0.01)
# Run a couple of rounds
workspace.RunNet(model.net.Proto().name)
workspace.RunNet(model.net.Proto().name)
# Wait for the enqueue thread to get blocked
time.sleep(0.2)
# We don't dequeue on caffe2 side (as we don't run the net)
# so the enqueue thread should be blocked.
# Let's now shutdown and see it succeeds.
self.assertTrue(coordinator.stop())
def testInputOrder(self):
#
        # Create two models (train and validation) with the same input blob
        # names and ensure that both get the data in the correct order
#
workspace.ResetWorkspace()
self.counters = {0: 0, 1: 1}
def dummy_fetcher_rnn_ordered1(fetcher_id, batch_size):
# Hardcoding some input blobs
T = 20
N = batch_size
D = 33
data = np.zeros((T, N, D))
data[0][0][0] = self.counters[fetcher_id]
label = np.random.randint(N, size=(T, N))
label[0][0] = self.counters[fetcher_id]
seq_lengths = np.random.randint(N, size=(N))
seq_lengths[0] = self.counters[fetcher_id]
self.counters[fetcher_id] += 1
return [data, label, seq_lengths]
workspace.ResetWorkspace()
model = cnn.CNNModelHelper(name="rnn_test_order")
coordinator = data_workers.init_data_input_workers(
model,
input_blob_names=["data2", "label2", "seq_lengths2"],
fetch_fun=dummy_fetcher_rnn_ordered1,
batch_size=32,
max_buffered_batches=1000,
num_worker_threads=1,
dont_rebatch=True,
input_source_name='train'
)
coordinator.start()
val_model = cnn.CNNModelHelper(name="rnn_test_order_val")
coordinator1 = data_workers.init_data_input_workers(
val_model,
input_blob_names=["data2", "label2", "seq_lengths2"],
fetch_fun=dummy_fetcher_rnn_ordered1,
batch_size=32,
max_buffered_batches=1000,
num_worker_threads=1,
dont_rebatch=True,
input_source_name='val'
)
coordinator1.start()
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.CreateNet(val_model.net)
while coordinator._coordinators[0]._inputs < 900:
time.sleep(0.01)
for m in (model, val_model):
print(m.net.Proto().name)
workspace.RunNet(m.net.Proto().name)
last_data = workspace.FetchBlob('data2')[0][0][0]
last_lab = workspace.FetchBlob('label2')[0][0]
last_seq = workspace.FetchBlob('seq_lengths2')[0]
            # Run a few rounds
for _i in range(10):
workspace.RunNet(m.net.Proto().name)
data = workspace.FetchBlob('data2')[0][0][0]
lab = workspace.FetchBlob('label2')[0][0]
seq = workspace.FetchBlob('seq_lengths2')[0]
self.assertEqual(data, last_data + 1)
self.assertEqual(lab, last_lab + 1)
self.assertEqual(seq, last_seq + 1)
last_data = data
last_lab = lab
last_seq = seq
time.sleep(0.2)
self.assertTrue(coordinator.stop())
|
## @package mkl_test_util
# Module caffe2.python.mkl_test_util
"""
The MKL test utils are a small addition on top of the hypothesis test utils
under caffe2/python, which allow one to more easily test MKL-related
operators.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace
from caffe2.python import hypothesis_test_util as hu
cpu_do = hu.cpu_do
gpu_do = hu.gpu_do
mkl_do = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.MKLDNN)
device_options = hu.device_options + (
[mkl_do] if workspace.C.has_mkldnn else [])
def device_checker_device_options():
return st.just(device_options)
def gradient_checker_device_option():
return st.sampled_from(device_options)
gcs = dict(
gc=gradient_checker_device_option(),
dc=device_checker_device_options()
)
gcs_cpu_only = dict(gc=st.sampled_from([cpu_do]), dc=st.just([cpu_do]))
gcs_gpu_only = dict(gc=st.sampled_from([gpu_do]), dc=st.just([gpu_do]))
gcs_mkl_only = dict(gc=st.sampled_from([mkl_do]), dc=st.just([mkl_do]))
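# Illustrative usage sketch (hypothetical test case, not part of the original
# module): `gcs` plugs straight into a hypothesis `@given`, so a single test
# body is exercised on CPU, GPU and, when available, MKLDNN.
def _example_gcs_usage():
    from hypothesis import given
    from caffe2.python import core

    class _ExampleReluTest(hu.HypothesisTestCase):
        @given(X=hu.tensor(), **gcs)
        def test_relu(self, X, gc, dc):
            op = core.CreateOperator('Relu', ['X'], ['Y'], device_option=gc)
            self.assertDeviceChecks(dc, op, [X], [0])

    return _ExampleReluTest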
|
## @package net_builder
# Module caffe2.python.net_builder
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, context
from caffe2.python.task import Task, TaskGroup
@context.define_context()
class NetBuilder(object):
"""
Scope-driven mechanism for building nets, loops and conditional blocks.
Example:
from caffe2.python.net_builder import NetBuilder, ops
with NetBuilder() as nb:
c = ops.Const(5)
d = ops.Const(0)
with ops.loop():
ops.stop_if(ops.LE([c, ops.Const(0)]))
ops.Add([c, ops.Const(-1)], [c])
with ops.If(ops.GE([c, ops.Const(3)])):
ops.Add([d, ops.Const(10)])
ops.Print(c, [])
ops.Print(d, [])
step = core.to_execution_step(nb)
"""
def __init__(self, name=None, _stop_blob_required=False,
_stop_blob=None, _fullname=None):
nb = NetBuilder.current(required=False)
assert not _fullname or not name, 'Cannot set both _fullname and name'
self.name = _fullname or '/'.join(filter(lambda x: x, (
nb.name if nb else None, name)))
self._frozen = False
self._current_net = None
self._children = []
self._stop_blob = _stop_blob
self._stop_blob_required = _stop_blob_required
def stop_blob(self):
"""
Returns the BlobReference to the stop_blob of this NetBuilder.
If one is not yet available, creates one.
        This function assumes that the stop_blob() will be used immediately
in the current net, so it doesn't initialize it if the current net is
the first of the builder.
"""
if self._stop_blob is None:
net = self.current_net()
self._stop_blob = core.BlobReference(
net.NextName('stop_blob'), net=net)
if self._current_net != self._children[0]:
self._children.insert(0, core.Net('stop_blob_init'))
self._children[0].Const(False, blob_out=self._stop_blob)
return self._stop_blob
def stop_if(self, blob):
ops.Copy(blob, self.stop_blob())
self._current_net = None
def _assert_mutable(self):
assert not self._frozen, (
'This NetBuilder (%s) has been built already.' % self.name)
def add(self, child):
self._assert_mutable()
self._current_net = None
self._children.append(child)
        # TODO: check that it's not a DAG net
if isinstance(child, core.Net):
self._current_net = child
return child
def current_net(self, name=None):
self._assert_mutable()
if self._current_net is None or name is not None:
self.add(core.Net(name))
return self._current_net
def freeze(self):
for child in self._children:
if hasattr(child, 'freeze'):
child.freeze()
self._current_net = None
self._frozen = True
def get(self):
self.freeze()
return self._children
def __exit__(self, etype, *args):
self.freeze()
if etype is not None:
return
assert (not self._stop_blob_required) or self._stop_blob is not None, (
'This NetBuilder (%s) requires a stop condition ' % self.name +
'to be set with `stop` or `stop_if`')
def __str__(self):
return self.name or 'Un-named NetBuilder'
class Operations(object):
"""
Operations to be used in the context of a NetBuilder.
"""
def net(self, net=None, name=None):
"""
        Retrieves the current net, or adds a new net to the builder.
Args:
net: If provided, add the given net to the active builder.
Else, returns the current Net or creates a new one as needed.
name: if provided, creates a new Net with given name and makes
it the new current net of the active builder. Cannot
be provided if net is provided.
"""
assert name is None or net is None, (
'Cannot provide both `net` and `name`.')
if net is not None:
NetBuilder.current().add(net)
return net
return NetBuilder.current().current_net(name=name)
def __getattr__(self, op_type):
"""
Adds an operator call to the currently active Net.
"""
if op_type.startswith('__'):
raise AttributeError()
# We want hasattr to work properly even if no context is active.
if NetBuilder.current(required=False) is None:
raise AttributeError('No active NetBuilder.')
return getattr(self.net(), op_type)
def task_group(self):
"""
Creates a local task group which will execute as the next step of
the current NetBuilder.
"""
from caffe2.python import task
group = NetBuilder.current()
with task.Cluster():
with task.Node('local'):
tg = task.TaskGroup()
group.add(tg)
return tg
def stop(self):
"""
Stop execution of the current execution step.
Example:
ops.Print(a, 0)
ops.stop()
ops.Print(b, 0)
In the example, 'b' will never be printed.
"""
return self.stop_if(ops.Const(True))
def stop_if(self, blob):
"""
Stop execution of the current execution step if the
condition `blob` is met.
Example:
ops.Print(a, 0)
ops.stop_if(ops.LE([x, ops.Const(0)]))
ops.Print(b, 0)
        In the example, 'b' will only be printed if the value of scalar
        tensor 'x' is greater than 0.
"""
return NetBuilder.current().stop_if(blob)
def loop(self, iters=None, name=None):
"""
Creates a NetBuilder that will execute in a loop as the next step of
the current NetBuilder. If `iters` is provided, the loop will execute
for `iters` iterations and then stop. `iters` can be a constant or a
BlobReference. If `iters` is not provided, the loop will execute
until `ops.stop` or `ops.stop_if` is called.
Examples:
a = ops.Const(5)
with ops.loop():
ops.stop_if(ops.LE([a, ops.Const(0)]))
ops.Print(a, 0)
ops.Add([a, ops.Const(-1)], [a])
Above, 'a' will be printed 5 times, with values 5 to 1.
with ops.loop(10) as loop:
ops.LogInfo(loop.iter())
This will print the numbers from 0 to 9.
x = ops.Add([ops.Const(10), ops.Const(10)])
with ops.loop(x) as loop:
ops.LogInfo(loop.iter())
This will print the numbers from 0 to 19.
"""
return NetBuilder.current().add(_Loop(iters, name=name))
def stop_guard(self, has_stopped_blob=None, name=None):
"""
Creates a NetBuilder that will execute once as the next step of the
current NetBuilder. After execution, a bool tensor will indicate
whether the inner execution was halted with `stop` or `stop_if`.
Example:
a = ops.Const(True)
with ops.stop_guard() as sg1:
ops.stop_if(a)
ops.Print(ops.Const('did not stop'))
b = ops.Const(False)
with ops.stop_guard() as sg2:
ops.stop_if(b)
ops.Print(ops.Const('did not stop'))
ops.Print(sg1.has_stopped(), [])
ops.Print(sg2.has_stopped(), [])
In the example, 'did not stop' will be printed once,
followed by True and False.
"""
return NetBuilder.current().add(
_StopGuard(has_stopped_blob=has_stopped_blob, name=name))
def If(self, cond, name=None):
"""
Creates a NetBuilder that will execute once as the next step of the
current NetBuilder if the blob `cond` is True.
Example:
with ops.If(ops.Const(True)):
ops.Print(ops.Const('Will print'))
with ops.If(ops.Const(False)):
ops.Print(ops.Const('Wont print'))
The example will print 'Will print' once.
"""
return NetBuilder.current().add(_RunIf(cond, name=name))
def task_init(self):
"""
Defines operations that will be executed once at task startup.
Useful when implementing processors, that don't have access to the Task
top-level structure.
Example:
def my_processor(rec):
with ops.task_init():
one = ops.Const(1)
                    two = ops.Const(2)
                return Tuple(
                    ops.Add(rec[0](), one), ops.Add(rec[1](), two))
"""
setup = _SetupBuilder(_SetupBuilder.INIT)
self.net().add_attribute(Task.TASK_SETUP, setup)
return setup
def task_exit(self):
"""
Define operations to be executed at task shutdown.
Useful when implementing processors, that don't have access to the Task
top-level structure.
Example:
def read_queue(queue):
with ops.task_exit():
queue.close(ops.net())
return queue.read(ops.net())
"""
setup = _SetupBuilder(_SetupBuilder.EXIT)
self.net().add_attribute(Task.TASK_SETUP, setup)
return setup
def local_init(self):
"""
Similar to `task_init`, but executes at TaskGroup's startup instead,
before any task of the group starts executing.
"""
setup = _SetupBuilder(_SetupBuilder.INIT)
self.net().add_attribute(TaskGroup.LOCAL_SETUP, setup)
return setup
def local_exit(self):
"""
Similar to `task_init`, but executes at TaskGroup's exit instead,
after all tasks of the group finished execution.
"""
setup = _SetupBuilder(_SetupBuilder.EXIT)
self.net().add_attribute(TaskGroup.LOCAL_SETUP, setup)
return setup
def task_reporter(self, interval_ms=1000, name=None):
"""
Define operations to be executed at every time interval from
task start-up to finish. These operations are guaranteed to
execute at least once after all other operations of the task are
finished.
Example:
with ops.task_reporter(interval_ms=10000):
ops.LogInfo('10s elapsed')
"""
return _ReporterBuilder(interval_ms, net=self.net(), name=name)
def local_reporter(self, interval_ms=1000, name=None):
"""
        Similar to task_reporter, but operations defined within this block
will run repeatedly for as long as any of the tasks in the current
TaskGroup have not finished.
"""
return _ReporterBuilder(interval_ms, name=name)
ops = Operations()
class _ReporterBuilder(NetBuilder):
def __init__(self, interval_ms, net=None, name=None):
NetBuilder.__init__(self, name)
self._net = net
self.interval_ms = interval_ms
def __exit__(self, etype, *args):
if etype is None:
step = core.to_execution_step(self)
step.RunEveryMillis(self.interval_ms)
if self._net:
self._net.add_attribute(Task.REPORT_STEP, step)
else:
TaskGroup.current().report_step(
step, interval_ms=self.interval_ms)
NetBuilder.__exit__(self, etype, *args)
class _SetupBuilder(NetBuilder):
INIT = 'init'
EXIT = 'exit'
def __init__(self, type, name=None):
NetBuilder.__init__(self, name)
self.type = type
def setup(self, net):
if self.type == _SetupBuilder.INIT:
return core.to_execution_step(self)
def exit(self, net):
if self.type == _SetupBuilder.EXIT:
return core.to_execution_step(self)
class _RunOnce(NetBuilder):
def __init__(self, name=None):
NetBuilder.__init__(self, name)
def __exit__(self, etype, *args):
if etype is None and self._stop_blob is not None:
ops.stop()
NetBuilder.__exit__(self, etype, *args)
class _StopGuard(_RunOnce):
def __init__(self, has_stopped_blob=None, name=None):
_RunOnce.__init__(self, name)
self._stopped = has_stopped_blob
self._ran = False
def __enter__(self):
r = _RunOnce.__enter__(self)
self._stopped = ops.Const(True, blob_out=self._stopped)
return r
def __exit__(self, etype, *args):
if etype is None:
self._ran = True
ops.Const(False, blob_out=self._stopped)
_RunOnce.__exit__(self, etype, *args)
def has_stopped(self):
"""
Return a blob that will be set to scalar bool `True` after
this net builder ran, iff it was halted early.
"""
assert self._ran, 'Context not used yet.'
return self._stopped
class _Loop(NetBuilder):
def __init__(self, iters=None, name=None):
NetBuilder.__init__(self, name, _stop_blob_required=True)
if iters is not None:
self._inc = ops.Const(1)
self._iter = ops.Const(0)
self._num_iters = (
iters if isinstance(iters, core.BlobReference)
else ops.Const(iters))
else:
self._num_iters = None
def iter(self):
assert self._num_iters is not None, (
'This loop does not have a number of iterations.')
assert self._iter is not None, (
'iter() must be called from inside the loop context')
return self._iter
def __enter__(self):
builder = NetBuilder.__enter__(self)
if self._num_iters is not None:
ops.stop_if(ops.GE([self._iter, self._num_iters]))
return builder
def __exit__(self, type, *args):
if type is None and self._num_iters is not None:
self.current_net().Add([self._iter, self._inc], [self._iter])
NetBuilder.__exit__(self, type, *args)
class _RunIf(_RunOnce):
def __init__(self, cond_blob=None, name=None, _already_ran=None):
_RunOnce.__init__(self, name)
assert cond_blob or _already_ran
self._is_else = cond_blob is None
if _already_ran is None:
self._else_blob = ops.Not(cond_blob)
self._already_ran = ops.Const(False)
else:
self._already_ran = _already_ran
self._else_blob = _already_ran if cond_blob is None else (
ops.Or([_already_ran, ops.Not(cond_blob)]))
def __enter__(self):
r = _RunOnce.__enter__(self)
ops.stop_if(self._else_blob)
ops.Const(True, blob_out=self._already_ran)
return r
def Elif(self, cond, name=None):
assert not self._is_else, 'Else not allowed for an Else.'
return NetBuilder.current().add(_RunIf(
cond, name=name or self.name, _already_ran=self._already_ran))
def Else(self, name=None):
assert not self._is_else, 'Elif not allowed for an Else.'
return NetBuilder.current().add(
_RunIf(name=name or self.name, _already_ran=self._already_ran))
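# Illustrative sketch (not part of the original module): build a counting
# loop with the `ops` helpers above and run it through a Plan. Assumes
# core.Plan / workspace.RunPlan as provided by caffe2.python.
def _example_net_builder():
    from caffe2.python import workspace
    with NetBuilder() as nb:
        total = ops.Const(0)
        with ops.loop(5) as loop:
            ops.Add([total, loop.iter()], [total])
    plan = core.Plan('net_builder_example')
    plan.AddStep(core.to_execution_step(nb))
    workspace.RunPlan(plan)
    # total now holds 0 + 1 + 2 + 3 + 4 = 10
    return workspace.FetchBlob(str(total))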
|
## @package device_checker
# Module caffe2.python.device_checker
import numpy as np
import copy
from caffe2.python import workspace
class DeviceChecker(object):
"""A device checker in Python to check consistency across multiple devices.
This is not the most efficient way to check devices, as the Python interface
will involve a lot of copy back and forth operations. Use at your own risk.
"""
def __init__(self, threshold, device_options):
self._threshold = threshold
self._device_options = device_options
def CheckSimple(self, op, inputs, outputs_to_check,
input_device_options=None):
"""Checks the operator with different device implementations.
Inputs:
op: the operator to be checked.
inputs: the input data in numpy arrays.
outputs_to_check: the outputs to check between devices.
input_device_options: a mapping from input name to a device to use
(instead of self._device_options)
Outputs:
boolean: True if it passes, False if it does not pass.
"""
op = copy.deepcopy(op)
input_device_options = input_device_options or {}
# Entering the checker workspace
old_ws_name = workspace.CurrentWorkspace()
results = []
workspace.SwitchWorkspace("_device_check_", True)
        for device_option in self._device_options:
for i, arr in enumerate(inputs):
workspace.FeedBlob(
op.input[i], np.array(arr),
input_device_options.get(op.input[i], device_option))
op.device_option.CopyFrom(device_option)
workspace.RunOperatorOnce(op)
results.append(
[workspace.FetchBlob(op.output[idx])
for idx in outputs_to_check])
# Everything is done, reset the workspace.
workspace.ResetWorkspace()
# After running on all devices, check correctness
success = True
for i in range(1, len(self._device_options)):
for j in range(len(outputs_to_check)):
x = results[i][j]
y = results[0][j]
if not np.allclose(x, y,
atol=self._threshold, rtol=self._threshold):
print('Failure in checking device option {}'
' and output {}. The outputs are:'
.format(i, op.output[outputs_to_check[j]]))
print(x.flatten())
print(y.flatten())
print(np.max(np.abs(x - y)))
success = False
# else:
# print ('Passed device pair (0, %d), %s %s' %
# (i, outputs_to_check[j], y.shape))
workspace.SwitchWorkspace(old_ws_name)
return success
def CheckNet(self, net, inputs={}, blobs_to_check=None, ignore=set()):
"""Checks a network by inspecting all of its intermediate results, and
see if things match.
"""
old_ws_name = workspace.CurrentWorkspace()
results = []
if blobs_to_check is None:
blobs_to_check = sum([list(op.output) for op in net.op], [])
blobs_to_check = [b for b in blobs_to_check if b not in ignore]
workspace.SwitchWorkspace("_device_check_", True)
for i, device_option in enumerate(self._device_options):
for name, arr in inputs.items():
# print 'feeding', name
workspace.FeedBlob(name, arr, device_option)
for op in net.op:
op.device_option.CopyFrom(device_option)
workspace.RunNetOnce(net)
results.append(
[workspace.FetchBlob(name) for name in blobs_to_check]
)
# After running on all devices, check correctness
success = True
for i in range(1, len(results)):
for j in range(len(blobs_to_check)):
x = results[i][j]
y = results[0][j]
if not np.allclose(x, y,
atol=self._threshold, rtol=self._threshold):
print('Failure in checking device option {}'
' and output {}. The outputs are:'
.format(i, blobs_to_check[j]))
print(x.flatten())
print(y.flatten())
print(np.max(np.abs(x - y)))
success = False
# else:
# print ('Passed device pair (%d, %d), %s %s: %s' %
# (i, j, blobs_to_check[j], y.shape,
# str(y.flatten())))
workspace.SwitchWorkspace(old_ws_name)
return success
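# Illustrative usage sketch (not part of the original module): check a single
# Relu op between CPU and GPU. Assumes a CUDA-enabled build with at least one
# GPU; threshold and shapes are arbitrary.
def _example_device_checker():
    from caffe2.proto import caffe2_pb2
    from caffe2.python import core
    cpu_option = caffe2_pb2.DeviceOption()  # defaults to CPU
    gpu_option = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CUDA)
    checker = DeviceChecker(1e-4, [cpu_option, gpu_option])
    op = core.CreateOperator('Relu', ['X'], ['Y'])
    x = np.random.randn(4, 4).astype(np.float32)
    return checker.CheckSimple(op, [x], [0])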
|
## @package context
# Module caffe2.python.context
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import threading
class ContextInfo(object):
def __init__(self, cls, allow_default, arg_name):
self.cls = cls
self.allow_default = allow_default
self.arg_name = arg_name
self._local_stack = threading.local()
@property
def _stack(self):
if not hasattr(self._local_stack, 'obj'):
self._local_stack.obj = []
return self._local_stack.obj
def enter(self, value):
self._stack.append(value)
def exit(self, value):
assert len(self._stack) > 0, 'Context %s is empty.' % self.cls
assert self._stack.pop() == value
def get_active(self, required=True):
if len(self._stack) == 0:
if not required:
return None
assert self.allow_default, (
'Context %s is required but none is active.' % self.cls)
self.enter(self.cls())
return self._stack[-1]
class ContextManager(object):
def __init__(self):
self._ctxs = {}
def register(self, ctx_info):
assert isinstance(ctx_info, ContextInfo)
assert (ctx_info.cls not in self._ctxs), (
'Context %s already registered' % ctx_info.cls)
self._ctxs[ctx_info.cls] = ctx_info
def get(self, cls):
assert cls in self._ctxs, 'Context %s not registered.' % cls
return self._ctxs[cls]
_CONTEXT_MANAGER = ContextManager()
def context_manager():
global _CONTEXT_MANAGER
return _CONTEXT_MANAGER
def __enter__(self):
if self._prev_enter is not None:
self._prev_enter()
context_manager().get(self._ctx_class).enter(self)
return self
def __exit__(self, *args):
context_manager().get(self._ctx_class).exit(self)
if self._prev_exit is not None:
self._prev_exit(*args)
@classmethod
def current(cls, value=None, required=True):
return get_active_context(cls, value, required)
class define_context(object):
def __init__(self, arg_name=None, allow_default=False):
self.arg_name = arg_name
self.allow_default = allow_default
def __call__(self, cls):
assert not hasattr(cls, '_ctx_class'), (
'%s parent class (%s) already defines context.' % (
cls, cls._ctx_class))
context_manager().register(
ContextInfo(cls, self.allow_default, self.arg_name))
cls._prev_enter = cls.__enter__ if hasattr(cls, '__enter__') else None
cls._prev_exit = cls.__exit__ if hasattr(cls, '__exit__') else None
cls._ctx_class = cls
cls.__enter__ = __enter__
cls.__exit__ = __exit__
cls.current = current
return cls
def get_active_context(cls, val=None, required=True):
ctx_info = context_manager().get(cls)
if val is not None:
assert isinstance(val, cls), (
'Wrong context type. Expected: %s, got %s.' % (cls, type(val)))
return val
return ctx_info.get_active(required=required)
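# Illustrative usage sketch (hypothetical class, not part of the original
# module): any class decorated with `define_context` gains a thread-local,
# stackable `current()` accessor.
def _example_define_context():
    @define_context()
    class MyScope(object):
        def __init__(self, tag=None):
            self.tag = tag

    with MyScope('outer'):
        with MyScope('inner'):
            assert MyScope.current().tag == 'inner'
        assert MyScope.current().tag == 'outer'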
|
# This is a large test that goes through the translation of the bvlc caffenet
# model, runs an example through the whole model, and verifies numerically
# that all the results look right. By default, it is disabled unless you
# explicitly want to run it.
from caffe2.proto import caffe2_pb2
from caffe.proto import caffe_pb2
from google.protobuf import text_format
import numpy as np
import os
from caffe2.python import caffe_translator, utils, workspace, test_util
import sys
import unittest
@unittest.skipIf(not os.path.exists('data/testdata/caffe_translator'),
'No testdata existing for the caffe translator test. Exiting.')
def setUpModule():
# We will do all the computation stuff in the global space.
caffenet = caffe_pb2.NetParameter()
caffenet_pretrained = caffe_pb2.NetParameter()
text_format.Merge(
open('data/testdata/caffe_translator/deploy.prototxt').read(), caffenet
)
caffenet_pretrained.ParseFromString(
open(
'data/testdata/caffe_translator/bvlc_reference_caffenet.caffemodel')
.read()
)
net, pretrained_params = caffe_translator.TranslateModel(
caffenet, caffenet_pretrained, is_test=True
)
with open('data/testdata/caffe_translator/'
'bvlc_reference_caffenet.translatedmodel',
'w') as fid:
fid.write(str(net))
for param in pretrained_params.protos:
workspace.FeedBlob(param.name, utils.Caffe2TensorToNumpyArray(param))
# Let's also feed in the data from the Caffe test code.
data = np.load('data/testdata/caffe_translator/data_dump.npy').astype(
np.float32)
workspace.FeedBlob('data', data)
# Actually running the test.
workspace.RunNetOnce(net.SerializeToString())
class TestNumericalEquivalence(test_util.TestCase):
def testBlobs(self):
names = [
"conv1", "pool1", "norm1", "conv2", "pool2", "norm2", "conv3",
"conv4", "conv5", "pool5", "fc6", "fc7", "fc8", "prob"
]
for name in names:
print('Verifying {}'.format(name))
caffe2_result = workspace.FetchBlob(name)
reference = np.load(
'data/testdata/caffe_translator/' + name + '_dump.npy'
)
self.assertEqual(caffe2_result.shape, reference.shape)
scale = np.max(caffe2_result)
np.testing.assert_almost_equal(
caffe2_result / scale,
reference / scale,
decimal=5
)
if __name__ == '__main__':
if len(sys.argv) == 1:
print(
'If you do not explicitly ask to run this test, I will not run it. '
'Pass in any argument to have the test run for you.'
)
sys.exit(0)
unittest.main()
|
## @package caffe_translator
# Module caffe2.python.caffe_translator
#!/usr/bin/env python2
import argparse
import copy
import logging
import numpy as np # noqa
from caffe2.proto import caffe2_pb2, caffe2_legacy_pb2
from caffe.proto import caffe_pb2
from caffe2.python import core, utils, workspace
from google.protobuf import text_format
logging.basicConfig()
log = logging.getLogger("caffe_translator")
log.setLevel(logging.INFO)
def _StateMeetsRule(state, rule):
"""A function that reproduces Caffe's StateMeetsRule functionality."""
if rule.HasField('phase') and rule.phase != state.phase:
return False
if rule.HasField('min_level') and state.level < rule.min_level:
return False
if rule.HasField('max_level') and state.level > rule.max_level:
return False
curr_stages = set(list(state.stage))
    # all stages in rule.stage should be present, otherwise it's not a match.
if len(rule.stage) and any([s not in curr_stages for s in rule.stage]):
return False
    # none of the stages in rule.not_stage should be present, otherwise it's not a match.
if len(rule.not_stage) and any([s in curr_stages for s in rule.not_stage]):
return False
# If none of the nonmatch happens, return True.
return True
def _ShouldInclude(net_state, layer):
"""A function that reproduces Caffe's inclusion and exclusion rule."""
ret = (len(layer.include) == 0)
# check exclude rules: if any exclusion is met, we shouldn't include.
ret &= not any([_StateMeetsRule(net_state, rule) for rule in layer.exclude])
if len(layer.include):
# check include rules: if any inclusion is met, we should include.
ret |= any([_StateMeetsRule(net_state, rule) for rule in layer.include])
return ret
class TranslatorRegistry(object):
registry_ = {}
@classmethod
def Register(cls, op_name):
"""A decorator for registering gradient mappings."""
def Wrapper(func):
cls.registry_[op_name] = func
return func
return Wrapper
@classmethod
def TranslateLayer(cls, layer, pretrained_blobs, is_test):
try:
caffe_ops, params = cls.registry_[layer.type](
layer, pretrained_blobs, is_test)
except KeyError:
raise KeyError('No translator registered for layer: %s yet.' %
str(layer))
if caffe_ops is None:
caffe_ops = []
if type(caffe_ops) is not list:
caffe_ops = [caffe_ops]
return caffe_ops, params
@classmethod
def TranslateModel(
cls,
caffe_net,
pretrained_net,
is_test=False,
net_state=None,
):
net_state = caffe_pb2.NetState() if net_state is None else net_state
net = caffe2_pb2.NetDef()
net.name = caffe_net.name
net_params = caffe2_pb2.TensorProtos()
if len(caffe_net.layers) > 0:
raise ValueError(
'I think something is wrong. This translation script '
'only accepts new style layers that are stored in the '
'layer field.'
)
for layer in caffe_net.layer:
if not _ShouldInclude(net_state, layer):
log.info('Current net state does not need layer {}'
.format(layer.name))
continue
log.info('Translate layer {}'.format(layer.name))
# Get pretrained one
pretrained_layers = (
[l for l in pretrained_net.layer
if l.name == layer.name] + [l
for l in pretrained_net.layers
if l.name == layer.name]
)
if len(pretrained_layers) > 1:
raise ValueError(
'huh? more than one pretrained layer of one name?')
elif len(pretrained_layers) == 1:
pretrained_blobs = [
utils.CaffeBlobToNumpyArray(blob)
for blob in pretrained_layers[0].blobs
]
else:
# No pretrained layer for the given layer name. We'll just pass
# no parameter blobs.
# print 'No pretrained layer for layer', layer.name
pretrained_blobs = []
operators, params = cls.TranslateLayer(
layer, pretrained_blobs, is_test)
net.op.extend(operators)
net_params.protos.extend(params)
return net, net_params
def TranslateModel(*args, **kwargs):
return TranslatorRegistry.TranslateModel(*args, **kwargs)
def ConvertTensorProtosToInitNet(net_params, input_name):
"""Takes the net_params returned from TranslateModel, and wrap it as an
init net that contain GivenTensorFill.
This is a very simple feature that only works with float tensors, and is
only intended to be used in an environment where you want a single
initialization file - for more complex cases, use a db to store the
parameters.
"""
init_net = caffe2_pb2.NetDef()
for tensor in net_params.protos:
if len(tensor.float_data) == 0:
raise RuntimeError(
"Only float tensors are supported in this util.")
op = core.CreateOperator(
"GivenTensorFill", [], [tensor.name],
arg=[
utils.MakeArgument("shape", list(tensor.dims)),
utils.MakeArgument("values", tensor.float_data)])
init_net.op.extend([op])
init_net.op.extend([core.CreateOperator("ConstantFill", [], [input_name], shape=[1])])
return init_net
def BaseTranslate(layer, caffe2_type):
"""A simple translate interface that maps the layer input and output."""
caffe2_op = caffe2_pb2.OperatorDef()
caffe2_op.type = caffe2_type
caffe2_op.input.extend(layer.bottom)
caffe2_op.output.extend(layer.top)
return caffe2_op
def AddArgument(op, key, value):
"""Makes an argument based on the value type."""
op.arg.extend([utils.MakeArgument(key, value)])
################################################################################
# Common translators for layers.
################################################################################
@TranslatorRegistry.Register("Input")
def TranslateInput(layer, pretrained_blobs, is_test):
return [], []
@TranslatorRegistry.Register("VideoData")
def TranslateVideoData(layer, pretrained_blobs, is_test):
return [], []
@TranslatorRegistry.Register("Data")
def TranslateData(layer, pretrained_blobs, is_test):
return [], []
# A function used in convolution, pooling and deconvolution to deal with
# conv pool specific parameters.
def _TranslateStridePadKernelHelper(param, caffe_op):
try:
if (len(param.stride) > 1 or len(param.kernel_size) > 1 or
len(param.pad) > 1):
raise NotImplementedError(
"Translator currently does not support non-conventional "
"pad/kernel/stride settings."
)
stride = param.stride[0] if len(param.stride) else 1
pad = param.pad[0] if len(param.pad) else 0
kernel = param.kernel_size[0] if len(param.kernel_size) else 0
except TypeError:
        # This catches the case of a PoolingParameter, whose pad, stride and
        # kernel fields are scalars rather than repeated fields.
stride = param.stride
pad = param.pad
kernel = param.kernel_size
# Get stride
if param.HasField("stride_h") or param.HasField("stride_w"):
AddArgument(caffe_op, "stride_h", param.stride_h)
AddArgument(caffe_op, "stride_w", param.stride_w)
else:
AddArgument(caffe_op, "stride", stride)
# Get pad
if param.HasField("pad_h") or param.HasField("pad_w"):
if param.pad_h == param.pad_w:
AddArgument(caffe_op, "pad", param.pad_h)
else:
AddArgument(caffe_op, "pad_t", param.pad_h)
AddArgument(caffe_op, "pad_b", param.pad_h)
AddArgument(caffe_op, "pad_l", param.pad_w)
AddArgument(caffe_op, "pad_r", param.pad_w)
else:
AddArgument(caffe_op, "pad", pad)
# Get kernel
if param.HasField("kernel_h") or param.HasField("kernel_w"):
AddArgument(caffe_op, "kernel_h", param.kernel_h)
AddArgument(caffe_op, "kernel_w", param.kernel_w)
else:
AddArgument(caffe_op, "kernel", kernel)
@TranslatorRegistry.Register("Convolution3D")
def TranslateConvNd(layer, pretrained_blobs, is_test):
param = layer.convolution3d_param
caffe_op = BaseTranslate(layer, "Conv")
output = caffe_op.output[0]
caffe_op.input.append(output + '_w')
AddArgument(
caffe_op,
"kernels",
[param.kernel_depth, param.kernel_size, param.kernel_size])
AddArgument(
caffe_op,
"strides",
[param.temporal_stride, param.stride, param.stride])
temporal_pad = 0
spatial_pad = 0
if hasattr(param, 'temporal_pad'):
temporal_pad = param.temporal_pad
if hasattr(param, 'pad'):
spatial_pad = param.pad
AddArgument(caffe_op, "pads", [temporal_pad, spatial_pad, spatial_pad] * 2)
# weight
params = [
utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')]
# bias
if len(pretrained_blobs) == 2:
caffe_op.input.append(output + '_b')
params.append(
utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'))
return caffe_op, params
@TranslatorRegistry.Register("Convolution")
def TranslateConv(layer, pretrained_blobs, is_test):
param = layer.convolution_param
caffe_op = BaseTranslate(layer, "Conv")
output = caffe_op.output[0]
caffe_op.input.append(output + '_w')
_TranslateStridePadKernelHelper(param, caffe_op)
# weight
params = [
utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')]
# bias
if len(pretrained_blobs) == 2:
caffe_op.input.append(output + '_b')
params.append(
utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'))
# Group convolution option
if param.group != 1:
AddArgument(caffe_op, "group", param.group)
# Get dilation - not tested. If you have a model and this checks out,
# please provide a test and uncomment this.
if len(param.dilation) > 0:
if len(param.dilation) == 1:
AddArgument(caffe_op, "dilation", param.dilation[0])
elif len(param.dilation) == 2:
AddArgument(caffe_op, "dilation_h", param.dilation[0])
AddArgument(caffe_op, "dilation_w", param.dilation[1])
return caffe_op, params
@TranslatorRegistry.Register("Deconvolution")
def TranslateDeconv(layer, pretrained_blobs, is_test):
param = layer.convolution_param
if param.group > 1:
raise NotImplementedError(
"Translator currently does not support group deconvolution."
)
caffe_op = BaseTranslate(layer, "ConvTranspose")
output = caffe_op.output[0]
_TranslateStridePadKernelHelper(param, caffe_op)
caffe_op.input.extend([output + '_w', output + '_b'])
AddArgument(caffe_op, "order", "NCHW")
weight = utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_w')
bias = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'
)
return caffe_op, [weight, bias]
@TranslatorRegistry.Register("ReLU")
def TranslateRelu(layer, pretrained_blobs, is_test):
return BaseTranslate(layer, "Relu"), []
@TranslatorRegistry.Register("Pooling")
def TranslatePool(layer, pretrained_blobs, is_test):
param = layer.pooling_param
if param.pool == caffe_pb2.PoolingParameter.MAX:
caffe_op = BaseTranslate(layer, "MaxPool")
elif param.pool == caffe_pb2.PoolingParameter.AVE:
caffe_op = BaseTranslate(layer, "AveragePool")
_TranslateStridePadKernelHelper(param, caffe_op)
AddArgument(caffe_op, "order", "NCHW")
try:
# In the Facebook port of Caffe, a torch_pooling field was added to
# map the pooling computation of Torch. Essentially, it uses
# floor((height + 2 * padding - kernel) / stride) + 1
# instead of
# ceil((height + 2 * padding - kernel) / stride) + 1
# which is Caffe's version.
# Torch pooling is actually the same as Caffe2 pooling, so we don't
# need to do anything.
is_torch_pooling = param.torch_pooling
except AttributeError:
is_torch_pooling = False
if not is_torch_pooling:
AddArgument(caffe_op, "legacy_pad",
caffe2_legacy_pb2.CAFFE_LEGACY_POOLING)
if param.global_pooling:
AddArgument(caffe_op, "global_pooling", 1)
return caffe_op, []
@TranslatorRegistry.Register("Pooling3D")
def TranslatePool3D(layer, pretrained_blobs, is_test):
param = layer.pooling3d_param
if param.pool == caffe_pb2.Pooling3DParameter.MAX:
caffe_op = BaseTranslate(layer, "MaxPool")
elif param.pool == caffe_pb2.Pooling3DParameter.AVE:
caffe_op = BaseTranslate(layer, "AveragePool")
AddArgument(caffe_op, "order", "NCHW")
AddArgument(
caffe_op,
"kernels",
[param.kernel_depth, param.kernel_size, param.kernel_size])
AddArgument(
caffe_op,
"strides",
[param.temporal_stride, param.stride, param.stride])
temporal_pad = 0
spatial_pad = 0
if hasattr(param, 'temporal_pad'):
temporal_pad = param.temporal_pad
if hasattr(param, 'pad'):
spatial_pad = param.pad
AddArgument(caffe_op, "pads", [temporal_pad, spatial_pad, spatial_pad] * 2)
return caffe_op, []
@TranslatorRegistry.Register("LRN")
def TranslateLRN(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "LRN")
caffe_op.output.extend(['_' + caffe_op.output[0] + '_scale'])
param = layer.lrn_param
if param.norm_region != caffe_pb2.LRNParameter.ACROSS_CHANNELS:
raise ValueError(
"Does not support norm region other than across channels.")
AddArgument(caffe_op, "size", int(param.local_size))
AddArgument(caffe_op, "alpha", float(param.alpha))
AddArgument(caffe_op, "beta", float(param.beta))
AddArgument(caffe_op, "bias", float(param.k))
AddArgument(caffe_op, "order", "NCHW")
return caffe_op, []
@TranslatorRegistry.Register("InnerProduct")
def TranslateInnerProduct(layer, pretrained_blobs, is_test):
param = layer.inner_product_param
try:
if param.axis != 1 or param.transpose:
raise ValueError(
"We don't have testing case for non-default axis and transpose "
"cases yet so we are disabling it for now. If you have a model "
"with this, please do send us your model for us to update this "
"support, and you are more than welcome to send a PR for this.")
except AttributeError:
# We might be using an historic Caffe protobuf that does not have axis
# and transpose arguments, so we will silently pass.
pass
caffe_op = BaseTranslate(layer, "FC")
output = caffe_op.output[0]
caffe_op.input.extend([output + '_w', output + '_b'])
# To provide the old-style 4-dimensional blob (1, 1, dim_output, dim_input)
# case, we always explicitly reshape the pretrained blob.
if pretrained_blobs[0].ndim not in [2, 4]:
raise ValueError("Unexpected weight ndim.")
if (pretrained_blobs[0].ndim == 4 and
list(pretrained_blobs[0].shape[:2]) != [1, 1]):
raise ValueError(
"If pretrained blob has 4 dims (old-style Caffe), the first two "
"should be of value 1, but I got " + str(pretrained_blobs[0].shape))
weight = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[0].reshape(-1, pretrained_blobs[0].shape[-1]),
output + '_w'
)
bias = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b'
)
return caffe_op, [weight, bias]
@TranslatorRegistry.Register("Dropout")
def TranslateDropout(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Dropout")
caffe_op.output.extend(['_' + caffe_op.output[0] + '_mask'])
param = layer.dropout_param
AddArgument(caffe_op, "ratio", param.dropout_ratio)
if (is_test):
AddArgument(caffe_op, "is_test", 1)
return caffe_op, []
@TranslatorRegistry.Register("Softmax")
def TranslateSoftmax(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Softmax")
return caffe_op, []
@TranslatorRegistry.Register("SoftmaxWithLoss")
def TranslateSoftmaxWithLoss(layer, pretrained_blobs, is_test):
softmax_op = core.CreateOperator(
"Softmax", [layer.bottom[0]],
layer.bottom[0] + "_translator_autogen_softmax")
xent_op = core.CreateOperator(
"LabelCrossEntropy",
[softmax_op.output[0], layer.bottom[1]],
layer.bottom[0] + "_translator_autogen_xent")
loss_op = core.CreateOperator(
"AveragedLoss",
xent_op.output[0],
layer.top[0])
return [softmax_op, xent_op, loss_op], []
@TranslatorRegistry.Register("Accuracy")
def TranslateAccuracy(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Accuracy")
if layer.accuracy_param.top_k != 1:
AddArgument(caffe_op, "top_k", layer.accuracy_param.top_k)
return caffe_op, []
@TranslatorRegistry.Register("Concat")
def TranslateConcat(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Concat")
caffe_op.output.extend(['_' + caffe_op.output[0] + '_dims'])
AddArgument(caffe_op, "order", "NCHW")
return caffe_op, []
@TranslatorRegistry.Register("TanH")
def TranslateTanH(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Tanh")
return caffe_op, []
@TranslatorRegistry.Register("InstanceNorm")
def TranslateInstanceNorm(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "InstanceNorm")
output = caffe_op.output[0]
weight = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[0].flatten(), output + '_w')
bias = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), output + '_b')
caffe_op.input.extend([output + '_w', output + '_b'])
AddArgument(caffe_op, "order", "NCHW")
return caffe_op, [weight, bias]
@TranslatorRegistry.Register("BatchNorm")
def TranslateBatchNorm(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "SpatialBN")
output = caffe_op.output[0]
param = layer.batch_norm_param
AddArgument(caffe_op, "is_test", is_test)
AddArgument(caffe_op, "epsilon", param.eps)
AddArgument(caffe_op, "order", "NCHW")
caffe_op.input.extend(
[output + "_scale",
output + "_bias",
output + "_mean",
output + "_var"])
if not is_test:
caffe_op.output.extend(
[output + "_mean",
output + "_var",
output + "_saved_mean",
output + "_saved_var"])
n_channels = pretrained_blobs[0].shape[0]
if pretrained_blobs[2][0] != 0:
mean = utils.NumpyArrayToCaffe2Tensor(
(1. / pretrained_blobs[2][0]) * pretrained_blobs[0],
output + '_mean')
var = utils.NumpyArrayToCaffe2Tensor(
(1. / pretrained_blobs[2][0]) * pretrained_blobs[1],
output + '_var')
else:
raise RuntimeError("scalar is zero.")
pretrained_blobs[2][0] = 1
pretrained_blobs[2] = np.tile(pretrained_blobs[2], (n_channels, ))
scale = utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[2],
output + '_scale')
bias = utils.NumpyArrayToCaffe2Tensor(
np.zeros_like(pretrained_blobs[2]),
output + '_bias')
return caffe_op, [scale, bias, mean, var]
@TranslatorRegistry.Register("Eltwise")
def TranslateElementWise(layer, pretrained_blobs, is_test):
param = layer.eltwise_param
# TODO(jiayq): if we have a protobuf that uses this, lift this constraint
# and verify that we can correctly translate.
if len(param.coeff) or param.operation != 1:
raise RuntimeError("This eltwise layer is not yet supported.")
caffe_op = BaseTranslate(layer, "Sum")
return caffe_op, []
@TranslatorRegistry.Register("Scale")
def TranslateScale(layer, pretrained_blobs, is_test):
mul_op = BaseTranslate(layer, "Mul")
scale_param = layer.scale_param
AddArgument(mul_op, "axis", scale_param.axis)
AddArgument(mul_op, "broadcast", True)
if len(mul_op.input) == 1:
# the scale parameter is in pretrained blobs
if scale_param.num_axes != 1:
raise RuntimeError("This path has not been verified yet.")
output = mul_op.output[0]
mul_op_param = output + '_w'
mul_op.input.append(mul_op_param)
weights = []
weights.append(utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[0].flatten(), mul_op_param))
add_op = None
if len(pretrained_blobs) == 1:
# No bias-term in Scale layer
pass
elif len(pretrained_blobs) == 2:
# Caffe Scale layer supports a bias term such that it computes
# (scale_param * X + bias), whereas Caffe2 Mul op doesn't.
# Include a separate Add op for the bias followed by Mul.
add_op = copy.deepcopy(mul_op)
add_op.type = "Add"
add_op_param = output + '_b'
internal_blob = output + "_internal"
del mul_op.output[:]
mul_op.output.append(internal_blob)
del add_op.input[:]
add_op.input.append(internal_blob)
add_op.input.append(add_op_param)
weights.append(utils.NumpyArrayToCaffe2Tensor(
pretrained_blobs[1].flatten(), add_op_param))
else:
raise RuntimeError("Unexpected number of pretrained blobs in Scale")
caffe_ops = [mul_op]
if add_op:
caffe_ops.append(add_op)
assert len(caffe_ops) == len(weights)
return caffe_ops, weights
elif len(mul_op.input) == 2:
# TODO(jiayq): find a protobuf that uses this and verify.
raise RuntimeError("This path has not been verified yet.")
else:
raise RuntimeError("Unexpected number of inputs.")
@TranslatorRegistry.Register("Reshape")
def TranslateReshape(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Reshape")
caffe_op.output.append("_" + caffe_op.input[0] + "_dims")
reshape_param = layer.reshape_param
AddArgument(caffe_op, 'shape', reshape_param.shape.dim)
return caffe_op, []
@TranslatorRegistry.Register("Flatten")
def TranslateFlatten(layer, pretrained_blobs, is_test):
param = layer.flatten_param
if param.end_axis != -1:
raise NotImplementedError("flatten_param.end_axis not supported yet.")
if param.axis == 0:
caffe_op = BaseTranslate(layer, "FlattenToVec")
elif param.axis == 1:
caffe_op = BaseTranslate(layer, "Flatten")
else:
# This could be a Reshape op, but dim size is not known here.
raise NotImplementedError(
"Not supported yet for flatten_param.axis {}.".format(param.axis))
return caffe_op, []
@TranslatorRegistry.Register("Sigmoid")
def TranslateSigmoid(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "Sigmoid")
return caffe_op, []
@TranslatorRegistry.Register("ROIPooling")
def TranslateROIPooling(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "RoIPool")
AddArgument(caffe_op, "order", "NCHW")
if is_test:
AddArgument(caffe_op, "is_test", is_test)
else:
# Only used for gradient computation
caffe_op.output.append(caffe_op.output[0] + '_argmaxes')
param = layer.roi_pooling_param
if param.HasField('pooled_h'):
AddArgument(caffe_op, 'pooled_h', param.pooled_h)
if param.HasField('pooled_w'):
AddArgument(caffe_op, 'pooled_w', param.pooled_w)
if param.HasField('spatial_scale'):
AddArgument(caffe_op, 'spatial_scale', param.spatial_scale)
return caffe_op, []
@TranslatorRegistry.Register("PReLU")
def TranslatePRelu(layer, pretrained_blobs, is_test):
caffe_op = BaseTranslate(layer, "PRelu")
output = caffe_op.output[0]
caffe_op.input.extend([output + '_Slope'])
slope = utils.NumpyArrayToCaffe2Tensor(pretrained_blobs[0], output + '_Slope')
return caffe_op, [slope]
@TranslatorRegistry.Register("Reduction")
def TranslateReduction(layer, pretrained_blobs, is_test):
param = layer.reduction_param
if param.operation == caffe_pb2.ReductionParameter.SUM:
caffe_op = BaseTranslate(layer, "ReduceBackSum")
elif param.operation == caffe_pb2.ReductionParameter.MEAN:
caffe_op = BaseTranslate(layer, "ReduceBackMean")
else:
raise NotImplementedError("Not yet supported")
if param.axis > 0:
# We can't figure out the number of dims to reduce from positive axis
# for back reduction since the shape info is not known here.
raise NotImplementedError("Not yet supported")
num_reduce_dim = -param.axis
AddArgument(caffe_op, "num_reduce_dim", num_reduce_dim)
return caffe_op, []
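# Illustrative sketch (hypothetical layer type, not part of the original
# registry): a translator is just a registered function returning a pair of
# (caffe2_ops, params). Layers that map one-to-one onto a Caffe2 operator
# only need BaseTranslate.
@TranslatorRegistry.Register("MyIdentityLayer")
def TranslateMyIdentityLayer(layer, pretrained_blobs, is_test):
    return BaseTranslate(layer, "Copy"), []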
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Utilitity to convert pretrained caffe models to Caffe2 models.")
parser.add_argument("prototext", help="Caffe prototext.")
parser.add_argument("caffemodel", help="Caffe trained model.")
parser.add_argument("--init_net", help="Caffe2 initialization net.", default="init_net.pb")
parser.add_argument("--predict_net", help="Caffe2 prediction net.", default="predict_net.pb")
args = parser.parse_args()
caffenet = caffe_pb2.NetParameter()
caffenet_pretrained = caffe_pb2.NetParameter()
input_proto = args.prototext
input_caffemodel = args.caffemodel
output_init_net = args.init_net
output_predict_net = args.predict_net
text_format.Merge(
open(input_proto).read(), caffenet
)
caffenet_pretrained.ParseFromString(
open(input_caffemodel).read()
)
net, pretrained_params = TranslateModel(
caffenet, caffenet_pretrained, is_test=True
)
# Assume there is one input and one output
external_input = net.op[0].input[0]
external_output = net.op[-1].output[0]
net.external_input.extend([external_input])
net.external_input.extend([param.name for param in pretrained_params.protos])
net.external_output.extend([external_output])
init_net = ConvertTensorProtosToInitNet(pretrained_params, external_input)
for param in pretrained_params.protos:
workspace.FeedBlob(param.name, utils.Caffe2TensorToNumpyArray(param))
with open(output_predict_net, 'wb') as f:
f.write(net.SerializeToString())
with open(output_init_net, 'wb') as f:
f.write(init_net.SerializeToString())
|
## @package utils
# Module caffe2.python.utils
from caffe2.proto import caffe2_pb2
from google.protobuf.message import DecodeError, Message
from google.protobuf import text_format
import collections
import functools
import numpy as np
import sys
if sys.version_info > (3,):
    # This is Python 3. Define aliases for a few Python 2 names used below.
basestring = str
long = int
def CaffeBlobToNumpyArray(blob):
if (blob.num != 0):
# old style caffe blob.
return (np.asarray(blob.data, dtype=np.float32)
.reshape(blob.num, blob.channels, blob.height, blob.width))
else:
# new style caffe blob.
return (np.asarray(blob.data, dtype=np.float32)
.reshape(blob.shape.dim))
def Caffe2TensorToNumpyArray(tensor):
if tensor.data_type == caffe2_pb2.TensorProto.FLOAT:
return np.asarray(
tensor.float_data, dtype=np.float32).reshape(tensor.dims)
elif tensor.data_type == caffe2_pb2.TensorProto.DOUBLE:
return np.asarray(
tensor.double_data, dtype=np.float64).reshape(tensor.dims)
elif tensor.data_type == caffe2_pb2.TensorProto.INT32:
        return np.asarray(
            tensor.int32_data, dtype=np.int).reshape(tensor.dims)
else:
# TODO: complete the data type.
raise RuntimeError(
"Tensor data type not supported yet: " + str(tensor.data_type))
def NumpyArrayToCaffe2Tensor(arr, name=None):
tensor = caffe2_pb2.TensorProto()
tensor.dims.extend(arr.shape)
if name:
tensor.name = name
if arr.dtype == np.float32:
tensor.data_type = caffe2_pb2.TensorProto.FLOAT
tensor.float_data.extend(list(arr.flatten().astype(float)))
elif arr.dtype == np.float64:
tensor.data_type = caffe2_pb2.TensorProto.DOUBLE
tensor.double_data.extend(list(arr.flatten().astype(np.float64)))
elif arr.dtype == np.int:
tensor.data_type = caffe2_pb2.TensorProto.INT32
tensor.int32_data.extend(list(arr.flatten().astype(np.int)))
else:
# TODO: complete the data type.
raise RuntimeError(
"Numpy data type not supported yet: " + str(arr.dtype))
return tensor
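# Illustrative sketch (not part of the original module): a numpy array
# survives a round trip through TensorProto with shape and dtype intact.
def _example_tensor_roundtrip():
    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    tensor = NumpyArrayToCaffe2Tensor(arr, 'example')
    back = Caffe2TensorToNumpyArray(tensor)
    assert back.shape == (2, 3) and back.dtype == np.float32
    return back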
def MakeArgument(key, value):
"""Makes an argument based on the value type."""
argument = caffe2_pb2.Argument()
argument.name = key
iterable = isinstance(value, collections.Iterable)
if isinstance(value, np.ndarray):
value = value.flatten().tolist()
elif isinstance(value, np.generic):
# convert numpy scalar to native python type
value = np.asscalar(value)
if type(value) is float:
argument.f = value
elif type(value) is int or type(value) is bool or type(value) is long:
# We make a relaxation that a boolean variable will also be stored as
# int.
argument.i = value
elif isinstance(value, basestring):
argument.s = (value if type(value) is bytes
else value.encode('utf-8'))
elif isinstance(value, Message):
argument.s = value.SerializeToString()
elif iterable and all(type(v) in [float, np.float_] for v in value):
argument.floats.extend(value)
elif iterable and all(type(v) in [int, bool, long, np.int_] for v in value):
argument.ints.extend(value)
elif iterable and all(isinstance(v, basestring) for v in value):
argument.strings.extend([
(v if type(v) is bytes else v.encode('utf-8')) for v in value])
elif iterable and all(isinstance(v, Message) for v in value):
argument.strings.extend([v.SerializeToString() for v in value])
else:
raise ValueError(
"Unknown argument type: key=%s value=%s, value type=%s" %
(key, str(value), str(type(value)))
)
return argument
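# An illustrative sketch of which Argument field MakeArgument populates for a
# few common python value types (uses only names defined in this module).
def _example_make_argument():  # hypothetical helper, for illustration only
    assert MakeArgument('alpha', 0.5).f == 0.5           # float -> f
    assert MakeArgument('iters', 10).i == 10             # int   -> i
    assert MakeArgument('order', 'NCHW').s == b'NCHW'    # str   -> s (bytes)
    assert list(MakeArgument('pads', [1, 1, 2, 2]).ints) == [1, 1, 2, 2]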
def TryReadProtoWithClass(cls, s):
"""Reads a protobuffer with the given proto class.
Inputs:
cls: a protobuffer class.
s: a string of either binary or text protobuffer content.
Outputs:
proto: the protobuffer of cls
Throws:
google.protobuf.message.DecodeError: if we cannot decode the message.
"""
obj = cls()
try:
text_format.Parse(s, obj)
return obj
except text_format.ParseError:
obj.ParseFromString(s)
return obj
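# A minimal sketch: parse a NetDef from its text format. The same call also
# accepts a binary-serialized string, falling back to ParseFromString.
def _example_try_read_proto():  # hypothetical helper, for illustration only
    net = TryReadProtoWithClass(caffe2_pb2.NetDef, 'name: "example_net"')
    assert net.name == 'example_net'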
def GetContentFromProto(obj, function_map):
"""Gets a specific field from a protocol buffer that matches the given class
"""
for cls, func in function_map.items():
if type(obj) is cls:
return func(obj)
def GetContentFromProtoString(s, function_map):
for cls, func in function_map.items():
try:
obj = TryReadProtoWithClass(cls, s)
return func(obj)
except DecodeError:
continue
else:
        raise DecodeError("Cannot find a protobuf class that fits the content.")
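# An illustrative sketch: dispatch on whichever proto class successfully
# decodes the string. The class/lambda pairs here are hypothetical examples.
def _example_get_content(serialized):  # hypothetical helper
    return GetContentFromProtoString(serialized, {
        caffe2_pb2.NetDef: lambda net: net.name,
        caffe2_pb2.TensorProto: lambda tensor: list(tensor.dims),
    })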
def ConvertProtoToBinary(proto_class, filename, out_filename):
"""Convert a text file of the given protobuf class to binary."""
proto = TryReadProtoWithClass(proto_class, open(filename).read())
    with open(out_filename, 'wb') as fid:
fid.write(proto.SerializeToString())
def GetGPUMemoryUsageStats():
"""Get GPU memory usage stats from CUDAContext. This requires flag
--caffe2_gpu_memory_tracking to be enabled"""
from caffe2.python import workspace, core
workspace.RunOperatorOnce(
core.CreateOperator(
"GetGPUMemoryUsage",
[],
["____mem____"],
device_option=core.DeviceOption(caffe2_pb2.CUDA, 0),
),
)
b = workspace.FetchBlob("____mem____")
return {
'total_by_gpu': b[0, :],
'max_by_gpu': b[1, :],
'total': np.sum(b[0, :]),
'max_total': np.sum(b[1, :])
}
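# Illustrative usage of the helper above (assumes a CUDA build with the
# --caffe2_gpu_memory_tracking flag enabled):
#
#     stats = GetGPUMemoryUsageStats()
#     print(stats['total_by_gpu'], stats['max_total'])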
def ResetBlobs(blobs):
from caffe2.python import workspace, core
workspace.RunOperatorOnce(
core.CreateOperator(
"Free",
list(blobs),
list(blobs),
device_option=core.DeviceOption(caffe2_pb2.CPU),
),
)
class DebugMode(object):
'''
    This class drops you into an interactive debugger
    if there is an unhandled exception in your python script.
Example of usage:
def main():
# your code here
pass
if __name__ == '__main__':
from caffe2.python.utils import DebugMode
DebugMode.run(main)
'''
@classmethod
def run(cls, func):
try:
return func()
except KeyboardInterrupt:
raise
except Exception:
import pdb
print(
'Entering interactive debugger. Type "bt" to print '
'the full stacktrace. Type "help" to see command listing.')
print(sys.exc_info()[1])
            print('')
pdb.post_mortem()
sys.exit(1)
raise
def debug(f):
'''
    Use this decorator to wrap your function with DebugMode's functionality.
Example:
@debug
def test_foo(self):
raise Exception("Bar")
'''
@functools.wraps(f)
def wrapper(*args, **kwargs):
def func():
return f(*args, **kwargs)
DebugMode.run(func)
return wrapper
|
## @package pipeline
# Module caffe2.python.pipeline
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, queue_util
from caffe2.python.dataio import Reader, Writer
from caffe2.python.net_builder import NetBuilder, ops
from caffe2.python.schema import as_record, Field
from caffe2.python.task import Node, Task, TaskGroup
class Output(object):
"""
Represents the result of a processor function. A processor can either
return an Output, or it can return a record, in which case an Output will be
created for it afterwards.
"""
def __init__(self, nets=None, record=None, should_stop=None):
builder_children = NetBuilder.current().get()
assert nets is None or len(builder_children) == 0, (
'Cannot both use `ops` syntax and return a list of nets.')
if nets is None:
nets = builder_children
if isinstance(nets, core.Net):
nets = [nets]
self.nets = [] if nets is None else list(nets)
self.record = None if record is None else as_record(record)
self.should_stop = should_stop
DEFAULT_QUEUE_CAPACITY = 100
def _init_output(output, capacity, global_init_net, global_exit_net):
if isinstance(output, Writer):
assert capacity is None, 'capacity would not be used.'
out_queue = None
writer = output
elif hasattr(output, 'writer'):
assert capacity is None, 'capacity would not be used.'
out_queue = output
writer = output.writer()
elif output is None:
out_queue = queue_util.Queue(
capacity=(
capacity if capacity is not None
else DEFAULT_QUEUE_CAPACITY))
writer = out_queue.writer()
else:
        raise ValueError('output must be a Writer, Queue or DataStream.')
writer.setup_ex(global_init_net, global_exit_net)
return out_queue, writer
def make_processor(processor):
if processor is None:
return lambda rec: rec
elif isinstance(processor, core.Net):
return NetProcessor(processor)
else:
return processor
def normalize_processor_output(output):
"""
Allow for processors to return results in several formats.
TODO(azzolini): simplify once all processors use NetBuilder API.
"""
if isinstance(output, Output):
""" Processor returned an Output. """
return output
elif isinstance(output, Field):
""" Processor returned a record. """
return Output(record=output)
elif isinstance(output, tuple):
is_record_and_blob = (
len(output) == 2 and
isinstance(output[0], Field) and
isinstance(output[1], core.BlobReference))
if is_record_and_blob:
""" Processor returned (record, stop_blob) """
return Output(None, *output)
else:
""" Processor returned (nets, record, stop_blob) """
return Output(*output)
else:
""" Processor returned nets, no output """
return Output(output)
def pipe(
input, output=None, num_threads=1, processor=None, name=None,
capacity=None, group=None):
"""
Given a Reader, Queue or DataStream in `input`, and optionally, a Writer,
Queue or DataStream in `output`, creates a Task that, when run, will
pipe the input into the output, using multiple parallel threads.
Additionally, if a processor is given, it will be called between reading
and writing steps, allowing it to transform the record.
Args:
input: either a Reader, Queue or DataStream that will be read
until a stop is signaled either by the reader or the
writer.
        output:     either a Writer, a Queue or a DataStream that will be
                    written to as long as neither reader nor writer signals
                    a stop condition. If output is not provided or is None,
                    a Queue is created with the given `capacity` and written
                    to.
num_threads: number of concurrent threads used for processing and
piping. If set to 0, no Task is created, and a
reader is returned instead -- the reader returned will
read from the reader passed in and process it.
processor: (optional) function that takes an input record and
optionally returns a record; this will be called
between read and write steps. If the processor does
not return a record, a writer will not be instantiated.
Processor can also be a core.Net with input and output
records properly set. In that case, a NetProcessor is
instantiated, cloning the net for each of the threads.
name: (optional) name of the task to be created.
capacity: when output is not passed, a queue of given `capacity`
is created and written to.
group: (optional) explicitly add the created Task to this
TaskGroup, instead of using the currently active one.
Returns:
Output Queue, DataStream, Reader, or None, depending on the parameters
passed.
"""
result, _ = _pipe_step(
input, output, num_threads, processor, name, capacity, group)
return result
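# An illustrative sketch (assumes this runs inside an active TaskGroup and
# that `reader` is a dataio.Reader obtained elsewhere): pipe the reader into a
# new queue with two worker threads, then pipe that queue through a
# pass-through processor.
def _example_pipe_usage(reader):  # hypothetical helper, for illustration only
    out_queue = pipe(reader, num_threads=2, capacity=100)
    return pipe(out_queue, processor=lambda rec: rec)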
def pipe_and_output(
input, output=None, num_threads=1, processor=None, name=None,
capacity=None, group=None, final_outputs=None):
"""
Similar to `pipe`, with the additional ability for the pipe Task to
return output values to the `Session` once done.
Returns:
Tuple (out_queue, *task_outputs)
out_queue: same as return value of `pipe`.
task_outputs: TaskOutput object, fetchable from the client after
session.run() returns.
"""
assert num_threads > 0
result, task = _pipe_step(
input, output, num_threads, processor, name, capacity, group,
final_outputs)
output = None
if final_outputs is not None:
output = task.outputs()
if type(final_outputs) not in (list, tuple):
output = output[0]
return result, output
def processor_name(processor):
if hasattr(processor, 'name'):
return processor.name
if hasattr(processor, 'func_name'):
if processor.func_name == '<lambda>':
return processor.__module__
if hasattr(processor, 'im_class'):
return '%s.%s' % (processor.im_class.__name__, processor.func_name)
return processor.func_name
return processor.__class__.__name__
def _pipe_step(
input, output=None, num_threads=1, processor=None, name=None,
capacity=None, group=None, final_outputs=None):
"""
"""
if isinstance(input, Reader):
reader = input
elif hasattr(input, 'reader'):
reader = input.reader()
else:
        raise ValueError('input must be a Reader, Queue or DataStream.')
if processor is not None:
reader = ProcessingReader(reader, processor)
if num_threads == 0:
assert output is None
return reader, None
if name is None and processor is not None:
name = processor_name(processor)
if name is None and output is not None:
name = 'pipe_into:%s' % processor_name(output)
if name is None:
name = 'pipe_from:%s' % processor_name(input)
node_name = str(Node.current())
profiler_name = "{0}/{1}/{2}/{3}/{4}".format(
node_name,
"pipe",
name,
processor_name(input) if input else "NoInput",
processor_name(output) if output else "NoOutput")
with Task(name=name, group=group, outputs=final_outputs) as task:
global_exit_net = core.Net('exit')
global_init_net = core.Net('init')
reader.setup_ex(global_init_net, global_exit_net)
out_queue = None
writer = None
steps = []
for thread_id in range(num_threads):
with NetBuilder(name='t:%d' % thread_id) as nb:
init_net = core.Net('init')
exit_net = core.Net('exit')
read_nets, status, rec = reader.read_record_ex(
init_net, exit_net)
if rec is not None:
if writer is None:
# hack so that the out queue gets the right name prefix
# (otherwise they would be prefixed with the thread id)
with NetBuilder(_fullname=task.name):
out_queue, writer = _init_output(
output, capacity, global_init_net,
global_exit_net)
write_nets, _ = writer.write_record_ex(
rec, init_net, exit_net, status)
else:
write_nets = []
timer_start_net = core.Net('timer_start')
timer = timer_start_net.TimerBegin([], counter_name=profiler_name)
timer_end_net = core.Net('timer_end')
timer_end_net.TimerEnd(timer, [])
ops.net(init_net)
ops.net(core.execution_step(
'body',
[timer_start_net] + list(read_nets) + list(write_nets) +
[timer_end_net],
should_stop_blob=status))
ops.net(timer_end_net)
ops.net(exit_net)
steps.append(core.to_execution_step(nb))
ops.net(global_init_net)
ops.net(core.execution_step('body', steps, concurrent_substeps=True))
ops.net(global_exit_net)
return out_queue, task
class ProcessingReader(Reader):
"""
    Reader that reads from an upstream reader, calls the processor, and returns
the processed record.
"""
def __init__(self, reader, processor):
Reader.__init__(self)
self.reader = reader
self.processor = make_processor(processor)
def setup_ex(self, init_net, finish_net):
self.reader.setup_ex(init_net, finish_net)
def read_ex(self, init_net, exit_net):
read_nets, status, rec = self.reader.read_record_ex(init_net, exit_net)
with NetBuilder(_stop_blob=status):
# Current NetBuilder is optionally used inside the processor,
            # then its children are retrieved inside of
# normalize_processor_output.
# Once readers and writers also use NetBuilder,
# this logic will be more natural.
result = normalize_processor_output(self.processor(rec))
read_nets += result.nets
if result.should_stop is not None:
stop_net = core.Net('stop_net')
stop_net.Copy([result.should_stop], [status])
read_nets.append(stop_net)
if hasattr(self.processor, 'setup'):
init_net.add_attribute(TaskGroup.LOCAL_SETUP, self.processor)
self._set_schema(result.record)
fields = result.record.field_blobs() if result.record else None
return read_nets, status, fields
class NetProcessor(object):
"""
Processor that clones a core.Net each time it's called, executing
the cloned net as the processor. It requires the Net to have input
and (optionally) output records set, with net.set_input_record() and
net.set_output_record().
"""
def __init__(self, net, stop_signal=None, thread_init_nets=None, name=None):
assert isinstance(net, core.Net)
assert stop_signal is None or isinstance(
stop_signal, core.BlobReference)
self.name = name or str(net)
self.thread_init_nets = thread_init_nets or []
self.net = net
self._stop_signal = stop_signal
self._blob_maps = []
self._frozen = False
self._cloned_init_nets = []
def setup(self, init_net):
self._frozen = True
cloned_init_nets = self._cloned_init_nets
self._cloned_init_nets = []
return cloned_init_nets
def __call__(self, rec):
assert not self._frozen
prefix = NetBuilder.current().name + '/'
blob_remap = {}
for net in self.thread_init_nets:
new_net, _ = core.clone_and_bind_net(
net, str(net) + prefix, prefix, blob_remap)
self._cloned_init_nets.append(new_net)
new_net, remappings = core.clone_and_bind_net(
self.net, str(self.net) + prefix, prefix, blob_remap, rec)
if self._stop_signal is None:
stop_signal = None
elif str(self._stop_signal) in remappings:
stop_signal = core.BlobReference(
remappings[str(self._stop_signal)],
net=new_net)
else:
stop_signal = self._stop_signal
self._blob_maps.append(remappings)
return Output([new_net], new_net.output_record(), stop_signal)
def blob_maps(self):
self._frozen = True
return self._blob_maps
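# An illustrative sketch: a core.Net whose input/output records were set via
# net.set_input_record() / net.set_output_record() can be used as a `pipe`
# processor; each worker thread then runs its own clone of the net.
def _example_net_processor(my_net, reader):  # hypothetical helper
    return pipe(reader, processor=NetProcessor(my_net))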
|
import numpy as np
import unittest
from caffe2.python import core, workspace, muji, test_util
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class TestMuji(test_util.TestCase):
def RunningAllreduceWithGPUs(self, gpu_ids, allreduce_function):
"""A base function to test different scenarios."""
net = core.Net("mujitest")
for id in gpu_ids:
net.ConstantFill(
[],
"testblob_gpu_" + str(id),
shape=[1, 2, 3, 4],
value=float(id + 1),
device_option=muji.OnGPU(id)
)
allreduce_function(
net, ["testblob_gpu_" + str(i)
for i in gpu_ids], "_reduced", gpu_ids
)
workspace.RunNetOnce(net)
target_value = sum(gpu_ids) + len(gpu_ids)
all_blobs = workspace.Blobs()
all_blobs.sort()
for blob in all_blobs:
print('{} {}'.format(blob, workspace.FetchBlob(blob)))
for idx in gpu_ids:
blob = workspace.FetchBlob("testblob_gpu_" + str(idx) + "_reduced")
np.testing.assert_array_equal(
blob,
target_value,
err_msg="gpu id %d of %s" % (idx, str(gpu_ids))
)
def testAllreduceFallback(self):
self.RunningAllreduceWithGPUs(
range(workspace.NumCudaDevices()), muji.AllreduceFallback
)
def testAllreduceSingleGPU(self):
for i in range(workspace.NumCudaDevices()):
self.RunningAllreduceWithGPUs([i], muji.Allreduce)
def testAllreduceWithTwoGPUs(self):
pattern = workspace.GetCudaPeerAccessPattern()
if pattern.shape[0] >= 2 and np.all(pattern[:2, :2]):
self.RunningAllreduceWithGPUs([0, 1], muji.Allreduce2)
else:
print('Skipping allreduce with 2 gpus. Not peer access ready.')
def testAllreduceWithFourGPUs(self):
pattern = workspace.GetCudaPeerAccessPattern()
if pattern.shape[0] >= 4 and np.all(pattern[:4, :4]):
self.RunningAllreduceWithGPUs([0, 1, 2, 3], muji.Allreduce4)
else:
print('Skipping allreduce with 4 gpus. Not peer access ready.')
def testAllreduceWithEightGPUs(self):
pattern = workspace.GetCudaPeerAccessPattern()
if (
pattern.shape[0] >= 8 and np.all(pattern[:4, :4]) and
np.all(pattern[4:, 4:])
):
self.RunningAllreduceWithGPUs(range(8), muji.Allreduce8)
else:
print('Skipping allreduce with 8 gpus. Not peer access ready.')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace
from caffe2.python.core import Plan, to_execution_step
from caffe2.python.task import Task, final_output
from caffe2.python.net_builder import ops, NetBuilder
from caffe2.python.session import LocalSession
import unittest
def _test_loop():
x = ops.Const(5)
y = ops.Const(0)
with ops.loop():
ops.stop_if(ops.EQ([x, ops.Const(0)]))
ops.Add([x, ops.Const(-1)], [x])
ops.Add([y, ops.Const(1)], [y])
return y
def _test_inner_stop(x):
ops.stop_if(ops.LT([x, ops.Const(5)]))
def _test_outer():
x = ops.Const(10)
# test stop_if(False)
with ops.stop_guard() as g1:
_test_inner_stop(x)
# test stop_if(True)
y = ops.Const(3)
with ops.stop_guard() as g2:
_test_inner_stop(y)
# test no stop
with ops.stop_guard() as g4:
ops.Const(0)
# test empty clause
with ops.stop_guard() as g3:
pass
return (
g1.has_stopped(), g2.has_stopped(), g3.has_stopped(), g4.has_stopped())
def _test_if(x):
y = ops.Const(1)
with ops.If(ops.GT([x, ops.Const(50)])):
ops.Const(2, blob_out=y)
with ops.If(ops.LT([x, ops.Const(50)])):
ops.Const(3, blob_out=y)
ops.stop()
ops.Const(4, blob_out=y)
return y
class TestNetBuilder(unittest.TestCase):
def test_ops(self):
with NetBuilder() as nb:
y = _test_loop()
z, w, a, b = _test_outer()
p = _test_if(ops.Const(75))
q = _test_if(ops.Const(25))
plan = Plan('name')
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
expected = [
(y, 5),
(z, False),
(w, True),
(a, False),
(b, False),
(p, 2),
(q, 3),
]
        for blob, expected_val in expected:
            actual = ws.blobs[str(blob)].fetch()
            self.assertEqual(actual, expected_val)
def _expected_loop(self):
total = 0
total_large = 0
total_small = 0
total_tiny = 0
for loop_iter in range(10):
outer = loop_iter * 10
for inner_iter in range(loop_iter):
val = outer + inner_iter
if val >= 80:
total_large += val
elif val >= 50:
total_small += val
else:
total_tiny += val
total += val
return total, total_large, total_small, total_tiny
def _actual_loop(self):
total = ops.Const(0)
total_large = ops.Const(0)
total_small = ops.Const(0)
total_tiny = ops.Const(0)
with ops.loop(10) as loop:
outer = ops.Mul([loop.iter(), ops.Const(10)])
with ops.loop(loop.iter()) as inner:
val = ops.Add([outer, inner.iter()])
with ops.If(ops.GE([val, ops.Const(80)])) as c:
ops.Add([total_large, val], [total_large])
with c.Elif(ops.GE([val, ops.Const(50)])) as c:
ops.Add([total_small, val], [total_small])
with c.Else():
ops.Add([total_tiny, val], [total_tiny])
ops.Add([total, val], total)
        return list(map(
            final_output, (total, total_large, total_small, total_tiny)))
def test_loops(self):
with Task() as task:
out_actual = self._actual_loop()
with LocalSession() as session:
session.run(task)
expected = self._expected_loop()
actual = [o.fetch() for o in out_actual]
for e, a in zip(expected, actual):
            self.assertEqual(e, a)
def test_setup(self):
with Task() as task:
with ops.task_init():
one = ops.Const(1)
two = ops.Add([one, one])
with ops.task_init():
three = ops.Const(3)
accum = ops.Add([two, three])
# here, accum should be 5
with ops.task_exit():
# here, accum should be 6, since this executes after lines below
seven_1 = ops.Add([accum, one])
six = ops.Add([accum, one])
ops.Add([accum, one], [accum])
seven_2 = ops.Add([accum, one])
o6 = final_output(six)
o7_1 = final_output(seven_1)
o7_2 = final_output(seven_2)
with LocalSession() as session:
session.run(task)
        self.assertEqual(o6.fetch(), 6)
        self.assertEqual(o7_1.fetch(), 7)
        self.assertEqual(o7_2.fetch(), 7)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import hypothesis.strategies as st
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, test_util
from caffe2.python.core import CreateOperator, GradientRegistry
# First, we will set up a few gradient registry entries so that we can manually
# construct some test cases.
def NeedAll(op, g_output):
"""A sanity check to make sure that all the gradient are given."""
for name, g in zip(op.output, g_output):
if g is None:
raise RuntimeError(
'Need gradient for "%s" but it is not provided.' % name)
return g_output
def GIS(op):
"""A test util function to generate the gradient name for input."""
return [s + '_grad' for s in op.input]
def CopyDeviceOption(op, src_op):
if src_op.HasField('device_option'):
op.device_option.CopyFrom(src_op.device_option)
return op
# First gradient: (in -> out) leading to (out_grad -> in_grad)
@GradientRegistry.RegisterGradient('Direct')
def AddDirectGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator('DirectGradient', NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
# Second gradient: (in -> out) leading to (out, out_grad -> in_grad)
@GradientRegistry.RegisterGradient('UseOutput')
def AddUseOutputGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator(
'UseOutputGradient',
list(op.output) + NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
@GradientRegistry.RegisterGradient('UseInput')
def AddUseInputGradient(op, g_output):
return (
CopyDeviceOption(
CreateOperator(
'UseInputGradient',
list(op.input) + NeedAll(op, g_output), GIS(op)),
op),
GIS(op)
)
@GradientRegistry.RegisterGradient('Nogradient')
def AddNogradient(op, g_output):
return (
[],
[None for s in op.input]
)
class TestGradientCalculation(test_util.TestCase):
@given(device_option=st.sampled_from([
None,
core.DeviceOption(caffe2_pb2.CUDA, 1)]))
def testDirect(self, device_option):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
if device_option:
for op in operators:
op.device_option.CopyFrom(device_option)
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
if device_option:
for op in desired_grad_operators:
op.device_option.CopyFrom(device_option)
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testDirectImplicitGradientSource(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator(
"ConstantFill", 'out', "out_autogen_grad", value=1.0),
CreateOperator(
'DirectGradient', 'out_autogen_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, ['out'])
self.assertEqual(gradients, desired_grad_operators)
def testDoesNotGenerateUnnecessaryGradients(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'hidden': 'hidden_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testDirectButNoOutputGradientGiven(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {})
self.assertEqual(gradients, [])
def testDirectInPlace(self):
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'in_grad'),
CreateOperator('DirectGradient', 'in_grad', 'in_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testUseOutput(self):
operators = [
CreateOperator('UseOutput', 'in', 'hidden'),
CreateOperator('UseOutput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseOutputGradient',
['out', 'out_grad'], 'hidden_grad'
),
CreateOperator(
'UseOutputGradient',
['hidden', 'hidden_grad'], 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testUseOutputInPlace(self):
operators = [
CreateOperator('UseOutput', 'in', 'in'),
CreateOperator('UseOutput', 'in', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseOutputGradient',
['out', 'out_grad'], 'in_grad'
),
CreateOperator(
'UseOutputGradient',
['in', 'in_grad'], 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testUseOutputButOutputHasBeenChanged(self):
operators = [
CreateOperator('UseOutput', 'in', 'hidden'),
# Note here: we overwrite hidden, but hidden will be needed by the
# gradient calculation of the first operator, so the gradient
# registry should return an error.
CreateOperator('Direct', 'hidden', 'hidden'),
CreateOperator('UseOutput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
with self.assertRaises(RuntimeError):
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
def testUseInput(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('UseInput', 'hidden', 'out'),
CreateOperator('Direct', 'out', 'sink'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'sink_grad', 'out_grad'),
CreateOperator(
'UseInputGradient',
['hidden', 'out_grad'], 'hidden_grad'
),
CreateOperator(
'DirectGradient',
'hidden_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'sink': 'sink_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testUseInputButInputHasBeenChanged(self):
"""Test gradient for the following case:
in -> out, with UseInput
in -> in
        Since we overwrite `in` in op#1, but `in` will be needed by the
        gradient calculation of op#0, the gradient registry should raise
        an error.
"""
operators = [
CreateOperator('UseInput', 'in', 'out'),
CreateOperator('Direct', 'in', 'in'),
]
with self.assertRaises(RuntimeError):
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
@given(device_option=st.sampled_from([
None,
core.DeviceOption(caffe2_pb2.CUDA, 1)]))
def testMultiUseInput(self, device_option):
"""Test gradient for the following case:
in -> hidden1
in -> hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
if device_option:
for op in operators:
op.device_option.CopyFrom(device_option)
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_1'
),
CreateOperator(
'Sum',
['_in_grad_autosplit_0', '_in_grad_autosplit_1'], 'in_grad'
),
]
if device_option:
for op in desired_grad_operators:
op.device_option.CopyFrom(device_option)
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {"out": "out_grad"})
self.assertEqual(gradients, desired_grad_operators)
def testMultiUseInputButWithNoGradient(self):
"""Test gradient for the following case:
in -> hidden1
in -(no gradient)-> hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Nogradient', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden1_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testMultiUseInputAndMultipleVersions(self):
"""Test gradient for the following case:
in -> in
in -> hidden1, hidden2
hidden1, hidden2 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_1'
),
CreateOperator(
'Sum',
['_in_grad_autosplit_0', '_in_grad_autosplit_1'], 'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testMultiUseInputAndMultipleVersionsBig(self):
"""Test gradient for the following case:
in -> in
in -> hidden1, hidden2
hidden1, hidden2 -> in
in -> hidden3, hidden4, hidden5
hidden3, hidden4, hidden5 -> out
"""
operators = [
CreateOperator('Direct', 'in', 'in'),
CreateOperator('Direct', 'in', 'hidden1'),
CreateOperator('Direct', 'in', 'hidden2'),
CreateOperator('Direct', ['hidden1', 'hidden2'], 'in'),
CreateOperator('Direct', 'in', 'hidden3'),
CreateOperator('Direct', 'in', 'hidden4'),
CreateOperator('Direct', 'in', 'hidden5'),
CreateOperator('Direct', ['hidden3', 'hidden4', 'hidden5'], 'out'),
]
desired_grad_operators = [
CreateOperator(
'DirectGradient',
'out_grad', ['hidden3_grad', 'hidden4_grad', 'hidden5_grad']
),
CreateOperator(
'DirectGradient',
'hidden5_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'DirectGradient',
'hidden4_grad', '_in_grad_autosplit_1'
),
CreateOperator(
'DirectGradient',
'hidden3_grad', '_in_grad_autosplit_2'
),
CreateOperator(
'Sum',
['_in_grad_autosplit_0', '_in_grad_autosplit_1',
'_in_grad_autosplit_2'],
'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', ['hidden1_grad', 'hidden2_grad']
),
CreateOperator(
'DirectGradient',
'hidden2_grad', '_in_grad_autosplit_0'
),
CreateOperator(
'DirectGradient',
'hidden1_grad', '_in_grad_autosplit_1'
),
CreateOperator(
'Sum',
['_in_grad_autosplit_0', '_in_grad_autosplit_1'],
'in_grad'
),
CreateOperator(
'DirectGradient',
'in_grad', 'in_grad'
),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
for s in gradients:
print(str(s))
self.assertEqual(gradients, desired_grad_operators)
def testGradientMappingUsingSumOp(self):
"""Since Sum is used in accumulating gradients, we will test if
it is OK to also explicitly use it in the graph."""
operators = [
CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
CreateOperator('Sum', 'fc', 'agg'),
CreateOperator('AveragedLoss', 'agg', 'loss'),
]
# This should run correctly.
gradient_ops, _ = GradientRegistry.GetBackwardPass(
operators, {'loss': 'loss_grad'})
for s in gradient_ops:
print(str(s))
def testGradientCalculationWithPrint(self):
"""Test a common use case where we have Print in the forward pass."""
operators = [
CreateOperator('FC', ['in', 'w', 'b'], 'fc'),
CreateOperator('Print', 'fc', []),
CreateOperator('AveragedLoss', 'fc', 'loss'),
]
desired_grad_operators = [
CreateOperator('AveragedLossGradient',
['fc', 'loss_grad'], 'fc_grad'),
CreateOperator('FCGradient', ['in', 'w', 'fc_grad'],
['w_grad', 'b_grad', 'in_grad']),
]
for g in desired_grad_operators:
g.is_gradient_op = 1
# This should run correctly.
gradient_ops, _ = GradientRegistry.GetBackwardPass(
operators, {'loss': 'loss_grad'})
for s in gradient_ops:
print(str(s))
self.assertEqual(gradient_ops, desired_grad_operators)
def testStopGradient(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'hidden2'),
CreateOperator('Direct', 'hidden2', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden2_grad'),
]
gradients, _ = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertEqual(gradients, desired_grad_operators)
def testStopGradientInplace(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('StopGradient', 'hidden', 'hidden'),
CreateOperator('Direct', 'hidden', 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad', 'hidden_grad'),
]
gradients, grad_map = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertEqual(gradients, desired_grad_operators)
self.assertEqual(grad_map, {'out': 'out_grad'})
def testStopGradientWithMultiUseOperators(self):
operators = [
CreateOperator('Direct', 'in', 'hidden'),
CreateOperator('Direct', 'hidden', 'hidden2'),
CreateOperator('StopGradient', 'hidden', 'hidden3'),
CreateOperator('Direct', ['hidden2', 'hidden3'], 'out'),
]
desired_grad_operators = [
CreateOperator('DirectGradient', 'out_grad',
['hidden2_grad', 'hidden3_grad']),
CreateOperator('DirectGradient', 'hidden2_grad', 'hidden_grad'),
CreateOperator('DirectGradient', 'hidden_grad', 'in_grad'),
]
gradients, grad_map = GradientRegistry.GetBackwardPass(
operators, {'out': 'out_grad'})
self.assertEqual(gradients, desired_grad_operators)
self.assertEqual(
grad_map, {'out': 'out_grad', 'hidden2': 'hidden2_grad',
'hidden3': 'hidden3_grad', 'hidden': 'hidden_grad',
'in': 'in_grad'})
# Skip if sparse operators are not available
@unittest.skipIf(not core.IsOperator('SparseFunHash'),
'Sparse operators not available')
class TestSparseGradientsAccumulation(test_util.TestCase):
def testSparseAccumulationWithValues(self):
# The gradient for "Gather" only computes values. indices are directly
# passed from the input
#
# x1-->Gather-->x4-->
# | |
# x2-----+ DotProduct-->x6
# | |
# x3-->Gather-->x5-->
net = core.Net("test_net")
net.Gather(["x2", "x1"], "x4")
net.Gather(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
net.AddGradientOperators(["x6"])
sum_op_i = net.Proto().op[-2]
sum_op_v = net.Proto().op[-1]
self.assertEqual(sum_op_i.input[0], "x3")
self.assertEqual(sum_op_i.input[1], "x1")
self.assertEqual(sum_op_i.output[0], "x2_grad_indices_concat")
self.assertEqual(sum_op_v.input[0], "x5_grad")
self.assertEqual(sum_op_v.input[1], "x4_grad")
self.assertEqual(sum_op_v.output[0], "x2_grad_values_concat")
def testSparseGradientToDense(self):
#
# x1-->Gather-->x4-->
# | |
# x0, w, b-->FC-->x2-->EnsureDenseGradient-->x2---+ DotProduct-->x6
# | |
# x3-->Gather-->x5-->
net = core.Net("test_net")
net.FC(["x0", "w", "b"], "x2")
net.EnsureDense(["x2"], "x2")
net.Gather(["x2", "x1"], "x4")
net.Gather(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
net.AddGradientOperators(["x6"])
ensure_dense_op = net.Proto().op[-2]
self.assertEqual(ensure_dense_op.input[0], "x2_grad_indices_concat")
self.assertEqual(ensure_dense_op.input[1], "x2_grad_values_concat")
self.assertEqual(ensure_dense_op.output[0], "x2_grad")
def testSparseAccumulationWithIndicesAndValues(self):
# The gradient for "SparseFunHash" computes both indices and values
#
# x1-------->
# |
# x2----> |
# | |
# x3---SparseFunHash-->x8
# / \
# x4---+ DotProduct-->x10
# \ /
# x5---SparseFunHash-->x9
# | |
# x6----> |
# |
# x7-------->
net = core.Net("test_net")
net.SparseFunHash(["x1", "x2", "x3", "x4"], "x8")
net.SparseFunHash(["x5", "x6", "x7", "x4"], "x9")
net.DotProduct(["x8", "x9"], "x10")
net.AddGradientOperators(["x10"])
sum_op_i = net.Proto().op[-2]
sum_op_v = net.Proto().op[-1]
self.assertEqual(sum_op_i.input[0], "_x4_grad_indices_autosplit_0")
self.assertEqual(sum_op_i.input[1], "_x4_grad_indices_autosplit_1")
self.assertEqual(sum_op_i.output[0], "x4_grad_indices_concat")
self.assertEqual(sum_op_v.input[0], "_x4_grad_values_autosplit_0")
self.assertEqual(sum_op_v.input[1], "_x4_grad_values_autosplit_1")
self.assertEqual(sum_op_v.output[0], "x4_grad_values_concat")
class TestGradientsAccumulationWithNoGradientOps(test_util.TestCase):
def testNormalAccumulation(self):
# x1-->Relu--x2----------------->DotProduct-->x4
# | |
# -->Softmax-->x3-->
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.DotProduct(["x2", "x3"], "x4")
net.AddGradientOperators(["x4"])
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_1")
self.assertEqual(sum_op.output[0], "x2_grad")
def testAccumulationWithNoGradientBranch(self):
# -->PRINT
# |
# x1-->Relu--x2----------------->DotProduct-->x4
# | |
# -->Softmax-->x3-->
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Print("x2", [])
net.Softmax("x2", "x3")
net.DotProduct(["x2", "x3"], "x4")
net.AddGradientOperators(["x4"])
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_1")
self.assertEqual(sum_op.output[0], "x2_grad")
class TestGradientsAccumulationWithPassThroughGradients(test_util.TestCase):
def testAddOpInMiddle(self):
# x1-->Relu--x2----------------->Add-->x4
# | |
# -->Softmax-->x3-->
#
# Expected gradient graph:
#
# x1_g<--ReluG<--x2_g<--Sum<------------<---------x4_g
# | |
# <--_x2_g_split_0<--SoftmaxG
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.Add(["x2", "x3"], "x4")
input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x4_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddAndDynamicConstant(self):
net = core.Net("test_net")
net.FC(["x1", "x1_w", "x1_b"], ["x2"])
net.Relu("x2", "x2")
net.ConstantFill(["x2"], ["x3"])
net.Add(["x2", "x3"], "x4")
net.FC(["x4", "x4_w", "x4_b"], ["x5"])
net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
input_to_grad = net.AddGradientOperators(["loss"])
for op in net.Proto().op:
self.assertFalse(op.type == 'Sum')
self.assertTrue("x4" in input_to_grad)
self.assertTrue("x1" in input_to_grad)
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddAndStaticConstant(self):
net = core.Net("test_net")
net.FC(["x1", "x1_w", "x1_b"], ["x2"])
net.Relu("x2", "x2")
net.ConstantFill([], ["x3"], shape=[1])
net.Add(["x2", "x3"], "x4", broadcast=1)
net.FC(["x4", "x4_w", "x4_b"], ["x5"])
net.SoftmaxWithLoss(["x5", "labels"], ["softmax", "loss"])
input_to_grad = net.AddGradientOperators(["loss"])
print(input_to_grad)
self.assertTrue("x1" in input_to_grad)
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testSubOpInMiddle(self):
# x1-->Relu--x2----------------->Sub-->x4
# | |
# -->Softmax-->x3-->
#
# Expected gradient graph:
#
# x1_g<--ReluG<--x2_g<--Sum<------------<-----------------------x4_g
# | |
# <--_x2_g_split_0<--SoftmaxG<--x3_g<--neg
net = core.Net("test_net")
net.Relu("x1", "x2")
net.Softmax("x2", "x3")
net.Sub(["x2", "x3"], "x4")
input_to_grad = net.AddGradientOperators({"x4": "x4_grad"})
print(str(net.Proto()))
sum_op = net.Proto().op[-2]
self.assertEqual(sum_op.input[0], "x4_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x1_grad")
def testAddOpAtLeaf(self):
# x1
# \
# -->Add-->x4
# / \
# x2 -->DotProduct-->x6
# \ /
# -->Add-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<--x4_g<--DotProductG<--x6_g
# | | |
# <---x5_g<-------
net = core.Net("test_net")
net.Add(["x1", "x2"], "x4")
net.Add(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x5_grad")
self.assertEqual(sum_op.input[1], "x4_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x4_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x5_grad")
def testSubOpAtLeaf(self):
# x1
# \
# -->Sub-->x4
# / \
# x2 -->DotProduct-->x6
# \ /
# -->Sub-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<-------Sum<--x2_g_split_0<--neg<--x4_g<--DotProductG<--x6_g
# | |
# x3_g<--neg<--<--x5_g<--------------------------------
net = core.Net("test_net")
net.Sub(["x1", "x2"], "x4")
net.Sub(["x2", "x3"], "x5")
net.DotProduct(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x5_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x4_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
def testMultiLayerAddOps(self):
# x1
# \
# -->Add-->x4
# / \
# x2 -->Add-->x6
# \ /
# -->Add-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<-----x6_g
# | |
# <--------
net = core.Net("test_net")
net.Add(["x1", "x2"], "x4")
net.Add(["x2", "x3"], "x5")
net.Add(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x6_grad")
self.assertEqual(sum_op.input[1], "x6_grad")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x6_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x6_grad")
def testMultiLayerSubOps(self):
# x1
# \
# -->Sub-->x4
# / \
# x2 -->Sub-->x6
# \ /
# -->Sub-->x5
# /
# x3
#
# Expected gradient graph:
#
# x2_g<--Sum<-----x6_g
# | |
# <--------
net = core.Net("test_net")
net.Sub(["x1", "x2"], "x4")
net.Sub(["x2", "x3"], "x5")
net.Sub(["x4", "x5"], "x6")
input_to_grad = net.AddGradientOperators({"x6": "x6_grad"})
sum_op = net.Proto().op[-1]
self.assertEqual(sum_op.input[0], "x5_grad")
self.assertEqual(sum_op.input[1], "_x2_grad_autosplit_0")
self.assertEqual(sum_op.output[0], "x2_grad")
self.assertEqual(input_to_grad["x1"], "x6_grad")
self.assertEqual(input_to_grad["x2"], "x2_grad")
self.assertEqual(input_to_grad["x3"], "x3_grad")
if __name__ == '__main__':
unittest.main()
|
## @package model_helper
# Module caffe2.python.model_helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope, workspace
import numpy as np
import logging
class ParameterType(object):
DENSE = 'dense'
SPARSE = 'sparse'
class ParameterInfo(object):
def __init__(
self, param_id, param, key=None, shape=None, length=None):
assert isinstance(param, core.BlobReference)
self.param_id = param_id
self.name = str(param)
self.blob = param
self.key = key
self.shape = shape
self.size = None if shape is None else np.prod(shape)
self.length = max(1, length if length is not None else 1)
self.grad = None
self._cloned_init_net = None
def grad_type(self):
# self.grad could be None for model parallelism with parameter server
if self.grad is None:
return
return (
ParameterType.SPARSE if isinstance(self.grad, core.GradientSlice)
else ParameterType.DENSE)
def cloned_init_net(self):
if not self._cloned_init_net:
init_net, outputs = self.blob.Net().ClonePartial(
'param_%d_%s_init' % (self.param_id, self.name),
inputs=[],
outputs=[self.blob])
self._cloned_init_net = (init_net, outputs[0])
return self._cloned_init_net
def __str__(self):
return self.name
# _known_working_ops are operators that do not need special care.
_known_working_ops = [
"Accuracy",
"Adam",
"Add",
"Adagrad",
"SparseAdagrad",
"AveragedLoss",
"Cast",
"Checkpoint",
"ConstantFill",
"Copy",
"CopyGPUToCPU",
"CopyCPUToGPU",
"DequeueBlobs",
"EnsureCPUOutput",
"Flatten",
"FlattenToVec",
"LabelCrossEntropy",
"LearningRate",
"MakeTwoClass",
"MatMul",
"NCCLAllreduce",
"NHWC2NCHW",
"PackSegments",
"Print",
"PRelu",
"Scale",
"ScatterWeightedSum",
"Sigmoid",
"SortedSegmentSum",
"Snapshot", # Note: snapshot is deprecated, use Checkpoint
"Softmax",
"SoftmaxWithLoss",
"SquaredL2Distance",
"Squeeze",
"StopGradient",
"Summarize",
"Tanh",
"UnpackSegments",
"WeightedSum",
"ReduceFrontSum",
]
class ModelHelper(object):
"""A helper model so we can manange models more easily. It contains net def
and parameter storages. You can add an Operator yourself, e.g.
model = model_helper.ModelHelper(name="train_net")
# init your weight and bias as w and b
w = model.param_init_net.XavierFill(...)
b = model.param_init_net.ConstantFill(...)
fc1 = model.FC([input, w, b], output, **kwargs)
or you can use helper functions in brew module without manually
defining parameter initializations and operators.
model = model_helper.ModelHelper(name="train_net")
fc1 = brew.fc(model, input, output, dim_in, dim_out, **kwargs)
"""
def __init__(self, name=None, init_params=True, allow_not_known_ops=True,
skip_sparse_optim=False, param_model=None, arg_scope=None):
self.name = name or "model"
self.net = core.Net(self.name)
if param_model is not None:
self.param_init_net = param_model.param_init_net
self.param_to_grad = param_model.param_to_grad
self.params = param_model.params
self.computed_params = param_model.computed_params
else:
self.param_init_net = core.Net(name + '_init')
self.param_to_grad = {}
self.params = []
self.computed_params = []
self._param_info = []
self._devices = []
self.gradient_ops_added = False
self.init_params = init_params
self.allow_not_known_ops = allow_not_known_ops
self.skip_sparse_optim = skip_sparse_optim
self.weights = []
self.biases = []
self._arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': False,
}
if arg_scope is not None:
            # Note that None values are not acceptable. We do not check for
            # them here because MakeArgument already does.
self._arg_scope.update(arg_scope)
@property
def arg_scope(self):
return self._arg_scope
def get_name(self):
return self.name
def _infer_param_shape(self, param):
for op in self.param_init_net.Proto().op:
if str(param) in op.output:
for arg in op.arg:
if arg.name == "shape":
return list(arg.ints)
return None
def _update_param_info(self):
assert len(self._param_info) <= len(self.params)
for param in self.params[len(self._param_info):]:
if not isinstance(param, core.BlobReference):
raise ValueError("Param %s must be a BlobReference!" % str(param))
self._param_info.append(ParameterInfo(
param_id=len(self._param_info),
param=param,
shape=self._infer_param_shape(param)))
for info in self._param_info:
info.grad = self.param_to_grad.get(info.name)
def add_param(self, param, key=None, shape=None, length=None):
self._update_param_info()
if key is not None and self.net.input_record() is not None:
idx = self.net.input_record().field_blobs().index(key)
key = self.net.input_record().field_names()[idx]
shape = shape if shape is not None else self._infer_param_shape(param)
self.params.append(param)
if not isinstance(param, core.BlobReference):
raise ValueError("Param %s must be a BlobReference!" % str(param))
self._param_info.append(ParameterInfo(
param_id=len(self._param_info),
param=param,
shape=shape,
key=key,
length=length,
))
return self._param_info[-1]
def param_info(self, grad_type=None, id=None):
self._update_param_info()
if id is not None:
assert grad_type is None
info = self._param_info[id]
assert info.param_id == id
return info
elif grad_type is not None:
return [
info for info in self._param_info
if info.grad_type() == grad_type]
else:
return self._param_info
def GetParams(self, namescope=None, top_scope=False):
'''
Returns the params in current namescope
'''
if namescope is None:
namescope = scope.CurrentNameScope()
else:
if not namescope.endswith(scope._NAMESCOPE_SEPARATOR):
namescope += scope._NAMESCOPE_SEPARATOR
        if namescope == '':
            return self.params[:]
        else:
            return [p for p in self.params if
                    p.GetNameScope().startswith(namescope)]
def Proto(self):
return self.net.Proto()
def InitProto(self):
return self.param_init_net.Proto()
def RunAllOnGPU(self, *args, **kwargs):
self.param_init_net.RunAllOnGPU(*args, **kwargs)
self.net.RunAllOnGPU(*args, **kwargs)
def CreateDB(self, blob_out, db, db_type, **kwargs):
dbreader = self.param_init_net.CreateDB(
[], blob_out, db=db, db_type=db_type, **kwargs)
return dbreader
def AddGradientOperators(self, *args, **kwargs):
if self.gradient_ops_added:
raise RuntimeError("You cannot run AddGradientOperators twice.")
self.gradient_ops_added = True
self.grad_map = self.net.AddGradientOperators(*args, **kwargs)
self.param_to_grad = self.get_param_to_grad(self.params)
return self.grad_map
def get_param_to_grad(self, params):
'''
Given a list of parameters returns a dict from a parameter
to a corresponding gradient
'''
param_to_grad = {}
if not self.gradient_ops_added:
raise RuntimeError("You need to run AddGradientOperators first.")
# We need to use empty namescope when creating the gradients
# to prevent duplicating the namescope prefix for gradient blobs.
for p in params:
if str(p) in self.grad_map:
param_to_grad[p] = self.grad_map[str(p)]
return param_to_grad
def GetOptimizationPairs(self, params=None):
'''
Returns a map for param => grad.
If params is not specified, all parameters will be considered.
'''
if not self.gradient_ops_added:
raise RuntimeError("Need to call AddGradientOperators first")
param_to_grad = self.param_to_grad
if params:
param_to_grad = self.get_param_to_grad(params)
if not self.skip_sparse_optim:
return param_to_grad
else:
return {param: grad for param, grad in param_to_grad.items()
if not isinstance(grad, core.GradientSlice)}
def GetComputedParams(self, namescope=None):
'''
Returns the computed params in current namescope. 'Computed params'
are such parameters that are not optimized via gradient descent but are
directly computed from data, such as the running mean and variance
of Spatial Batch Normalization.
'''
if namescope is None:
namescope = scope.CurrentNameScope()
else:
if not namescope.endswith(scope._NAMESCOPE_SEPARATOR):
namescope += scope._NAMESCOPE_SEPARATOR
if namescope == '':
return self.computed_params[:]
else:
return [p for p in self.computed_params
if p.GetNameScope() == namescope]
def GetAllParams(self, namescope=None):
return self.GetParams(namescope) + self.GetComputedParams(namescope)
def TensorProtosDBInput(
self, unused_blob_in, blob_out, batch_size, db, db_type, **kwargs
):
"""TensorProtosDBInput."""
dbreader_name = "dbreader_" + db
dbreader = self.param_init_net.CreateDB(
[], dbreader_name,
db=db, db_type=db_type)
return self.net.TensorProtosDBInput(
dbreader, blob_out, batch_size=batch_size)
def GetDevices(self):
assert len(self._devices) > 0, \
"Use data_parallel_model to run model on multiple GPUs."
return self._devices
def __getattr__(self, op_type):
"""Catch-all for all other operators, mostly those without params."""
if op_type.startswith('__'):
raise AttributeError(op_type)
if not core.IsOperator(op_type):
raise RuntimeError(
'Method ' + op_type + ' is not a registered operator.' +
' Did you mean: [' +
','.join(workspace.C.nearby_opnames(op_type)) + ']'
)
if op_type not in _known_working_ops:
if not self.allow_not_known_ops:
raise RuntimeError(
"Operator {} is not known to be safe".format(op_type))
logging.warning("You are creating an op that the ModelHelper "
"does not recognize: {}.".format(op_type))
return self.net.__getattr__(op_type)
def __dir__(self):
        return sorted(set(
            dir(type(self)) +
            list(self.__dict__.keys()) +
            _known_working_ops))
def ExtractPredictorNet(
net_proto,
input_blobs,
output_blobs,
device=None,
renames=None,
disabled_inputs=None
):
'''
Takes a model net for training and returns a net which can be
    used for prediction. In particular, all gradient operators and
input operators are removed.
@param net_proto protobuf of the net you want to process (net.Proto())
@param input_blobs list/set of blob names that are the inputs of predictor
@param output_blobs list/set of blob names that are outputs of predictor
@param device optional device option that is assigned
@param renames dictionary of blob name to a new name (optional)
@param disabled_inputs optional set of blobs that are 'switched off'. This
will cause branches with those blobs as inputs to be removed
'''
predict_net = core.Net(net_proto.name + "_predict")
predict_proto = predict_net.Proto()
orig_external_inputs = set(net_proto.external_input)
orig_external_outputs = set(net_proto.external_output)
input_blobs = {str(b) for b in input_blobs}
known_blobs = set(orig_external_inputs).union(input_blobs)
output_blobs = {str(b) for b in output_blobs}
external_inputs = set(input_blobs)
external_outputs = set(output_blobs)
if disabled_inputs is not None:
known_blobs = known_blobs - set(disabled_inputs)
ops = list(net_proto.op)
# Find the range of ops that we should include
try:
first_op_with_input = min(
[
j for j in range(len(ops))
if input_blobs.intersection(ops[j].input) and ops[j].type !=
'StopGradient'
]
)
except ValueError:
raise Exception("No ops with input={}".format(input_blobs))
try:
last_op_with_output = max(
[
j for j in range(len(ops))
if output_blobs.intersection(ops[j].output)
]
)
except ValueError:
raise Exception("No ops with output={}".format(output_blobs))
def validate_op(op):
# Check that the op does not have is_test = 0 set. This is a common
# pitfall with SpatialBN op, at lest.
for arg in op.arg:
if arg.name == "is_test" and arg.i == 0:
raise Exception(
"A operator had is_test=0, did you try to extract a " +
"predictor from a train model (instead of test model)?" +
" Op was: {}".format(str(op))
)
# Iterate through the ops and only include those whose inputs
# we can satisfy.
for op in ops[first_op_with_input:(last_op_with_output + 1)]:
if known_blobs.issuperset(op.input):
if device is not None:
op.device_option.device_type = device.device_type
op.device_option.cuda_gpu_id = device.cuda_gpu_id
validate_op(op)
predict_proto.op.extend([op])
known_blobs.update(op.output)
external_inputs.update(
set(op.input).intersection(orig_external_inputs)
)
external_outputs.update(
set(op.output).intersection(orig_external_outputs)
)
else:
logging.debug(
"Op {} had unknown inputs: {}".format(
op.type, set(op.input).difference(known_blobs)
)
)
def rename_list(proto_list):
if renames is None:
return
# proto lists don't support assignments
new_list = proto_list[:]
for j, b in enumerate(new_list):
if b in renames:
new_list[j] = renames[b]
del proto_list[:]
proto_list.extend(new_list)
# Predictor net's external inputs and outputs include only those
# that are part of this net.
predict_proto.external_input.extend(external_inputs)
predict_proto.external_output.extend(external_outputs)
rename_list(predict_proto.external_input)
rename_list(predict_proto.external_output)
for op in predict_proto.op:
rename_list(op.input)
rename_list(op.output)
return predict_net
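# An illustrative sketch (blob names are hypothetical): extract a deployable
# net that maps 'data' to 'softmax' from a trained model.
def _example_extract_predictor(train_model):  # hypothetical helper
    return ExtractPredictorNet(
        net_proto=train_model.net.Proto(),
        input_blobs=['data'],
        output_blobs=['softmax'],
    )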
|
## @package optimizer
# Module caffe2.python.optimizer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from caffe2.python import core
from caffe2.proto import caffe2_pb2
_OPTIMIZER_ITERATION_NAME = "optimizer_iteration"
AuxOptimizerParams = namedtuple("AuxOptimizerParams", ["local", "shared"])
class Optimizer(object):
def __init__(self):
self._aux_params = AuxOptimizerParams(local=[], shared=[])
def __call__(self, net, param_init_net, param, grad):
raise NotImplementedError()
@staticmethod
def build_lr(net, param_init_net, base_learning_rate,
learning_rate_blob="lr", policy="fixed",
iter_val=0, **kwargs):
if not param_init_net.BlobIsDefined(_OPTIMIZER_ITERATION_NAME):
# Add training operators.
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
iteration = param_init_net.ConstantFill(
[], _OPTIMIZER_ITERATION_NAME, shape=[1],
value=iter_val,
dtype=core.DataType.INT64)
iter_mutex = param_init_net.CreateMutex([], ["iteration_mutex"])
net.AtomicIter([iter_mutex, iteration], [iteration])
else:
iteration = param_init_net.GetBlobRef(_OPTIMIZER_ITERATION_NAME)
# There is one interesting thing here: since we are minimizing, we are
# doing "descent" so the learning rate is set to be negative.
lr = net.LearningRate(
[iteration],
learning_rate_blob,
base_lr=-base_learning_rate,
policy=policy,
**kwargs
)
return lr, iteration
@staticmethod
def dedup(net, sparse_dedup_aggregator, grad):
assert (isinstance(grad, core.GradientSlice))
if sparse_dedup_aggregator:
return net.DeduplicateGradientSlices(
grad, aggregator=sparse_dedup_aggregator)
else:
return grad
def get_auxiliary_parameters(self):
"""Returns a list of auxiliary parameters.
Returns:
        aux_params: A namedtuple, AuxOptimizerParams.
aux_params.local stores a list of blobs. Each blob is a local
auxiliary parameter. A local auxiliary parameter is a parameter in
parallel to a learning rate parameter. Take adagrad as an example,
the local auxiliary parameter is the squared sum parameter, because
every learning rate has a squared sum associated with it.
aux_params.shared also stores a list of blobs. Each blob is a shared
auxiliary parameter. A shared auxiliary parameter is a parameter
that is shared across all the learning rate parameters. Take adam as
an example, the iteration parameter is a shared parameter, because
all the learning rates share the same iteration parameter.
"""
return self._aux_params
    # TODO(xlwang): In transfer learning, parameters initialized from a
    # pretrained model might require a different learning rate than
    # parameters initialized from scratch. To this end, we provide a python
    # solution where `base_learning_rate` is scaled by `scale` via
    # `scale_learning_rate`; alternatively, the same effect could be achieved
    # by rewriting the LearningRate operator in C++.
    # Note that it is the responsibility of each specific optimizer to decide
    # what logic should be used for `scale_learning_rate`.
def scale_learning_rate(self, *args, **kwargs):
raise NotImplementedError(
"Optimizer Need to Implement `scale_learning_rate` method.")
class SgdOptimizer(Optimizer):
def __init__(self, base_learning_rate=0.01, policy='fixed',
momentum=0.0, **kwargs):
super(SgdOptimizer, self).__init__()
self.base_learning_rate = base_learning_rate
self.policy = policy
self.momentum = momentum
self.init_kwargs = kwargs
def __call__(self, net, param_init_net, param, grad):
if self.base_learning_rate <= 0:
return
lr, _ = self.build_lr(
net, param_init_net,
base_learning_rate=self.base_learning_rate,
learning_rate_blob=str(param) + "_lr",
policy=self.policy,
**(self.init_kwargs)
)
ONE = param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
self._aux_params.shared.append(ONE)
if self.momentum > 0:
momentum_data = param_init_net.ConstantFill(
param, str(param) + "_momentum", value=0.)
self._aux_params.local.append(momentum_data)
if isinstance(grad, core.GradientSlice):
            assert self.momentum == 0., \
                "Momentum is not supported for sparse gradients"
net.ScatterWeightedSum(
[param, ONE, grad.indices, grad.values, lr],
param
)
else:
if self.momentum > 0.:
net.MomentumSGD(
[grad, momentum_data, lr], [grad, momentum_data],
momentum=self.momentum,
nesterov=1)
coeff = ONE
else:
coeff = lr
net.WeightedSum(
[param, ONE, grad, coeff],
param
)
def scale_learning_rate(self, scale):
self.base_learning_rate *= scale
return
class AdagradOptimizer(Optimizer):
def __init__(self, alpha=0.01, epsilon=1e-4, policy="fixed",
sparse_dedup_aggregator=None, engine='', **kwargs):
super(AdagradOptimizer, self).__init__()
self.alpha = alpha
self.epsilon = epsilon
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
self.init_kwargs = kwargs
def __call__(self, net, param_init_net, param, grad):
if self.alpha <= 0:
return
lr, _ = self.build_lr(
net, param_init_net,
base_learning_rate=self.alpha,
learning_rate_blob=str(param) + "_lr",
policy=self.policy,
**(self.init_kwargs)
)
param_squared_sum = param_init_net.ConstantFill(
[param],
str(param) + "_squared_sum",
value=0.0
)
self._aux_params.local.append(param_squared_sum)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseAdagrad(
[param, param_squared_sum, grad.indices, grad.values, lr],
[param, param_squared_sum],
epsilon=self.epsilon,
engine=self.engine
)
else:
net.Adagrad(
[param, param_squared_sum, grad, lr],
[param, param_squared_sum],
epsilon=self.epsilon,
engine=self.engine
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class FtrlOptimizer(Optimizer):
def __init__(self, alpha=0.01, beta=1e-4, lambda1=0, lambda2=0,
sparse_dedup_aggregator=None, engine=''):
super(FtrlOptimizer, self).__init__()
self.alpha = alpha
self.beta = beta
self.lambda1 = lambda1
self.lambda2 = lambda2
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
def __call__(self, net, param_init_net, param, grad):
if self.alpha <= 0:
return
nz = param_init_net.ConstantFill(
[param],
str(param) + "_ftrl_nz",
extra_shape=[2],
value=0.0
)
self._aux_params.local.append(nz)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseFtrl(
[param, nz, grad.indices, grad.values],
[param, nz],
engine=self.engine,
alpha=self.alpha,
beta=self.beta,
lambda1=self.lambda1,
lambda2=self.lambda2
)
else:
net.Ftrl(
[param, nz, grad],
[param, nz],
engine=self.engine,
alpha=self.alpha,
beta=self.beta,
lambda1=self.lambda1,
lambda2=self.lambda2
)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
class AdamOptimizer(Optimizer):
def __init__(self, alpha=0.001, beta1=0.9, beta2=0.999, epsilon=1e-8,
policy='fixed', sparse_dedup_aggregator=None,
engine='', **kwargs):
super(AdamOptimizer, self).__init__()
self.alpha = alpha
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.policy = policy
self.sparse_dedup_aggregator = sparse_dedup_aggregator
self.engine = engine
self.init_kwargs = kwargs
def __call__(self, net, param_init_net, param, grad):
if self.alpha <= 0:
return
lr, iteration = self.build_lr(
net, param_init_net,
base_learning_rate=self.alpha,
learning_rate_blob=str(param) + "_lr",
policy=self.policy,
**(self.init_kwargs)
)
m1 = param_init_net.ConstantFill(
[param],
param + "_first_moment",
value=0.0
)
m2 = param_init_net.ConstantFill(
[param],
param + "_second_moment",
value=0.0
)
self._aux_params.shared.append(iteration)
self._aux_params.local.append(m1)
self._aux_params.local.append(m2)
if isinstance(grad, core.GradientSlice):
grad = self.dedup(net, self.sparse_dedup_aggregator, grad)
net.SparseAdam(
[param, m1, m2, grad.indices, grad.values, lr, iteration],
[param, m1, m2],
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon
)
else:
net.Adam(
[param, m1, m2, grad, lr, iteration],
[param, m1, m2],
beta1=self.beta1,
beta2=self.beta2,
epsilon=self.epsilon)
def scale_learning_rate(self, scale):
self.alpha *= scale
return
def build_sgd(model, base_learning_rate, **kwargs):
sgd_optimizer = SgdOptimizer(base_learning_rate, **kwargs)
for param, grad in model.GetOptimizationPairs().items():
sgd_optimizer(model.net, model.param_init_net, param, grad)
return sgd_optimizer
def build_ftrl(model, engine="SIMD", **kwargs):
if engine == "SIMD":
assert core.IsOperator('Ftrl_ENGINE_SIMD')
assert core.IsOperator('SparseFtrl_ENGINE_SIMD')
ftrl_optimizer = FtrlOptimizer(engine=engine, **kwargs)
for param, grad in model.GetOptimizationPairs().items():
ftrl_optimizer(model.net, model.param_init_net, param, grad)
return ftrl_optimizer
def build_adagrad(model, base_learning_rate, parameters=None, **kwargs):
adagrad_optimizer = AdagradOptimizer(alpha=base_learning_rate, **kwargs)
param_to_grad = model.GetOptimizationPairs(parameters)
for param, grad in param_to_grad.items():
adagrad_optimizer(model.net, model.param_init_net, param, grad)
return adagrad_optimizer
def build_adam(model, base_learning_rate, **kwargs):
adam_optimizer = AdamOptimizer(alpha=base_learning_rate, **kwargs)
for param, grad in model.GetOptimizationPairs().items():
adam_optimizer(model.net, model.param_init_net, param, grad)
return adam_optimizer
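# A minimal usage sketch added for illustration (not part of the original
# module): wires build_sgd into a tiny least-squares model. The blob names,
# the FC initializer tuples, and the GetOptimizationPairs-based flow are
# assumptions based on how these helpers are exercised elsewhere in this
# codebase, not a definitive recipe.
def _example_build_sgd():
    import numpy as np
    # Imported locally to avoid a module-level import cycle.
    from caffe2.python import cnn, workspace
    model = cnn.CNNModelHelper(name="sgd_example")
    out = model.FC('data', 'fc', 4, 1,
                   ('XavierFill', {}), ('ConstantFill', {}))
    dist = model.SquaredL2Distance([out, 'label'], 'dist')
    loss = model.AveragedLoss(dist, 'loss')
    model.AddGradientOperators([loss])
    sgd = build_sgd(model, base_learning_rate=0.1)
    workspace.FeedBlob('data', np.random.rand(8, 4).astype(np.float32))
    workspace.FeedBlob('label', np.zeros((8, 1), dtype=np.float32))
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net)
    workspace.RunNet(model.net.Proto().name)
    # Auxiliary blobs (e.g. the shared ONE blob) are tracked per optimizer.
    return sgd.get_auxiliary_parameters()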
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import context, test_util
from threading import Thread
@context.define_context()
class MyContext(object):
pass
class TestContext(test_util.TestCase):
def use_my_context(self):
try:
for _ in range(100):
with MyContext() as a:
for _ in range(100):
self.assertTrue(MyContext.current() == a)
except Exception as e:
self._exceptions.append(e)
def testMultiThreaded(self):
threads = []
self._exceptions = []
for _ in range(8):
thread = Thread(target=self.use_my_context)
thread.start()
threads.append(thread)
for t in threads:
t.join()
for e in self._exceptions:
raise e
|
## @package layer_model_helper
# Module caffe2.python.layer_model_helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, model_helper, schema
from caffe2.python.layers import layers
import logging
import numpy as np
logger = logging.getLogger(__name__)
class LayerModelHelper(model_helper.ModelHelper):
"""
Model helper for building models on top of layers abstractions.
    Each layer is an abstraction that is higher level than an Operator. A
    layer is responsible for ownership of its own parameters and can easily
    be instantiated in multiple nets, possibly with different sets of ops.
    As an example: one can easily instantiate predict and train nets from
    the same set of layers, where the predict net will have a subset of the
    operators from the train net.
"""
def __init__(self, name, input_feature_schema, trainer_extra_schema,
keep_blobs=False):
super(LayerModelHelper, self).__init__(name=name)
self._layer_names = set()
self._layers = []
# optimizer bookkeeping
self.param_to_optim = {}
self._default_optimizer = None
self._loss = None
self._output_schema = None
        # Connect the schema to self.net. That particular instance of the
        # schema will be used for generation of the layers across the
        # network and for the connection with readers.
self._input_feature_schema = schema.NewRecord(
self.net,
input_feature_schema
) if not keep_blobs else input_feature_schema.clone()
self._trainer_extra_schema = schema.NewRecord(
self.net,
trainer_extra_schema
) if not keep_blobs else trainer_extra_schema.clone()
self._metrics_schema = schema.Struct()
self._init_global_constants()
self.param_init_net = self.create_init_net('param_init_net')
def add_metric_field(self, name, value):
assert name not in self._metrics_schema.fields, (
"Try to add metric field twice: {}".format(name))
self._metrics_schema = self._metrics_schema + schema.Struct(
(name, value)
)
def add_global_constant(self, name, array=None, dtype=None,
initializer=None):
        # This is the global namescope for constants. They will be created in
        # all init_nets and there should be very few of them.
assert name not in self.global_constants
self.global_constants[name] = self.net.NextBlob(name)
if array is not None:
assert initializer is None,\
"Only one from array and initializer should be specified"
if dtype is None:
array = np.array(array)
else:
array = np.array(array, dtype=dtype)
# TODO: make GivenTensor generic
op_name = None
if array.dtype == np.int32:
op_name = 'GivenTensorIntFill'
elif array.dtype == np.int64:
op_name = 'GivenTensorInt64Fill'
elif array.dtype == np.str:
op_name = 'GivenTensorStringFill'
else:
op_name = 'GivenTensorFill'
def initializer(blob_name):
return core.CreateOperator(op_name,
[],
blob_name,
shape=array.shape,
values=array.flatten().tolist()
)
else:
assert initializer is not None
self.global_constant_initializers.append(
initializer(self.global_constants[name]))
return self.global_constants[name]
def _init_global_constants(self):
self.global_constants = {}
self.global_constant_initializers = []
self.add_global_constant('ONE', 1.0)
self.add_global_constant('ZERO', 0.0)
self.add_global_constant('ZERO_RANGE', [0, 0], dtype='int32')
def _add_global_constants(self, init_net):
for initializer_op in self.global_constant_initializers:
init_net._net.op.extend([initializer_op])
def create_init_net(self, name):
init_net = core.Net(name)
self._add_global_constants(init_net)
return init_net
def next_layer_name(self, prefix):
base_name = core.ScopedName(prefix)
name = base_name
index = 0
while name in self._layer_names:
name = base_name + '_auto_' + str(index)
index += 1
self._layer_names.add(name)
return name
def add_layer(self, layer):
self._layers.append(layer)
for param in layer.get_parameters():
assert isinstance(param.parameter, core.BlobReference)
self.param_to_optim[str(param.parameter)] = \
param.optimizer or self.default_optimizer
        # The primary value of adding everything to self.net is the immediate
        # generation of the operators, i.e. if an error happens it will be
        # detected right away. Other than this, create_x_net should be called.
layer.add_operators(self.net, self.param_init_net)
return layer.output_schema
def get_parameter_blobs(self):
param_blobs = []
for layer in self._layers:
for param in layer.get_parameters():
param_blobs.append(param.parameter)
return param_blobs
@property
def default_optimizer(self):
return self._default_optimizer
@default_optimizer.setter
def default_optimizer(self, optimizer):
self._default_optimizer = optimizer
@property
def input_feature_schema(self):
return self._input_feature_schema
@property
def trainer_extra_schema(self):
return self._trainer_extra_schema
@property
def metrics_schema(self):
"""
Returns the schema that represents model output that should be used for
metric reporting.
During the training/evaluation this schema will be appended to the
schema that represents model output.
"""
return self._metrics_schema
@property
def output_schema(self):
assert self._output_schema is not None
return self._output_schema
@output_schema.setter
def output_schema(self, schema):
assert self._output_schema is None
self._output_schema = schema
@property
def loss(self):
assert self._loss is not None
return self._loss
@loss.setter
def loss(self, loss):
assert self._loss is None
self._loss = loss
def __getattr__(self, layer):
if layer.startswith('__'):
raise AttributeError(layer)
        # TODO(amalevich): Add support for ifbpy inline documentation
if layers.layer_exists(layer):
def wrapper(*args, **kwargs):
return self.add_layer(
layers.create_layer(layer, self, *args, **kwargs))
return wrapper
elif core.IsOperator(layer):
def wrapper(*args, **kwargs):
def apply_operator(net, in_record, out_record, **kwargs):
                    # TODO(amalevich): Switch to net.operator as soon as it
                    # lands
net.__getattr__(layer)(in_record.field_blobs(),
out_record.field_blobs(),
**kwargs)
if 'name' not in kwargs:
kwargs['name'] = layer
return self.add_layer(
layers.create_layer('Functional',
self, *args, function=apply_operator,
**kwargs))
return wrapper
else:
raise ValueError(
"Tring to create non-registered layer: {0}".format(layer))
@property
def layers(self):
return self._layers
def apply_optimizers(self, train_net, train_init_net, grad_map):
for param, optimizer in self.param_to_optim.items():
assert optimizer is not None, \
"default optimizer must have been set in add_layer"
            # Note that not all params have gradients, and thus we send None
            # if the gradient does not exist.
optimizer(
train_net, train_init_net, param, grad_map.get(str(param)))
def _GetOne(self):
return self.global_constants['ONE']
# An optimizer which allows us to do NO optimization
def NoOptim(self, *args, **kwargs):
pass
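# An added construction sketch (not part of the original module): builds a
# LayerModelHelper with one dense feature and a label, then adds an FC layer
# through the __getattr__ dispatch above. The schema field names and the FC
# layer signature are illustrative assumptions.
def _example_layer_model():
    input_record = schema.Struct(
        ('dense', schema.Scalar((np.float32, (10,)))),
    )
    trainer_extra = schema.Struct(
        ('label', schema.Scalar((np.float32, (1,)))),
    )
    model = LayerModelHelper(
        'example_model', input_record, trainer_extra)
    # Dispatches to layers.create_layer('FC', ...) via __getattr__.
    fc_output = model.FC(model.input_feature_schema.dense, 8)
    model.output_schema = fc_output
    return model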
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from caffe2.python import core, workspace, tt_core
import caffe2.python.hypothesis_test_util as hu
class TestTTSVD(hu.HypothesisTestCase):
def test_full_tt_svd(self):
size = 256
np.random.seed(1234)
X = np.expand_dims(
np.random.rand(size).astype(np.float32), axis=0)
W = np.random.rand(size, size).astype(np.float32)
b = np.zeros(size).astype(np.float32)
inp_sizes = [4, 4, 4, 4]
out_sizes = [4, 4, 4, 4]
op_fc = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.RunOperatorOnce(op_fc)
Y_fc = workspace.FetchBlob("Y").flatten()
# Testing TT-decomposition with high ranks
full_tt_ranks = [1, 16, 256, 16, 1]
full_cores = tt_core.matrix_to_tt(W, inp_sizes, out_sizes,
full_tt_ranks)
full_op_tt = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=full_tt_ranks,
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("b", b)
workspace.FeedBlob("cores", full_cores)
workspace.RunOperatorOnce(full_op_tt)
Y_full_tt = workspace.FetchBlob("Y").flatten()
assert(len(Y_fc) == len(Y_full_tt))
self.assertAlmostEquals(np.linalg.norm(Y_fc - Y_full_tt), 0, delta=1e-3)
# Testing TT-decomposition with minimal ranks
sparse_tt_ranks = [1, 1, 1, 1, 1]
sparse_cores = tt_core.matrix_to_tt(W, inp_sizes, out_sizes,
sparse_tt_ranks)
sparse_op_tt = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=sparse_tt_ranks,
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("b", b)
workspace.FeedBlob("cores", sparse_cores)
workspace.RunOperatorOnce(sparse_op_tt)
Y_sparse_tt = workspace.FetchBlob("Y").flatten()
assert(len(Y_fc) == len(Y_sparse_tt))
self.assertAlmostEquals(np.linalg.norm(Y_fc - Y_sparse_tt),
39.974, delta=1e-3)
if __name__ == '__main__':
unittest.main()
|
## @package lstm_benchmark
# Module caffe2.python.lstm_benchmark
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, workspace, core, utils, rnn_cell
import argparse
import numpy as np
import time
import logging
logging.basicConfig()
log = logging.getLogger("lstm_bench")
log.setLevel(logging.DEBUG)
def generate_data(T, shape, num_labels):
'''
Fill a queue with input data
'''
log.info("Generating T={} sequence batches".format(T))
generate_input_init_net = core.Net('generate_input_init')
queue = generate_input_init_net.CreateBlobsQueue(
[], "inputqueue", num_blobs=1, capacity=T,
)
label_queue = generate_input_init_net.CreateBlobsQueue(
[], "labelqueue", num_blobs=1, capacity=T,
)
workspace.RunNetOnce(generate_input_init_net)
generate_input_net = core.Net('generate_input')
generate_input_net.EnqueueBlobs([queue, "scratch"], ["scratch"])
generate_input_net.EnqueueBlobs([label_queue, "label_scr"], ["label_scr"])
np.random.seed(2603)
for t in range(T):
if (t % (max(10, T // 10)) == 0):
print("Generating data {}/{}".format(t, T))
# Randomize the seqlength
random_shape = (
[np.random.randint(1, shape[0])] + shape[1:]
if t > 0 else shape
)
X = np.random.rand(*random_shape).astype(np.float32)
batch_size = random_shape[1]
L = num_labels * batch_size
labels = (np.random.rand(random_shape[0]) * L).astype(np.int32)
workspace.FeedBlob("scratch", X)
workspace.FeedBlob("label_scr", labels)
workspace.RunNetOnce(generate_input_net.Proto())
log.info("Finished data generation")
return queue, label_queue
def create_model(args, queue, label_queue, input_shape):
model = cnn.CNNModelHelper(name="LSTM_bench")
seq_lengths, target = \
model.net.AddExternalInputs(
'seq_lengths',
'target',
)
input_blob = model.DequeueBlobs(queue, "input_data")
labels = model.DequeueBlobs(label_queue, "label")
init_blobs = []
if args.implementation == "own":
for i in range(args.num_layers):
init_blobs.append("hidden_init_{}".format(i))
init_blobs.append("cell_init_{}".format(i))
model.net.AddExternalInputs(init_blobs)
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob=input_blob,
seq_lengths=seq_lengths,
initial_states=init_blobs,
dim_in=args.input_dim,
dim_out=[args.hidden_dim] * args.num_layers,
scope="lstm1",
memory_optimization=args.memory_optimization,
forward_only=args.forward_only,
drop_states=True,
return_last_layer_only=True,
)
elif args.implementation == "cudnn":
# We need to feed a placeholder input so that RecurrentInitOp
# can infer the dimensions.
init_blobs = model.net.AddExternalInputs("hidden_init", "cell_init")
model.param_init_net.ConstantFill([], input_blob, shape=input_shape)
output, last_hidden, _ = rnn_cell.cudnn_LSTM(
model=model,
input_blob=input_blob,
initial_states=init_blobs,
dim_in=args.input_dim,
dim_out=args.hidden_dim,
scope="cudnnlstm",
num_layers=args.num_layers,
)
else:
assert False, "Unknown implementation"
weights = model.UniformFill(labels, "weights")
softmax, loss = model.SoftmaxWithLoss(
[model.Flatten(output), labels, weights],
['softmax', 'loss'],
)
if not args.forward_only:
model.AddGradientOperators([loss])
# carry states over
for init_blob in init_blobs:
model.net.Copy(last_hidden, init_blob)
sz = args.hidden_dim
if args.implementation == "cudnn":
sz *= args.num_layers
workspace.FeedBlob(init_blob, np.zeros(
[1, args.batch_size, sz], dtype=np.float32
))
return model, output
def Caffe2LSTM(args):
T = args.data_size // args.batch_size
input_blob_shape = [args.seq_length, args.batch_size, args.input_dim]
queue, label_queue = generate_data(T // args.seq_length,
input_blob_shape,
args.hidden_dim)
workspace.FeedBlob(
"seq_lengths",
np.array([args.seq_length] * args.batch_size, dtype=np.int32)
)
model, output = create_model(args, queue, label_queue, input_blob_shape)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
last_time = time.time()
start_time = last_time
num_iters = T // args.seq_length
entries_per_iter = args.seq_length * args.batch_size
total_iters = 0
# Run the Benchmark
log.info("------ Warming up ------")
workspace.RunNet(model.net.Proto().name)
num_iters = num_iters - 1
if (args.gpu):
log.info("Memory stats:")
stats = utils.GetGPUMemoryUsageStats()
log.info("GPU memory:\t{} MB".format(stats['max_total'] / 1024 / 1024))
log.info("------ Starting benchmark ------")
start_time = time.time()
for iteration in range(0, num_iters, args.iters_to_report):
iters_once = min(args.iters_to_report, num_iters - iteration)
total_iters += iters_once
workspace.RunNet(model.net.Proto().name, iters_once)
new_time = time.time()
log.info("Iter: {} / {}. Entries Per Second: {}k.". format(
iteration,
num_iters,
entries_per_iter * iters_once / (new_time - last_time) // 1000,
))
last_time = new_time
log.info("Done. Total EPS excluding 1st iteration: {}k".format(
total_iters * entries_per_iter / (time.time() - start_time) // 1000,
))
if (args.gpu):
log.info("Memory stats:")
stats = utils.GetGPUMemoryUsageStats()
log.info("GPU memory:\t{} MB".format(stats['max_total'] / 1024 / 1024))
if (stats['max_total'] != stats['total']):
log.warning(
"Max usage differs from current total usage: {} > {}".
format(stats['max_total'], stats['total'])
)
log.warning("This means that costly deallocations occured.")
return time.time() - start_time
def Benchmark(args):
return Caffe2LSTM(args)
def GetArgumentParser():
parser = argparse.ArgumentParser(description="LSTM benchmark.")
parser.add_argument(
"--hidden_dim",
type=int,
default=800,
help="Hidden dimension",
)
parser.add_argument(
"--input_dim",
type=int,
default=40,
help="Input dimension",
)
parser.add_argument(
"--batch_size",
type=int,
default=256,
help="The batch size."
)
parser.add_argument(
"--seq_length",
type=int,
default=20,
help="Max sequence length"
)
parser.add_argument(
"--data_size",
type=int,
default=10000000,
help="Number of data points to generate"
)
parser.add_argument(
"--iters_to_report",
type=int,
default=100,
help="Number of iteration to report progress"
)
parser.add_argument(
"--gpu",
action="store_true",
help="Run all on GPU",
)
parser.add_argument(
"--implementation",
type=str,
default="own",
help="'cudnn' or 'own'",
)
parser.add_argument(
"--memory_optimization",
action="store_true",
help="Whether to use memory optimized LSTM or not",
)
parser.add_argument(
"--forward_only",
action="store_true",
help="Whether to run only forward pass"
)
parser.add_argument(
"--num_layers",
type=int,
default=1,
help="Number of LSTM layers. All output dimensions are going to be"
"of hidden_dim size",
)
return parser
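# An added usage note (not part of the original script): a typical CPU
# invocation. The script name and flag values below are illustrative; all
# flags are defined in GetArgumentParser above.
#
#   python lstm_benchmark.py --batch_size 64 --seq_length 25 \
#       --data_size 1000000 --implementation own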
if __name__ == '__main__':
args = GetArgumentParser().parse_args()
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
device = core.DeviceOption(
caffe2_pb2.CUDA if args.gpu else caffe2_pb2.CPU, 0)
with core.DeviceScope(device):
Benchmark(args)
|
## @package tt_core
# Module caffe2.python.tt_core
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
"""
The following methods are various utility methods for using the Tensor-Train
decomposition, or TT-decomposition introduced by I. V. Oseledets (2011) in his
paper (http://epubs.siam.org/doi/abs/10.1137/090752286).
Broadly speaking, these methods are used to replace fully connected layers in
neural networks with Tensor-Train layers introduced by A. Novikov et. al. (2015)
in their paper (http://arxiv.org/abs/1509.06569). More details about each of
the methods are provided in each respective docstring.
"""
def init_tt_cores(inp_sizes, out_sizes, tt_ranks, seed=1234):
"""
Initialize randomized orthogonalized TT-cores.
    This method should be used when a TT-layer will be trained from scratch.
    The sizes of the cores are specified by inp_sizes and out_sizes, and the
    respective tt_ranks dictate the ranks of each of the cores. Note
that a larger set of tt_ranks will result in slower computation but will
result in more accurate approximations. The size of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
    Note that the following relationship among the input lengths is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
Args:
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
seed: integer to seed the random number generator
Returns:
      cores: One-dimensional list of cores concatenated along an axis
"""
np.random.seed(seed)
    # Assert that the sizes of the inputs are correct
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dims (" + \
str(len(out_sizes)) + ")."
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Initialize the cores array
cores_len = np.sum(
inp_sizes * out_sizes * tt_ranks[1:] * tt_ranks[:-1])
cores = np.zeros(cores_len)
cores_idx = 0
rv = 1
# Compute the full list of cores by computing each individual one
for i in range(inp_sizes.shape[0]):
shape = [tt_ranks[i],
inp_sizes[i],
out_sizes[i],
tt_ranks[i + 1]]
# Precompute the shape of each core
tall_shape = (np.prod(shape[:3]), shape[3])
# Randomly initialize the current core using a normal distribution
curr_core = np.dot(rv, np.random.normal(
0, 1, size=(shape[0], np.prod(shape[1:]))))
curr_core = curr_core.reshape(tall_shape)
# Orthogonalize the initialized current core and append to cores list
if i < inp_sizes.shape[0] - 1:
curr_core, rv = np.linalg.qr(curr_core)
cores[cores_idx:cores_idx +
curr_core.size] = curr_core.flatten()
cores_idx += curr_core.size
    # Normalize the list of arrays using this Glorot-style trick
    glorot_style = (np.prod(inp_sizes) *
                    np.prod(tt_ranks))**(1.0 / inp_sizes.shape[0])
    return (0.1 / glorot_style) * np.array(cores).astype(np.float32)
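# An added sanity-check sketch (not part of the original module): the flat
# array returned above holds
# sum_i tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1] entries.
def _example_init_tt_cores():
    # Two cores: 1*4*4*2 + 2*4*4*1 = 64 floats in total.
    cores = init_tt_cores([4, 4], [4, 4], [1, 2, 1])
    assert cores.size == 64
    return cores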
def matrix_to_tt(W, inp_sizes, out_sizes, tt_ranks):
"""
Convert a matrix into the TT-format.
    This method will consume a 2D weight matrix such as those used in fully
connected layers in a neural network and will compute the TT-decomposition
of the weight matrix and return the TT-cores of the resulting computation.
    This method should be used when converting a trained fully connected layer
into a TT-layer for increased speed and decreased parameter size. The size
of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
    Note that the following relationship among the input lengths is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
We also require that np.prod(inp_sizes) == W.shape[0] and that
np.prod(out_sizes) == W.shape[1].
Args:
W: two-dimensional weight matrix numpy array representing a fully
connected layer to be converted to TT-format; note that the weight
         matrix is transposed before being decomposed because we want to
         emulate the X * W^T operation that the FC layer performs.
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
Returns:
      new_cores: One-dimensional list of cores concatenated along an axis
"""
    # Assert that the sizes of the inputs are correct
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dimensions (" + \
str(len(out_sizes)) + ")."
assert(W.shape[0] == np.prod(inp_sizes)), \
"The product of the input sizes (" + str(np.prod(inp_sizes)) + \
") must be equal to first dimension of W (" + str(W.shape[0]) + ")."
assert(W.shape[1] == np.prod(out_sizes)), \
"The product of the output sizes (" + str(np.prod(out_sizes)) + \
") must be equal to second dimension of W (" + str(W.shape[1]) + ")."
# W is transposed so that the multiplication X * W^T can be computed, just
# as it is in the FC layer.
W = W.transpose()
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Copy the original weight matrix in order to permute and reshape the weight
# matrix. In addition, the inp_sizes and out_sizes are combined to a single
# sizes array to use the tt_svd helper method, which only consumes a single
# sizes array.
W_copy = W.copy()
total_inp_size = inp_sizes.size
W_copy = np.reshape(W_copy, np.concatenate((inp_sizes, out_sizes)))
order = np.repeat(np.arange(0, total_inp_size), 2) + \
np.tile([0, total_inp_size], total_inp_size)
W_copy = np.transpose(W_copy, axes=order)
W_copy = np.reshape(W_copy, inp_sizes * out_sizes)
# Use helper method to convert the W matrix copy into the preliminary
# cores array.
cores = tt_svd(W_copy, inp_sizes * out_sizes, tt_ranks)
# Permute the dimensions of each of the cores to be compatible with the
# TT-layer.
new_cores = np.zeros(cores.shape).astype(np.float32)
idx = 0
for i in range(len(inp_sizes)):
shape = (tt_ranks[i], inp_sizes[i], out_sizes[i], tt_ranks[i + 1])
current_core = cores[idx:idx + np.prod(shape)].reshape(shape)
current_core = current_core.transpose((1, 3, 0, 2))
new_cores[new_cores.shape[0] - idx - np.prod(shape):
new_cores.shape[0] - idx] \
= current_core.flatten()
idx += np.prod(shape)
return new_cores
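# An added usage sketch (not part of the original module): decompose a small
# dense matrix. With inp_sizes=[4, 4] and out_sizes=[4, 4], a 16x16 matrix is
# factored into two cores occupying 1*4*4*16 + 16*4*4*1 = 512 floats.
def _example_matrix_to_tt():
    np.random.seed(0)
    W = np.random.rand(16, 16).astype(np.float32)
    cores = matrix_to_tt(W, [4, 4], [4, 4], [1, 16, 1])
    assert cores.size == 512
    return cores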
def tt_svd(W, sizes, tt_ranks):
"""
Helper method for the matrix_to_tt() method performing the TT-SVD
decomposition.
Uses the TT-decomposition algorithm to convert a matrix to TT-format using
multiple reduced SVD operations.
Args:
W: two-dimensional weight matrix representing a fully connected layer to
be converted to TT-format preprocessed by the matrix_to_tt() method.
sizes: list of the dimensions of each of the cores
tt_ranks: list of the ranks of the respective cores
Returns:
      cores: One-dimensional list of cores concatenated along an axis
"""
assert(len(tt_ranks) == len(sizes) + 1)
C = W.copy()
total_size = sizes.size
core = np.zeros(np.sum(tt_ranks[:-1] * sizes * tt_ranks[1:]),
dtype='float32')
# Compute iterative reduced SVD operations and store each resulting U matrix
# as an individual core.
pos = 0
for i in range(0, total_size - 1):
shape = tt_ranks[i] * sizes[i]
C = np.reshape(C, [shape, -1])
U, S, V = np.linalg.svd(C, full_matrices=False)
U = U[:, 0:tt_ranks[i + 1]]
S = S[0:tt_ranks[i + 1]]
V = V[0:tt_ranks[i + 1], :]
core[pos:pos + tt_ranks[i] * sizes[i] * tt_ranks[i + 1]] = U.ravel()
pos += tt_ranks[i] * sizes[i] * tt_ranks[i + 1]
C = np.dot(np.diag(S), V)
core[pos:pos + tt_ranks[total_size - 1] *
sizes[total_size - 1] * tt_ranks[total_size]] = C.ravel()
return core
# TODO(Surya) Write a method to convert an entire network where all fully
# connected layers are replaced by a TT-layer.
def fc_net_to_tt_net(net):
pass
|
## @package workspace
# Module caffe2.python.workspace
import contextlib
from google.protobuf.message import Message
from multiprocessing import Process
import os
import shutil
import socket
import tempfile
import logging
from six import string_types
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import scope, utils
import caffe2.python._import_c_extension as C
logger = logging.getLogger(__name__)
Blobs = C.blobs
CreateBlob = C.create_blob
CurrentWorkspace = C.current_workspace
DeserializeBlob = C.deserialize_blob
GlobalInit = C.global_init
HasBlob = C.has_blob
RegisteredOperators = C.registered_operators
SerializeBlob = C.serialize_blob
SwitchWorkspace = C.switch_workspace
RootFolder = C.root_folder
Workspaces = C.workspaces
BenchmarkNet = C.benchmark_net
Predictor = C.Predictor
is_asan = C.is_asan
has_gpu_support = C.has_gpu_support
if has_gpu_support:
NumCudaDevices = C.num_cuda_devices
SetDefaultGPUID = C.set_default_gpu_id
GetDefaultGPUID = C.get_default_gpu_id
GetCuDNNVersion = C.get_cudnn_version
def GetCudaPeerAccessPattern():
return np.asarray(C.get_cuda_peer_access_pattern())
else:
NumCudaDevices = lambda: 0 # noqa
SetDefaultGPUID = lambda x: None # noqa
GetDefaultGPUID = lambda: 0 # noqa
GetCuDNNVersion = lambda: 0 # noqa
GetCudaPeerAccessPattern = lambda: np.array([]) # noqa
# Python 2 and 3 compatibility: test if basestring exists
try:
basestring # NOQA
except NameError:
# This is python3 so we define basestring.
basestring = str
def _GetFreeFlaskPort():
"""Get a free flask port."""
    # We prefer to use port 5000; if it is unavailable, we pick a random port.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex(('127.0.0.1', 5000))
if result == 0:
return 5000
else:
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
        # Race condition: in the interval between closing the socket and
        # actually starting the mint process, another process might occupy
        # the port. We don't do much about it, as this is mostly for
        # convenience in research rather than a 24x7 service.
return port
def StartMint(root_folder=None, port=None):
"""Start a mint instance.
TODO(Yangqing): this does not work well under ipython yet. According to
https://github.com/ipython/ipython/issues/5862
writing up some fix is a todo item.
"""
from caffe2.python.mint import app
if root_folder is None:
# Get the root folder from the current workspace
root_folder = C.root_folder()
if port is None:
port = _GetFreeFlaskPort()
process = Process(
target=app.main,
args=(
['-p', str(port), '-r', root_folder],
)
)
process.start()
print('Mint running at http://{}:{}'.format(socket.getfqdn(), port))
return process
def StringifyProto(obj):
"""Stringify a protocol buffer object.
Inputs:
obj: a protocol buffer object, or a Pycaffe2 object that has a Proto()
function.
Outputs:
string: the output protobuf string.
    Raises:
      ValueError: if the passed in object is not a string, a protobuf
        message, or an object with a Proto() function.
"""
if isinstance(obj, string_types):
return obj
else:
if isinstance(obj, Message):
# First, see if this object is a protocol buffer, which we can
# simply serialize with the SerializeToString() call.
return obj.SerializeToString()
elif hasattr(obj, 'Proto'):
return obj.Proto().SerializeToString()
else:
raise ValueError("Unexpected argument to StringifyProto of type " +
type(obj).__name__)
def ResetWorkspace(root_folder=None):
if root_folder is None:
# Reset the workspace, but keep the current root folder setting.
return C.reset_workspace(C.root_folder())
else:
if not os.path.exists(root_folder):
os.makedirs(root_folder)
return C.reset_workspace(root_folder)
def CreateNet(net, overwrite=False, input_blobs=None):
if input_blobs is None:
input_blobs = []
for input_blob in input_blobs:
C.create_blob(input_blob)
return C.create_net(StringifyProto(net), overwrite)
def RunOperatorOnce(operator):
return C.run_operator_once(StringifyProto(operator))
def RunOperatorsOnce(operators):
for op in operators:
success = RunOperatorOnce(op)
if not success:
return False
return True
def RunNetOnce(net):
return C.run_net_once(StringifyProto(net))
def RunNet(name, num_iter=1, allow_fail=False):
"""Runs a given net.
Inputs:
name: the name of the net, or a reference to the net.
num_iter: number of iterations to run
allow_fail: if True, does not assert on net exec failure but returns False
    Returns:
      True on success, or False if allow_fail is set and the run failed;
      otherwise, an exception is raised on failure.
"""
return C.run_net(StringifyNetName(name), num_iter, allow_fail)
def RunPlan(plan_or_step):
# TODO(jiayq): refactor core.py/workspace.py to avoid circular deps
import caffe2.python.core as core
if isinstance(plan_or_step, core.ExecutionStep):
plan_or_step = core.Plan(plan_or_step)
return C.run_plan(StringifyProto(plan_or_step))
def InferShapesAndTypes(nets, blob_dimensions=None):
"""Infers the shapes and types for the specified nets.
Inputs:
nets: the list of nets
blob_dimensions (optional): a dictionary of blobs and their dimensions.
If not specified, the workspace blobs are used.
Returns:
A tuple of (shapes, types) dictionaries keyed by blob name.
"""
net_protos = [StringifyProto(n.Proto()) for n in nets]
if blob_dimensions is None:
blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos)
else:
blobdesc_prototxt = C.infer_shapes_and_types_from_map(
net_protos, blob_dimensions
)
blobdesc_proto = caffe2_pb2.TensorShapes()
blobdesc_proto.ParseFromString(blobdesc_prototxt)
shapes = {}
types = {}
for ts in blobdesc_proto.shapes:
if not ts.unknown_shape:
shapes[ts.name] = list(ts.dims)
types[ts.name] = ts.data_type
return (shapes, types)
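# An added usage sketch (not part of the original module): infer shapes for a
# small net from explicit input dimensions. The exact dict format accepted by
# blob_dimensions is an assumption here.
#
#   from caffe2.python import core
#   net = core.Net('shapes_demo')
#   net.FC(['x', 'w', 'b'], 'y')
#   shapes, types = InferShapesAndTypes(
#       [net], blob_dimensions={'x': [8, 4], 'w': [2, 4], 'b': [2]})
#   # shapes['y'] should come back as [8, 2].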
def _StringifyName(name, expected_type):
if isinstance(name, basestring):
return name
assert type(name).__name__ == expected_type, \
"Expected a string or %s" % expected_type
return str(name)
def StringifyBlobName(name):
return _StringifyName(name, "BlobReference")
def StringifyNetName(name):
return _StringifyName(name, "Net")
def FeedBlob(name, arr, device_option=None):
"""Feeds a blob into the workspace.
Inputs:
name: the name of the blob.
arr: either a TensorProto object or a numpy array object to be fed into
the workspace.
device_option (optional): the device option to feed the data with.
Returns:
True or False, stating whether the feed is successful.
"""
if type(arr) is caffe2_pb2.TensorProto:
arr = utils.Caffe2TensorToNumpyArray(arr)
if type(arr) is np.ndarray and arr.dtype.kind == 'S':
# Plain NumPy strings are weird, let's use objects instead
arr = arr.astype(np.object)
if device_option is None:
device_option = scope.CurrentDeviceScope()
if device_option and device_option.device_type == caffe2_pb2.CUDA:
if arr.dtype == np.dtype('float64'):
logger.warning(
"CUDA operators do not support 64-bit doubles, " +
"please use arr.astype(np.float32) or np.int32 for ints." +
" Blob: {}".format(name) +
" type: {}".format(str(arr.dtype))
)
name = StringifyBlobName(name)
if device_option is not None:
return C.feed_blob(name, arr, StringifyProto(device_option))
else:
return C.feed_blob(name, arr)
def FetchBlobs(names):
"""Fetches a list of blobs from the workspace.
Inputs:
names: list of names of blobs - strings or BlobReferences
Returns:
list of fetched blobs
"""
return [FetchBlob(name) for name in names]
def FetchBlob(name):
"""Fetches a blob from the workspace.
Inputs:
name: the name of the blob - a string or a BlobReference
Returns:
Fetched blob (numpy array or string) if successful
"""
return C.fetch_blob(StringifyBlobName(name))
def GetNameScope():
"""Return the current namescope string. To be used to fetch blobs"""
return scope.CurrentNameScope()
class _BlobDict(object):
"""Provides python dict compatible way to do fetching and feeding"""
def __getitem__(self, key):
return FetchBlob(key)
def __setitem__(self, key, value):
return FeedBlob(key, value)
def __len__(self):
return len(C.blobs())
def __iter__(self):
return C.blobs().__iter__()
def __contains__(self, item):
return C.has_blob(item)
blobs = _BlobDict()
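# An added usage sketch (not part of the original module): the module-level
# `blobs` object gives dict-style access to the current workspace and is
# equivalent to calling FeedBlob/FetchBlob directly.
#
#   import numpy as np
#   from caffe2.python import workspace
#   workspace.blobs['x'] = np.ones((2, 3), dtype=np.float32)
#   assert 'x' in workspace.blobs
#   print(workspace.blobs['x'])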
################################################################################
# Utilities for immediate mode
#
# Caffe2's immediate mode implements the following behavior: between the two
# function calls StartImmediate() and StopImmediate(), for any operator that is
# called through CreateOperator(), we will also run that operator in a workspace
# that is specific to the immediate mode. The user is explicitly expected to
# make sure that these ops have proper inputs and outputs, i.e. one should not
# run an op where an external input is not created or fed.
#
# Users can use FeedImmediate() and FetchImmediate() to interact with blobs
# in the immediate workspace.
#
# Once StopImmediate() is called, all contents in the immediate workspace are
# freed, so one can continue using normal runs.
#
# The immediate mode is solely for debugging purposes and support will be very
# sparse.
################################################################################
_immediate_mode = False
_immediate_workspace_name = "_CAFFE2_IMMEDIATE"
_immediate_root_folder = ''
def IsImmediate():
return _immediate_mode
@contextlib.contextmanager
def WorkspaceGuard(workspace_name):
    current = CurrentWorkspace()
    SwitchWorkspace(workspace_name, True)
    try:
        yield
    finally:
        # Switch back even if the guarded block raises.
        SwitchWorkspace(current)
def StartImmediate(i_know=False):
global _immediate_mode
global _immediate_root_folder
if IsImmediate():
# already in immediate mode. We will kill the previous one
# and start from fresh.
StopImmediate()
_immediate_mode = True
with WorkspaceGuard(_immediate_workspace_name):
_immediate_root_folder = tempfile.mkdtemp()
ResetWorkspace(_immediate_root_folder)
if i_know:
# if the user doesn't want to see the warning message, sure...
return
print("""
Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL
feature and may very easily go wrong. This is because Caffe2 uses a
declarative way of defining operators and models, which is essentially
not meant to run things in an interactive way. Read the following carefully
to make sure that you understand the caveats.
(1) You need to make sure that the sequences of operators you create are
actually runnable sequentially. For example, if you create an op that takes
an input X, somewhere earlier you should have already created X.
(2) Caffe2 immediate uses one single workspace, so if the set of operators
you run are intended to be under different workspaces, they will not run.
To create boundaries between such use cases, you can call FinishImmediate()
and StartImmediate() manually to flush out everything no longer needed.
(3) Underlying objects held by the immediate mode may interfere with your
normal run. For example, if there is a leveldb that you opened in immediate
mode and did not close, your main run will fail because leveldb does not
support double opening. Immediate mode may also occupy a lot of memory esp.
on GPUs. Call FinishImmediate() as soon as possible when you no longer
need it.
(4) Immediate is designed to be slow. Every immediate call implicitly
creates a temp operator object, runs it, and destroys the operator. This
slow-speed run is by design to discourage abuse. For most use cases other
than debugging, do NOT turn on immediate mode.
(5) If there is anything FATAL happening in the underlying C++ code, the
immediate mode will immediately (pun intended) cause the runtime to crash.
Thus you should use immediate mode with extra care. If you still would
like to, have fun [https://xkcd.com/149/].
""")
def StopImmediate():
"""Stops an immediate mode run."""
# Phew, that was a dangerous ride.
global _immediate_mode
global _immediate_root_folder
if not IsImmediate():
return
with WorkspaceGuard(_immediate_workspace_name):
ResetWorkspace()
shutil.rmtree(_immediate_root_folder)
_immediate_root_folder = ''
_immediate_mode = False
def ImmediateBlobs():
with WorkspaceGuard(_immediate_workspace_name):
return Blobs()
def RunOperatorImmediate(op):
with WorkspaceGuard(_immediate_workspace_name):
RunOperatorOnce(op)
def FetchImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FetchBlob(*args, **kwargs)
def FeedImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FeedBlob(*args, **kwargs)
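# An added usage sketch (not part of the original module): a typical
# immediate-mode session using the explicit helpers above. The operator type
# and blob names are illustrative.
#
#   import numpy as np
#   from caffe2.python import core
#   StartImmediate(i_know=True)
#   FeedImmediate('x', np.random.rand(4).astype(np.float32))
#   RunOperatorImmediate(core.CreateOperator('Relu', ['x'], ['y']))
#   y = FetchImmediate('y')
#   StopImmediate()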
# CWorkspace utilities
def _Workspace_create_net(ws, net, overwrite=False):
return ws._create_net(StringifyProto(net), overwrite)
C.Workspace.create_net = _Workspace_create_net
def _Workspace_run(ws, obj):
if hasattr(obj, 'Proto'):
obj = obj.Proto()
if isinstance(obj, caffe2_pb2.PlanDef):
return ws._run_plan(obj.SerializeToString())
if isinstance(obj, caffe2_pb2.NetDef):
return ws._run_net(obj.SerializeToString())
if isinstance(obj, caffe2_pb2.OperatorDef):
return ws._run_operator(obj.SerializeToString())
raise ValueError(
"Don't know how to do Workspace.run() on {}".format(type(obj)))
C.Workspace.run = _Workspace_run
def _Blob_feed(blob, arg, device_option=None):
if device_option is not None:
device_option = StringifyProto(device_option)
return blob._feed(arg, device_option)
C.Blob.feed = _Blob_feed
|
## @package net_drawer
# Module caffe2.python.net_drawer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import logging
from collections import defaultdict
from caffe2.python import utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
import pydot
except ImportError:
logger.info(
'Cannot import pydot, which is required for drawing a network. This '
'can usually be installed in python with "pip install pydot". Also, '
'pydot requires graphviz to convert dot files to pdf: in ubuntu, this '
'can usually be installed with "sudo apt-get install graphviz".'
)
print(
'net_drawer will not run correctly. Please install the correct '
'dependencies.'
)
pydot = None
from caffe2.proto import caffe2_pb2
OP_STYLE = {
'shape': 'box',
'color': '#0F9D58',
'style': 'filled',
'fontcolor': '#FFFFFF'
}
BLOB_STYLE = {'shape': 'octagon'}
def _rectify_operator_and_name(operators_or_net, name):
"""Gets the operators and name for the pydot graph."""
if isinstance(operators_or_net, caffe2_pb2.NetDef):
operators = operators_or_net.op
if name is None:
name = operators_or_net.name
elif hasattr(operators_or_net, 'Proto'):
net = operators_or_net.Proto()
if not isinstance(net, caffe2_pb2.NetDef):
raise RuntimeError(
"Expecting NetDef, but got {}".format(type(net)))
operators = net.op
if name is None:
name = net.name
else:
operators = operators_or_net
if name is None:
name = "unnamed"
return operators, name
def _escape_label(name):
# json.dumps is poor man's escaping
return json.dumps(name)
def GetOpNodeProducer(append_output, **kwargs):
def ReallyGetOpNode(op, op_id):
if op.name:
node_name = '%s/%s (op#%d)' % (op.name, op.type, op_id)
else:
node_name = '%s (op#%d)' % (op.type, op_id)
if append_output:
for output_name in op.output:
node_name += '\n' + output_name
return pydot.Node(node_name, **kwargs)
return ReallyGetOpNode
def GetPydotGraph(
operators_or_net,
name=None,
rankdir='LR',
node_producer=None
):
if node_producer is None:
node_producer = GetOpNodeProducer(False, **OP_STYLE)
operators, name = _rectify_operator_and_name(operators_or_net, name)
graph = pydot.Dot(name, rankdir=rankdir)
pydot_nodes = {}
pydot_node_counts = defaultdict(int)
for op_id, op in enumerate(operators):
op_node = node_producer(op, op_id)
graph.add_node(op_node)
for input_name in op.input:
if input_name not in pydot_nodes:
input_node = pydot.Node(
_escape_label(
input_name + str(pydot_node_counts[input_name])),
label=_escape_label(input_name),
**BLOB_STYLE
)
pydot_nodes[input_name] = input_node
else:
input_node = pydot_nodes[input_name]
graph.add_node(input_node)
graph.add_edge(pydot.Edge(input_node, op_node))
for output_name in op.output:
if output_name in pydot_nodes:
                # we are overwriting an existing blob. need to update the count.
pydot_node_counts[output_name] += 1
output_node = pydot.Node(
_escape_label(
output_name + str(pydot_node_counts[output_name])),
label=_escape_label(output_name),
**BLOB_STYLE
)
pydot_nodes[output_name] = output_node
graph.add_node(output_node)
graph.add_edge(pydot.Edge(op_node, output_node))
return graph
def GetPydotGraphMinimal(
operators_or_net,
name=None,
rankdir='LR',
minimal_dependency=False,
node_producer=None,
):
"""Different from GetPydotGraph, hide all blob nodes and only show op nodes.
If minimal_dependency is set as well, for each op, we will only draw the
edges to the minimal necessary ancestors. For example, if op c depends on
op a and b, and op b depends on a, then only the edge b->c will be drawn
because a->c will be implied.
"""
if node_producer is None:
node_producer = GetOpNodeProducer(False, **OP_STYLE)
operators, name = _rectify_operator_and_name(operators_or_net, name)
graph = pydot.Dot(name, rankdir=rankdir)
# blob_parents maps each blob name to its generating op.
blob_parents = {}
# op_ancestry records the ancestors of each op.
op_ancestry = defaultdict(set)
for op_id, op in enumerate(operators):
op_node = node_producer(op, op_id)
graph.add_node(op_node)
# Get parents, and set up op ancestry.
parents = [
blob_parents[input_name] for input_name in op.input
if input_name in blob_parents
]
op_ancestry[op_node].update(parents)
for node in parents:
op_ancestry[op_node].update(op_ancestry[node])
if minimal_dependency:
# only add nodes that do not have transitive ancestry
for node in parents:
if all(
[node not in op_ancestry[other_node]
for other_node in parents]
):
graph.add_edge(pydot.Edge(node, op_node))
else:
# Add all parents to the graph.
for node in parents:
graph.add_edge(pydot.Edge(node, op_node))
# Update blob_parents to reflect that this op created the blobs.
for output_name in op.output:
blob_parents[output_name] = op_node
return graph
def GetOperatorMapForPlan(plan_def):
operator_map = {}
for net_id, net in enumerate(plan_def.network):
if net.HasField('name'):
operator_map[plan_def.name + "_" + net.name] = net.op
else:
operator_map[plan_def.name + "_network_%d" % net_id] = net.op
return operator_map
def _draw_nets(nets, g):
nodes = []
for i, net in enumerate(nets):
nodes.append(pydot.Node(_escape_label(net)))
g.add_node(nodes[-1])
if i > 0:
g.add_edge(pydot.Edge(nodes[-2], nodes[-1]))
return nodes
def _draw_steps(steps, g, skip_step_edges=False): # noqa
kMaxParallelSteps = 3
def get_label():
label = [step.name + '\n']
if step.report_net:
label.append('Reporter: {}'.format(step.report_net))
if step.should_stop_blob:
label.append('Stopper: {}'.format(step.should_stop_blob))
if step.concurrent_substeps:
label.append('Concurrent')
if step.only_once:
label.append('Once')
return '\n'.join(label)
def substep_edge(start, end):
return pydot.Edge(start, end, arrowhead='dot', style='dashed')
nodes = []
for i, step in enumerate(steps):
parallel = step.concurrent_substeps
nodes.append(pydot.Node(_escape_label(get_label()), **OP_STYLE))
g.add_node(nodes[-1])
if i > 0 and not skip_step_edges:
g.add_edge(pydot.Edge(nodes[-2], nodes[-1]))
if step.network:
sub_nodes = _draw_nets(step.network, g)
elif step.substep:
if parallel:
sub_nodes = _draw_steps(
step.substep[:kMaxParallelSteps], g, skip_step_edges=True)
else:
sub_nodes = _draw_steps(step.substep, g)
else:
raise ValueError('invalid step')
if parallel:
for sn in sub_nodes:
g.add_edge(substep_edge(nodes[-1], sn))
if len(step.substep) > kMaxParallelSteps:
ellipsis = pydot.Node('{} more steps'.format(
len(step.substep) - kMaxParallelSteps), **OP_STYLE)
g.add_node(ellipsis)
g.add_edge(substep_edge(nodes[-1], ellipsis))
else:
g.add_edge(substep_edge(nodes[-1], sub_nodes[0]))
return nodes
def GetPlanGraph(plan_def, name=None, rankdir='TB'):
graph = pydot.Dot(name, rankdir=rankdir)
_draw_steps(plan_def.execution_step, graph)
return graph
def GetGraphInJson(operators_or_net, output_filepath):
operators, _ = _rectify_operator_and_name(operators_or_net, None)
blob_strid_to_node_id = {}
node_name_counts = defaultdict(int)
nodes = []
edges = []
for op_id, op in enumerate(operators):
op_label = op.name + '/' + op.type if op.name else op.type
op_node_id = len(nodes)
nodes.append({
'id': op_node_id,
'label': op_label,
'op_id': op_id,
'type': 'op'
})
for input_name in op.input:
strid = _escape_label(
input_name + str(node_name_counts[input_name]))
if strid not in blob_strid_to_node_id:
input_node = {
'id': len(nodes),
'label': input_name,
'type': 'blob'
}
blob_strid_to_node_id[strid] = len(nodes)
nodes.append(input_node)
else:
input_node = nodes[blob_strid_to_node_id[strid]]
edges.append({
'source': blob_strid_to_node_id[strid],
'target': op_node_id
})
for output_name in op.output:
strid = _escape_label(
output_name + str(node_name_counts[output_name]))
if strid in blob_strid_to_node_id:
# we are overwriting an existing blob. need to update the count.
node_name_counts[output_name] += 1
strid = _escape_label(
output_name + str(node_name_counts[output_name]))
if strid not in blob_strid_to_node_id:
output_node = {
'id': len(nodes),
'label': output_name,
'type': 'blob'
}
blob_strid_to_node_id[strid] = len(nodes)
nodes.append(output_node)
edges.append({
'source': op_node_id,
'target': blob_strid_to_node_id[strid]
})
with open(output_filepath, 'w') as f:
json.dump({'nodes': nodes, 'edges': edges}, f)
# A dummy, minimal PNG image used by GetGraphPngSafe as a placeholder when
# rendering fails to run.
_DummyPngImage = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00'
b'\x01\x01\x00\x00\x00\x007n\xf9$\x00\x00\x00\nIDATx\x9cc`\x00\x00'
b'\x00\x02\x00\x01H\xaf\xa4q\x00\x00\x00\x00IEND\xaeB`\x82')
def GetGraphPngSafe(func, *args, **kwargs):
"""
    Invokes `func` (e.g. GetPydotGraph) with args. If anything fails, returns
    an empty image instead of raising an exception.
"""
try:
graph = func(*args, **kwargs)
if not isinstance(graph, pydot.Dot):
raise ValueError("func is expected to return pydot.Dot")
return graph.create_png()
except Exception as e:
logger.error("Failed to draw graph: {}".format(e))
return _DummyPngImage
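# An added usage sketch (not part of the original module): render a net to a
# PNG, falling back to the dummy image above if pydot/graphviz fail.
#
#   from caffe2.python import core
#   net = core.Net('drawer_demo')
#   net.Relu(['x'], ['y'])
#   png = GetGraphPngSafe(GetPydotGraph, net.Proto(), rankdir='LR')
#   with open('drawer_demo.png', 'wb') as f:
#       f.write(png)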
def main():
parser = argparse.ArgumentParser(description="Caffe2 net drawer.")
parser.add_argument(
"--input",
type=str, required=True,
help="The input protobuf file."
)
parser.add_argument(
"--output_prefix",
type=str, default="",
help="The prefix to be added to the output filename."
)
parser.add_argument(
"--minimal", action="store_true",
help="If set, produce a minimal visualization."
)
parser.add_argument(
"--minimal_dependency", action="store_true",
help="If set, only draw minimal dependency."
)
parser.add_argument(
"--append_output", action="store_true",
help="If set, append the output blobs to the operator names.")
parser.add_argument(
"--rankdir", type=str, default="LR",
help="The rank direction of the pydot graph."
)
args = parser.parse_args()
with open(args.input, 'r') as fid:
content = fid.read()
graphs = utils.GetContentFromProtoString(
content, {
caffe2_pb2.PlanDef: lambda x: GetOperatorMapForPlan(x),
caffe2_pb2.NetDef: lambda x: {x.name: x.op},
}
)
for key, operators in graphs.items():
if args.minimal:
graph = GetPydotGraphMinimal(
operators,
name=key,
rankdir=args.rankdir,
node_producer=GetOpNodeProducer(args.append_output, **OP_STYLE),
minimal_dependency=args.minimal_dependency)
else:
graph = GetPydotGraph(
operators,
name=key,
rankdir=args.rankdir,
node_producer=GetOpNodeProducer(args.append_output, **OP_STYLE))
filename = args.output_prefix + graph.get_name() + '.dot'
graph.write(filename, format='raw')
pdf_filename = filename[:-3] + 'pdf'
try:
graph.write_pdf(pdf_filename)
except Exception:
print(
'Error when writing out the pdf file. Pydot requires graphviz '
'to convert dot files to pdf, and you may not have installed '
'graphviz. On ubuntu this can usually be installed with "sudo '
'apt-get install graphviz". We have generated the .dot file '
'but will not be able to generate pdf file for now.'
)
if __name__ == '__main__':
main()
|
## @package extension_loader
# Module caffe2.python.extension_loader
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import ctypes
import sys
_set_global_flags = (
hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'))
@contextlib.contextmanager
def DlopenGuard():
    if _set_global_flags:
        old_flags = sys.getdlopenflags()
        sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)
    try:
        yield
    finally:
        # Restore the original flags even if the guarded block raises.
        if _set_global_flags:
            sys.setdlopenflags(old_flags)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.dataio import ReaderWithLimit
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.schema import Struct, NewRecord, FeedRecord
from caffe2.python.session import LocalSession
from caffe2.python.task import TaskGroup
from caffe2.python.test_util import TestCase
from caffe2.python import core, workspace
import numpy as np
class TestReaderWithLimit(TestCase):
def test_reader_with_limit(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
""" 1. feed full dataset """
src_init = core.Net('src_init')
with core.NameScope('src'):
src_values = Struct(('label', np.array(range(100))))
src_blobs = NewRecord(src_init, src_values)
src_ds = Dataset(src_blobs)
FeedRecord(src_blobs, src_values, ws)
ws.run(src_init)
""" 2. Read with limit smaller than size of dataset """
dst_init = core.Net('dst_init')
with core.NameScope('dst'):
dst_ds = Dataset(src_values.clone_schema())
dst_ds.init_empty(dst_init)
ws.run(dst_init)
with TaskGroup() as tg:
reader = ReaderWithLimit(src_ds.reader(), num_iter=10)
pipe(reader, dst_ds.writer(), num_threads=8)
session.run(tg)
self.assertFalse(ws.blobs[str(reader.data_finished())].fetch())
self.assertEquals(
sorted(ws.blobs[str(dst_ds.content().label())].fetch()), range(10))
""" 3. Read with limit larger than size of dataset """
ws.run(dst_init)
with TaskGroup() as tg:
reader = ReaderWithLimit(src_ds.reader(), num_iter=110)
pipe(reader, dst_ds.writer(), num_threads=8)
session.run(tg)
self.assertEquals(
sorted(ws.blobs[str(dst_ds.content().label())].fetch()), range(100))
self.assertTrue(ws.blobs[str(reader.data_finished())].fetch())
""" 3. Read without counter """
ws.run(dst_init)
with TaskGroup() as tg:
reader = ReaderWithLimit(src_ds.reader(), num_iter=None)
pipe(reader, dst_ds.writer(), num_threads=8)
session.run(tg)
self.assertEquals(
sorted(ws.blobs[str(dst_ds.content().label())].fetch()), range(100))
self.assertTrue(ws.blobs[str(reader.data_finished())].fetch())
|
## @package dyndep
# Module caffe2.python.dyndep
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import os
from caffe2.python import core, extension_loader
def InitOpsLibrary(name):
"""Loads a dynamic library that contains custom operators into Caffe2.
    Since Caffe2 uses static variable registration, you can optionally load a
    separate .so file that contains custom operators and registers them with
    the caffe2 core binary. In C++, this is usually done either by declaring
    a dependency at compilation time, or via dlopen. This allows us to do the
    registration similarly on the Python side.
    Args:
        name: a path to a .so file, such as "my_custom_op.so". If the file
            does not exist, the call is simply ignored.
Returns:
None
"""
if not os.path.exists(name):
# Note(jiayq): if the name does not exist, instead of immediately
# failing we will simply print a warning, deferring failure to the
# time when an actual call is made.
print('Ignoring {} as it is not a valid file.'.format(name))
return
_init_impl(name)
_IMPORTED_DYNDEPS = set()
def GetImportedOpsLibraries():
return _IMPORTED_DYNDEPS
def _init_impl(path):
_IMPORTED_DYNDEPS.add(path)
with extension_loader.DlopenGuard():
ctypes.CDLL(path)
# reinitialize available ops
core.RefreshRegisteredOperators()
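# Illustrative usage sketch (the .so path below is a hypothetical
# placeholder):
#
#   from caffe2.python import dyndep
#   dyndep.InitOpsLibrary('/path/to/my_custom_op.so')
#   assert '/path/to/my_custom_op.so' in dyndep.GetImportedOpsLibraries()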
|
import numpy as np
import unittest
from caffe2.python import core, workspace, test_util
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
        The test code carries out a simple toy regression of the form
            y = 2.0 x1 + 1.5 x2 + 0.5
        by randomly generating gaussian inputs and computing the ground
        truth outputs in the net as well. It then uses standard SGD to
        train the parameters.
"""
workspace.ResetWorkspace()
init_net = core.Net("init")
W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
ITER = init_net.ConstantFill([], "ITER", shape=[1], value=0,
dtype=core.DataType.INT32)
train_net = core.Net("train")
X = train_net.GaussianFill([], "X", shape=[64, 2], mean=0.0, std=1.0)
Y_gt = X.FC([W_gt, B_gt], "Y_gt")
Y_pred = X.FC([W, B], "Y_pred")
dist = train_net.SquaredL2Distance([Y_gt, Y_pred], "dist")
loss = dist.AveragedLoss([], ["loss"])
        # Get gradients for all the computations above. Note that we don't
        # actually need the gradient of the Y_gt computation, but we'll just
        # leave it there. In many cases one would load X and Y from disk, so
        # there would be no operator computing the Y_gt input.
input_to_grad = train_net.AddGradientOperators([loss], skip=2)
# updates
train_net.Iter(ITER, ITER)
train_net.LearningRate(ITER, "LR", base_lr=-0.1,
policy="step", stepsize=20, gamma=0.9)
train_net.WeightedSum([W, ONE, input_to_grad[str(W)], LR], W)
train_net.WeightedSum([B, ONE, input_to_grad[str(B)], LR], B)
for blob in [loss, W, B]:
train_net.Print(blob, [])
# the CPU part.
plan = core.Plan("toy_regression")
plan.AddStep(core.ExecutionStep("init", init_net))
plan.AddStep(core.ExecutionStep("train", train_net, 200))
workspace.RunPlan(plan)
W_result = workspace.FetchBlob("W")
B_result = workspace.FetchBlob("B")
np.testing.assert_array_almost_equal(W_result, [[2.0, 1.5]], decimal=2)
np.testing.assert_array_almost_equal(B_result, [0.5], decimal=2)
workspace.ResetWorkspace()
if __name__ == '__main__':
unittest.main()
|
## @package data_workers
# Module caffe2.python.data_workers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'''
This module provides a python-land multithreaded data input mechanism
for Caffe2 nets.
Basic usage is as follows:
coordinator = data_workers.init_data_input_workers(
net,
["data", "label"],
my_fetch_fun,
batch_size=32,
input_source_name="train",
dont_rebatch=False
)
...
coordinator.start()
The first argument is the Caffe2 net (or model helper), and the second
argument is the list of input blobs that are to be fed.
Argument 'input_source_name' is used to distinguish different sources of data,
such as train or test data. This is to ensure the data does not get mixed up,
even when two nets share blobs.
To do the actual data loading, one defines a "fetcher function"
that has call signature
   my_fetch_fun(worker_id, batch_size)
The fetcher function returns a list of numpy arrays corresponding to the
different input blobs. In the example above, it would return two arrays, one
for the data blob and another for the labels. These arrays can have an
arbitrary number of elements (i.e. they do not need to match the batch size).
The batch size is provided to the function as a hint only.
For example, the fetcher function could download images from a remote service
or load random images from a directory on a file system.
Optionally, one can define an "init function" that is called once before
the threads start, and has call signature:
   my_init_fun(data_coordinator, global_coordinator)
If dont_rebatch is set to True, the data input is not batched into equal-sized
chunks; instead, the data provided by the fetchers is used directly.
'batch_columns' can be used to specify which dimension is the batch dimension,
for each of the inputs. Default is 0 for all inputs.
For a dummy example, see the data_workers_test unit test; a minimal fetcher
sketch is also included at the end of this module.
Note that for data_parallel_models, init_data_input_workers will be called
for each GPU. Note that the 'coordinator' returned by the function is the
same each time.
'''
import Queue
import logging
import threading
import atexit
import numpy as np
import time
import collections
from caffe2.python import workspace, core, scope, utils
from caffe2.proto import caffe2_pb2
log = logging.getLogger("data_workers")
log.setLevel(logging.INFO)
LOG_INT_SECS = 60
def get_worker_ids(num_workers):
return range(0, num_workers)
def init_data_input_workers(
net,
input_blob_names,
fetch_fun,
batch_size,
num_worker_threads=2,
input_source_name="train",
max_buffered_batches=800,
init_fun=None,
external_loggers=None,
dont_rebatch=False,
batch_columns=None,
):
global global_coordinator
device_option = scope.CurrentDeviceScope()
if (device_option is None):
device_option = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)
# Create coordinator object
coordinator = DataInputCoordinator(
net,
input_blob_names,
batch_size,
device_option,
scope.CurrentNameScope(),
input_source_name,
global_coordinator.get_queue(input_source_name, max_buffered_batches),
init_fun=init_fun,
external_loggers=external_loggers,
dont_rebatch=dont_rebatch,
batch_columns=batch_columns,
)
# Launch fetch worker threads
worker_ids = [
global_coordinator.get_new_worker_id()
for i in range(num_worker_threads)
]
workers = [
threading.Thread(
target=fetcher,
name="data_workers fetcher id {}".format(worker_id),
args=[coordinator, worker_id, fetch_fun, batch_size, input_blob_names],
) for worker_id in worker_ids
]
workers.append(threading.Thread(
target=enqueuer,
name="Enqueuer {} {}".format(input_source_name, scope.CurrentNameScope()),
args=[coordinator]))
coordinator._workers = workers
global_coordinator.add(coordinator)
return global_coordinator
class DataInputCoordinator(object):
def __init__(self, net, input_blob_names, batch_size,
device_option, namescope, input_source_name, queue,
init_fun=None, external_loggers=None, dont_rebatch=False,
batch_columns=None):
self._counter = 0
self._input_blob_names = input_blob_names
self._batch_size = batch_size
self._internal_queue = queue
self._scratch_blobs = set()
self._queues = []
self._device_option = device_option
self._namescope = namescope
self._active = True
self._started = False
self._workers = []
self._input_source_name = input_source_name
self._c2_queue_capacity = 4
self._create_caffe2_queues(net)
self._create_caffe2_ops(net)
self._inputs = 0
self._prev_seconds = 0
self._last_warning = time.time()
self._init_fun = init_fun
self._metrics = collections.defaultdict(lambda: 0)
self._external_loggers = external_loggers
self._dont_rebatch = dont_rebatch
if batch_columns is None:
batch_columns = [0 for _ in input_blob_names]
self._batch_columns = batch_columns
def is_active(self):
return self._active
def init(self, global_coordinator):
if self._init_fun and not self._started:
self._init_fun(self, global_coordinator)
def _start(self):
if self._started:
return
self._active = True
self._started = True
self._inputs = 0
self._prev_seconds = time.time()
for w in self._workers:
w.daemon = True
w.start()
def _stop(self, reason=None):
try:
self._active = False
if reason is not None:
log.error("Data input failed due to an error: {}".format(reason))
for q in self._queues:
workspace.RunOperatorOnce(
core.CreateOperator("CloseBlobsQueue", [q], [])
)
# Release memory for the scratch blobs
if len(self._scratch_blobs) > 0:
utils.ResetBlobs(self._scratch_blobs)
self._started = False
finally:
self._log_inputs_per_interval(0, force=True)
def _wait_finish(self):
print("Wait for workers to die: {}".format(self._input_source_name))
for w in self._workers:
if w != threading.current_thread():
w.join(5.0) # don't wait forever, thread may be blocked in i/o
success = True
for w in self._workers:
if w.isAlive():
print("Worker {} failed to close while waiting".format(w))
success = False
print("All workers terminated: {}".format(success))
return success
def _get(self):
while self.is_active():
try:
return self._internal_queue.get(block=True, timeout=0.5)
except Queue.Empty:
continue
return None
def put(self, chunk):
if len(chunk) == 0:
print("Worker provided zero length input")
return
while self.is_active():
try:
qsize = self._internal_queue.qsize()
if qsize < 2 and (time.time() - self._last_warning) > LOG_INT_SECS:
print("Warning, data loading lagging behind: " +
"name={}".format(qsize, self._input_source_name))
self._last_warning = time.time()
self._counter += 1
self._internal_queue.put(chunk, block=True, timeout=0.5)
self._log_inputs_per_interval(chunk[0].shape[0])
return
except Queue.Full:
log.debug("Queue full: stalling fetchers...")
continue
def _enqueue_batch_direct(self):
data = self._get()
if data is None:
return
if self.is_active():
for b, q, c in zip(self._input_blob_names, self._queues, data):
self._enqueue(b, q, c)
def _enqueue_batch(self):
'''
        This pulls data from the python-side queue and collects it
        into batch-sized pieces, unless dont_rebatch is set to True.
'''
if self._dont_rebatch:
self._enqueue_batch_direct()
return
cur_batch = [np.array([]) for d in self._input_blob_names]
first_batch_col = self._batch_columns[0]
# Collect data until we have a full batch size
while (
cur_batch[0].shape[0] == 0 or
cur_batch[0].shape[first_batch_col] < self._batch_size
) and self.is_active():
chunk = self._get()
if chunk is None:
continue
for j, chunk_elem in enumerate(chunk):
if cur_batch[j].shape[0] == 0:
cur_batch[j] = chunk_elem.copy()
else:
cur_batch[j] = np.append(
cur_batch[j], chunk_elem, axis=self._batch_columns[j]
)
start_time = time.time()
try:
# Return data over the batch size back to queue
if cur_batch[0].shape[0] > 0 and cur_batch[0].shape[
first_batch_col
] > self._batch_size:
leftover = []
trimmed_batch = []
for j, b in enumerate(cur_batch):
[c, l] = np.split(
b, [self._batch_size], axis=self._batch_columns[j]
)
leftover.append(l)
trimmed_batch.append(c)
cur_batch = trimmed_batch
try:
self._internal_queue.put(leftover, block=False)
except Queue.Full:
pass
assert cur_batch[0].shape[first_batch_col] == self._batch_size
if self.is_active():
for b, q, c in zip(
self._input_blob_names, self._queues, cur_batch
):
self._enqueue(b, q, c)
finally:
self.put_metric('enqueue_time', time.time() - start_time)
def _enqueue(self, blob_name, queue, data_arr):
'''
Enqueue the correctly sized batch arrays to Caffe2's queue.
'''
scratch_name = self._namescope + blob_name + \
"_scratch_" + self._input_source_name
blob = core.BlobReference(scratch_name)
status = core.BlobReference(scratch_name + "_status")
workspace.FeedBlob(
blob,
data_arr,
device_option=self._device_option
)
self._scratch_blobs.add(blob)
self._scratch_blobs.add(status)
op = core.CreateOperator(
"SafeEnqueueBlobs",
[queue, blob],
[blob, status],
device_option=self._device_option
)
workspace.RunOperatorOnce(op)
def _create_caffe2_queues(self, net):
'''
        Creates queues on the caffe2 side.
'''
def create_queue(queue_name, num_blobs, capacity):
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateBlobsQueue",
[], [queue_name],
                    num_blobs=num_blobs,
capacity=capacity))
return core.ScopedBlobReference(queue_name)
for blob_name in self._input_blob_names:
qname = blob_name + "_c2queue" + "_" + self._input_source_name
q = create_queue(
qname, num_blobs=1, capacity=self._c2_queue_capacity
)
self._queues.append(q)
def _create_caffe2_ops(self, net):
'''
        Creates dequeue ops on the caffe2 side.
'''
for q, blob_name in zip(self._queues, self._input_blob_names):
# Add operator to the Caffe2 network to dequeue
net.DequeueBlobs(q, blob_name)
def _log_inputs_per_interval(self, inputs, force=False):
self._inputs += inputs
current_seconds = time.time()
delta_seconds = current_seconds - self._prev_seconds
if delta_seconds >= LOG_INT_SECS or force:
inputs_per_sec = int(self._inputs / delta_seconds)
qsize = self._internal_queue.qsize()
print("{}/{}: {} inputs/sec".format(
self._input_source_name,
self._namescope,
inputs_per_sec,
))
print("-- queue: {} batches".format(qsize))
# log and reset perf metrics
self.put_metric('inputs_per_sec', inputs_per_sec, False)
self.put_metric('queue_size', qsize, False)
self.put_metric('time_elapsed', delta_seconds, False)
self._log(self._metrics)
self._reset_metrics()
self._inputs = 0
self._prev_seconds = current_seconds
def _log(self, metrics):
if not self._external_loggers:
return
for logger in self._external_loggers:
try:
logger.log(metrics)
except Exception as e:
print("Failed to call ExternalLogger: {}".format(e))
def put_metric(self, key, value, count=True):
self._metrics[key] += value
if count:
count_key = '{}_count'.format(key)
self._metrics[count_key] += 1
def _reset_metrics(self):
self._metrics = collections.defaultdict(lambda: 0)
class GlobalCoordinator(object):
def __init__(self):
self._coordinators = []
self._fetcher_id_seq = 0
self._worker_ids = []
self._queues = {}
self.register_shutdown_handler()
def add(self, coordinator):
self._coordinators.append(coordinator)
def get_new_worker_id(self):
worker_id = self._fetcher_id_seq
self._worker_ids.append(worker_id)
self._fetcher_id_seq += 1
return worker_id
def get_worker_ids(self):
return self._worker_ids
def get_queue(self, queue_name, max_buffered_batches):
assert isinstance(max_buffered_batches, int)
if queue_name not in self._queues:
self._queues[queue_name] = Queue.Queue(maxsize=max_buffered_batches)
return self._queues[queue_name]
def start(self):
for c in self._coordinators:
c.init(self)
c._start()
def reset_data_input(self, namescope, name, net, batch_size):
log.info("Reset data input {}, batch size {}: ".format(name, batch_size))
for c in self._coordinators:
if c._input_source_name == name and c._namescope == namescope:
c._batch_size = batch_size
c._create_caffe2_ops(net)
def stop(self):
all_success = True
for c in self._coordinators:
c._stop()
for c in self._coordinators:
success = c._wait_finish()
all_success = all_success and success
self._coordinators = []
return all_success
def stop_coordinator(self, input_source_name):
'''
Stop a specific coordinator
'''
for c in self._coordinators:
if c._input_source_name == input_source_name:
c._stop()
c._wait_finish()
self._coordinators = [
c for c in self._coordinators
if c._input_source_name != input_source_name
]
def register_shutdown_handler(self):
def cleanup():
self.stop()
atexit.register(cleanup)
global_coordinator = GlobalCoordinator()
def fetcher(coordinator, worker_id, fetch_fun, batch_size, input_blob_names):
while coordinator.is_active():
start_time = time.time()
try:
input_data = fetch_fun(worker_id, batch_size)
if input_data is None:
print("Fetcher function returned None")
continue
assert len(input_data) == len(input_blob_names), \
"Expecting data blob for each input"
for d in input_data:
assert isinstance(d, np.ndarray), \
"Fetcher function must return a numpy array"
if not coordinator._dont_rebatch:
j = 1
for d in input_data[1:]:
                    assert d.shape[coordinator._batch_columns[j]] == \
                        input_data[0].shape[coordinator._batch_columns[0]], \
                        "Each returned input must have equal number of samples"
j += 1
coordinator.put(input_data)
except Exception as e:
print(e)
logging.exception("Exception in fetcher", e)
coordinator._stop("Exception in fetcher {}: {}".format(
worker_id, e
))
finally:
coordinator.put_metric('fetcher_time', time.time() - start_time)
def enqueuer(coordinator):
while coordinator.is_active():
coordinator._enqueue_batch()
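# Illustrative fetcher sketch, referenced by the module docstring. The blob
# layout below (3x32x32 "images" plus integer labels) is a hypothetical
# example; any list of numpy arrays, one per input blob, is acceptable.
def _example_fetch_fun(worker_id, batch_size):
    data = np.random.rand(batch_size, 3, 32, 32).astype(np.float32)
    labels = np.random.randint(0, 10, size=batch_size).astype(np.int32)
    return [data, labels]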
|
## @package data_parallel_model
# Module caffe2.python.data_parallel_model
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
import logging
import copy
from caffe2.python import model_helper, dyndep, scope, workspace, core, memonger
from caffe2.proto import caffe2_pb2
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nccl:nccl_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops_gpu")
log = logging.getLogger("data_parallel_model")
log.setLevel(logging.INFO)
def Parallelize_GPU(
model_helper_obj,
input_builder_fun,
forward_pass_builder_fun,
param_update_builder_fun,
devices=range(0, workspace.NumCudaDevices()),
rendezvous=None,
net_type='dag',
broadcast_computed_params=True,
optimize_gradient_memory=False,
use_nccl=False,
):
'''
Function to create a model that can run on many GPUs.
model_helper_obj: an object of ModelHelper, such as CNNModelHelper
input_builder_fun:
Function that adds the input operators
                Note: Remember to instantiate the reader outside of this
                function so all GPUs share the same reader object.
                Signature: input_builder_fun(model)
    forward_pass_builder_fun:
                Function to add the operators to the model.
                Must return a list of loss-blob references that
                are used to build the gradients. A loss-scale parameter
                is passed in, as you should scale the loss of your model
                by 1.0 / the total number of GPUs.
                Signature: forward_pass_builder_fun(model, loss_scale)
    param_update_builder_fun:
                Function that adds operators that are run after
                the gradients have been computed, such as updating
                the weights and applying weight decay.
                Signature: param_update_builder_fun(model)
    devices:    List of GPU ids, such as [0, 1, 2, 3].
    rendezvous: used for rendezvous in distributed computation; if None,
                then only one node is used. To create a rendezvous,
                use <TBD>.
    net_type:   Network execution type, e.g. 'dag'.
    optimize_gradient_memory: whether to apply 'memonger' to share blobs
                in gradient computation to reduce the memory footprint.
    A minimal usage sketch appears after RunNet() below.
'''
log.info("Parallelizing model for devices: {}".format(devices))
extra_workers = 8 if rendezvous is not None else 0 # best-guess
model_helper_obj.net.Proto().num_workers = len(devices) * 4 + extra_workers
model_helper_obj.net.Proto().type = net_type
# Store some information in the model -- a bit ugly
model_helper_obj._devices = devices
model_helper_obj._rendezvous = rendezvous
model_helper_obj._grad_names = []
assert isinstance(model_helper_obj, model_helper.ModelHelper)
# Keep track of params that were in the model before: they are not
# data parallel, so we need to handle them separately
non_datapar_params = copy.copy(model_helper_obj.params)
# Add input and model
log.info("Create input and model training operators")
losses_by_gpu = {}
num_shards = 1 if rendezvous is None else rendezvous['num_shards']
loss_scale = 1.0 / (len(devices) * num_shards)
for device in devices:
device_opt = core.DeviceOption(caffe2_pb2.CUDA, device)
with core.DeviceScope(device_opt):
with core.NameScope("gpu_{}".format(device)):
log.info("Model for GPU: {}".format(device))
input_builder_fun(model_helper_obj)
losses = forward_pass_builder_fun(model_helper_obj, loss_scale)
# Losses are not needed for test net
if param_update_builder_fun is not None:
assert isinstance(losses, list), \
'Model builder function must return list of loss blobs'
for loss in losses:
assert isinstance(loss, core.BlobReference), \
'Model builder func must return list of loss blobs'
losses_by_gpu[device] = losses
_ValidateParams(model_helper_obj.params)
# Create parameter map
model_helper_obj._device_grouped_blobs =\
_GroupByDevice(devices, model_helper_obj.params, non_datapar_params)
# computed params
computed_params_grouped =\
_GroupByDevice(devices, model_helper_obj.computed_params, [])
model_helper_obj._device_grouped_blobs.update(computed_params_grouped)
model_helper_obj._param_names =\
model_helper_obj._device_grouped_blobs.keys()
model_helper_obj._computed_param_names = computed_params_grouped.keys()
if (param_update_builder_fun is None):
log.info("Parameter update function not defined --> only forward")
_InferBlobDevice(model_helper_obj)
return
log.info("Adding gradient operators")
_AddGradientOperators(devices, model_helper_obj, losses_by_gpu)
_ValidateParams(model_helper_obj.params)
# Group gradients by device and register to blob lookup
param_to_grad = model_helper_obj.param_to_grad
grads_ordered = [param_to_grad[p] for p in
model_helper_obj.params if p in param_to_grad]
non_datapar_grads = [param_to_grad[p] for p in non_datapar_params]
gradients_grouped = _GroupByDevice(
devices,
grads_ordered,
non_datapar_grads
)
model_helper_obj._device_grouped_blobs.update(gradients_grouped)
model_helper_obj._grad_names = gradients_grouped.keys()
model_helper_obj._losses_by_gpu = losses_by_gpu
_InferBlobDevice(model_helper_obj)
log.info("Add gradient all-reduces for SyncSGD")
if broadcast_computed_params:
_BroadcastComputedParams(devices, model_helper_obj, rendezvous)
if len(model_helper_obj._grad_names) > 0:
_AllReduceGradients(devices, model_helper_obj, rendezvous, use_nccl)
else:
log.info("NOTE: Param builder function did not create any parameters.")
log.info("Post-iteration operators for updating params")
num_shards = 1 if rendezvous is None else rendezvous['num_shards']
# The following check is necessary for ring reduce to work
if rendezvous is not None:
assert num_shards > 1, \
"Please use more than one shard for distributed training"
for device in devices:
device_opt = core.DeviceOption(caffe2_pb2.CUDA, device)
with core.DeviceScope(device_opt):
with core.NameScope("gpu_{}".format(device)):
param_update_builder_fun(model_helper_obj)
(sync_blobs, sync_names) = _ComputeBlobsToSync(model_helper_obj)
sync_blobs_grouped = _GroupByDevice(
devices,
sync_blobs,
[],
)
model_helper_obj._device_grouped_blobs.update(sync_blobs_grouped)
_InferBlobDevice(model_helper_obj)
_AnalyzeOperators(model_helper_obj)
# Configure dagnet to run with only one worker on the first iteration,
# to prevent concurrency problems with allocs and nccl.
arg = model_helper_obj.Proto().arg.add()
arg.name = "first_iter_only_one_worker"
arg.i = 1
# Add initial parameter syncs
log.info("Add initial parameter sync")
if (rendezvous is not None):
_AddDistributedParameterSync(
devices,
model_helper_obj,
model_helper_obj.param_init_net,
model_helper_obj.param_init_net,
rendezvous,
sync_names,
)
_SyncParams(
devices, model_helper_obj, model_helper_obj.param_init_net, sync_names
)
if optimize_gradient_memory:
_OptimizeGradientMemoryDEPRECATED(
model_helper_obj, losses_by_gpu, devices
)
model_helper_obj._data_parallel_model_init_nets = [
model_helper_obj.param_init_net,
]
model_helper_obj._data_parallel_model_nets = [model_helper_obj.net]
def Parallelize_GPU_BMUF(
model_helper_obj,
input_builder_fun,
forward_pass_builder_fun,
param_update_builder_fun,
block_learning_rate=1.0,
block_momentum=None,
devices=range(0, workspace.NumCudaDevices()),
net_type='dag',
master_gpu=None,
):
'''
    Function to create a model that runs on many GPUs and creates a net for
    parameter updates that can be run independently for a number of
    iterations, followed by another net that runs once to compute the final
    parameter updates according to the block-wise model-update filtering
    rule described in: Scalable Training of Deep Learning Machines by
    Incremental Block Training with Intra-block Parallel Optimization and
    Blockwise Model-Update Filtering (ICASSP 2016).
'''
assert isinstance(model_helper_obj, model_helper.ModelHelper)
if master_gpu is None:
master_gpu = devices[0]
model_helper_obj._devices = devices
master_gpu_opt = core.DeviceOption(caffe2_pb2.CUDA, master_gpu)
num_workers = len(devices)
loss_scale = 1.0 / num_workers
if block_momentum is None:
block_momentum = 1.0 - 1.0 / num_workers
model_helper_obj.net.Proto().num_workers = num_workers
model_helper_obj.net.Proto().type = net_type
    # A net for initializing global model parameters. It's called once in the
    # same step as the net parameter initialization.
model_helper_obj._global_model_init_net = core.Net('global_model_init')
model_helper_obj._global_model_init_net.Proto().type = net_type
model_helper_obj._global_model_init_net.Proto().num_workers = num_workers
    # A net for computing final parameter updates. It will run once after
    # the main net (local model updates) has run `num_local_iterations` times.
model_helper_obj._global_model_param_updates_net = core.Net('global_model')
model_helper_obj._global_model_param_updates_net.Proto().type = net_type
model_helper_obj._global_model_param_updates_net.Proto().num_workers = \
num_workers
def _v(param):
return "{}_v".format(param)
def _g(param):
return "{}_g".format(param)
# Keep track of params that were in the model before: they are not
# data parallel, so we need to handle them separately
non_datapar_params = copy.copy(model_helper_obj.params)
model_helper_obj._losses_by_gpu = {}
def _InitializeModels(gpu_id):
input_builder_fun(model_helper_obj)
loss = forward_pass_builder_fun(model_helper_obj, loss_scale)
model_helper_obj._losses_by_gpu[gpu_id] = loss
_ForEachGPU(devices, _InitializeModels, scoped=True)
model_helper_obj._device_grouped_blobs =\
_GroupByDevice(devices, model_helper_obj.params, non_datapar_params)
_AddGradientOperators(
devices, model_helper_obj, model_helper_obj._losses_by_gpu
)
_InferBlobDevice(model_helper_obj)
def _InitializeParamUpdate(gpu_id):
param_update_builder_fun(model_helper_obj)
_ForEachGPU(devices, _InitializeParamUpdate, scoped=True)
# (Step-0) Initialize momentum parameters on master GPU.
for param_name in model_helper_obj._device_grouped_blobs.keys():
param = model_helper_obj._device_grouped_blobs[param_name][master_gpu]
with core.DeviceScope(master_gpu_opt):
model_helper_obj._global_model_init_net.ConstantFill(
param, _v(param), value=0.0
)
model_helper_obj._global_model_init_net.Copy(param, _g(param))
# (Step-1) Update models for num_local_iterations.
    # (Step-2) Compute the post-local-updates average of the params.
    # Sum model params across GPUs and store the results in the param_avg blob.
for param_name in model_helper_obj._device_grouped_blobs.keys():
with core.DeviceScope(master_gpu_opt):
_AllReduce(
devices, model_helper_obj,
model_helper_obj._global_model_param_updates_net,
param_name
)
    # (Step-3) Update momentum params:
    #   param_v = block_momentum * param_v
    #           + block_learning_rate * (param_avg - param)
    #   param = param + param_v
for param_name in model_helper_obj._device_grouped_blobs.keys():
param = model_helper_obj._device_grouped_blobs[param_name][master_gpu]
with core.DeviceScope(master_gpu_opt):
# TODO(ataei) : Stop building the graph here to get model average ?
model_helper_obj._global_model_param_updates_net.Scale(
param, param, scale=1.0 / num_workers
)
model_helper_obj._global_model_param_updates_net.Sub(
[param, _g(param)], param
)
model_helper_obj._global_model_param_updates_net.Scale(
param, param, scale=block_learning_rate
)
model_helper_obj._global_model_param_updates_net.Scale(
_v(param), _v(param), scale=block_momentum
)
model_helper_obj._global_model_param_updates_net.Add(
[_v(param), param], _v(param)
)
model_helper_obj._global_model_param_updates_net.Add(
[_g(param), _v(param)], _g(param)
)
model_helper_obj._global_model_param_updates_net.Copy(
_g(param), param
)
_Broadcast(
devices, model_helper_obj,
model_helper_obj._global_model_param_updates_net,
param_name
)
model_helper_obj._data_parallel_model_init_nets = [
model_helper_obj.param_init_net,
model_helper_obj._global_model_init_net
]
model_helper_obj._data_parallel_model_nets = [
model_helper_obj.net,
(model_helper_obj._global_model_param_updates_net, 1)
]
def RunInitNet(model):
for init_net in model._data_parallel_model_init_nets:
workspace.RunNetOnce(init_net)
for net_iters in model._data_parallel_model_nets:
if isinstance(net_iters, tuple):
workspace.CreateNet(net_iters[0])
else:
workspace.CreateNet(net_iters)
def RunNet(model, num_iterations):
for net_iter in model._data_parallel_model_nets:
if isinstance(net_iter, tuple):
workspace.RunNet(net_iter[0].Proto().name, net_iter[1])
else:
workspace.RunNet(model.net.Proto().name, num_iterations)
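# Illustrative sketch of the intended call pattern. The three builder
# callables and the device list are hypothetical user-supplied values that
# must match the signatures documented in Parallelize_GPU above.
def _example_parallelize_gpu(model, input_fun, forward_fun, update_fun):
    Parallelize_GPU(
        model,
        input_builder_fun=input_fun,
        forward_pass_builder_fun=forward_fun,
        param_update_builder_fun=update_fun,
        devices=[0, 1],
    )
    RunInitNet(model)
    RunNet(model, num_iterations=100)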
def _ForEachGPU(gpu_ids, f, scoped=False, *args, **kwargs):
for gpu_id in gpu_ids:
device_opt = core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
with core.DeviceScope(device_opt):
if scoped:
with core.NameScope("gpu_{}".format(gpu_id)):
f(gpu_id, *args, **kwargs)
else:
f(gpu_id, *args, **kwargs)
def _AddGradientOperators(devices, model, losses_by_gpu):
def create_grad(lossp):
return model.ConstantFill(lossp, str(lossp) + "_grad", value=1.0)
loss_grad = {}
# Explicitly need to create gradients on each GPU
for gpu_id in devices:
device = core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
with core.DeviceScope(device):
for l in losses_by_gpu[gpu_id]:
lg = create_grad(l)
loss_grad[str(l)] = str(lg)
model.AddGradientOperators(loss_grad)
def ExtractPredictorNet(model, inputs, outputs, device):
'''
Returns (net, params) that can be exported to be used as a prediction
net.
'''
master_device = model._devices[0]
prefix = "gpu_{}/".format(master_device)
prefix_inputs = [prefix + str(b) for b in inputs]
prefix_outputs = [prefix + str(b) for b in outputs]
predictor_net = model_helper.ExtractPredictorNet(
net_proto=model.net.Proto(),
input_blobs=prefix_inputs,
output_blobs=prefix_outputs,
device=device,
renames={
a: b
for (a, b) in zip(prefix_inputs + prefix_outputs, inputs + outputs)
}
)
params = set(predictor_net.Proto().external_input) - set(inputs)
return (predictor_net, params)
def GetCheckpointParams(model):
'''
    Returns a set of blobs that are needed for a complete checkpoint.
    They are the blobs for the first GPU, plus the iteration blobs.
'''
(all_blobs, _) = _ComputeBlobsToSync(model)
return {
b for b in all_blobs
if str(b).startswith("gpu_{}/".format(model._devices[0]))}
def FinalizeAfterCheckpoint(model, blobs=None):
'''
This function should be called after loading parameters from a
checkpoint / initial parameters file.
'''
if not hasattr(model, "_checkpoint_net"):
if blobs is None:
(_, uniq_blob_names) = _ComputeBlobsToSync(model)
else:
uniq_blob_names = [stripParamName(p) for p in blobs]
# Synchronize to the blob lookup map, as the provided
        # blobs might include non-parameters, such as momentum blobs.
log.info("Creating checkpoint synchronization net")
devices = model.GetDevices()
for name in uniq_blob_names:
if name not in model._device_grouped_blobs:
grouped = {
d:
core.BlobReference("gpu_{}{}{}".format(
d,
scope._NAMESCOPE_SEPARATOR,
name)
) for d in devices}
model._device_grouped_blobs[name] = grouped
model._checkpoint_net = core.Net("checkpoint_sync_net")
model._checkpoint_net.RunAllOnGPU()
if (model._rendezvous is not None):
checkpoint_init_net = core.Net("checkpoint_init_net")
checkpoint_init_net.RunAllOnGPU()
_AddDistributedParameterSync(
devices,
model,
checkpoint_init_net,
model._checkpoint_net,
model._rendezvous,
uniq_blob_names,
)
workspace.RunNetOnce(checkpoint_init_net)
# Setup sync of initial params
_SyncParams(devices, model, model._checkpoint_net, uniq_blob_names)
workspace.CreateNet(model._checkpoint_net)
# Run the sync
log.info("Run checkpoint net")
workspace.RunNet(model._checkpoint_net.Proto().name)
def _Broadcast(devices, model, net, param):
# TODO(akyrola): replace with NCCLBroadcast when it's working
    # Copy params from gpu_0 to the others
master_gpu = devices[0]
for gpu_idx in devices[1:]:
if _IsGPUBlob(model, param):
device_opt = core.DeviceOption(caffe2_pb2.CUDA, gpu_idx)
else:
device_opt = core.DeviceOption(caffe2_pb2.CPU, 0)
with core.DeviceScope(device_opt):
net.Copy(
model._device_grouped_blobs[param][master_gpu],
model._device_grouped_blobs[param][gpu_idx]
)
def _AllReduce(devices, model, net, param, use_nccl=False, control_input=None):
blobs_group = model._device_grouped_blobs[param].values()
if use_nccl:
model.NCCLAllreduce(
blobs_group, blobs_group, control_input=control_input
)
return
def sum2(d1i, d2i):
d1 = model._devices[d1i]
d2 = model._devices[d2i]
device_opt = core.DeviceOption(caffe2_pb2.CUDA, d1)
with core.DeviceScope(device_opt):
net.Sum(
[blobs_group[d1], blobs_group[d2]], [blobs_group[d1]],
name="dpm",
)
if len(devices) == 8:
# Special tree reduction for 8 gpus, TODO generalize like in muji.py
for j in range(4):
sum2(j * 2, j * 2 + 1)
for j in range(2):
sum2(j * 4, j * 4 + 2)
sum2(0, 4)
_Broadcast(devices, model, net, param)
elif len(devices) == 4:
sum2(0, 1)
sum2(2, 3)
sum2(0, 2)
_Broadcast(devices, model, net, param)
else:
net.Sum(blobs_group, blobs_group[0], name="dpm")
_Broadcast(devices, model, net, param)
def _SyncParams(devices, model, net, unique_param_names):
for param in unique_param_names:
_Broadcast(devices, model, net, param)
def _AddDistributedParameterSync(
devices,
model,
init_net,
net,
rendezvous,
uniq_param_names,
):
gpu_device_opt = core.DeviceOption(caffe2_pb2.CUDA, devices[0])
cpu_device_opt = core.DeviceOption(caffe2_pb2.CPU)
# Create a single common world for all broadcast operations.
# This is not a problem since they are executed sequentially.
comm_world = None
for param_name in sorted(uniq_param_names):
param = model._device_grouped_blobs[param_name][devices[0]]
def broadcast(comm_world, param):
if comm_world is None:
comm_world = init_net.CreateCommonWorld(
rendezvous['kv_handler'],
"broadcast_cw",
name=net.Proto().name + ".broadcast_cw_op",
size=rendezvous['num_shards'],
rank=rendezvous['shard_id'],
engine=rendezvous['engine'],
status_blob="createcw_broadcast_status",
)
net.Broadcast(
inputs=[comm_world, param],
outputs=[param],
engine=rendezvous['engine'],
status_blob="broadcast_{}_status".format(str(param)),
)
return comm_world
device_opt = gpu_device_opt if _IsGPUBlob(
model, param_name
) else cpu_device_opt
if rendezvous['engine'] == 'GLOO':
with core.DeviceScope(device_opt):
comm_world = broadcast(comm_world, param)
else:
# Copy between GPU and CPU
with core.DeviceScope(device_opt):
param_cpu = net.CopyGPUToCPU(param, str(param) + "cpu")
with core.DeviceScope(cpu_device_opt):
comm_world = broadcast(comm_world, param_cpu)
with core.DeviceScope(device_opt):
net.CopyCPUToGPU(param_cpu, param)
def _AllReduceGradients(devices, model, rendezvous, use_nccl):
if rendezvous is None:
_AllReduceGradientsSingleHost(devices, model, use_nccl)
else:
_AllReduceGradientsDistributed(devices, model, rendezvous)
def _AllReduceGradientsDistributed(
devices,
model,
rendezvous,
):
num_workers = model.net.Proto().num_workers
assert num_workers > 1, "Please specify more than 1 worker"
all_reduce_engine = rendezvous['engine']
# Make list of gradients in reverse order
reverse_ordered_grads = _GetReverseOrderedGrads(model)
master_device_opt = core.DeviceOption(caffe2_pb2.CUDA, devices[0])
reducing_device_opt = master_device_opt
# We need to specify a partial order using control_input to ensure
    # progress (all machines need to do the same allreduce in parallel)
num_controls = min(4, num_workers - 1)
cyclical_controls = []
# Since num_controls determines the partial ordering of
# allreduces, there is no need for more common world instances
# than there are parallel allreduce operations.
num_comm_worlds = num_controls
cyclical_comm_worlds = []
counter = 0
nccl_control_blob = None
# Note: sorted order to ensure each host puts the operators in
# same order.
for grad_name in reverse_ordered_grads:
master_grad = model._device_grouped_blobs[grad_name][devices[0]]
grads_group = model._device_grouped_blobs[grad_name].values()
assert master_grad in grads_group
# Remark: NCCLReduce does not support in-place modifications
# so we need a temporary gradient blob
reduced_grad = str(master_grad) + "_red"
control_input = None if len(cyclical_controls) < num_controls \
else cyclical_controls[counter % num_controls]
comm_world = None if len(cyclical_comm_worlds) < num_comm_worlds \
else cyclical_comm_worlds[counter % num_comm_worlds]
def allreduce(comm_world, grads):
with core.DeviceScope(reducing_device_opt):
if comm_world is None:
comm_number = len(cyclical_comm_worlds)
comm_world = model.param_init_net.CreateCommonWorld(
rendezvous['kv_handler'],
"allreduce_{}_cw".format(comm_number),
name="allreduce_{}_cw_op".format(comm_number),
size=rendezvous['num_shards'],
rank=rendezvous['shard_id'],
engine=rendezvous['engine'],
status_blob="create_cw_{}_status".format(comm_number),
)
model.net.Allreduce(
inputs=[comm_world] + grads,
outputs=grads,
name=grad_name,
engine=all_reduce_engine,
control_input=control_input,
status_blob="allreduce_{}_status".format(grad_name),
)
return comm_world
if rendezvous['engine'] == 'GLOO':
# With Gloo cross GPU and cross machine allreduce
# can be executed in a single operation
comm_world = allreduce(comm_world, grads_group)
control_output = grads_group[0]
else:
# Step 1: sum gradients from local GPUs to master GPU
with core.DeviceScope(master_device_opt):
model.ConstantFill(master_grad, reduced_grad, value=0.0)
# Temp fix since NCCLReduce does not work
model.net.NCCLAllreduce(
grads_group,
grads_group,
control_input=nccl_control_blob,
)
nccl_control_blob = grads_group[0]
model.net.Copy(master_grad, reduced_grad)
# Step 2: allreduce between all hosts, between master GPUs
comm_world = allreduce(comm_world, [reduced_grad])
control_output = reduced_grad
with core.DeviceScope(master_device_opt):
model.net.Copy(reduced_grad, master_grad)
# Step 3: broadcast locally
_Broadcast(devices, model, model.net, grad_name)
if len(cyclical_controls) < num_controls:
cyclical_controls.append(control_output)
else:
cyclical_controls[counter % num_controls] = control_output
if len(cyclical_comm_worlds) < num_comm_worlds:
cyclical_comm_worlds.append(comm_world)
else:
assert cyclical_comm_worlds[counter % num_comm_worlds] == comm_world
counter += 1
def _AllReduceGradientsSingleHost(devices, model, use_nccl):
"""Performs NCCL AllReduce to distribute gradients to all the GPUs."""
if len(devices) == 1:
return
# Gradients in reverse order
reverse_ordered_grads = _GetReverseOrderedGrads(model)
assert(len(reverse_ordered_grads) > 0)
# Now we need to Allreduce gradients on all the GPUs.
# Pick GPU #0 as a master GPU.
master_device_opt = core.DeviceOption(caffe2_pb2.CUDA, devices[0])
last_out = None
concatenated_idx = set()
for grad_name in reverse_ordered_grads:
# Group by grads for reduce.
grads_group = model._device_grouped_blobs[grad_name].values()
assert len(grads_group) == len(devices), \
"Each GPU from {}, should have a copy of {}.".format(
devices, grad_name)
if _IsGPUBlob(model, grad_name):
with core.DeviceScope(master_device_opt):
if not isinstance(grads_group[0], core.GradientSlice):
_AllReduce(
devices, model, model.net, grad_name, use_nccl, last_out
)
# last_out is used to serialize the execution of nccls
last_out = grads_group[0]
else:
# Sparse gradients: all-gather for indices and values
master_ns = "gpu_{}".format(devices[0])
'''
            Skip if we have already copied the concatenated indices
            to the indices of the GradientSlice. This happens when two
            or more grad blobs are gathered with the same indices
            blob.
'''
skip_idx_concat = False
for g in grads_group:
if g.indices in concatenated_idx:
skip_idx_concat = True
if not skip_idx_concat:
grad_idx_concat, _ = model.net.Concat(
[g.indices for g in grads_group],
["{}/{}_index_concat".format(master_ns, grad_name),
"{}/{}_index_splitinfo".format(master_ns, grad_name)],
axis=0,
name="note:data_parallel_model")
for gpu, g in model._device_grouped_blobs[grad_name].items():
device_opt = core.DeviceOption(caffe2_pb2.CUDA, gpu)
with core.DeviceScope(device_opt):
model.Copy(grad_idx_concat, g.indices)
concatenated_idx.add(g.indices)
grad_val_concat, _ = model.net.Concat(
[g.values for g in grads_group],
["{}/{}_val_concat".format(master_ns, grad_name),
"{}/{}_val_splitinfo".format(master_ns, grad_name)],
axis=0, name="note:data_parallel_model")
for gpu, g in model._device_grouped_blobs[grad_name].items():
device_opt = core.DeviceOption(caffe2_pb2.CUDA, gpu)
with core.DeviceScope(device_opt):
model.Copy(grad_val_concat, g.values)
else:
assert not isinstance(grads_group[0], core.GradientSlice), \
"Synchronizing gradient slices not supported"
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
# Poor man's allreduce
model.Sum(grads_group, grads_group[0])
                _Broadcast(devices, model, model.net, grad_name)
def _BroadcastComputedParams(devices, model, rendezvous):
if rendezvous is None:
_BroadcastComputedParamsSingleHost(devices, model)
else:
_BroadcastComputedParamsDistributed(devices, model, rendezvous)
def _BroadcastComputedParamsDistributed(
devices,
model,
rendezvous,
):
_BroadcastComputedParamsSingleHost(devices, model)
log.warn("Distributed computed params all-reduce not implemented yet")
def _BroadcastComputedParamsSingleHost(devices, model):
'''
    Broadcast computed params from the master device to all other devices.
'''
if len(devices) == 1:
return
for param_name in model._computed_param_names:
# Copy from master to others -- averaging would be perhaps better,
# but currently NCCLAllReduce is too prone to deadlock
_Broadcast(devices, model, model.net, param_name)
def _GetReverseOrderedGrads(model):
'''
Returns the gradients in reverse order (namespace stripped),
for the optimal synchronization order.
'''
return list(reversed(model._grad_names))
# A helper function to extract a parameter's name
def stripParamName(param):
# Format is "a/b/c/d" -> "b/c/d"
if isinstance(param, core.GradientSlice):
return stripParamName(param.indices) + ":" + stripParamName(param.values)
else:
name = str(param)
return name[name.index(scope._NAMESCOPE_SEPARATOR) + 1:]
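# e.g. stripParamName(core.BlobReference("gpu_0/fc_w")) returns "fc_w"; for a
# GradientSlice, the stripped indices and values names are joined with ':'.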
def _AnalyzeOperators(model):
'''
Look at all the operators and check that they do not cross device scopes
'''
for op in model.Proto().op:
if "NCCL" in op.type or "Copy" in op.type or "Concat" in op.type:
continue
if "Sum" == op.type and op.name == "dpm":
continue
if "Allreduce" in op.type and "GLOO" in op.engine:
continue
op_dev = op.device_option
op_gpu = op_dev.cuda_gpu_id
# This avoids failing on operators that are only for CPU
if op_dev.device_type == caffe2_pb2.CPU:
continue
namescope = "gpu_{}/".format(op_gpu)
for inp in list(op.input) + list(op.output):
if inp.startswith("gpu_") and not inp.startswith(namescope):
raise Exception(
"Blob {} of op {}, should have namescope {}. Op: {}".format(
inp, op.type, "gpu_{}/".format(op_gpu), str(op),
))
def _InferBlobDevice(model):
'''
    Assign each blob to a device option based on the operator outputting it.
'''
mapping = {}
def map_ops(proto):
for op in proto.op:
device_option = op.device_option
if op.type == "Iter":
# Hack for Iters which have blob in CPU context
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
for b in list(op.input) + list(op.output):
if b not in mapping:
mapping[b] = device_option
if op.type.startswith('RecurrentNetwork'):
import google.protobuf.text_format as protobuftx
step_args = [a for a in op.arg if a.name.endswith("step_net")]
for step_arg in step_args:
step_proto = caffe2_pb2.NetDef()
protobuftx.Merge(step_arg.s, step_proto)
map_ops(step_proto)
map_ops(model.net.Proto())
model._blob_to_device = mapping
def _IsGPUBlob(model, blob_name):
if blob_name in model._blob_to_device:
return model._blob_to_device[blob_name].device_type == caffe2_pb2.CUDA
else:
blob_name = "gpu_{}/{}".format(model._devices[0], blob_name)
if blob_name not in model._blob_to_device:
return True
return model._blob_to_device[blob_name].device_type == caffe2_pb2.CUDA
def _GroupByDevice(devices, params, non_data_params):
'''
Groups blobs by device, returning a map of [blobname] = {0: BlobRef, 1: ..}.
    Returns an ordered dictionary, preserving the original order.
'''
grouped = OrderedDict()
# Only consider params that were created to be "data parallel"
params = params[len(non_data_params):]
assert len(params) % len(devices) == 0,\
"There should be equal number of params per device"
num_params_per_device = int(len(params) / len(devices))
for i, p in enumerate(params):
assert isinstance(p, core.BlobReference) or \
isinstance(p, core.GradientSlice), \
"Param {} is not BlobReference or GradientSlice".format(p)
name = stripParamName(p)
gpuid = devices[i // num_params_per_device]
if isinstance(p, core.BlobReference):
assert "gpu_{}/".format(gpuid) in p.GetNameScope(),\
"Param {} expected to have namescope 'gpu_{}'".format(str(p), gpuid)
else:
assert "gpu_{}/".format(gpuid) in p.indices.GetNameScope(),\
"Indices {} expected to have namescope 'gpu_{}'".format(str(p), gpuid)
assert "gpu_{}/".format(gpuid) in p.values.GetNameScope(),\
"Values {} expected to have namescope 'gpu_{}'".format(str(p), gpuid)
if name not in grouped:
grouped[name] = {}
grouped[name][gpuid] = p
# Confirm consistency
for j, (p, ps) in enumerate(grouped.items()):
assert \
len(ps) == len(devices), \
"Param {} does not have value for each device (only {}: {})".format(
p, len(ps), ps,
)
# Ensure ordering
if (ps[devices[0]] != params[j]):
log.error("Params: {}".format(params))
log.error("Grouped: {}".format(grouped.keys()))
assert ps[devices[0]] == params[j], \
"Incorrect ordering: {}".format(ps)
return grouped
def _ValidateParams(params):
set_params = set(params)
if len(params) > len(set_params):
dupes = []
sp = sorted(params)
for j, p in enumerate(sp):
            if j > 0 and sp[j - 1] == p:
dupes.append(p)
assert len(params) == len(set_params), \
"Duplicate entries in params: {}".format(dupes)
def _ComputeBlobsToSync(model):
'''
    We sync all blobs that are generated by the param init net and
    are 'data parallel', i.e. assigned to a GPU.
'''
sync_names = set()
blobs_to_sync = []
for op in model.param_init_net.Proto().op:
dp_outputs = [o for o in op.output if o.startswith("gpu_")]
sync_names.update([stripParamName(o) for o in dp_outputs])
blobs_to_sync.extend(dp_outputs)
# Sanity check
diff = set(model._param_names) - sync_names
assert diff == set(), \
"Some params not instantiated in param init net: {}".format(diff)
# Remove duplicates and sort
blobs_to_sync = sorted(list(set(blobs_to_sync)))
blobs_to_sync = [core.BlobReference(b) for b in blobs_to_sync]
return (blobs_to_sync, sync_names)
def _OptimizeGradientMemoryDEPRECATED(model, losses_by_gpu, devices):
log.warning("------- DEPRECATED API, please use " +
"data_parallel_model.OptimizeGradientMemory() ----- ")
for device in devices:
namescope = "gpu_{}/".format(device)
model.net._net = memonger.share_grad_blobs(
model.net,
losses_by_gpu[device],
set(model.param_to_grad.values()),
namescope,
share_activations=False,
)
def OptimizeGradientMemory(model,
input_shapes,
excluded_blobs,
recycle_activations):
"""
Optimize memory usage of the backward pass by recycling blobs for gradient
inputs that have been 'used'.
input_shapes: dict of blob name to shape for the inputs of the model.
Pass empty dictionary if not known.
excluded_blobs: list of blobs that cannot be recycled. These are blobs
that you will access externally.
recycle_activations: whether to also recycle forward pass activations
"""
input_shapes_all_devices = {}
for b, shp in input_shapes.items():
for d in model._devices:
input_shapes_all_devices["gpu_{}/{}".format(d, b)] = shp
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
input_shapes_all_devices,
)
for device in model._devices:
namescope = "gpu_{}/".format(device)
excluded_blobs_by_device = set([namescope + b for b in excluded_blobs])
model.net._net = memonger.share_grad_blobs(
model.net,
model._losses_by_gpu[device],
set(model.param_to_grad.values()),
namescope,
dont_share_blobs=excluded_blobs_by_device,
share_activations=recycle_activations,
blob_shapes=shapes,
)
|
## @package scope
# Module caffe2.python.scope
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import threading
from caffe2.proto import caffe2_pb2
# Python 2 and 3 compatibility: test if basestring exists
try:
basestring # NOQA
except NameError:
# This is python3 so we define basestring.
basestring = str
# The name scope and device scope when creating a new operator.
_NAMESCOPE_SEPARATOR = '/'
_threadlocal_scope = threading.local()
def CurrentNameScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "namescope"):
_threadlocal_scope.namescope = ''
return _threadlocal_scope.namescope
def CurrentDeviceScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "devicescope"):
_threadlocal_scope.devicescope = None
return _threadlocal_scope.devicescope
@contextlib.contextmanager
def NameScope(prefix, reset=False):
global _threadlocal_scope
assert isinstance(prefix, basestring), \
"NameScope takes in a string as its argument."
old_scope = CurrentNameScope()
    prefix = prefix + _NAMESCOPE_SEPARATOR if prefix != '' else ''
if reset:
_threadlocal_scope.namescope = prefix
else:
_threadlocal_scope.namescope = _threadlocal_scope.namescope + prefix
try:
yield
finally:
assert _threadlocal_scope.namescope.endswith(prefix), \
"The namescope variable is changed from outside NameScope() calls."
_threadlocal_scope.namescope = old_scope
@contextlib.contextmanager
def DeviceScope(scope):
assert isinstance(scope, caffe2_pb2.DeviceOption), \
"DeviceScope takes in a caffe2_pb2.DeviceOption as its argument."
global _threadlocal_scope
old_scope = CurrentDeviceScope()
_threadlocal_scope.devicescope = scope
try:
yield
finally:
assert _threadlocal_scope.devicescope == scope, \
"The device scope is changed from outside DeviceScope() calls."
_threadlocal_scope.devicescope = old_scope
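# Illustrative usage sketch: nested scopes compose with the separator, and
# both scope types restore their previous value on exit.
def _example_scopes():
    device = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)
    with NameScope("gpu_0"):
        with DeviceScope(device):
            assert CurrentNameScope() == "gpu_0/"
            assert CurrentDeviceScope() == device
    assert CurrentNameScope() == ""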
|
## @package model_helper_api
# Module caffe2.python.model_helper_api
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import copy
import inspect
from caffe2.python.model_helper import ModelHelper
# flake8: noqa
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.fc import *
from caffe2.python.helpers.pooling import *
from caffe2.python.helpers.normalization import *
from caffe2.python.helpers.nonlinearity import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.train import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.tools import *
class HelperWrapper(object):
_registry = {
'arg_scope': arg_scope,
'fc': fc,
'packed_fc': packed_fc,
'fc_decomp': fc_decomp,
'fc_sparse': fc_sparse,
'fc_prune': fc_prune,
'dropout': dropout,
'max_pool': max_pool,
'average_pool': average_pool,
'max_pool_with_index' : max_pool_with_index,
'lrn': lrn,
'softmax': softmax,
'instance_norm': instance_norm,
'spatial_bn': spatial_bn,
'relu': relu,
'prelu': prelu,
'concat': concat,
'depth_concat': depth_concat,
'sum': sum,
'transpose': transpose,
'iter': iter,
'accuracy': accuracy,
'conv': conv,
'conv_nd': conv_nd,
'conv_transpose': conv_transpose,
'group_conv': group_conv,
'group_conv_deprecated': group_conv_deprecated,
'image_input': image_input,
'video_input': video_input,
'add_weight_decay': add_weight_decay,
}
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, helper_name):
if helper_name not in self._registry:
raise AttributeError(
"Helper function {} not "
"registered.".format(helper_name)
)
def scope_wrapper(*args, **kwargs):
new_kwargs = {}
if helper_name != 'arg_scope':
if len(args) > 0 and isinstance(args[0], ModelHelper):
model = args[0]
elif 'model' in kwargs:
model = kwargs['model']
else:
raise RuntimeError(
"The first input of helper function should be model. " \
"Or you can provide it in kwargs as model=<your_model>.")
new_kwargs = copy.deepcopy(model.arg_scope)
func = self._registry[helper_name]
            var_names, _, varkw, _ = inspect.getargspec(func)
if varkw is None:
# this helper function does not take in random **kwargs
new_kwargs = {
var_name: new_kwargs[var_name]
for var_name in var_names if var_name in new_kwargs
}
cur_scope = get_current_scope()
new_kwargs.update(cur_scope.get(helper_name, {}))
new_kwargs.update(kwargs)
return func(*args, **new_kwargs)
scope_wrapper.__name__ = helper_name
return scope_wrapper
def Register(self, helper):
name = helper.__name__
if name in self._registry:
raise AttributeError(
"Helper {} already exists. Please change your "
"helper name.".format(name)
)
self._registry[name] = helper
def has_helper(self, helper_or_helper_name):
helper_name = (
helper_or_helper_name
if isinstance(helper_or_helper_name, basestring) else
helper_or_helper_name.__name__
)
return helper_name in self._registry
sys.modules[__name__] = HelperWrapper(sys.modules[__name__])
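# Illustrative usage sketch (my_helper is a hypothetical example): custom
# helpers can be registered and then called through this module, picking up
# arg_scope handling automatically via __getattr__.
#
#   import caffe2.python.model_helper_api as api
#
#   def my_helper(model, blob_in, blob_out, **kwargs):
#       return model.net.Relu(blob_in, blob_out)
#
#   api.Register(my_helper)
#   api.my_helper(model, 'x', 'y')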
|
## @package net_printer
# Module caffe2.python.net_printer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto.caffe2_pb2 import OperatorDef
from caffe2.python.checkpoint import Job
from caffe2.python.core import Net, ExecutionStep, Plan
from caffe2.python.task import Task, TaskGroup, WorkspaceType, TaskOutput
from collections import defaultdict
from contextlib import contextmanager
from copy import copy
class Visitor(object):
@classmethod
def register(cls, Type):
if not(hasattr(cls, 'visitors')):
cls.visitors = []
def _register(func):
cls.visitors.append((Type, func))
return func
return _register
def __call__(self, obj, *args, **kwargs):
if obj is None:
return
for Type, func in self.__class__.visitors:
if isinstance(obj, Type):
return func(self, obj, *args, **kwargs)
raise TypeError('%s: unsupported object type: %s' % (
self.__class__.__name__, type(obj)))
class Analyzer(Visitor):
PREFIXES_TO_IGNORE = {'distributed_ctx_init'}
def __init__(self):
self.workspaces = defaultdict(lambda: defaultdict(lambda: 0))
self.workspace_ctx = []
@property
def workspace(self):
return self.workspace_ctx[-1]
@contextmanager
def set_workspace(self, node=None, ws=None, do_copy=False):
        if ws is None:
            if node is not None:
                ws = self.workspaces[str(node)]
            else:
                ws = self.workspace
if do_copy:
ws = copy(ws)
self.workspace_ctx.append(ws)
yield ws
del self.workspace_ctx[-1]
def define_blob(self, blob):
self.workspace[blob] += 1
def need_blob(self, blob):
if any(blob.startswith(p) for p in Analyzer.PREFIXES_TO_IGNORE):
return
assert blob in self.workspace, 'Blob undefined: %s' % blob
@Analyzer.register(OperatorDef)
def analyze_op(analyzer, op):
    for blob in op.input:
        analyzer.need_blob(blob)
    for blob in op.output:
        analyzer.define_blob(blob)
@Analyzer.register(Net)
def analyze_net(analyzer, net):
    for op in net.Proto().op:
        analyzer(op)
@Analyzer.register(ExecutionStep)
def analyze_step(analyzer, step):
proto = step.Proto()
if proto.report_net:
with analyzer.set_workspace(do_copy=True):
analyzer(step.get_net(proto.report_net))
all_new_blobs = set()
substeps = step.Substeps() + [step.get_net(n) for n in proto.network]
for substep in substeps:
with analyzer.set_workspace(do_copy=proto.concurrent_substeps) as ws_in:
analyzer(substep)
if proto.should_stop_blob:
analyzer.need_blob(proto.should_stop_blob)
if proto.concurrent_substeps:
new_blobs = set(ws_in.keys()) - set(analyzer.workspace.keys())
assert len(all_new_blobs & new_blobs) == 0, (
'Error: Blobs created by multiple parallel steps: %s' % (
', '.join(all_new_blobs & new_blobs)))
all_new_blobs |= new_blobs
    for blob in all_new_blobs:
        analyzer.define_blob(blob)
@Analyzer.register(Task)
def analyze_task(analyzer, task):
# check that our plan protobuf is not too large (limit of 64Mb)
step = task.get_step()
plan = Plan(task.node)
plan.AddStep(step)
proto_len = len(plan.Proto().SerializeToString())
    assert proto_len < 2 ** 26, (
        'Due to a protobuf limitation, serialized tasks must be smaller '
        'than 64Mb, but this task has {} bytes.'.format(proto_len))
is_private = task.workspace_type() != WorkspaceType.GLOBAL
with analyzer.set_workspace(do_copy=is_private):
analyzer(step)
@Analyzer.register(TaskGroup)
def analyze_task_group(analyzer, tg):
for task in tg.tasks_by_node().tasks():
with analyzer.set_workspace(node=task.node):
analyzer(task)
@Analyzer.register(Job)
def analyze_job(analyzer, job):
analyzer(job.init_group)
analyzer(job.epoch_group)
def analyze(obj):
"""
    Given a Job, visits all the execution steps making sure that:
    - no undefined blobs will be found during execution
    - no blob with the same name is defined in concurrent steps
"""
Analyzer()(obj)
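# Usage sketch (hypothetical job): calling analyze(job) on a Job whose steps
# read a blob before any step writes it raises an AssertionError of the form
# 'Blob undefined: <blob name>'.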
class Text(object):
def __init__(self):
self._indent = 0
self._lines_in_context = [0]
self.lines = []
@contextmanager
def context(self, text):
if text is not None:
self.add('with %s:' % text)
self._indent += 4
self._lines_in_context.append(0)
yield
if text is not None:
if self._lines_in_context[-1] == 0:
self.add('pass')
self._indent -= 4
del self._lines_in_context[-1]
def add(self, text):
self._lines_in_context[-1] += 1
self.lines.append((' ' * self._indent) + text)
def __str__(self):
return '\n'.join(self.lines)
class Printer(Visitor, Text):
    def __init__(self, factor_prefixes=False):
        # Visitor defines no __init__, so this resolves to Text.__init__
        # through the MRO and initializes the text buffer state.
        super(Printer, self).__init__()
self.factor_prefixes = factor_prefixes
def _sanitize_str(s):
s = str(s)
return s if len(s) < 64 else (s[:64] + '...<+len=%d>' % (len(s) - 64))
def _arg_val(arg):
if arg.HasField('f'):
return str(arg.f)
if arg.HasField('i'):
return str(arg.i)
if arg.HasField('s'):
return _sanitize_str(arg.s)
if arg.floats:
return str(list(arg.floats))
if arg.ints:
return str(list(arg.ints))
if arg.strings:
return str([_sanitize_str(s) for s in arg.strings])
return '[]'
def commonprefix(m):
"Given a list of strings, returns the longest common prefix"
if not m:
return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def factor_prefix(vals, do_it):
    vals = [str(v) for v in vals]
prefix = commonprefix(vals) if len(vals) > 1 and do_it else ''
joined = ', '.join(v[len(prefix):] for v in vals)
return '%s[%s]' % (prefix, joined) if prefix else joined
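# For example (illustrative blob names assumed):
#   factor_prefix(['model/fc_w', 'model/fc_b'], True)  -> 'model/fc_[w, b]'
#   factor_prefix(['model/fc_w', 'model/fc_b'], False) -> 'model/fc_w, model/fc_b'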
def call(op, inputs=None, outputs=None, factor_prefixes=False):
if not inputs:
inputs = ''
else:
inputs_v = [a for a in inputs if not isinstance(a, tuple)]
inputs_kv = [a for a in inputs if isinstance(a, tuple)]
inputs = ', '.join(filter(
bool,
[factor_prefix(inputs_v, factor_prefixes)] +
['%s=%s' % kv for kv in inputs_kv]))
call = '%s(%s)' % (op, inputs)
return call if not outputs else '%s = %s' % (
factor_prefix(outputs, factor_prefixes), call)
@Printer.register(OperatorDef)
def print_op(text, op):
text.add(call(
op.type,
list(op.input) + [(a.name, _arg_val(a)) for a in op.arg],
op.output,
factor_prefixes=text.factor_prefixes))
@Printer.register(Net)
def print_net(text, net):
text.add('# net: %s' % str(net))
for op in net.Proto().op:
text(op)
def _get_step_context(step):
proto = step.Proto()
if proto.should_stop_blob:
return call('loop'), False
if proto.num_iter and proto.num_iter != 1:
return call('loop', [proto.num_iter]), False
concurrent = proto.concurrent_substeps and len(step.Substeps()) > 1
if concurrent:
return call('parallel'), True
if proto.report_net:
return call('run_once'), False
return None, False
@Printer.register(ExecutionStep)
def print_step(text, step):
proto = step.Proto()
step_ctx, do_substep = _get_step_context(step)
with text.context(step_ctx):
if proto.report_net:
with text.context(call('report_net', [proto.report_interval])):
text(step.get_net(proto.report_net))
substeps = step.Substeps() + [step.get_net(n) for n in proto.network]
for substep in substeps:
if (isinstance(substep, ExecutionStep) and
substep.Proto().run_every_ms):
substep_ctx = call(
'reporter',
[str(substep), ('interval_ms', substep.Proto().run_every_ms)])
elif do_substep:
substep_ctx = call('step', [str(substep)])
else:
substep_ctx = None
with text.context(substep_ctx):
text(substep)
if proto.should_stop_blob:
text.add(call('yield stop_if', [proto.should_stop_blob]))
def _print_task_output(x):
assert isinstance(x, TaskOutput)
return 'Output[' + ', '.join(map(str, x.names)) + ']'
@Printer.register(Task)
def print_task(text, task):
outs = ', '.join(map(_print_task_output, task.outputs()))
context = [('node', task.node), ('name', task.name), ('outputs', outs)]
with text.context(call('Task', context)):
text(task.get_step())
@Printer.register(TaskGroup)
def print_task_group(text, tg, header=None):
with text.context(header or call('TaskGroup')):
for task in tg.tasks_by_node().tasks():
text(task)
@Printer.register(Job)
def print_job(text, job):
text(job.init_group, 'Job.current().init_group')
text(job.epoch_group, 'Job.current().epoch_group')
with text.context('Job.current().stop_signals'):
for out in job.stop_signals:
text.add(_print_task_output(out))
text(job.exit_group, 'Job.current().exit_group')
def to_string(obj):
"""
Given a Net, ExecutionStep, Task, TaskGroup or Job, produces a string
with detailed description of the execution steps.
"""
printer = Printer()
printer(obj)
return str(printer)
def debug_net(net):
"""
Given a Net, produce another net that logs info about the operator call
before each operator execution. Use for debugging purposes.
"""
    assert isinstance(net, Net)
    debug_net = Net(str(net))
    for op in net.Proto().op:
        text = Text()
        print_op(text, op)
        debug_net.LogInfo(str(text))
        debug_net.Proto().op.extend([op])
return debug_net
|
## @package schema
# Module caffe2.python.schema
"""
Defines a minimal set of data types that allow us to represent datasets with
arbitrary nested structure, including objects of variable length, such as
maps and lists.
This defines a columnar storage format for such datasets on top of caffe2
tensors. In terms of capacity of representation, it can represent most of
the data types supported by the Parquet, ORC and DWRF file formats.
See comments in operator_test/dataset_ops_test.py for an example and
walkthrough on how to use schema to store and iterate through a structured
in-memory dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.core import BlobReference
from collections import OrderedDict, namedtuple
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
FIELD_SEPARATOR = ':'
def _join_field_name(prefix, suffix):
if prefix and suffix:
return '{}{}{}'.format(prefix, FIELD_SEPARATOR, suffix)
elif prefix:
return prefix
elif suffix:
return suffix
else:
return ''
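# For example: _join_field_name('values', 'keys') -> 'values:keys', while an
# empty prefix or suffix simply yields the other part: ('', 'a') -> 'a'.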
def _normalize_field(field_or_type_or_blob, keep_blobs=True):
"""Clones/normalizes a field before adding it to a container."""
if isinstance(field_or_type_or_blob, Field):
return field_or_type_or_blob.clone(keep_blobs=keep_blobs)
elif type(field_or_type_or_blob) in (type, np.dtype):
return Scalar(dtype=field_or_type_or_blob)
else:
return Scalar(blob=field_or_type_or_blob)
FeatureSpec = namedtuple(
'FeatureSpec',
['feature_type', 'feature_names', 'feature_ids', 'feature_is_request_only']
)
FeatureSpec.__new__.__defaults__ = (None, None, None, None)
class Metadata(
namedtuple(
'Metadata', ['categorical_limit', 'expected_value', 'feature_specs']
)
):
"""Represents additional information associated with a scalar in schema.
`categorical_limit` - for fields of integral type that are guaranteed to be
non-negative it specifies the maximum possible value plus one. It's often
used as a size of an embedding table.
`expected_value` - anticipated average value of elements in the field.
Usually makes sense for length fields of lists.
`feature_specs` - information about the features that contained in this
field. For example if field have more then 1 feature it can have list of
feature names contained in this field."""
__slots__ = ()
Metadata.__new__.__defaults__ = (None, None, None, None)
class Field(object):
"""Represents an abstract field type in a dataset.
"""
def __init__(self, children):
"""Derived classes must call this after their initialization."""
self._parent = (None, 0)
offset = 0
self._field_offsets = []
for child in children:
self._field_offsets.append(offset)
offset += len(child.field_names())
self._field_offsets.append(offset)
def clone_schema(self):
return self.clone(keep_blobs=False)
def field_names(self):
"""Return the children field names for this field."""
raise NotImplementedError('Field is an abstract class.')
def field_types(self):
"""Return the numpy.dtype for each of the children fields."""
raise NotImplementedError('Field is an abstract class.')
def field_metadata(self):
"""Return the Metadata for each of the children fields."""
raise NotImplementedError('Field is an abstract class.')
def field_blobs(self):
"""Return the list of blobs with contents for this Field.
Values can either be all numpy.ndarray or BlobReference.
        If any of the fields doesn't have a blob, throws.
"""
raise NotImplementedError('Field is an abstract class.')
def all_scalars(self):
"""Return the list of all Scalar instances in the Field.
The order is the same as for field_names() or field_blobs()"""
raise NotImplementedError('Field is an abstract class.')
def has_blobs(self):
"""Return True if every scalar of this field has blobs."""
raise NotImplementedError('Field is an abstract class.')
def clone(self, keep_blobs=True):
"""Clone this Field along with its children."""
raise NotImplementedError('Field is an abstract class.')
def _set_parent(self, parent, relative_id):
self._parent = (parent, relative_id)
def slice(self):
"""
Returns a slice representing the range of field ids that belong to
this field. This slice can be used to index a list of fields.
E.g.:
>>> s = Struct(
>>> ('a', Scalar()),
>>> ('b', Struct(
>>> ('b1', Scalar()),
>>> ('b2', Scalar()),
>>> )),
>>> ('c', Scalar()),
>>> )
>>> field_data = ['da', 'db1', 'db2', 'dc']
        >>> field_data[s.b.slice()]
['db1', 'db2']
"""
base_id = self._child_base_id()
return slice(base_id, base_id + len(self.field_names()))
def _child_base_id(self, child_index=None):
"""Get the base id of the given child"""
p, i = self._parent
pos = 0 if child_index is None else self._field_offsets[child_index]
if p:
pos += p._child_base_id(i)
return pos
def __eq__(self, other):
"""Equivalance of two schemas"""
return (
(self.field_names() == other.field_names()) and
(self.field_types() == other.field_types()) and
(self.field_metadata() == other.field_metadata())
)
class List(Field):
"""Represents a variable-length list.
Values of a list can also be complex fields such as Lists and Structs.
In addition to the fields exposed by its `values` field, a List exposes an
additional `lengths` field, which will contain the size of each list under
the parent domain.
"""
def __init__(self, values, lengths_blob=None):
if isinstance(lengths_blob, Field):
assert isinstance(lengths_blob, Scalar)
self.lengths = _normalize_field(lengths_blob)
else:
self.lengths = Scalar(np.int32, lengths_blob)
self._items = _normalize_field(values)
self.lengths._set_parent(self, 0)
self._items._set_parent(self, 1)
Field.__init__(self, [self.lengths, self._items])
def field_names(self):
value_fields = self._items.field_names()
return (
['lengths'] + [_join_field_name('values', v) for v in value_fields]
)
def field_types(self):
return self.lengths.field_types() + self._items.field_types()
def field_metadata(self):
return self.lengths.field_metadata() + self._items.field_metadata()
def field_blobs(self):
return self.lengths.field_blobs() + self._items.field_blobs()
def all_scalars(self):
return self.lengths.all_scalars() + self._items.all_scalars()
def has_blobs(self):
return self.lengths.has_blobs() and self._items.has_blobs()
def clone(self, keep_blobs=True):
return List(
_normalize_field(self._items, keep_blobs=keep_blobs),
_normalize_field(self.lengths, keep_blobs=keep_blobs)
)
def __repr__(self):
return "List(lengths={!r}, _items={!r})".format(
self.lengths, self._items)
def __getattr__(self, item):
"""If the value of this list is a struct,
allow to instrospect directly into its fields."""
if item.startswith('__'):
raise AttributeError(item)
if isinstance(self._items, Struct):
return getattr(self._items, item)
elif item == 'value' or item == 'items':
return self._items
else:
raise AttributeError('Field not found in list: %s.' % item)
def __getitem__(self, item):
if isinstance(self._items, Struct):
return self._items[item]
elif item == 'lengths':
return self.lengths
elif item == 'value' or item == 'items':
return self._items
else:
raise KeyError('Field not found in list: %s.' % item)
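# Illustrative sketch of the flattened column names a List produces (assumed
# example): List(Struct(('a', Scalar()), ('b', Scalar()))).field_names()
# evaluates to ['lengths', 'values:a', 'values:b'].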
class Struct(Field):
"""Represents a named list of fields sharing the same domain.
"""
def __init__(self, *fields):
""" fields is a list of tuples in format of (name, field). The name is
a string of nested name, e.g., `a`, `a:b`, `a:b:c`. For example
Struct(
('a', Scalar()),
('b:c', Scalar()),
('b:d:e', Scalar()),
('b', Struct(
('f', Scalar()),
)),
)
is equal to
Struct(
('a', Scalar()),
('b', Struct(
('c', Scalar()),
('d', Struct(('e', Scalar()))),
('f', Scalar()),
)),
)
"""
for field in fields:
assert len(field) == 2
assert field[0], 'Field names cannot be empty'
assert field[0] != 'lengths', (
'Struct cannot contain a field named `lengths`.'
)
fields = [(name, _normalize_field(field)) for name, field in fields]
self.fields = OrderedDict()
for name, field in fields:
if FIELD_SEPARATOR in name:
name, field = self._struct_from_nested_name(name, field)
if name not in self.fields:
self.fields[name] = field
continue
if (
not isinstance(field, Struct) or
not isinstance(self.fields[name], Struct)
):
raise ValueError('Duplicate field name: %s' % name)
self.fields[name] = self.fields[name] + field
for id, (_, field) in enumerate(self.fields.items()):
field._set_parent(self, id)
Field.__init__(self, self.fields.values())
self._frozen = True
def _struct_from_nested_name(self, nested_name, field):
def create_internal(nested_name, field):
names = nested_name.split(FIELD_SEPARATOR, 1)
if len(names) == 1:
added_field = field
else:
added_field = create_internal(names[1], field)
return Struct((names[0], added_field))
names = nested_name.split(FIELD_SEPARATOR, 1)
assert len(names) >= 2
return names[0], create_internal(names[1], field)
def get_children(self):
return self.fields.items()
def field_names(self):
names = []
for name, field in self.fields.items():
names += [_join_field_name(name, f) for f in field.field_names()]
return names
def field_types(self):
types = []
for _, field in self.fields.items():
types += field.field_types()
return types
def field_metadata(self):
metadata = []
for _, field in self.fields.items():
metadata += field.field_metadata()
return metadata
def field_blobs(self):
blobs = []
for _, field in self.fields.items():
blobs += field.field_blobs()
return blobs
def all_scalars(self):
scalars = []
for _, field in self.fields.items():
scalars += field.all_scalars()
return scalars
def has_blobs(self):
return all(field.has_blobs() for field in self.fields.values())
def clone(self, keep_blobs=True):
normalized_fields = [
(k, _normalize_field(v, keep_blobs=keep_blobs))
for k, v in self.fields.items()
]
return Struct(*normalized_fields)
def _get_field_by_nested_name(self, nested_name):
names = nested_name.split(FIELD_SEPARATOR, 1)
field = self.fields.get(names[0], None)
if field is None:
return None
if len(names) == 1:
return field
try:
return field[names[1]]
except (KeyError, TypeError):
return None
def __repr__(self):
return "Struct({})".format(
', '.join(["{}={!r}".format(name, field)
for name, field in self.fields.items()])
)
def __contains__(self, item):
field = self._get_field_by_nested_name(item)
return field is not None
def __len__(self):
return len(self.fields)
def __getitem__(self, item):
"""
item can be a tuple or list of ints or strings, or a single
int or string. String item is a nested field name, e.g., "a", "a:b",
"a:b:c". Int item is the index of a field at the first level of the
Struct.
"""
        if isinstance(item, list) or isinstance(item, tuple):
            keys = list(self.fields.keys())
            return Struct(
                * [
                    (keys[k] if isinstance(k, int) else k, self[k])
                    for k in item
                ]
            )
        elif isinstance(item, int):
            return list(self.fields.values())[item]
else:
field = self._get_field_by_nested_name(item)
if field is None:
raise KeyError('field "%s" not found' % (item))
return field
def __getattr__(self, item):
if item.startswith('__'):
raise AttributeError(item)
try:
return self.__dict__['fields'][item]
except KeyError:
raise AttributeError(item)
def __setattr__(self, key, value):
# Disable setting attributes after initialization to prevent false
# impression of being able to overwrite a field.
# Allowing setting internal states mainly so that _parent can be set
# post initialization.
if getattr(self, '_frozen', None) and not key.startswith('_'):
raise TypeError('Struct.__setattr__() is disabled after __init__()')
super(Struct, self).__setattr__(key, value)
def __add__(self, other):
"""
        Allows fields of two schema.Struct instances to be merged using the
        '+' operator. If the two Structs have common field names, the merge is
        conducted recursively. Here are examples:
Example 1
s1 = Struct(('a', Scalar()))
s2 = Struct(('b', Scalar()))
s1 + s2 == Struct(
('a', Scalar()),
('b', Scalar()),
)
Example 2
s1 = Struct(
('a', Scalar()),
('b', Struct(('c', Scalar()))),
)
s2 = Struct(('b', Struct(('d', Scalar()))))
s1 + s2 == Struct(
('a', Scalar()),
('b', Struct(
('c', Scalar()),
('d', Scalar()),
)),
)
"""
if not isinstance(other, Struct):
return NotImplemented
children = OrderedDict(self.get_children())
for name, right_field in other.get_children():
if name not in children:
children[name] = right_field
continue
left_field = children[name]
children[name] = left_field + right_field
return Struct(*(children.items()))
class Scalar(Field):
"""Represents a typed scalar or tensor of fixed shape.
A Scalar is a leaf in a schema tree, translating to exactly one tensor in
the dataset's underlying storage.
Usually, the tensor storing the actual values of this field is a 1D tensor,
representing a series of values in its domain. It is possible however to
have higher rank values stored as a Scalar, as long as all entries have
the same shape.
E.g.:
Scalar(np.float64)
        Scalar field of type float64. Caffe2 will expect readers and
        datasets to expose it as a 1D tensor of doubles (vector), where
        the size of the vector is determined by this field's domain.
    Scalar((np.int32, 5))
        Tensor field of type int32. Caffe2 will expect readers and
        datasets to implement it as a 2D tensor (matrix) of shape (L, 5),
        where L is determined by this field's domain.
    Scalar((str, (10, 20)))
        Tensor field of type str. Caffe2 will expect readers and
        datasets to implement it as a 3D tensor of shape (L, 10, 20),
        where L is determined by this field's domain.
    If the field type is unknown at construction time, call Scalar(), which
    will default to np.void as its dtype.
It is an error to pass a structured dtype to Scalar, since it would contain
more than one field. Instead, use from_dtype, which will construct
a nested `Struct` field reflecting the given dtype's structure.
    A Scalar can also contain a blob, which represents the value of this
    Scalar. A blob can be either a numpy.ndarray, in which case it contains
    the actual contents of the Scalar, or a BlobReference, which represents a
    blob living in a caffe2 Workspace. If a blob of a different type is
    passed, a conversion to numpy.ndarray is attempted.
"""
def __init__(self, dtype=None, blob=None, metadata=None):
self._metadata = None
self.set(dtype, blob, metadata, unsafe=True)
Field.__init__(self, [])
def field_names(self):
return ['']
def field_type(self):
return self.dtype
def field_types(self):
return [self.dtype]
def field_metadata(self):
return [self._metadata]
def has_blobs(self):
return self._blob is not None
def field_blobs(self):
assert self._blob is not None, 'Value is not set for this field.'
return [self._blob]
def all_scalars(self):
return [self]
def clone(self, keep_blobs=True):
return Scalar(
dtype=self._original_dtype,
blob=self._blob if keep_blobs else None,
metadata=self._metadata
)
def get(self):
"""Gets the current blob of this Scalar field."""
assert self._blob is not None, 'Value is not set for this field.'
return self._blob
def __call__(self):
"""Shortcut for self.get()"""
return self.get()
@property
def metadata(self):
return self._metadata
def set_metadata(self, value):
assert isinstance(value, Metadata), \
'metadata must be Metadata, got {}'.format(type(value))
self._metadata = value
self._validate_metadata()
def _validate_metadata(self):
if self._metadata is None:
return
if (self._metadata.categorical_limit is not None and
self.dtype is not None):
assert np.issubdtype(self.dtype, np.integer), \
"`categorical_limit` can be specified only in integral " + \
"fields but got {}".format(self.dtype)
def set_value(self, blob, throw_on_type_mismatch=False, unsafe=False):
"""Sets only the blob field still validating the existing dtype"""
if self.dtype.base != np.void and throw_on_type_mismatch:
assert isinstance(blob, np.ndarray), "Got {!r}".format(blob)
assert blob.dtype.base == self.dtype.base, (
"Expected {}, got {}".format(self.dtype.base, blob.dtype.base))
self.set(dtype=self._original_dtype, blob=blob, unsafe=unsafe)
def set(self, dtype=None, blob=None, metadata=None, unsafe=False):
"""Set the type and/or blob of this scalar. See __init__ for details.
Args:
dtype: can be any numpy type. If not provided and `blob` is
provided, it will be inferred. If no argument is provided,
this Scalar will be of type np.void.
blob: if provided, can be either a BlobReference or a
numpy.ndarray. If a value of different type is passed,
a conversion to numpy.ndarray is attempted. Strings aren't
accepted, since they can be ambiguous. If you want to pass
                a string, use either BlobReference(blob) or np.array(blob).
metadata: optional instance of Metadata, if provided overrides
the metadata information of the scalar
"""
if not unsafe:
logger.warning(
"Scalar should be considered immutable. Only call Scalar.set() "
"on newly created Scalar with unsafe=True. This will become an "
"error soon."
)
if blob is not None and isinstance(blob, core.basestring):
raise ValueError(
'Passing str blob to Scalar.set() is ambiguous. '
'Do either set(blob=np.array(blob)) or '
'set(blob=BlobReference(blob))'
)
self._original_dtype = dtype
if dtype is not None:
dtype = np.dtype(dtype)
# If blob is not None and it is not a BlobReference, we assume that
        # it is actual tensor data, so we will try to cast it to a numpy array.
if blob is not None and not isinstance(blob, BlobReference):
preserve_shape = isinstance(blob, np.ndarray)
if dtype is not None and dtype != np.void:
blob = np.array(blob, dtype=dtype.base)
# if array is empty we may need to reshape a little
if blob.size == 0 and not preserve_shape:
blob = blob.reshape((0, ) + dtype.shape)
else:
assert isinstance(blob, np.ndarray), (
'Invalid blob type: %s' % str(type(blob)))
# reshape scalars into 1D arrays
# TODO(azzolini): figure out better way of representing this
if len(blob.shape) == 0 and not preserve_shape:
blob = blob.reshape((1, ))
# infer inner shape from the blob given
# TODO(dzhulgakov): tweak this to make it work with PackedStruct
if (len(blob.shape) > 1 and dtype is not None and
dtype.base != np.void):
dtype = np.dtype((dtype.base, blob.shape[1:]))
# if we were still unable to infer the dtype
if dtype is None:
dtype = np.dtype(np.void)
assert not dtype.fields, (
'Cannot create Scalar with a structured dtype. ' +
'Use from_dtype instead.'
)
self.dtype = dtype
self._blob = blob
if metadata is not None:
self.set_metadata(metadata)
self._validate_metadata()
def set_type(self, dtype):
self._original_dtype = dtype
if dtype is not None:
self.dtype = np.dtype(dtype)
else:
self.dtype = np.dtype(np.void)
self._validate_metadata()
def __repr__(self):
return 'Scalar({!r}, {!r}, {!r})'.format(
self.dtype, self._blob, self._metadata)
def id(self):
"""
Return the zero-indexed position of this scalar field in its schema.
Used in order to index into the field_blob list returned by readers or
accepted by writers.
"""
return self._child_base_id()
def Map(
keys,
values,
keys_name='keys',
values_name='values',
lengths_blob=None
):
"""A map is a List of Struct containing keys and values fields.
Optionally, you can provide custom name for the key and value fields.
"""
return List(
Struct((keys_name, keys), (values_name, values)),
lengths_blob=lengths_blob
)
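# Illustrative sketch (assumed example):
#   Map(Scalar(np.int32), Scalar(np.float32)).field_names()
# evaluates to ['lengths', 'values:keys', 'values:values'].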
def NamedTuple(name_prefix, *fields):
return Struct(* [('%s_%d' % (name_prefix, i), field)
for i, field in enumerate(fields)])
def Tuple(*fields):
"""
Creates a Struct with default, sequential, field names of given types.
"""
return NamedTuple('field', *fields)
def RawTuple(num_fields, name_prefix='field'):
"""
Creates a tuple of `num_field` untyped scalars.
"""
assert isinstance(num_fields, int)
assert num_fields >= 0
return NamedTuple(name_prefix, *([np.void] * num_fields))
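# Illustrative sketch (assumed example): Tuple(np.int32, np.float32) and
# RawTuple(2) both yield the field names ['field_0', 'field_1']; only the
# former carries concrete dtypes, RawTuple fields default to np.void.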
def from_dtype(dtype, _outer_shape=()):
"""Constructs a Caffe2 schema from the given numpy's dtype.
Numpy supports scalar, array-like and structured datatypes, as long as
all the shapes are fixed. This function breaks down the given dtype into
a Caffe2 schema containing `Struct` and `Scalar` types.
Fields containing byte offsets are not currently supported.
"""
if not isinstance(dtype, np.dtype):
        # wrap it into an np.dtype
shape = _outer_shape
dtype = np.dtype((dtype, _outer_shape))
else:
# concatenate shapes if necessary
shape = _outer_shape + dtype.shape
if shape != dtype.shape:
dtype = np.dtype((dtype.base, shape))
if not dtype.fields:
return Scalar(dtype)
struct_fields = []
    for name, (fdtype, offset) in dtype.fields.items():
        assert offset == 0, ('Fields with byte offsets are not supported.')
        struct_fields.append((name, from_dtype(fdtype, _outer_shape=shape)))
return Struct(*struct_fields)
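# Illustrative sketch (assumed example): a structured dtype is broken into a
# Struct of Scalars, e.g. from_dtype(np.dtype([('x', np.int32)])) yields a
# Struct whose field_names() are ['x']. Note that fields at non-zero byte
# offsets (i.e. any second field of a packed struct) are rejected by the
# assert above.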
class _SchemaNode(object):
"""This is a private class used to represent a Schema Node"""
def __init__(self, name, type_str=''):
self.name = name
self.children = []
self.type_str = type_str
self.field = None
self.col_blob = None
def add_child(self, name, type_str=''):
for child in self.children:
if child.name == name and child.type_str == type_str:
return child
child = _SchemaNode(name, type_str)
self.children.append(child)
return child
def get_field(self):
list_names = ['lengths', 'values']
map_names = ['lengths', 'keys', 'values']
if len(self.children) == 0 or self.field is not None:
assert self.field is not None
return self.field
child_names = []
for child in self.children:
child_names.append(child.name)
if (set(child_names) == set(list_names)):
for child in self.children:
if child.name == 'values':
self.field = List(
child.get_field(),
lengths_blob=self.children[0].col_blob
)
self.type_str = "List"
return self.field
elif (set(child_names) == set(map_names)):
for child in self.children:
if child.name == 'keys':
key_field = child.get_field()
elif child.name == 'values':
values_field = child.get_field()
self.field = Map(
key_field,
values_field,
lengths_blob=self.children[0].col_blob
)
self.type_str = "Map"
return self.field
else:
struct_fields = []
for child in self.children:
if child.field is not None:
struct_fields.append((child.name, child.field))
else:
struct_fields.append((child.name, child.get_field()))
self.field = Struct(*struct_fields)
self.type_str = "Struct"
return self.field
def print_recursively(self):
for child in self.children:
child.print_recursively()
logger.info("Printing node: Name and type")
logger.info(self.name)
logger.info(self.type_str)
def from_column_list(
col_names, col_types=None,
col_blobs=None, col_metadata=None
):
"""
Given a list of names, types, and optionally values, construct a Schema.
"""
if col_types is None:
col_types = [None] * len(col_names)
if col_metadata is None:
col_metadata = [None] * len(col_names)
if col_blobs is None:
col_blobs = [None] * len(col_names)
assert len(col_names) == len(col_types), (
'col_names and col_types must have the same length.'
)
assert len(col_names) == len(col_metadata), (
'col_names and col_metadata must have the same length.'
)
assert len(col_names) == len(col_blobs), (
'col_names and col_blobs must have the same length.'
)
root = _SchemaNode('root', 'Struct')
for col_name, col_type, col_blob, col_metadata in zip(
col_names, col_types, col_blobs, col_metadata
):
columns = col_name.split(FIELD_SEPARATOR)
current = root
for i in range(len(columns)):
name = columns[i]
type_str = ''
field = None
if i == len(columns) - 1:
type_str = col_type
field = Scalar(
dtype=col_type,
blob=col_blob,
metadata=col_metadata
)
            next_node = current.add_child(name, type_str)
            if field is not None:
                next_node.field = field
                next_node.col_blob = col_blob
            current = next_node
return root.get_field()
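# Illustrative sketch (assumed example): nested column names are folded back
# into the schema tree, e.g.
#   from_column_list(['a:lengths', 'a:values', 'b']).field_names()
# returns ['a:lengths', 'a:values', 'b'], with `a` reconstructed as a List.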
def from_blob_list(schema, values, throw_on_type_mismatch=False):
"""
Create a schema that clones the given schema, but containing the given
list of values.
"""
assert isinstance(schema, Field), 'Argument `schema` must be a Field.'
if isinstance(values, BlobReference):
values = [values]
record = schema.clone_schema()
scalars = record.all_scalars()
assert len(scalars) == len(values), (
'Values must have %d elements, got %d.' % (len(scalars), len(values))
)
for scalar, value in zip(scalars, values):
scalar.set_value(value, throw_on_type_mismatch, unsafe=True)
return record
def as_record(value):
if isinstance(value, Field):
return value
elif isinstance(value, list) or isinstance(value, tuple):
is_field_list = all(
            isinstance(f, tuple) and len(f) == 2 and
            isinstance(f[0], core.basestring)
for f in value
)
if is_field_list:
return Struct(* [(k, as_record(v)) for k, v in value])
else:
return Tuple(* [as_record(f) for f in value])
elif isinstance(value, dict):
return Struct(* [(k, as_record(v)) for k, v in value.items()])
else:
return _normalize_field(value)
def FetchRecord(blob_record, ws=None, throw_on_type_mismatch=False):
"""
Given a record containing BlobReferences, return a new record with same
schema, containing numpy arrays, fetched from the current active workspace.
"""
def fetch(v):
if ws is None:
return workspace.FetchBlob(str(v))
else:
return ws.blobs[str(v)].fetch()
assert isinstance(blob_record, Field)
field_blobs = blob_record.field_blobs()
assert all(isinstance(v, BlobReference) for v in field_blobs)
field_arrays = [fetch(value) for value in field_blobs]
return from_blob_list(blob_record, field_arrays, throw_on_type_mismatch)
def FeedRecord(blob_record, arrays, ws=None):
"""
Given a Record containing blob_references and arrays, which is either
a list of numpy arrays or a Record containing numpy arrays, feeds the
record to the current workspace.
"""
def feed(b, v):
if ws is None:
workspace.FeedBlob(str(b), v)
else:
ws.create_blob(str(b))
ws.blobs[str(b)].feed(v)
assert isinstance(blob_record, Field)
field_blobs = blob_record.field_blobs()
assert all(isinstance(v, BlobReference) for v in field_blobs)
if isinstance(arrays, Field):
# TODO: check schema
arrays = arrays.field_blobs()
assert len(arrays) == len(field_blobs), (
'Values must contain exactly %d ndarrays.' % len(field_blobs)
)
for blob, array in zip(field_blobs, arrays):
feed(blob, array)
def NewRecord(net, schema):
"""
Given a record of np.arrays, create a BlobReference for each one of them,
    returning a record containing BlobReferences. The name of each returned
    blob is NextScopedBlob(field_name), which guarantees a unique name in the
    current net. Use NameScope explicitly to avoid name conflicts between
    different nets.
"""
if isinstance(schema, Scalar):
result = schema.clone()
result.set_value(
blob=net.NextScopedBlob('unnamed_scalar'),
unsafe=True,
)
return result
assert isinstance(schema, Field), 'Record must be a schema.Field instance.'
blob_refs = [
net.NextScopedBlob(prefix=name)
for name in schema.field_names()
]
return from_blob_list(schema, blob_refs)
def ConstRecord(net, array_record):
"""
Given a record of arrays, returns a record of blobs,
initialized with net.Const.
"""
blob_record = NewRecord(net, array_record)
for blob, array in zip(
blob_record.field_blobs(), array_record.field_blobs()
):
net.Const(array, blob)
return blob_record
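# A minimal round-trip sketch (assumes an active workspace and a Net named
# `init_net`; blob and field names are illustrative only):
#   record = NewRecord(init_net, Struct(('x', Scalar(np.float32))))
#   FeedRecord(record, [np.array([1.0], dtype=np.float32)])
#   arrays = FetchRecord(record)  # same schema, numpy contents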
def InitEmptyRecord(net, schema_or_record, enforce_types=False):
if not schema_or_record.has_blobs():
record = NewRecord(net, schema_or_record)
else:
record = schema_or_record
for blob_type, blob in zip(record.field_types(), record.field_blobs()):
try:
data_type = data_type_for_dtype(blob_type)
shape = [0] + list(blob_type.shape)
net.ConstantFill([], blob, shape=shape, dtype=data_type)
except TypeError:
# If data_type_for_dtype doesn't know how to resolve given numpy
# type to core.DataType, that function can throw type error (for
# example that would happen for cases of unknown types such as
            # np.void). This is not a problem for cases when the record is going
# to be overwritten by some operator later, though it might be an
# issue for type/shape inference.
if enforce_types:
raise
# If we don't enforce types for all items we'll create a blob with
# the default ConstantFill (FLOAT, no shape)
net.ConstantFill([], blob, shape=[0])
return record
_DATA_TYPE_FOR_DTYPE = [
(np.str, core.DataType.STRING),
(np.float32, core.DataType.FLOAT),
(np.float64, core.DataType.DOUBLE),
(np.bool, core.DataType.BOOL),
(np.int8, core.DataType.INT8),
(np.int16, core.DataType.INT16),
(np.int32, core.DataType.INT32),
(np.int64, core.DataType.INT64),
(np.uint8, core.DataType.UINT8),
(np.uint16, core.DataType.UINT16),
]
def is_schema_subset(schema, original_schema):
# TODO add more checks
return set(schema.field_names()).issubset(
set(original_schema.field_names()))
def equal_schemas(schema,
original_schema,
check_field_names=True,
check_field_types=True,
check_field_metas=False):
assert isinstance(schema, Field)
assert isinstance(original_schema, Field)
if check_field_names and (
schema.field_names() != original_schema.field_names()):
return False
if check_field_types and (
schema.field_types() != original_schema.field_types()):
return False
if check_field_metas and (
schema.field_metadata() != original_schema.field_metadata()):
return False
return True
def schema_check(schema, previous=None):
record = as_record(schema)
if previous is not None:
assert equal_schemas(schema, previous)
return record
def data_type_for_dtype(dtype):
for np_type, dt in _DATA_TYPE_FOR_DTYPE:
if dtype.base == np_type:
return dt
raise TypeError('Unknown dtype: ' + str(dtype.base))
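# For example, data_type_for_dtype(np.dtype(np.float32)) is
# core.DataType.FLOAT, while an unlisted base type such as np.void raises
# TypeError (see InitEmptyRecord above for how callers handle that).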
def attach_metadata_to_scalars(field, metadata):
for f in field.all_scalars():
f.set_metadata(metadata)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.schema import (
Struct, FetchRecord, NewRecord, FeedRecord, InitEmptyRecord)
from caffe2.python import core, workspace
from caffe2.python.session import LocalSession
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.task import TaskGroup
from caffe2.python.test_util import TestCase
import numpy as np
class TestLocalSession(TestCase):
def test_local_session(self):
init_net = core.Net('init')
src_values = Struct(
('uid', np.array([1, 2, 6])),
('value', np.array([1.4, 1.6, 1.7])))
expected_dst = Struct(
('uid', np.array([2, 4, 12])),
('value', np.array([0.0, 0.0, 0.0])))
with core.NameScope('init'):
src_blobs = NewRecord(init_net, src_values)
dst_blobs = InitEmptyRecord(init_net, src_values.clone_schema())
def proc1(rec):
net = core.Net('proc1')
with core.NameScope('proc1'):
out = NewRecord(net, rec)
net.Add([rec.uid(), rec.uid()], [out.uid()])
out.value.set(blob=rec.value(), unsafe=True)
return [net], out
def proc2(rec):
net = core.Net('proc2')
with core.NameScope('proc2'):
out = NewRecord(net, rec)
out.uid.set(blob=rec.uid(), unsafe=True)
net.Sub([rec.value(), rec.value()], [out.value()])
return [net], out
src_ds = Dataset(src_blobs)
dst_ds = Dataset(dst_blobs)
with TaskGroup() as tg:
out1 = pipe(src_ds.reader(), processor=proc1)
out2 = pipe(out1, processor=proc2)
pipe(out2, dst_ds.writer())
ws = workspace.C.Workspace()
FeedRecord(src_blobs, src_values, ws)
session = LocalSession(ws)
session.run(init_net)
session.run(tg)
output = FetchRecord(dst_blobs, ws=ws)
for a, b in zip(output.field_blobs(), expected_dst.field_blobs()):
np.testing.assert_array_equal(a, b)
|
## @package app
# Module caffe2.python.mint.app
import argparse
import flask
import glob
import numpy as np
import nvd3
import os
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.wsgi
__folder__ = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(
__name__,
template_folder=os.path.join(__folder__, "templates"),
static_folder=os.path.join(__folder__, "static")
)
args = None
def jsonify_nvd3(chart):
chart.buildcontent()
    # Note(Yangqing): python-nvd3 does not seem to separate the built HTML
    # part and the script part. Luckily, the HTML part is only a <div>, which
    # can be accessed by chart.container; the script part occupies the rest
    # of the html content, which we can then find via
    # chart.htmlcontent.find('<script>').
script_start = chart.htmlcontent.find('<script>') + 8
script_end = chart.htmlcontent.find('</script>')
return flask.jsonify(
result=chart.container,
script=chart.htmlcontent[script_start:script_end].strip()
)
def visualize_summary(filename):
try:
data = np.loadtxt(filename)
except Exception as e:
return 'Cannot load file {}: {}'.format(filename, str(e))
chart_name = os.path.splitext(os.path.basename(filename))[0]
chart = nvd3.lineChart(
name=chart_name + '_summary_chart',
height=args.chart_height,
y_axis_format='.03g'
)
if args.sample < 0:
        step = max(data.shape[0] // -args.sample, 1)
else:
step = args.sample
xdata = np.arange(0, data.shape[0], step)
    # summary data has 4 columns: min, max, mean and std.
chart.add_serie(x=xdata, y=data[xdata, 0], name='min')
chart.add_serie(x=xdata, y=data[xdata, 1], name='max')
chart.add_serie(x=xdata, y=data[xdata, 2], name='mean')
chart.add_serie(x=xdata, y=data[xdata, 2] + data[xdata, 3], name='m+std')
chart.add_serie(x=xdata, y=data[xdata, 2] - data[xdata, 3], name='m-std')
return jsonify_nvd3(chart)
def visualize_print_log(filename):
try:
data = np.loadtxt(filename)
if data.ndim == 1:
data = data[:, np.newaxis]
except Exception as e:
return 'Cannot load file {}: {}'.format(filename, str(e))
chart_name = os.path.splitext(os.path.basename(filename))[0]
chart = nvd3.lineChart(
name=chart_name + '_log_chart',
height=args.chart_height,
y_axis_format='.03g'
)
if args.sample < 0:
        step = max(data.shape[0] // -args.sample, 1)
else:
step = args.sample
xdata = np.arange(0, data.shape[0], step)
# if there is only one curve, we also show the running min and max
if data.shape[1] == 1:
# We also print the running min and max for the steps.
        trunc_size = data.shape[0] // step
running_mat = data[:trunc_size * step].reshape((trunc_size, step))
chart.add_serie(
x=xdata[:trunc_size],
y=running_mat.min(axis=1),
name='running_min'
)
chart.add_serie(
x=xdata[:trunc_size],
y=running_mat.max(axis=1),
name='running_max'
)
chart.add_serie(x=xdata, y=data[xdata, 0], name=chart_name)
else:
for i in range(0, min(data.shape[1], args.max_curves)):
chart.add_serie(
x=xdata,
y=data[xdata, i],
name='{}[{}]'.format(chart_name, i)
)
return jsonify_nvd3(chart)
def visualize_file(filename):
fullname = os.path.join(args.root, filename)
if filename.endswith('summary'):
return visualize_summary(fullname)
elif filename.endswith('log'):
return visualize_print_log(fullname)
else:
return flask.jsonify(
            result='Unsupported file: {}'.format(filename),
script=''
)
@app.route('/')
def index():
files = glob.glob(os.path.join(args.root, "*.*"))
files.sort()
names = [os.path.basename(f) for f in files]
return flask.render_template(
'index.html',
root=args.root,
names=names,
debug_messages=names
)
@app.route('/visualization/<string:name>')
def visualization(name):
ret = visualize_file(name)
return ret
def main(argv):
parser = argparse.ArgumentParser("The mint visualizer.")
parser.add_argument(
'-p',
'--port',
type=int,
default=5000,
help="The flask port to use."
)
parser.add_argument(
'-r',
'--root',
type=str,
default='.',
help="The root folder to read files for visualization."
)
parser.add_argument(
'--max_curves',
type=int,
default=5,
help="The max number of curves to show in a dump tensor."
)
parser.add_argument(
'--chart_height',
type=int,
default=300,
help="The chart height for nvd3."
)
parser.add_argument(
'-s',
'--sample',
type=int,
default=-200,
help="Sample every given number of data points. A negative "
"number means the total points we will sample on the "
"whole curve. Default 100 points."
)
global args
args = parser.parse_args(argv)
server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app))
server.listen(args.port)
print("Tornado server starting on port {}.".format(args.port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main(sys.argv[1:])
|
## @package sampling_train
# Module caffe2.python.layers.sampling_train
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
output_dims,
subtract_log_odd=True,
name='sampling_train',
**kwargs
):
super(SamplingTrain, self).__init__(
model, name, input_record, **kwargs
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert schema.is_schema_subset(
schema.Struct(
('indices', schema.Scalar()),
('input', schema.Scalar()),
),
input_record
)
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
assert 'sampling_prob' in input_record
self._prediction_layer = layer_class(
model,
input_record.input,
output_dims=output_dims,
**kwargs
)
self._prediction_layer.train_param_blobs = [
model.net.NextBlob(str(blob) + '_sampled')
for blob in self._prediction_layer.param_blobs
]
self.params = self._prediction_layer.params
self.output_schema = self._prediction_layer.output_schema
def add_ops(self, net):
self._prediction_layer.add_ops(net)
def add_train_ops(self, net):
for full_blob, sampled_blob in zip(
self._prediction_layer.param_blobs,
self._prediction_layer.train_param_blobs
):
net.Gather([full_blob, self.input_record.indices()], sampled_blob)
self._prediction_layer.add_train_ops(net)
if not self.subtract_log_odd:
return
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1)
|
## @package tags
# Module caffe2.python.layers.tags
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import context
@context.define_context(allow_default=True)
class TagContext(object):
"""
Scope driven way to provide tags to the layers.
"""
def __init__(self, tags=None):
        # Tags is expected to be a list, to keep the order of adding/removing
        # things
self.tags = tags or []
def add_tags(self, tags):
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
EXCLUDE_FROM_ACCUMULATE_PRED = 'exclude_from_accumulate_pred'
PREPROCESSING = 'preprocessing'
# In certain cases we want to have different schema for training and
# prediction, as an example in prediction we might need to have only
    # subset of ids present in the original schema. This tag is one of the ways
# to mark operators that will be removed from prediction and should
# override schema for predictors.
PREDICTION_SCHEMA = 'prediction_schema'
def __init__(self, tags):
if not isinstance(tags, list):
tags = [tags]
self.tags = tags
def __enter__(self):
TagContext.current().add_tags(self.tags)
return self
def __exit__(self, type, value, traceback):
TagContext.current().remove_tags(self.tags)
Tags.TRAIN_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.EVAL_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_TRAIN,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.PREDICTION_ONLY = [Tags.EXCLUDE_FROM_TRAIN, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
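# Usage sketch (hypothetical layer calls): layers created inside the context
# inherit the active tags, e.g.
#   with Tags(Tags.TRAIN_ONLY):
#       ...  # layers added here are excluded from eval/prediction nets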
|
## @package simple_operator_layers
# Module caffe2.python.layers.simple_operator_layers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
def simple_init(self, model, input_record, *args, **kwargs):
ModelLayer.__init__(self, model, self.operator, input_record, **kwargs)
assert self.operator is not None, "Try to create invalid operator layer"
self.args = args
self.output_schema = schema.NewRecord(self.model.net, input_record)
def first_field_schema_init(self, model, input_record, *args, **kwargs):
ModelLayer.__init__(self, model, self.operator, input_record, **kwargs)
assert self.operator is not None, "Try to create invalid operator layer"
assert isinstance(input_record, schema.Struct),\
"Operator {0} expects schema.Struct as input, received {1} instead".\
format(self.operator, input_record)
self.args = args
self.output_schema = schema.NewRecord(self.model.net, input_record[0])
def simple_add_ops(self, net):
    getattr(net, self.operator)(
        self.input_record.field_blobs(),
        self.output_schema.field_blobs(),
        *self.args,
        **self.kwargs
    )
_simple_operators = ['Softmax', 'Relu', 'Sigmoid', 'Tanh']
_first_field_schema_operators = ['Sum']
# We need to store refs for all created types, to make sure that they won't be
# GCed before we actually register them.
_known_layers = []
for operator in _simple_operators:
    # Generate a class named after 'operator' that is going to use
    # simple_init and simple_add_ops implementations for its __init__ and
    # add_ops calls. It'll also get automatically registered in the registry.
_known_layers.append(
type(
str(operator),
(ModelLayer,),
{'__init__': simple_init,
'add_ops': simple_add_ops,
'operator': operator
}
)
)
for operator in _first_field_schema_operators:
    # Generate a class named after 'operator' that is going to use
    # first_field_schema_init and simple_add_ops implementations for __init__
    # and add_ops calls. It'll also get automatically registered in the
    # registry.
_known_layers.append(
type(
str(operator),
(ModelLayer,),
{'__init__': first_field_schema_init,
'add_ops': simple_add_ops,
'operator': operator
}
)
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import LayerParameter, ModelLayer
class MapToRange(ModelLayer):
"""
This layer aims to build a mapping from raw keys to indices within [0, max_index).
The mapping is continuously built during training. The mapping will be frozen during
evaluation and prediction. Unseen keys will be assigned to index 0.
"""
def __init__(
self, model,
input_record,
max_index,
name='map_to_range',
**kwargs
):
super(MapToRange, self).__init__(model, name, input_record, **kwargs)
assert max_index > 0
assert isinstance(input_record, schema.Scalar)
self.max_index = max_index
self.handler = model.net.NextScopedBlob(name + "_handler")
self.params.append(
LayerParameter(
parameter=self.handler,
initializer=core.CreateOperator("LongIndexCreate",
[],
self.handler,
max_elements=self.max_index,
),
optimizer=model.NoOptim,
)
)
self.output_schema = schema.Scalar(
np.int64, model.net.NextScopedBlob(name + "_indices")
)
def add_train_ops(self, net):
if self.input_record.field_type().base != np.int64:
keys = net.Cast(
self.input_record(),
net.NextScopedBlob("indices"),
to=core.DataType.INT64
)
else:
keys = self.input_record()
# Load keys into indices
indices = net.IndexGet([self.handler, keys],
self.output_schema())
net.StopGradient(indices, indices)
def add_eval_ops(self, net):
net.IndexFreeze(self.handler, self.handler)
self.add_train_ops(net)
def add_ops(self, net):
self.add_eval_ops(net)
|
## @package sampling_trainable_mixin
# Module caffe2.python.layers.sampling_trainable_mixin
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import six
class SamplingTrainableMixin(six.with_metaclass(abc.ABCMeta, object)):
def __init__(self, *args, **kwargs):
super(SamplingTrainableMixin, self).__init__(*args, **kwargs)
self._train_param_blobs = None
self._train_param_blobs_frozen = False
@property
@abc.abstractmethod
def param_blobs(self):
"""
List of parameter blobs for prediction net
"""
pass
@property
def train_param_blobs(self):
"""
        If train_param_blobs is not set before use, default to param_blobs
"""
if self._train_param_blobs is None:
self.train_param_blobs = self.param_blobs
return self._train_param_blobs
@train_param_blobs.setter
def train_param_blobs(self, blobs):
assert not self._train_param_blobs_frozen
assert blobs is not None
self._train_param_blobs_frozen = True
self._train_param_blobs = blobs
@abc.abstractmethod
def _add_ops(self, net, param_blobs):
"""
Add ops to the given net, using the given param_blobs
"""
pass
def add_ops(self, net):
self._add_ops(net, self.param_blobs)
def add_train_ops(self, net):
self._add_ops(net, self.train_param_blobs)
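# A minimal subclass sketch (hypothetical names, for illustration only):
#   class MyLayer(SamplingTrainableMixin, ModelLayer):
#       @property
#       def param_blobs(self):
#           return [self.w]
#       def _add_ops(self, net, param_blobs):
#           net.FC(self.input_record.field_blobs() + param_blobs,
#                  self.output_schema.field_blobs())
# add_ops then runs against param_blobs, and add_train_ops against
# train_param_blobs (e.g. the gathered, sampled rows set up by SamplingTrain).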
|
## @package last_n_window_collector
# Module caffe2.python.layers.last_n_window_collector
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
LayerParameter,
ModelLayer,
)
class LastNWindowCollector(ModelLayer):
"""
Collect last-N samples from input record. If you have complex data,
use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
name='last_n_window_collector', **kwargs):
super(LastNWindowCollector, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
self.last_n = model.net.NextScopedBlob(self.name + "_last_n")
self.next_blob = model.net.NextScopedBlob(self.name + "_next")
self.params.append(LayerParameter(
parameter=self.last_n,
initializer=core.CreateOperator(
'ConstantFill', [], self.last_n, shape=[0]
),
optimizer=model.NoOptim,
))
self.params.append(LayerParameter(
parameter=self.next_blob,
initializer=core.CreateOperator(
'ConstantFill',
[],
self.next_blob,
shape=[],
value=0,
dtype=core.DataType.INT32,
),
optimizer=model.NoOptim,
))
self.output_schema = schema.from_blob_list(
input_record, [model.net.NextScopedBlob(name + "_output")])
def add_ops(self, net):
net.LastNWindowCollector(
[self.last_n, self.next_blob, self.input_record()],
[self.last_n, self.next_blob],
num_to_collect=self.num_to_collect,
)
# Copy to make sure DAG of record is not broken.
# Also, the output of this is likely going through a pipeline, which
# will move data and require us to copy anyway.
net.Copy(self.last_n, self.output_schema())
|
## @package fc
# Module caffe2.python.layers.fc
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
LayerParameter
)
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FC(SamplingTrainableMixin, ModelLayer):
def __init__(self, model, input_record, output_dims, weight_init=None,
bias_init=None, weight_optim=None, bias_optim=None, name='fc',
**kwargs):
super(FC, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        assert len(input_record.field_types()[0].shape) > 0, (
            "FC expects the input record to have a known, non-empty shape")
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FC expects input dimensions > 0, got {}".format(input_dims))
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
model.net.NextScopedBlob(name + '_output')
)
scale = math.sqrt(1.0 / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.w = model.net.NextScopedBlob(name + "_w")
self.b = model.net.NextScopedBlob(name + "_b")
self.params.append(
LayerParameter(
parameter=self.w,
initializer=core.CreateOperator(weight_init[0],
[],
self.w,
shape=[output_dims, input_dims],
**weight_init[1]
),
optimizer=weight_optim))
self.params.append(
LayerParameter(
parameter=self.b,
initializer=core.CreateOperator(bias_init[0],
[],
self.b,
shape=[output_dims, ],
**bias_init[1]
),
optimizer=bias_optim))
def _add_ops(self, net, params):
net.FC(self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), **self.kwargs)
@property
def param_blobs(self):
return [self.w, self.b]
|
## @package concat
# Module caffe2.python.layers.concat
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
import numpy as np
class Concat(ModelLayer):
def __init__(self, model, input_record, axis=1,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
assert isinstance(input_record, schema.Struct),\
"Incorrect input type. Excpected Struct, but received: {0}".\
format(input_record)
shapes = []
for field_name, field_type in input_record.fields.items():
assert isinstance(field_type, schema.Scalar),\
"Incorrect input type for {}. Excpected Scalar, but got: {}".\
format(field_name, field_type)
            # Assume that the first dimension is the batch, so the actual
            # axis in the shape is axis - 1
            assert len(field_type.field_type().shape) >= axis,\
                "Concat expects input tensors with at least {} "\
                "dimensions (excluding the batch dimension)".format(axis)
shapes.append(list(field_type.field_type().shape))
if axis == 0:
self.output_schema = schema.from_blob_list(
input_record[0],
[model.net.NextScopedBlob(name + '_output')]
)
return
concat_dim = 0
for shape in shapes:
concat_dim += shape[axis - 1]
shape[axis - 1] = 0
assert shape == shapes[0],\
"Shapes {0} and {1} are not compatible for Concat".\
format(shape, shapes[0])
output_dims = shapes[0]
output_dims[axis - 1] = concat_dim
self.output_schema = schema.Scalar(
(np.float32, output_dims),
model.net.NextScopedBlob(name + '_output'))
def add_ops(self, net):
net.Concat(
self.input_record.field_blobs(),
[
self.output_schema.field_blobs()[0],
self.output_schema.field_blobs()[0] + "_concat_dims"
],
axis=self.axis,
)
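# Shape sketch (assumed example): concatenating two Scalars with shapes (2,)
# and (3,) on axis=1 yields an output schema of shape (5,); the batch
# dimension (axis 0) is implicit and never counted in these shapes.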
|
## @package batch_distill_lr_loss
# Module caffe2.python.layers.batch_distill_lr_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchDistillLRLoss(ModelLayer):
def __init__(
self, model, input_record,
name='batch_distill_lr_loss', teacherWeight=0.0, **kwargs):
super(BatchDistillLRLoss, self).__init__(model, name, input_record, **kwargs)
assert teacherWeight >= 0 and teacherWeight <= 1, (
'teacherWeight=%0.2f should be in [0, 1]' % teacherWeight
)
self._teacherWeight = teacherWeight
assert schema.is_schema_subset(
schema.Struct(
('teacher_label', schema.Scalar()),
('label', schema.Scalar()),
('prediction', schema.Scalar())
),
input_record
)
self.tags.update(Tags.TRAIN_ONLY)
self.output_schema = schema.Scalar(
np.float32,
model.net.NextScopedBlob(name + '_output'))
def add_train_ops(self, net):
label = self.input_record.label()
        if self.input_record.label.field_type().base != np.int32:
            label = net.Cast(
                label,
                net.NextScopedBlob('int32_label'),
                to=core.DataType.INT32,
            )
teacher_label = self.input_record.teacher_label()
class_probabilities = net.MakeTwoClass(
self.input_record.prediction(),
net.NextScopedBlob('two_class_predictions')
)
true_xent = net.LabelCrossEntropy(
[class_probabilities, label],
net.NextScopedBlob('cross_entropy')
)
teacher_xent = net.CrossEntropy(
[self.input_record.prediction(), teacher_label],
net.NextScopedBlob('teacher_cross_entropy')
)
scaled_true_xent = net.Scale(
true_xent,
net.NextScopedBlob('scaled_cross_entropy'),
scale=1.0 - self._teacherWeight,
)
scaled_teacher_xent = net.Scale(
teacher_xent,
net.NextScopedBlob('scaled_teacher_cross_entropy'),
scale=self._teacherWeight,
)
true_loss = net.AveragedLoss(
scaled_true_xent,
net.NextScopedBlob('true_loss')
)
teacher_loss = net.AveragedLoss(
scaled_teacher_xent,
net.NextScopedBlob('teacher_loss')
)
net.Add(
[true_loss, teacher_loss],
self.output_schema.field_blobs()
)
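
if __name__ == '__main__':
    # A numerical sketch of the blended loss assembled above (pure numpy,
    # illustrative only). The teacher term is sketched as binary
    # cross-entropy against the soft label, which is an assumption about
    # the CrossEntropy op's binary behavior.
    import numpy as np
    teacher_weight = 0.3
    p = np.array([0.9, 0.2], dtype=np.float32)   # sigmoid predictions
    y = np.array([1, 0], dtype=np.int32)         # hard labels
    t = np.array([0.8, 0.1], dtype=np.float32)   # teacher soft labels
    # MakeTwoClass + LabelCrossEntropy: -log of the probability assigned
    # by [1 - p, p] to the true class.
    true_xent = -np.log(np.where(y == 1, p, 1.0 - p))
    teacher_xent = -(t * np.log(p) + (1.0 - t) * np.log(1.0 - p))
    loss = (1.0 - teacher_weight) * true_xent.mean() \
        + teacher_weight * teacher_xent.mean()
    print('distill loss:', loss)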
|
## @package batch_softmax_loss
# Module caffe2.python.layers.batch_softmax_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class BatchSoftmaxLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_softmax_loss',
**kwargs
):
super(BatchSoftmaxLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar()),
),
input_record
)
self.output_schema = schema.Struct(
(
'softmax', schema.Scalar(
input_record.prediction.field_type(),
model.net.NextScopedBlob(name + '_softmax')
)
),
(
'loss', schema.Scalar(
np.float32, model.net.NextScopedBlob(name + '_loss')
)
),
)
def add_ops(self, net):
label = self.input_record.label.field_blobs()
if self.input_record.label.field_types()[0].base != np.int32:
label = [
net.Cast(label,
net.NextScopedBlob('int32_label'),
to=core.DataType.INT32)
]
net.SoftmaxWithLoss(
self.input_record.prediction.field_blobs() + label,
self.output_schema.field_blobs()
)
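
if __name__ == '__main__':
    # A numerical sketch of SoftmaxWithLoss (pure numpy, illustrative only):
    # row-wise softmax over the predictions, then the average negative
    # log-likelihood of the integer labels.
    import numpy as np
    logits = np.array([[1.0, 2.0, 0.5], [0.1, 0.2, 3.0]], dtype=np.float32)
    labels = np.array([1, 2], dtype=np.int32)
    shifted = logits - logits.max(axis=1, keepdims=True)  # numerical stability
    softmax = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
    loss = -np.log(softmax[np.arange(len(labels)), labels]).mean()
    print('softmax:', softmax)
    print('loss:', loss)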
|
## @package fc_without_bias
# Module caffe2.python.layers.fc_without_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (ModelLayer, LayerParameter)
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FCWithoutBias(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
weight_init=None,
weight_optim=None,
name='fc_without_bias',
**kwargs
):
super(FCWithoutBias, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_types()[0].shape) > 0, (
"FCWithoutBias expects limited dimensions of the input tensor"
)
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FCWithoutBias expects input dimensions > 0, got {}".format(input_dims)
)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
model.net.NextScopedBlob(name + '_output')
)
scale = math.sqrt(1.0 / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale,
'max': scale}
)
self.w = model.net.NextScopedBlob(name + "_w")
self.params.append(
LayerParameter(
parameter=self.w,
initializer=core.CreateOperator(
weight_init[0], [],
self.w,
shape=[output_dims, input_dims],
**weight_init[1]
),
optimizer=weight_optim
)
)
def _add_ops(self, net, params):
net.MatMul(
self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), trans_b=1, **self.kwargs
)
@property
def param_blobs(self):
return [self.w]
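
if __name__ == '__main__':
    # A minimal numpy sketch of the MatMul(trans_b=1) emitted above and of
    # the uniform init scale; illustrative only.
    import numpy as np
    input_dims, output_dims = 16, 8
    scale = (1.0 / input_dims) ** 0.5
    w = np.random.uniform(-scale, scale,
                          (output_dims, input_dims)).astype(np.float32)
    x = np.random.rand(4, input_dims).astype(np.float32)
    out = x.dot(w.T)   # trans_b=1: multiply by the transposed weight
    assert out.shape == (4, output_dims)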
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from importlib import import_module
import pkgutil
import sys
from . import layers
def import_recursive(package):
"""
Takes a package and imports all modules underneath it
"""
pkg_dir = package.__path__
module_location = package.__name__
for (module_loader, name, ispkg) in pkgutil.iter_modules(pkg_dir):
module_name = "{}.{}".format(module_location, name) # Module/package
module = import_module(module_name)
if ispkg:
import_recursive(module)
import_recursive(sys.modules[__name__])
for cls in layers.ModelLayer.__subclasses__():
layers.register_layer(cls.__name__, cls)
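
# A sketch of what the registration above enables, assuming caffe2 is
# importable and `create_layer` keeps its registry-lookup role (names here
# are illustrative):
#
#     from caffe2.python.layers.layers import create_layer
#     fc = create_layer('FCWithoutBias', model, input_record, output_dims=32)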
|
## @package sparse_lookup
# Module caffe2.python.layers.sparse_lookup
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
get_categorical_limit,
IdList,
IdScoreList,
LayerParameter,
LayerPsParam,
ModelLayer,
)
import functools
import math
import numpy as np
import operator
class SparseLookup(ModelLayer):
_supported_reducers = ['PositionWeighted', 'LogMeanExp', 'LogSumExp', 'Max',
'Mean', 'Sum', 'Sqrt']
def __init__(self, model, input_record, inner_shape, reducer,
weight_init=None, weight_optim=None,
name='sparse_lookup', **kwargs):
super(SparseLookup, self).__init__(model, name, input_record, **kwargs)
if isinstance(inner_shape, int):
inner_shape = [inner_shape]
        assert isinstance(inner_shape, (list, tuple)),\
"Unexpected type for inner_shape, expected list or tuple, got {0}".\
format(type(inner_shape))
# TODO Add some asserts about input type
assert reducer in self._supported_reducers, "Unsupported reducer: {}".\
format(reducer)
self.reducer = reducer
input_dim = get_categorical_limit(input_record)
assert input_dim is not None, "Unbounded features are not supported"
self.output_schema = schema.Scalar(
(np.float32, inner_shape),
model.net.NextScopedBlob(name + '_output'),
)
scale = math.sqrt(1.0 / input_dim)
self.shape = [input_dim] + inner_shape
self.weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.w = model.net.NextScopedBlob(name + "_w")
if schema.equal_schemas(self.input_record, IdList):
sparse_key = self.input_record.items()
elif schema.equal_schemas(self.input_record, IdScoreList):
sparse_key = self.input_record.keys()
else:
raise NotImplementedError()
if self.input_record.lengths.metadata:
avg_length = self.input_record.lengths.metadata.expected_value
else:
avg_length = None
self.params.append(
LayerParameter(
parameter=self.w,
initializer=core.CreateOperator(self.weight_init[0],
[],
self.w,
shape=self.shape,
**self.weight_init[1]
),
optimizer=weight_optim,
ps_param=LayerPsParam(
sparse_key=sparse_key,
average_length=avg_length
)
))
if reducer == 'PositionWeighted':
self.pos_w = model.net.NextScopedBlob(name + "_pos_w")
self.params.append(
LayerParameter(
parameter=self.pos_w,
initializer=core.CreateOperator('ConstantFill',
[],
self.pos_w,
shape=[input_dim, ],
value=1.0
),
optimizer=weight_optim
))
    def get_memory_usage(self):
        # One float32 value (4 bytes) per entry of the [input_dim] + inner_shape table.
        return functools.reduce(operator.mul, self.shape) * 4
def get_fp16_compatible_parameters(self):
return [self.w]
def add_ops(self, net):
if schema.equal_schemas(self.input_record, IdList):
if self.reducer in ['Sum', 'Mean']:
                getattr(net, 'SparseLengths' + self.reducer)(
[
self.w,
self.input_record.items(),
self.input_record.lengths()
],
self.output_schema.field_blobs(),
engine='fp16'
)
elif self.reducer == 'PositionWeighted':
inc_seq = net.LengthsRangeFill(
[self.input_record.lengths()],
self.input_record.lengths() + '_seq'
)
gather_pos_w = net.Gather(
[self.pos_w, inc_seq], self.pos_w + '_gather')
net.SparseLengthsWeightedSum(
[
self.w,
gather_pos_w,
self.input_record.items(),
self.input_record.lengths()
],
self.output_schema.field_blobs(),
grad_on_weights=1,
engine='fp16'
)
elif self.reducer == 'Sqrt':
sqrt_weight = net.LengthsToWeights(
[self.input_record.lengths()],
[self.input_record.lengths() + '_sqrt'],
power=0.5
)
net.SparseLengthsWeightedSum(
[
self.w,
sqrt_weight,
self.input_record.items(),
self.input_record.lengths()
],
self.output_schema.field_blobs(),
engine='fp16'
)
else:
table_rows = net.Gather([self.w, self.input_record.items()])
segment_ids = net.LengthsToSegmentIds(
self.input_record.lengths(),
self.input_record.lengths() + '_sid')
                getattr(net, 'SortedSegmentRange' + self.reducer)(
[table_rows, segment_ids],
self.output_schema.field_blobs(),
engine='fp16'
)
elif schema.equal_schemas(self.input_record, IdScoreList):
if self.reducer in ['Sum', 'Mean']:
                getattr(net, 'SparseLengthsWeighted' + self.reducer)(
[
self.w,
self.input_record.values(),
self.input_record.keys(),
self.input_record.lengths()
],
self.output_schema.field_blobs(),
engine='fp16'
)
else:
raise "Only Sum, Mean is supported for IdScoreList input." +\
"Trying to create with {}".format(self.reducer)
else:
raise "Unsupported input type {0}".format(self.input_record)
|
## @package batch_sigmoid_cross_entropy_loss
# Module caffe2.python.layers.batch_sigmoid_cross_entropy_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.tags import Tags
import numpy as np
class BatchSigmoidCrossEntropyLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_sigmoid_cross_entropy_loss',
**kwargs
):
super(BatchSigmoidCrossEntropyLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar(np.float32)),
('prediction', schema.Scalar(np.float32)),
),
input_record
)
assert input_record.prediction.field_type().shape == \
input_record.label.field_type().shape, \
"prediction and label must have the same shape"
self.tags.update(Tags.TRAIN_ONLY)
self.output_schema = schema.Scalar(
(np.float32, tuple()), model.net.NextScopedBlob(name + '_loss')
)
def add_ops(self, net):
sigmoid_cross_entropy = net.SigmoidCrossEntropyWithLogits(
[self.input_record.prediction(), self.input_record.label()],
net.NextScopedBlob('sigmoid_cross_entropy')
)
net.AveragedLoss(
sigmoid_cross_entropy, self.output_schema.field_blobs())
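
if __name__ == '__main__':
    # A numerical sketch of SigmoidCrossEntropyWithLogits followed by
    # AveragedLoss (pure numpy, illustrative only), using the standard
    # numerically stable form of the element-wise loss.
    import numpy as np
    logits = np.array([[1.2, -0.5], [0.3, 2.0]], dtype=np.float32)
    labels = np.array([[1.0, 0.0], [0.0, 1.0]], dtype=np.float32)
    # max(x, 0) - x * z + log(1 + exp(-|x|))
    per_element = np.maximum(logits, 0) - logits * labels \
        + np.log1p(np.exp(-np.abs(logits)))
    print('loss:', per_element.mean())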
|
## @package batch_lr_loss
# Module caffe2.python.layers.batch_lr_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchLRLoss(ModelLayer):
def __init__(self, model, input_record, name='batch_lr_loss',
average_loss=True, **kwargs):
super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)
self.average_loss = average_loss
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar())
),
input_record
)
self.tags.update(Tags.TRAIN_ONLY)
self.output_schema = schema.Scalar(
np.float32,
model.net.NextScopedBlob(name + '_output'))
    # TODO: this loss construction is simplistic and should eventually
    # cover more label/prediction configurations
def add_ops(self, net):
class_probabilities = net.MakeTwoClass(
self.input_record.prediction.field_blobs(),
net.NextScopedBlob('two_class_predictions')
)
label = self.input_record.label.field_blobs()
if self.input_record.label.field_type().base != np.int32:
label = [net.Cast(
label,
net.NextScopedBlob('int32_label'),
to=core.DataType.INT32)]
        # LabelCrossEntropyGradientOp does not output gradient for the label
xent = net.LabelCrossEntropy(
[class_probabilities] + label,
net.NextScopedBlob('cross_entropy'),
)
if 'weight' in self.input_record.fields:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
weight_blob = net.StopGradient(
[weight_blob],
[net.NextScopedBlob('weight_stop_gradient')],
)
xent = net.Mul(
[xent, weight_blob],
net.NextScopedBlob('weighted_cross_entropy'),
)
if self.average_loss:
net.AveragedLoss(xent, self.output_schema.field_blobs())
else:
net.ReduceFrontSum(xent, self.output_schema.field_blobs())
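
if __name__ == '__main__':
    # A numerical sketch of the op chain above (pure numpy, illustrative
    # only): MakeTwoClass -> LabelCrossEntropy -> optional example weights
    # -> AveragedLoss or ReduceFrontSum.
    import numpy as np
    p = np.array([0.9, 0.2, 0.7], dtype=np.float32)   # P(class == 1)
    y = np.array([1, 0, 1], dtype=np.int32)
    weight = np.array([1.0, 0.5, 2.0], dtype=np.float32)
    two_class = np.stack([1.0 - p, p], axis=1)        # MakeTwoClass
    xent = -np.log(two_class[np.arange(len(y)), y])   # LabelCrossEntropy
    print('average_loss=True :', (xent * weight).mean())
    print('average_loss=False:', (xent * weight).sum())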
|
## @package gather_record
# Module caffe2.python.layers.gather_record
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class GatherRecord(ModelLayer):
"""
    Given a 1-D `indices` tensor, gather the element at each index `i` in
    `indices` from all the blobs in `record`. If a blob is the values blob of
    a list, all the elements covered by the list's lengths blob are gathered.
    For example,
Input:
indices = [0, 2]
record:a = [[0, 1], [2, 3], [4, 5], [6, 7]]
record:b:lengths = [0, 1, 2, 3]
record:b:items = [0, 1, 2, 3, 4, 5]
Output:
a = [[0, 1], [4, 5]]
b:lengths = [0, 2]
b:items = [1, 2]
    Nested lists are supported.
"""
def __init__(self, model, input_record, name='gather_record', **kwargs):
super(GatherRecord, self).__init__(model, name, input_record, **kwargs)
assert 'indices' in input_record
assert 'record' in input_record
self.output_schema = schema.NewRecord(
model.net, input_record.record.clone_schema())
self._indices = self.input_record.indices()
def _gather_scalar(self, net, record, lengths_blob, output_record):
if lengths_blob is None:
net.Gather([record(), self._indices], output_record())
else:
net.LengthsGather([record(), lengths_blob, self._indices],
output_record())
def _gather_struct(self, net, record, lengths_blob, output_record):
for name, field in record.get_children():
self._dispatch(net, field, lengths_blob, output_record[name])
def _gather_list(self, net, record, lengths_blob, output_record):
self._gather_scalar(
net, record.lengths, lengths_blob, output_record.lengths)
if lengths_blob is None:
lengths_blob = record.lengths()
else:
# TODO(kittipat): This is a hacky solution until LengthsSum for int
# is implemented
lengths_float = net.Cast(
record.lengths(),
net.NextScopedBlob(str(record.lengths()) + '_float'),
to=core.DataType.FLOAT,
)
lengths_blob_float = net.LengthsSum(
[lengths_float, lengths_blob],
net.NextScopedBlob(str(record.lengths()) + "_nested_float")
)
lengths_blob = net.Cast(
lengths_blob_float,
net.NextScopedBlob(str(record.lengths()) + "_nested"),
to=core.DataType.INT32,
)
self._dispatch(net, record._items, lengths_blob, output_record._items)
def _dispatch(self, net, record, lengths_blob, output_record):
if isinstance(record, schema.Scalar):
self._gather_scalar(net, record, lengths_blob, output_record)
elif isinstance(record, schema.Struct):
self._gather_struct(net, record, lengths_blob, output_record)
elif isinstance(record, schema.List):
self._gather_list(net, record, lengths_blob, output_record)
else:
raise NotImplementedError
def add_ops(self, net):
self._dispatch(net, self.input_record.record, None, self.output_schema)
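
if __name__ == '__main__':
    # A numerical sketch of the LengthsGather step used for list fields
    # (pure numpy), mirroring the docstring example above.
    import numpy as np
    lengths = np.array([0, 1, 2, 3])
    items = np.array([0, 1, 2, 3, 4, 5])
    indices = np.array([0, 2])
    offsets = np.concatenate([[0], np.cumsum(lengths)[:-1]])
    out_lengths = lengths[indices]
    out_items = np.concatenate(
        [items[offsets[i]:offsets[i] + lengths[i]] for i in indices])
    print(out_lengths, out_items)   # [0 2] [1 2]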
|
## @package optimizers
# Module caffe2.python.layers.optimizers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import context
@context.define_context(allow_default=True)
class OptimizerContext(object):
"""
Scope driven way to provide optimizers to layers.
Optimizer can be fetched through the 'get_optimizer' method.
"""
def __init__(self):
self._optimizers = {}
self._optimizers_list = []
def _rebuild_optimizers(self):
self._optimizers = {}
for m in self._optimizers_list:
self._optimizers.update(m)
def get_optimizer(self, name):
assert name in self._optimizers, (
"{} optimizer is not provided!".format(name))
        return self._optimizers[name]
def push_optimizers(self, optimizers):
# optimizer override is allowed
self._optimizers_list.append(optimizers)
self._optimizers.update(optimizers)
def pop_optimizers(self):
assert len(self._optimizers_list) > 0
self._optimizers_list.pop()
self._rebuild_optimizers()
class Optimizers(object):
"""
Optimizers context to provide optimizers to layers
within the context.
Example usage:
optimizers = {'optim1': optim1, 'optim2': optim2}
with Optimizers(optimizers):
optim = OptimizerContext.current().get_optimizer('optim1')
layer(optim=optim)
"""
def __init__(self, optimizers):
self._optimizers = optimizers
def __enter__(self):
OptimizerContext.current().push_optimizers(self._optimizers)
return self
def __exit__(self, type, value, traceback):
OptimizerContext.current().pop_optimizers()
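
if __name__ == '__main__':
    # A minimal sketch of the override/restore semantics; placeholder
    # objects stand in for real optimizers, since the context only stores
    # and returns them.
    sgd, adagrad = object(), object()
    with Optimizers({'default': sgd}):
        ctx = OptimizerContext.current()
        assert ctx.get_optimizer('default') is sgd
        with Optimizers({'default': adagrad}):
            # The inner scope overrides the entry...
            assert ctx.get_optimizer('default') is adagrad
        # ...and the override is undone when the scope exits.
        assert ctx.get_optimizer('default') is sgd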
|