from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
def calculate_ap(predictions, labels):
N, D = predictions.shape
ap = np.zeros(D)
num_range = np.arange(N, dtype=np.float32) + 1
for k in range(D):
scores = predictions[:N, k]
label = labels[:N, k]
sortind = np.argsort(-scores, kind='mergesort')
truth = label[sortind]
precision = np.cumsum(truth) / num_range
ap[k] = precision[truth.astype(bool)].sum() / max(1, truth.sum())
return ap
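# A hedged worked example of the reference computation above (the scores and
# labels are made up for illustration, not part of the original test):
# sorting by descending score gives truth = [1, 0, 1], per-rank precision
# [1/1, 1/2, 2/3], and AP averages precision over the positive ranks:
# (1 + 2/3) / 2 = 5/6.
def _ap_worked_example():
    preds = np.array([[0.9], [0.7], [0.4]], dtype=np.float32)
    lbls = np.array([[1], [0], [1]], dtype=np.int32)
    return calculate_ap(preds, lbls)  # array([0.8333...])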
class TestAPMeterOps(hu.HypothesisTestCase):
@given(predictions=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0.1,
max_value=1)),
labels=hu.arrays(dims=[10, 3],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=1)),
**hu.gcs_cpu_only)
def test_average_precision(self, predictions, labels, gc, dc):
op = core.CreateOperator(
"APMeter",
["predictions", "labels"],
["AP"],
buffer_size=10,
)
def op_ref(predictions, labels):
ap = calculate_ap(predictions, labels)
return (ap, )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[predictions, labels],
reference=op_ref)
@given(predictions=hu.arrays(dims=[10, 3],
elements=st.floats(allow_nan=False,
allow_infinity=False,
min_value=0.1,
max_value=1)),
labels=hu.arrays(dims=[10, 3],
dtype=np.int32,
elements=st.integers(min_value=0,
max_value=1)),
**hu.gcs_cpu_only)
def test_average_precision_small_buffer(self, predictions, labels, gc, dc):
op_small_buffer = core.CreateOperator(
"APMeter",
["predictions", "labels"],
["AP"],
buffer_size=5,
)
def op_ref(predictions, labels):
# We can only hold the last 5 in the buffer
ap = calculate_ap(predictions[5:], labels[5:])
return (ap, )
self.assertReferenceChecks(
device_option=gc,
op=op_small_buffer,
inputs=[predictions, labels],
reference=op_ref
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestLengthsTileOp(hu.HypothesisTestCase):
@given(
inputs=st.integers(min_value=1, max_value=20).flatmap(
lambda size: st.tuples(
hu.arrays([size]),
hu.arrays([size], dtype=np.int32,
elements=st.integers(min_value=0, max_value=20)),
)
),
**hu.gcs)
def test_lengths_tile(self, inputs, gc, dc):
data, lengths = inputs
def lengths_tile_op(data, lengths):
return [np.concatenate([
[d] * l for d, l in zip(data, lengths)
])]
op = core.CreateOperator(
"LengthsTile",
["data", "lengths"],
["output"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
reference=lengths_tile_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[data, lengths],
outputs_to_check=0,
outputs_with_grads=[0]
)
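# Hedged sketch of the LengthsTile semantics checked above: each row of
# `data` is repeated by the matching entry of `lengths` (rows with length 0
# vanish). Illustrative helper with made-up inputs, not part of the test:
def _lengths_tile_example():
    data = np.array([1.0, 2.0, 3.0], dtype=np.float32)
    lengths = np.array([2, 0, 1], dtype=np.int32)
    return np.concatenate([[d] * l for d, l in zip(data, lengths)])  # [1., 1., 3.]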
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import unittest
class TestGroupConvolution(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
size=st.integers(7, 10),
group=st.integers(1, 4),
input_channels_per_group=st.integers(1, 8),
output_channels_per_group=st.integers(1, 8),
batch_size=st.integers(1, 3),
# TODO(jiayq): if needed, add NHWC support.
order=st.sampled_from(["NCHW"]),
# Note: Eigen does not support group convolution, but it should
# fall back to the default engine without failing.
engine=st.sampled_from(["", "CUDNN", "EIGEN"]),
use_bias=st.booleans(),
**hu.gcs)
@settings(max_examples=2, timeout=100)
def test_group_convolution(
self, stride, pad, kernel, size, group,
input_channels_per_group, output_channels_per_group, batch_size,
order, engine, use_bias, gc, dc):
assume(size >= kernel)
input_channels = input_channels_per_group * group
output_channels = output_channels_per_group * group
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
kernel=kernel,
pad=pad,
order=order,
engine=engine,
group=group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
w = np.random.rand(
output_channels, kernel, kernel,
input_channels_per_group).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, w, b] if use_bias else [X, w]
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
unittest.main()
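# Hedged sketch of the channel bookkeeping that `group` implies: the input
# channels are split into `group` slices, each slice is convolved with its
# own slice of the filters, and the outputs are concatenated. For brevity
# this assumes a 1x1 kernel (so the convolution reduces to a matmul over
# channels); it is an illustration, not the operator's implementation.
def _group_conv_1x1_ref(X, w, group):
    # X: (N, C_in, H, W); w: (C_out, C_in // group, 1, 1)
    N, C_in, H, W = X.shape
    C_out = w.shape[0]
    in_per, out_per = C_in // group, C_out // group
    Y = np.zeros((N, C_out, H, W), dtype=X.dtype)
    for g in range(group):
        xg = X[:, g * in_per:(g + 1) * in_per]
        wg = w[g * out_per:(g + 1) * out_per, :, 0, 0]
        Y[:, g * out_per:(g + 1) * out_per] = np.einsum('oc,nchw->nohw', wg, xg)
    return Y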
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestLossOps(hu.HypothesisTestCase):
@given(n=st.integers(1, 8), **hu.gcs)
def test_averaged_loss(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def avg_op(X):
return [np.mean(X)]
op = core.CreateOperator(
"AveragedLoss",
["X"],
["y"],
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=avg_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
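# Hedged note on the gradient check above: AveragedLoss is just np.mean, so
# the analytic gradient of y = mean(X) is 1/n for every element, which is
# the value the numerical check should recover. Illustrative helper only:
def _averaged_loss_grad(n):
    return np.full(n, 1.0 / n, dtype=np.float32)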
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import tempfile
class TestCounterOps(TestCase):
def test_counter_ops(self):
workspace.RunOperatorOnce(core.CreateOperator(
'CreateCounter', [], ['c'], init_count=1))
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['c'], ['t1'])) # 1 -> 0
assert not workspace.FetchBlob('t1')
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['c'], ['t2'])) # 0 -> -1
assert workspace.FetchBlob('t2')
workspace.RunOperatorOnce(core.CreateOperator(
'CountUp', ['c'], ['t21'])) # -1 -> 0
assert workspace.FetchBlob('t21') == -1
workspace.RunOperatorOnce(core.CreateOperator(
'RetrieveCount', ['c'], ['t22']))
assert workspace.FetchBlob('t22') == 0
workspace.RunOperatorOnce(core.CreateOperator(
'ResetCounter', ['c'], [], init_count=1)) # -> 1
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['c'], ['t3'])) # 1 -> 0
assert not workspace.FetchBlob('t3')
workspace.RunOperatorOnce(core.CreateOperator(
'ResetCounter', ['c'], ['t31'], init_count=5)) # 0 -> 5
assert workspace.FetchBlob('t31') == 0
workspace.RunOperatorOnce(core.CreateOperator(
'ResetCounter', ['c'], ['t32'])) # 5 -> 0
assert workspace.FetchBlob('t32') == 5
workspace.RunOperatorOnce(core.CreateOperator(
'ConstantFill', [], ['t4'], value=False, shape=[],
dtype=core.DataType.BOOL))
assert workspace.FetchBlob('t4') == workspace.FetchBlob('t1')
workspace.RunOperatorOnce(core.CreateOperator(
'ConstantFill', [], ['t5'], value=True, shape=[],
dtype=core.DataType.BOOL))
assert workspace.FetchBlob('t5') == workspace.FetchBlob('t2')
assert workspace.RunOperatorOnce(core.CreateOperator(
'And', ['t1', 't2'], ['t6']))
assert not workspace.FetchBlob('t6') # False && True
assert workspace.RunOperatorOnce(core.CreateOperator(
'And', ['t2', 't5'], ['t7']))
assert workspace.FetchBlob('t7') # True && True
workspace.RunOperatorOnce(core.CreateOperator(
'CreateCounter', [], ['serialized_c'], init_count=22))
with tempfile.NamedTemporaryFile() as tmp:
workspace.RunOperatorOnce(core.CreateOperator(
'Save', ['serialized_c'], [], absolute_path=1,
db_type='minidb', db=tmp.name))
for i in range(10):
workspace.RunOperatorOnce(core.CreateOperator(
'CountDown', ['serialized_c'], ['t8']))
workspace.RunOperatorOnce(core.CreateOperator(
'RetrieveCount', ['serialized_c'], ['t8']))
assert workspace.FetchBlob('t8') == 12
workspace.RunOperatorOnce(core.CreateOperator(
'Load', [], ['serialized_c'], absolute_path=1,
db_type='minidb', db=tmp.name))
workspace.RunOperatorOnce(core.CreateOperator(
'RetrieveCount', ['serialized_c'], ['t8']))
assert workspace.FetchBlob('t8') == 22
if __name__ == "__main__":
import unittest
unittest.main()
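# Hedged pure-Python model of the counter semantics exercised above, as this
# test reads them: CountDown outputs a "done" flag and decrements, while
# CountUp and ResetCounter output the *previous* count. Illustrative only.
class _CounterModel(object):
    def __init__(self, init_count=0):
        self.count = init_count

    def count_down(self):                 # e.g. 1 -> 0 outputs False
        done = self.count <= 0
        self.count -= 1
        return done

    def count_up(self):                   # outputs previous count
        prev = self.count
        self.count += 1
        return prev

    def reset(self, init_count=0):        # outputs previous count
        prev, self.count = self.count, init_count
        return prev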
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
import numpy as np
class TestCastOp(hu.HypothesisTestCase):
@given(**hu.gcs)
def test_cast_int_float(self, gc, dc):
data = np.random.rand(5, 5).astype(np.int32)
# from int to float
op = core.CreateOperator('Cast', 'data', 'data_cast', to=1, from_type=2)
self.assertDeviceChecks(dc, op, [data], [0])
# The gradient of a cast from int is identically zero.
self.assertGradientChecks(gc, op, [data], 0, [0])
@given(**hu.gcs)
def test_cast_int_float_empty(self, gc, dc):
data = np.random.rand(0).astype(np.int32)
# from int to float
op = core.CreateOperator('Cast', 'data', 'data_cast', to=1, from_type=2)
self.assertDeviceChecks(dc, op, [data], [0])
# The gradient of a cast from int is identically zero.
self.assertGradientChecks(gc, op, [data], 0, [0])
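# Hedged note: the magic numbers above are TensorProto dtype enum values
# (FLOAT == 1, INT32 == 2 in caffe2_pb2), so `to=1` casts to float32. The
# same operator spelled with a named constant, for clarity (sketch only):
def _cast_with_named_dtype():
    from caffe2.proto import caffe2_pb2
    return core.CreateOperator(
        'Cast', 'data', 'data_cast', to=caffe2_pb2.TensorProto.FLOAT)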
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import model_helper, workspace, core, rnn_cell
from caffe2.proto import caffe2_pb2
from future.utils import viewitems
import numpy as np
import unittest
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support.")
class TestLSTMs(unittest.TestCase):
def testEqualToCudnn(self):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA)):
T = 8
batch_size = 4
input_dim = 8
hidden_dim = 31
workspace.FeedBlob(
"seq_lengths",
np.array([T] * batch_size, dtype=np.int32)
)
workspace.FeedBlob("target", np.zeros(
[T, batch_size, hidden_dim], dtype=np.float32
))
workspace.FeedBlob("hidden_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
workspace.FeedBlob("cell_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
own_model = model_helper.ModelHelper(name="own_lstm")
input_shape = [T, batch_size, input_dim]
cudnn_model = model_helper.ModelHelper(name="cudnn_lstm")
input_blob = cudnn_model.param_init_net.UniformFill(
[], "input", shape=input_shape)
workspace.FeedBlob("CUDNN/hidden_init_cudnn", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
workspace.FeedBlob("CUDNN/cell_init_cudnn", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
cudnn_output, cudnn_last_hidden, cudnn_last_state, param_extract = rnn_cell.cudnn_LSTM(
model=cudnn_model,
input_blob=input_blob,
initial_states=("hidden_init_cudnn", "cell_init_cudnn"),
dim_in=input_dim,
dim_out=hidden_dim,
scope="CUDNN",
return_params=True,
)
cudnn_loss = cudnn_model.AveragedLoss(
cudnn_model.SquaredL2Distance(
[cudnn_output, "target"], "CUDNN/dist"
), "CUDNN/loss"
)
own_output, own_last_hidden, _, own_last_state, own_params = rnn_cell.LSTM(
model=own_model,
input_blob=input_blob,
seq_lengths="seq_lengths",
initial_states=("hidden_init", "cell_init"),
dim_in=input_dim,
dim_out=hidden_dim,
scope="OWN",
return_params=True,
)
own_loss = own_model.AveragedLoss(
own_model.SquaredL2Distance([own_output, "target"], "OWN/dist"),
"OWN/loss"
)
# Add gradients
cudnn_model.AddGradientOperators([cudnn_loss])
own_model.AddGradientOperators([own_loss])
# Add parameter updates
LR = cudnn_model.param_init_net.ConstantFill(
[], shape=[1], value=0.01
)
ONE = cudnn_model.param_init_net.ConstantFill(
[], shape=[1], value=1.0
)
for param in cudnn_model.GetParams():
cudnn_model.WeightedSum(
[param, ONE, cudnn_model.param_to_grad[param], LR], param
)
for param in own_model.GetParams():
own_model.WeightedSum(
[param, ONE, own_model.param_to_grad[param], LR], param
)
# Copy states over
own_model.net.Copy(own_last_hidden, "hidden_init")
own_model.net.Copy(own_last_state, "cell_init")
cudnn_model.net.Copy(cudnn_last_hidden, "CUDNN/hidden_init_cudnn")
cudnn_model.net.Copy(cudnn_last_state, "CUDNN/cell_init_cudnn")
workspace.RunNetOnce(cudnn_model.param_init_net)
workspace.CreateNet(cudnn_model.net)
##
## CUDNN LSTM MODEL EXECUTION
##
# Get initial values from CuDNN LSTM so we can feed them
# to our own.
(param_extract_net, param_extract_mapping) = param_extract
workspace.RunNetOnce(param_extract_net)
cudnn_lstm_params = {
input_type: {
k: workspace.FetchBlob(v[0])
for k, v in viewitems(pars)
}
for input_type, pars in viewitems(param_extract_mapping)
}
# Run the model 3 times, so that some parameter updates are done
workspace.RunNet(cudnn_model.net.Proto().name, 3)
##
## OWN LSTM MODEL EXECUTION
##
# Map the cuDNN parameters to our own
workspace.RunNetOnce(own_model.param_init_net)
rnn_cell.InitFromLSTMParams(own_params, cudnn_lstm_params)
# Run the model 3 times, so that some parameter updates are done
workspace.CreateNet(own_model.net)
workspace.RunNet(own_model.net.Proto().name, 3)
##
## COMPARE RESULTS
##
# Then compare that final results after 3 runs are equal
own_output_data = workspace.FetchBlob(own_output)
own_last_hidden = workspace.FetchBlob(own_last_hidden)
own_loss = workspace.FetchBlob(own_loss)
cudnn_output_data = workspace.FetchBlob(cudnn_output)
cudnn_last_hidden = workspace.FetchBlob(cudnn_last_hidden)
cudnn_loss = workspace.FetchBlob(cudnn_loss)
self.assertTrue(np.allclose(own_output_data, cudnn_output_data))
self.assertTrue(np.allclose(own_last_hidden, cudnn_last_hidden))
self.assertTrue(np.allclose(own_loss, cudnn_loss))
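# Hedged note on the update loops above: WeightedSum([param, ONE, grad, LR],
# param) computes param <- 1.0 * param + 0.01 * grad in place. The sign of
# the step does not matter for this test; it only needs both models to apply
# *identical* updates. Numpy equivalent of one step, for illustration:
def _weighted_sum_update(param, grad, lr=0.01):
    return 1.0 * param + lr * grad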
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
import unittest
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
class TestTile(hu.HypothesisTestCase):
@given(M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
tiles=st.integers(min_value=1, max_value=3),
axis=st.integers(min_value=0, max_value=2),
**hu.gcs)
def test_tile(self, M, K, N, tiles, axis, gc, dc):
X = np.random.rand(M, K, N).astype(np.float32)
op = core.CreateOperator(
'Tile', ['X'], 'out',
tiles=tiles,
axis=axis,
)
def tile_ref(X, tiles, axis):
dims = np.asarray([1, 1, 1], dtype=np.int64)
dims[axis] = tiles
tiled_data = np.tile(X, dims)
return (tiled_data,)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, tiles, axis],
tile_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X], 0, [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(M=st.integers(min_value=1, max_value=200),
N=st.integers(min_value=1, max_value=200),
tiles=st.integers(min_value=50, max_value=100),
**hu.gcs)
def test_tile_grad(self, M, N, tiles, gc, dc):
X = np.random.rand(M, N).astype(np.float32)
axis = 1
op = core.CreateOperator(
'Tile', ['X'], 'out',
tiles=tiles,
axis=axis,
)
def tile_ref(X, tiles, axis):
dims = np.asarray([1, 1], dtype=np.int64)
dims[axis] = tiles
tiled_data = np.tile(X, dims)
return (tiled_data,)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, tiles, axis],
tile_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
# Gradient check wrt X
grad_op = core.CreateOperator(
'TileGradient', ['dOut'], 'dX',
tiles=tiles,
axis=axis,
)
dX = np.random.rand(M, N * tiles).astype(np.float32)
self.assertDeviceChecks(dc, grad_op, [dX], [0])
@given(M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
tiles=st.integers(min_value=1, max_value=3),
axis=st.integers(min_value=0, max_value=2),
**hu.gcs)
def test_tilewinput(self, M, K, N, tiles, axis, gc, dc):
X = np.random.rand(M, K, N).astype(np.float32)
tiles_arg = np.array([tiles], dtype=np.int32)
axis_arg = np.array([axis], dtype=np.int32)
op = core.CreateOperator(
'Tile', ['X', 'tiles', 'axis'], 'out',
)
def tile_ref(X, tiles, axis):
dims = np.asarray([1, 1, 1], dtype=np.int64)
dims[axis] = tiles
tiled_data = np.tile(X, dims)
return (tiled_data,)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, tiles_arg, axis_arg],
tile_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X, tiles_arg, axis_arg], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X, tiles_arg, axis_arg], 0, [0])
if __name__ == "__main__":
unittest.main()
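# Hedged sketch of the Tile semantics checked above: `tiles` copies of X are
# laid end to end along `axis`, i.e. np.tile with a ones vector carrying
# `tiles` at that axis. Made-up example, not part of the test:
def _tile_example():
    X = np.arange(6, dtype=np.float32).reshape(2, 3)
    return np.tile(X, [1, 2])  # shape (2, 6): each row repeated side by side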
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from hypothesis import assume, given
from caffe2.proto import caffe2_pb2
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import random
import unittest
class TestUtilityOps(hu.HypothesisTestCase):
@given(X=hu.tensor(), args=st.booleans(), **hu.gcs)
def test_slice(self, X, args, gc, dc):
X = X.astype(dtype=np.float32)
dim = random.randint(0, X.ndim - 1)
slice_start = random.randint(0, X.shape[dim] - 1)
slice_end = random.randint(slice_start, X.shape[dim] - 1)
starts = np.array([0] * X.ndim).astype(np.int32)
ends = np.array([-1] * X.ndim).astype(np.int32)
starts[dim] = slice_start
ends[dim] = slice_end
if args:
op = core.CreateOperator(
"Slice", ["X"], ["Y"], starts=starts, ends=ends, device_option=gc
)
def slice_ref(X):
slc = [slice(None)] * X.ndim
slc[dim] = slice(slice_start, slice_end)
return [X[slc]]
inputs = [X]
else:
op = core.CreateOperator(
"Slice", ["X", "starts", "ends"], ["Y"], device_option=gc
)
def slice_ref(x, starts, ends):
slc = [slice(None)] * x.ndim
slc[dim] = slice(slice_start, slice_end)
return [x[slc]]
inputs = [X, starts, ends]
self.assertReferenceChecks(gc, op, inputs, slice_ref)
self.assertDeviceChecks(dc, op, inputs, [0])
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=inputs,
outputs_to_check=0,
outputs_with_grads=[0],
)
@given(dtype=st.sampled_from([np.float32, np.int32]),
ndims=st.integers(min_value=1, max_value=5),
seed=st.integers(min_value=0, max_value=65536),
null_axes=st.booleans(),
engine=st.sampled_from(['CUDNN', None]),
**hu.gcs)
def test_transpose(self, dtype, ndims, seed, null_axes, engine, gc, dc):
if (gc.device_type == caffe2_pb2.CUDA and engine == "CUDNN"):
# cudnn 5.1 does not support int.
assume(workspace.GetCuDNNVersion() >= 6000 or dtype != np.int32)
dims = (np.random.rand(ndims) * 16 + 1).astype(np.int32)
X = (np.random.rand(*dims) * 16).astype(dtype)
if null_axes:
axes = None
op = core.CreateOperator(
"Transpose",
["input"], ["output"],
engine=engine)
else:
np.random.seed(int(seed))
axes = [int(v) for v in list(np.random.permutation(X.ndim))]
op = core.CreateOperator(
"Transpose",
["input"], ["output"],
axes=axes,
engine=engine)
def transpose_ref(x, axes):
return (np.transpose(x, axes),)
self.assertReferenceChecks(gc, op, [X, axes],
transpose_ref)
@given(m=st.integers(5, 10), n=st.integers(5, 10),
o=st.integers(5, 10), nans=st.booleans(), **hu.gcs)
def test_nan_check(self, m, n, o, nans, gc, dc):
other = np.array([1, 2, 3]).astype(np.float32)
X = np.random.rand(m, n, o).astype(np.float32)
if nans:
x_nan = np.random.randint(0, m)
y_nan = np.random.randint(0, n)
z_nan = np.random.randint(0, o)
X[x_nan, y_nan, z_nan] = float('NaN')
def nan_reference(X, Y):
if not np.isnan(X).any():
return [X]
else:
return [np.array([])]
op = core.CreateOperator(
"NanCheck",
["X", "other"],
["Y"]
)
try:
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, other],
reference=nan_reference,
)
if nans:
self.assertTrue(False, "Did not fail when presented with NaN!")
except RuntimeError:
self.assertTrue(nans, "No NaNs but failed")
try:
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
if nans:
self.assertTrue(False, "Did not fail when gradient had NaN!")
except RuntimeError:
pass
@given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
def test_elementwise_max(self, n, m, d, gc, dc):
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
inputs = [X, Y, Z]
def max_op(X, Y, Z):
return [np.maximum(np.maximum(X, Y), Z)]
op = core.CreateOperator(
"Max",
["X", "Y", "Z"],
["mx"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=max_op,
)
self.assertDeviceChecks(dc, op, inputs, [0])
@given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
def test_elementwise_max_grad(self, n, m, d, gc, dc):
go = np.random.rand(n, m, d).astype(np.float32)
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
mx = np.maximum(np.maximum(X, Y), Z)
inputs = [mx, go, X, Y, Z]
def max_grad_op(mx, go, X, Y, Z):
def mx_grad(a):
return go * (mx == a)
return [mx_grad(a) for a in [X, Y, Z]]
op = core.CreateOperator(
"MaxGradient",
["mx", "go", "X", "Y", "Z"],
["gX", "gY", "gZ"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=max_grad_op,
)
self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
@given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
def test_elementwise_min(self, n, m, d, gc, dc):
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
inputs = [X, Y, Z]
def min_op(X, Y, Z):
return [np.minimum(np.minimum(X, Y), Z)]
op = core.CreateOperator(
"Min",
["X", "Y", "Z"],
["mx"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=min_op,
)
self.assertDeviceChecks(dc, op, inputs, [0])
@given(n=st.integers(4, 5), m=st.integers(6, 7),
d=st.integers(2, 3), **hu.gcs)
def test_elementwise_min_grad(self, n, m, d, gc, dc):
go = np.random.rand(n, m, d).astype(np.float32)
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32)
Z = np.random.rand(n, m, d).astype(np.float32)
mx = np.minimum(np.minimum(X, Y), Z)
inputs = [mx, go, X, Y, Z]
def min_grad_op(mx, go, X, Y, Z):
def mx_grad(a):
return go * (mx == a)
return [mx_grad(a) for a in [X, Y, Z]]
op = core.CreateOperator(
"MinGradient",
["mx", "go", "X", "Y", "Z"],
["gX", "gY", "gZ"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=min_grad_op,
)
self.assertDeviceChecks(dc, op, inputs, [0, 1, 2])
@given(
inputs=hu.lengths_tensor().flatmap(
lambda pair: st.tuples(
st.just(pair[0]),
st.just(pair[1]),
hu.dims(max_value=len(pair[1])),
)
).flatmap(
lambda tup: st.tuples(
st.just(tup[0]),
st.just(tup[1]),
hu.arrays(
tup[2], dtype=np.int32,
elements=st.integers(
min_value=0, max_value=len(tup[1]) - 1)),
)
),
**hu.gcs_cpu_only)
def test_lengths_gather(self, inputs, gc, dc):
items = inputs[0]
lengths = inputs[1]
indices = inputs[2]
def lengths_gather_op(items, lengths, indices):
ends = np.cumsum(lengths)
return [np.concatenate(
list(items[ends[i] - lengths[i]:ends[i]] for i in indices))]
op = core.CreateOperator(
"LengthsGather",
["items", "lengths", "indices"],
["output"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[items, lengths, indices],
reference=lengths_gather_op,
)
@given(**hu.gcs)
def test_size_op(self, gc, dc):
X = np.array([[1, 2], [3, 4]]).astype(np.float32)
def size_op(tensor):
return [np.prod(tensor.shape)]
op = core.CreateOperator(
"Size",
["X"],
["output"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=size_op,
)
def test_alias_op(self):
""" Don't use hypothesis because there are only 2 cases to check"""
for size in [0, 5]:
X = np.arange(size).astype(np.float32)
workspace.FeedBlob('X', X)
op = core.CreateOperator(
"Alias",
["X"],
["Y"]
)
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')
np.testing.assert_array_equal(X, Y)
@given(**hu.gcs)
def test_range(self, gc, dc):
names = [
('stop_',),
('start_', 'stop_'),
('start_', 'stop_', 'step_'),
]
# Most random values aren't great here, so use a fixed set instead of
# hypothesis.
for inputs in (
(10,),
(np.float32(10.0),),
(0,),
(0, 0),
(10., 5.0, -1.),
(2, 10000),
(2, 10000, 20000),
(2, 10000, -1),
):
inputs = [np.array(v) for v in inputs]
op = core.CreateOperator(
"Range",
names[len(inputs) - 1],
["Y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=lambda *x: [np.arange(*x)],
)
self.assertDeviceChecks(dc, op, inputs, [0])
with self.assertRaisesRegexp(RuntimeError, 'Step size cannot be 0'):
inputs = (np.array(0), np.array(10), np.array(0))
op = core.CreateOperator(
"Range",
names[len(inputs) - 1],
["Y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=lambda *x: [np.arange(*x)],
)
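# Hedged sketch of the Slice convention used in test_slice above: `starts`
# and `ends` give per-dimension bounds, and an end of -1 keeps that
# dimension whole (which is why untouched dims use starts=0, ends=-1).
# Made-up numpy analogue of slicing dim 1 from 1 to 3:
def _slice_example():
    X = np.arange(12, dtype=np.float32).reshape(3, 4)
    return X[:, 1:3]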
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
class TestDuplicateOperands(TestCase):
def test_duplicate_operands(self):
net = core.Net('net')
shape = (2, 4)
x_in = np.random.uniform(size=shape)
x = net.GivenTensorFill([], 'X', shape=shape,
values=x_in.flatten().tolist())
xsq = net.Mul([x, x])
y = net.DotProduct([xsq, xsq])
net.AddGradientOperators([y])
workspace.RunNetOnce(net)
self.assertTrue(np.allclose(workspace.FetchBlob('X_grad'),
4 * x_in**3))
if __name__ == "__main__":
import unittest
unittest.main()
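# Hedged derivation of the expected gradient above: with xsq = x * x and
# y = DotProduct(xsq, xsq) = sum_j (x_j^2)^2 = sum_j x_j^4 per row, the
# chain rule gives dy/dx_j = 4 * x_j^3 even though x (and xsq) appear twice
# as operands -- exactly the accumulation that duplicate-operand gradients
# must get right. Numpy form of the expected gradient:
def _duplicate_operand_grad(x):
    return 4 * x ** 3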
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, workspace, dataset
from caffe2.python.dataset import Const
from caffe2.python.schema import (
List, Field, Struct, Scalar, Map, from_blob_list, FetchRecord, NewRecord,
FeedRecord
)
from caffe2.python.test_util import TestCase
import numpy.testing as npt
import string
from hypothesis import given
import hypothesis.strategies as st
def _assert_arrays_equal(actual, ref, err_msg):
if ref.dtype.kind in ('S', 'O', 'U'):
np.testing.assert_array_equal(actual, ref, err_msg=err_msg)
else:
np.testing.assert_allclose(
actual, ref, atol=1e-4,
rtol=1e-4, err_msg=err_msg
)
def _assert_records_equal(actual, ref):
assert isinstance(actual, Field)
assert isinstance(ref, Field)
b1 = actual.field_blobs()
b2 = ref.field_blobs()
assert (len(b1) == len(b2)), 'Records have different lengths: %d vs. %d' % (
len(b1), len(b2)
)
for name, d1, d2 in zip(ref.field_names(), b1, b2):
_assert_arrays_equal(d1, d2, err_msg='Mismatch in field %s.' % name)
@st.composite
def _sparse_features_map(draw, num_records, **kwargs):
sparse_maps_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=num_records,
max_size=num_records
)
)
sparse_maps_total_length = sum(sparse_maps_lengths)
sparse_keys = draw(
st.lists(
st.integers(min_value=1, max_value=100),
min_size=sparse_maps_total_length,
max_size=sparse_maps_total_length,
unique=True
)
)
sparse_values_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=sparse_maps_total_length,
max_size=sparse_maps_total_length
)
)
total_sparse_values_lengths = sum(sparse_values_lengths)
sparse_values = draw(
# max_value is max int64
st.lists(
st.integers(min_value=1, max_value=9223372036854775807),
min_size=total_sparse_values_lengths,
max_size=total_sparse_values_lengths
)
)
return [
sparse_maps_lengths,
sparse_keys,
sparse_values_lengths,
sparse_values,
]
@st.composite
def _dense_features_map(draw, num_records, **kwargs):
float_lengths = draw(
st.lists(
st.integers(min_value=1, max_value=10),
min_size=num_records,
max_size=num_records
)
)
total_length = sum(float_lengths)
float_keys = draw(
st.lists(
st.integers(min_value=1, max_value=100),
min_size=total_length,
max_size=total_length,
unique=True
)
)
float_values = draw(
st.lists(st.floats(),
min_size=total_length,
max_size=total_length)
)
return [float_lengths, float_keys, float_values]
@st.composite
def _dataset(draw, min_elements=3, max_elements=10, **kwargs):
schema = Struct(
# Dense Features Map
('floats', Map(
Scalar(np.int32), Scalar(np.float32)
)),
# Sparse Features Map
('int_lists', Map(
Scalar(np.int32),
List(Scalar(np.int64)),
)),
# Complex Type
('text', Scalar(str)),
)
num_records = draw(
st.integers(min_value=min_elements,
max_value=max_elements)
)
raw_dense_features_map_contents = draw(_dense_features_map(num_records))
raw_sparse_features_map_contents = draw(_sparse_features_map(num_records))
raw_text_contents = [
draw(
st.lists(
st.text(alphabet=string.ascii_lowercase),
min_size=num_records,
max_size=num_records
)
)
]
# Concatenate all raw contents to a single one
contents_raw = raw_dense_features_map_contents + raw_sparse_features_map_contents + raw_text_contents
contents = from_blob_list(schema, contents_raw)
return (schema, contents, num_records)
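# Hedged note (illustrative, not part of the original test): from_blob_list
# pairs the schema's *flattened* field list with contents_raw in order, so
# the raw contents must supply exactly one list per flattened field:
# floats:lengths, floats:values:keys, floats:values:values,
# int_lists:lengths, int_lists:values:keys,
# int_lists:values:values:lengths, int_lists:values:values:values, text.
def _flattened_fields(schema):
    return list(schema.field_names())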
class TestDatasetOps(TestCase):
@given(_dataset())
def test_pack_unpack(self, input):
"""
Tests if packing and unpacking of the whole dataset is an identity.
"""
(schema, contents, num_records) = input
dataset_fields = schema.field_names()
net = core.Net('pack_unpack_net')
batch = NewRecord(net, contents)
FeedRecord(batch, contents)
packed = net.PackRecords(
batch.field_blobs(), 1,
fields=dataset_fields
)
unpacked = packed.UnPackRecords(
[], len(dataset_fields),
fields=dataset_fields
)
workspace.RunNetOnce(net)
for initial_tensor, unpacked_tensor in zip(
batch.field_blobs(), unpacked
):
npt.assert_array_equal(
workspace.FetchBlob(initial_tensor),
workspace.FetchBlob(unpacked_tensor)
)
def test_dataset_ops(self):
"""
1. Defining the schema of our dataset.
This example schema could represent, for example, a search query log.
"""
schema = Struct(
# fixed size vector, which will be stored as a matrix when batched
('dense', Scalar((np.float32, 3))),
# could represent a feature map from feature ID to float value
('floats', Map(
Scalar(np.int32), Scalar(np.float32)
)),
# could represent a multi-valued categorical feature map
('int_lists', Map(
Scalar(np.int32),
List(Scalar(np.int64)),
)),
# could represent a multi-valued, weighted categorical feature map
(
'id_score_pairs', Map(
Scalar(np.int32),
Map(
Scalar(np.int64),
Scalar(np.float32),
keys_name='ids',
values_name='scores'
),
)
),
# additional scalar information
(
'metadata', Struct(
('user_id', Scalar(np.int64)),
('user_embed', Scalar((np.float32, 2))),
('query', Scalar(str)),
)
),
)
"""
This is what the flattened fields for this schema look like, along
with their types. Each one of these fields will be stored, read and
written as a tensor.
"""
expected_fields = [
('dense', (np.float32, 3)),
('floats:lengths', np.int32),
('floats:values:keys', np.int32),
('floats:values:values', np.float32),
('int_lists:lengths', np.int32),
('int_lists:values:keys', np.int32),
('int_lists:values:values:lengths', np.int32),
('int_lists:values:values:values', np.int64),
('id_score_pairs:lengths', np.int32),
('id_score_pairs:values:keys', np.int32),
('id_score_pairs:values:values:lengths', np.int32),
('id_score_pairs:values:values:values:ids', np.int64),
('id_score_pairs:values:values:values:scores', np.float32),
('metadata:user_id', np.int64),
('metadata:user_embed', (np.float32, 2)),
('metadata:query', str),
]
zipped = zip(
expected_fields, schema.field_names(), schema.field_types()
)
for (ref_name, ref_type), name, dtype in zipped:
self.assertEquals(ref_name, name)
self.assertEquals(np.dtype(ref_type), dtype)
"""
2. The contents of our dataset.
Contents as defined below could represent, for example, a log of
search queries along with dense, sparse features and metadata.
The dataset below has 3 top-level entries.
"""
contents_raw = [
# dense
[[1.1, 1.2, 1.3], [2.1, 2.2, 2.3], [3.1, 3.2, 3.3]],
# floats
[1, 2, 3], # len
[11, 21, 22, 31, 32, 33], # key
[1.1, 2.1, 2.2, 3.1, 3.2, 3.3], # value
# int lists
[2, 0, 1], # len
[11, 12, 31], # key
[2, 4, 3], # value:len
[111, 112, 121, 122, 123, 124, 311, 312, 313], # value:value
# id score pairs
[1, 2, 2], # len
[11, 21, 22, 31, 32], # key
[1, 1, 2, 2, 3], # value:len
[111, 211, 221, 222, 311, 312, 321, 322, 323], # value:ids
[11.1, 21.1, 22.1, 22.2, 31.1, 31.2, 32.1, 32.2, 32.3], # val:score
# metadata
[123, 234, 456], # user_id
[[0.2, 0.8], [0.5, 0.5], [0.7, 0.3]], # user_embed
['dog posts', 'friends who like to', 'posts about ca'], # query
]
# convert the above content to ndarrays, checking against the schema
contents = from_blob_list(schema, contents_raw)
"""
3. Creating and appending to the dataset.
We first create an empty dataset with the given schema.
Then, a Writer is used to append these entries to the dataset.
"""
ds = dataset.Dataset(schema)
net = core.Net('init')
with core.NameScope('init'):
ds.init_empty(net)
content_blobs = NewRecord(net, contents)
FeedRecord(content_blobs, contents)
writer = ds.writer(init_net=net)
writer.write_record(net, content_blobs)
workspace.RunNetOnce(net)
"""
4. Iterating through the dataset contents.
If we were to iterate through the top level entries of our dataset,
this is what we should expect to see:
"""
entries_raw = [
(
[[1.1, 1.2, 1.3]], # dense
[1],
[11],
[1.1], # floats
[2],
[11, 12],
[2, 4],
[111, 112, 121, 122, 123, 124], # intlst
[1],
[11],
[1],
[111],
[11.1], # id score pairs
[123],
[[0.2, 0.8]],
['dog posts'], # metadata
),
(
[[2.1, 2.2, 2.3]], # dense
[2],
[21, 22],
[2.1, 2.2], # floats
[0],
[],
[],
[], # int list
[2],
[21, 22],
[1, 2],
[211, 221, 222],
[21.1, 22.1, 22.2],
[234],
[[0.5, 0.5]],
['friends who like to'], # metadata
),
(
[[3.1, 3.2, 3.3]], # dense
[3],
[31, 32, 33],
[3.1, 3.2, 3.3], # floats
[1],
[31],
[3],
[311, 312, 313], # int lst
[2],
[31, 32],
[2, 3],
[311, 312, 321, 322, 323],
[31.1, 31.2, 32.1, 32.2, 32.3], # id score list
[456],
[[0.7, 0.3]],
['posts about ca'], # metadata
),
# after the end of the dataset, we will keep getting empty vectors
([], ) * 16,
([], ) * 16,
]
entries = [from_blob_list(schema, e) for e in entries_raw]
"""
Let's go ahead and create the reading nets.
We will run `read` net multiple times and assert that we are reading the
entries the way we stated above.
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
reader = ds.reader(read_init_net)
should_continue, batch = reader.read_record(read_next_net)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for entry in entries:
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
"""
5. Reading/writing in a single plan
If all operations on the data are expressible as Caffe2 operators,
we don't need to load the data into Python: we can iterate through
the dataset in a single Plan. Here we will process the dataset a
little and store it in a second dataset. We can reuse the same
Reader since it supports reset.
"""
reset_net = core.Net('reset_net')
reader.reset(reset_net)
read_step, batch = reader.execution_step()
""" We will add the line number * 1000 to the feature ids. """
process_net = core.Net('process')
line_no = Const(process_net, 0, dtype=np.int32)
const_one = Const(process_net, 1000, dtype=np.int32)
process_net.Add([line_no, const_one], [line_no])
field = batch.floats.keys.get()
process_net.Print(field, [])
process_net.Add([field, line_no], field, broadcast=1, axis=0)
""" Lets create a second dataset and append to it. """
ds2 = dataset.Dataset(schema, name='dataset2')
ds2.init_empty(reset_net)
writer = ds2.writer(reset_net)
writer.write_record(process_net, batch)
# commit is not necessary for DatasetWriter, but we add it for
# generality of the example
commit_net = core.Net('commit')
writer.commit(commit_net)
""" Time to create and run a plan which will do the processing """
plan = core.Plan('process')
plan.AddStep(core.execution_step('reset', reset_net))
plan.AddStep(read_step.AddNet(process_net))
plan.AddStep(core.execution_step('commit', commit_net))
workspace.RunPlan(plan)
"""
Now we should have dataset2 populated.
"""
ds2_data = FetchRecord(ds2.content())
field = ds2_data.floats.keys
field.set(blob=field.get() - [1000, 2000, 2000, 3000, 3000, 3000])
_assert_records_equal(contents, ds2_data)
"""
6. Slicing a dataset
You can create a new schema from pieces of another schema and reuse
the same data.
"""
subschema = Struct(('top_level', schema.int_lists.values))
int_list_contents = contents.int_lists.values.field_names()
self.assertEquals(len(subschema.field_names()), len(int_list_contents))
"""
7. Random Access a dataset
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
idx = np.array([2, 1, 0])
indices_blob = Const(read_init_net, idx, name='indices')
reader = ds.random_reader(read_init_net, indices_blob)
reader.computeoffset(read_init_net)
should_stop, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for i in range(len(entries)):
k = idx[i] if i in idx else i
entry = entries[k]
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
workspace.RunNet(str(read_next_net))
self.assertEquals(True, workspace.FetchBlob(should_stop))
"""
8. Random Access a dataset with loop_over = true
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
idx = np.array([2, 1, 0])
indices_blob = Const(read_init_net, idx, name='indices')
reader = ds.random_reader(read_init_net, indices_blob, loop_over=True)
reader.computeoffset(read_init_net)
should_stop, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
for _ in range(len(entries) * 3):
workspace.RunNet(str(read_next_net))
self.assertEquals(False, workspace.FetchBlob(should_stop))
"""
9. Sort and shuffle a dataset
This sorts the dataset by the values of a given column, then
shuffles within each chunk of size batch_size * shuffle_size
before shuffling the chunks.
"""
read_init_net = core.Net('read_init')
read_next_net = core.Net('read_next')
reader = ds.random_reader(read_init_net)
reader.sort_and_shuffle(read_init_net, 'int_lists:lengths', 1, 2)
reader.computeoffset(read_init_net)
should_continue, batch = reader.read_record(read_next_net)
workspace.CreateNet(read_init_net, True)
workspace.RunNetOnce(read_init_net)
workspace.CreateNet(read_next_net, True)
expected_idx = np.array([2, 1, 0])
for i in range(len(entries)):
k = expected_idx[i] if i in expected_idx else i
entry = entries[k]
workspace.RunNet(str(read_next_net))
actual = FetchRecord(batch)
_assert_records_equal(actual, entry)
"""
Trim a dataset
"""
trim_net = core.Net('trim_ds')
ds.trim(trim_net, multiple_of=2)
workspace.RunNetOnce(trim_net)
trimmed = FetchRecord(ds.content())
EXPECTED_SIZES = [2, 2, 3, 3, 2, 2, 2, 6, 2, 3, 3, 4, 4, 2, 2, 2]
actual_sizes = [d.shape[0] for d in trimmed.field_blobs()]
self.assertEquals(EXPECTED_SIZES, actual_sizes)
def test_last_n_window_ops(self):
collect_net = core.Net('collect_net')
collect_net.GivenTensorFill(
[],
'input',
shape=[3, 2],
values=[1.0, 2.0, 3.0, 4.0, 5.0, 6.0],
)
input_array =\
np.array(list(range(1, 7)), dtype=np.float32).reshape(3, 2)
workspace.CreateBlob('output')
workspace.FeedBlob('next', np.array(0, dtype=np.int32))
collect_net.LastNWindowCollector(
['output', 'next', 'input'],
['output', 'next'],
num_to_collect=7,
)
plan = core.Plan('collect_data')
plan.AddStep(
core.execution_step('collect_data', [collect_net],
num_iter=1)
)
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob('output')
npt.assert_array_equal(input_array, reference_result)
plan = core.Plan('collect_data')
plan.AddStep(
core.execution_step('collect_data', [collect_net],
num_iter=2)
)
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob('output')
npt.assert_array_equal(input_array[[1, 2, 2, 0, 1, 2, 0]],
reference_result)
plan = core.Plan('collect_data')
plan.AddStep(
core.execution_step('collect_data', [collect_net],
num_iter=3)
)
workspace.RunPlan(plan)
reference_result = workspace.FetchBlob('output')
npt.assert_array_equal(input_array[[2, 0, 1, 2, 2, 0, 1]],
reference_result)
def test_collect_tensor_ops(self):
init_net = core.Net('init_net')
blobs = ['blob_1', 'blob_2', 'blob_3']
bvec_map = {}
ONE = init_net.ConstantFill([], 'ONE', shape=[1, 2], value=1)
for b in blobs:
init_net.ConstantFill([], [b], shape=[1, 2], value=0)
bvec_map[b] = b + '_vec'
init_net.CreateTensorVector([], [bvec_map[b]])
reader_net = core.Net('reader_net')
for b in blobs:
reader_net.Add([b, ONE], [b])
collect_net = core.Net('collect_net')
num_to_collect = 1000
max_example_to_cover = 100000
bvec = [bvec_map[b] for b in blobs]
collect_net.CollectTensor(
bvec + blobs,
bvec,
num_to_collect=num_to_collect,
)
print('Collect Net Proto: {}'.format(collect_net.Proto()))
plan = core.Plan('collect_data')
plan.AddStep(core.execution_step('collect_init', init_net))
plan.AddStep(
core.execution_step(
'collect_data', [reader_net, collect_net],
num_iter=max_example_to_cover
)
)
workspace.RunPlan(plan)
# concat the collected tensors
concat_net = core.Net('concat_net')
bconcated_map = {}
bsize_map = {}
for b in blobs:
bconcated_map[b] = b + '_concated'
bsize_map[b] = b + '_size'
concat_net.ConcatTensorVector([bvec_map[b]], [bconcated_map[b]])
concat_net.TensorVectorSize([bvec_map[b]], [bsize_map[b]])
workspace.RunNetOnce(concat_net)
# check data
reference_result = workspace.FetchBlob(bconcated_map[blobs[0]])
self.assertEqual(
reference_result.shape,
(min(num_to_collect, max_example_to_cover), 2)
)
size = workspace.FetchBlob(bsize_map[blobs[0]])
self.assertEqual(tuple(), size.shape)
self.assertEqual(min(num_to_collect, max_example_to_cover), size.item())
hist, _ = np.histogram(
reference_result[:, 0],
bins=10,
range=(1, max_example_to_cover)
)
print('Sample histogram: {}'.format(hist))
self.assertTrue(all(hist > 0.6 * (num_to_collect / 10)))
for i in range(1, len(blobs)):
result = workspace.FetchBlob(bconcated_map[blobs[i]])
self.assertEqual(reference_result.tolist(), result.tolist())
if __name__ == "__main__":
import unittest
unittest.main()
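# Hedged pure-numpy model of LastNWindowCollector as exercised in
# test_last_n_window_ops above: a ring buffer of capacity `num_to_collect`
# with a persistent write cursor, so the expected index patterns fall out of
# plain modular arithmetic. Illustrative sketch only.
def _last_n_window_model(batches, num_to_collect):
    buf, nxt, filled = [None] * num_to_collect, 0, 0
    for batch in batches:
        for row in batch:
            buf[nxt] = row
            nxt = (nxt + 1) % num_to_collect
            filled = min(filled + 1, num_to_collect)
    return np.array(buf[:filled])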
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class TestNGramOps(hu.HypothesisTestCase):
@given(
seed=st.integers(0, 2**32 - 1),
N=st.integers(min_value=10, max_value=100),
D=st.integers(min_value=2, max_value=10),
out_of_vcb=st.floats(min_value=0, max_value=0.5),
max_categorical_limit=st.integers(min_value=5, max_value=20),
max_in_vcb_val=st.integers(min_value=1000, max_value=10000),
**hu.gcs_cpu_only
)
def test_ngram_from_categorical_op(
self,
seed,
N,
D,
out_of_vcb,
max_categorical_limit,
max_in_vcb_val,
gc,
dc,
):
np.random.seed(seed)
col_num = max(int(D / 2), 1)
col_ids = np.random.choice(D, col_num, False).astype(np.int32)
categorical_limits = np.random.randint(
2, high=max_categorical_limit, size=col_num
).astype(np.int32)
vcb = [
np.random.choice(max_in_vcb_val, x, False)
for x in categorical_limits
]
vals = np.array([x for l in vcb for x in l], dtype=np.int32)
# Start with all-negative floats so every entry begins out-of-vocabulary.
floats = np.random.rand(N, D).astype(np.float32) - 2
expected_output = []
for i in range(N):
val = 0
for (k, j) in enumerate(col_ids):
base = np.prod(categorical_limits[:k])
r = np.random.randint(categorical_limits[k])
p = np.random.rand()
if p > out_of_vcb:
val += base * r
floats[i][j] = vcb[k][r]
expected_output.append(val)
expected_output = np.array(expected_output, dtype=np.int32)
workspace.ResetWorkspace()
workspace.FeedBlob('floats', floats)
op = core.CreateOperator(
"NGramFromCategorical",
['floats'],
['output'],
col_ids=col_ids,
categorical_limits=categorical_limits,
vals=vals,
)
workspace.RunOperatorOnce(op)
output = workspace.blobs['output']
np.testing.assert_array_equal(output, expected_output)
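# Hedged note on the expected_output construction above: each selected
# column k contributes its category index r scaled by the product of the
# previous columns' limits, i.e. a mixed-radix encoding of the category
# tuple into one integer id. Worked example with made-up limits (3, 4):
def _mixed_radix_example():
    limits = [3, 4]
    r = [2, 1]  # category 2 in column 0, category 1 in column 1
    # val = 2 * 1 + 1 * 3 = 5
    return sum(ri * int(np.prod(limits[:k])) for k, ri in enumerate(r))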
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace, test_util, model_helper, brew, build
@unittest.skipIf(build.CAFFE2_NO_OPERATOR_SCHEMA,
'Built with CAFFE2_NO_OPERATOR_SCHEMA')
class TestShapeInference(test_util.TestCase):
def testShapeInferenceSimpleFC(self):
m = model_helper.ModelHelper(name="test_model")
brew.fc(m, "data", "fc1", dim_in=96, dim_out=32)
brew.fc(m, "fc1", "fc2", dim_in=32, dim_out=55)
(shapes, types) = workspace.InferShapesAndTypes(
[m.param_init_net, m.net],
{'data': [64, 96]}
)
self.assertEquals(shapes['data'], [64, 96])
self.assertEquals(shapes['fc1_w'], [32, 96])
self.assertEquals(shapes['fc1_b'], [32])
self.assertEquals(shapes['fc1'], [64, 32])
self.assertEquals(shapes['fc2_w'], [55, 32])
self.assertEquals(shapes['fc2_b'], [55])
self.assertEquals(shapes['fc2'], [64, 55])
def testFCAxis2(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FC(["x", "w", "b"], ["y"], axis=2)
workspace.FeedBlob("x", np.random.rand(4, 20, 36).astype(np.float32))
workspace.FeedBlob("w", np.random.rand(36, 36).astype(np.float32))
workspace.FeedBlob("b", np.random.rand(36,).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testFCTransposed(self):
model = model_helper.ModelHelper(name="test_model")
model.net.FCTransposed(["x", "wt", "b"], ["y"])
workspace.FeedBlob("x", np.random.rand(20, 36).astype(np.float32))
workspace.FeedBlob("wt", np.random.rand(36, 48).astype(np.float32))
workspace.FeedBlob("b", np.random.rand(48,).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceSlice(self):
model = model_helper.ModelHelper(name="test_model")
model.net.Slice(["x"], ["y"], starts=[0, 0, 0, 0], ends=[-1, -1, -3, -1])
workspace.FeedBlob("x", np.random.rand(64, 1, 255, 384).astype(np.float32))
slice_starts = np.array([0, 0, 0, 0]).astype(np.int32)
slice_ends = np.array([-1, -1, -3, -1]).astype(np.int32)
slice_starts = model.net.GivenTensorIntFill(
[], shape=[4], values=slice_starts)
slice_ends = model.net.GivenTensorIntFill(
[], shape=[4], values=slice_ends)
model.net.Slice(["x2", slice_starts, slice_ends], ["y2"])
workspace.FeedBlob("x2", np.random.rand(64, 1, 255, 384).astype(np.float32))
self.InferTensorRunAndCompare(model, ["y2"])
def testShapeInferenceDistances(self):
model = model_helper.ModelHelper(name="test_model")
model.net.L1Distance(["x1", "y1"], "dl1_D1")
model.net.SquaredL2Distance(["x1", "y1"], "dl2_D1")
model.net.CosineSimilarity(["x1", "y1"], "dcos_D1")
model.net.DotProduct(["x1", "y1"], "ddot_D1")
model.net.DotProductWithPadding(["x1", "y1"], "ddotpad_D1")
model.net.L1Distance(["x2", "y2"], "dl1_D2")
model.net.SquaredL2Distance(["x2", "y2"], "dl2_D2")
model.net.CosineSimilarity(["x2", "y2"], "dcos_D2")
model.net.DotProduct(["x2", "y2"], "ddot_D2")
model.net.DotProductWithPadding(["x2", "z2"], "ddotpad_D2")
workspace.FeedBlob("x1", np.random.rand(10).astype(np.float32))
workspace.FeedBlob("y1", np.random.rand(10).astype(np.float32))
workspace.FeedBlob("x2", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("y2", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("z2", np.random.rand(10, 4).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceReduceBackFrontX(self):
model = model_helper.ModelHelper(name="test_model")
model.net.ReduceBackSum(["x"], ["x_back_sum"])
model.net.ReduceBackMean(["x"], ["x_back_mean"])
model.net.ReduceBackMax(["x"], ["x_back_max"])
model.net.ReduceFrontSum(["x"], ["x_front_sum"])
model.net.ReduceFrontMean(["x"], ["x_front_mean"])
model.net.ReduceFrontMax(["x"], ["x_front_max"])
workspace.FeedBlob("x", np.random.rand(10, 12, 18).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testGather(self):
model = model_helper.ModelHelper(name="test_model")
model.net.Gather(["X", "idx"], "Y")
workspace.FeedBlob("X", np.random.rand(100, 4, 5).astype(np.float32))
workspace.FeedBlob("idx", np.array([[3, 18], [99, 4], [2, 5]]).astype(np.int32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceConvNet(self):
model = model_helper.ModelHelper(name="convtest")
model.NHWC2NCHW("data", "data_nchw")
brew.conv(model, "data_nchw", 'conv1', 3, 64,
weight_init=("MSRAFill", {}), kernel=7,
stride=2, pad=3, no_bias=0)
brew.spatial_bn(model, 'conv1', 'conv1_spatbn_relu', 64, epsilon=1e-3, is_test=False)
brew.relu(model, 'conv1_spatbn_relu', 'conv1_spatbn_relu')
brew.max_pool(model, 'conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
brew.fc(model, 'pool1', 'fc', dim_in=(64 * 56 * 56), dim_out=100)
brew.dropout(model, 'fc', 'fc_drop', is_test=False)
model.Sigmoid('fc_drop', 'fc_sigm')
brew.softmax(model, 'fc_sigm', 'softmax')
model.LabelCrossEntropy(['softmax', 'label'], 'xent')
loss = model.AveragedLoss('xent', 'loss')
model.AddGradientOperators([loss])
LR = model.param_init_net.ConstantFill(
[], 'LR', shape=[1], value=0.1
)
for param in model.GetParams():
param_grad = model.param_to_grad[param]
param_momentum = model.param_init_net.ConstantFill(
[param], param + '_momentum', value=0.0
)
model.net.MomentumSGDUpdate(
[param_grad, param_momentum, LR, param],
[param_grad, param_momentum, param],
)
workspace.FeedBlob(
"data",
np.random.rand(16, 227, 227, 3).astype(np.float32),
)
workspace.FeedBlob(
"label",
(100 * np.random.rand(16)).astype(np.int32),
)
# Then do the automatic comparison test: run the net once to
# initialize everything
self.InferTensorRunAndCompare(model)
def testShapeInferenceTranspose(self):
model = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"tensor",
np.random.rand(4, 2, 3, 3, 5).astype(np.float32)
)
# Testing with axes undefined
brew.transpose(
model,
["tensor"],
"transpose",
)
self.InferTensorRunAndCompare(model)
# Testing with axes defined
brew.transpose(
model,
["tensor"],
"transpose",
axes=np.random.permutation(5)
)
return self.InferTensorRunAndCompare(model)
def testShapeInferencePad(self):
model = model_helper.ModelHelper(name="padtest")
model.PadImage("data", 'padded', pad_t=100, pad_l=37, pad_b=28,
pad_r=20, mode="constant", order="NCHW")
workspace.FeedBlob(
"data",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceTwoClass(self):
model = model_helper.ModelHelper(name="twoclass")
model.MakeTwoClass("v", "v2")
workspace.FeedBlob("v", np.random.rand(32).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferencePadZero(self):
model = model_helper.ModelHelper(name="padtest")
model.PadImage("data", 'padded', pad=0, mode="constant",
order="NCHW")
workspace.FeedBlob(
"data",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceMatMul(self):
model = model_helper.ModelHelper(name="test_model")
model.MatMul(["x", "y"], "MatMul")
workspace.FeedBlob("x", np.random.rand(10, 5).astype(np.float32))
workspace.FeedBlob("y", np.random.rand(5, 10).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceSoftmaxWithLoss(self):
model = model_helper.ModelHelper(name="test_model")
model.SoftmaxWithLoss(
["logits", "labels"],
["softmax", "loss"],
)
# 2D Shape of [batch_size, num_classes]
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
# Shape of size batch_size with all values [0, num_classes)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=(4, 1)).astype(np.int32),
)
self.InferTensorRunAndCompare(model)
# Testing with 1D labels arg
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=4).astype(np.int32),
)
self.InferTensorRunAndCompare(model)
# Testing with weight_tensor
model.SoftmaxWithLoss(
["logits", "labels", "weight_tensor"],
["softmax", "loss"],
)
workspace.FeedBlob(
"logits",
np.random.rand(4, 3).astype(np.float32),
)
workspace.FeedBlob(
"labels",
np.random.randint(low=0, high=3, size=4).astype(np.int32),
)
workspace.FeedBlob(
"weight_tensor",
np.random.rand(4).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test spatial model
model = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"img",
np.random.rand(32, 19, 33, 28).astype(np.float32)
)
workspace.FeedBlob(
"img_labels",
(np.random.rand(32, 33, 28) * 19).astype(np.int32)
)
model.SpatialSoftmaxWithLoss(
["img", "img_labels"],
["softmax_img", "loss"],
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceIm2Col(self):
# Test with NCHW
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
order="NCHW")
workspace.FeedBlob(
"X",
np.random.rand(16, 3, 228, 228).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test with NHWC
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel=4, dilation=2, stride=2,
order="NHWC")
workspace.FeedBlob(
"X",
np.random.rand(16, 228, 228, 3).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
# Test with different width and height
model = model_helper.ModelHelper(name="test_model")
model.Im2Col("X", "Y", pad=1, kernel_h=8, kernel_w=4,
dilation=2, stride=2)
workspace.FeedBlob(
"X",
np.random.rand(16, 3, 228, 114).astype(np.float32),
)
self.InferTensorRunAndCompare(model)
def testShapeInferenceTile(self):
m = model_helper.ModelHelper(name="test_model")
workspace.FeedBlob(
"tensor",
np.random.rand(4, 2, 3, 3, 5).astype(np.float32)
)
# Tile along each axis in turn
for i in range(0, 4):
m.net.Tile(
"tensor", "tiled_tensor_{}".format(i), tiles=5, axis=i)
self.InferTensorRunAndCompare(m)
def testShapeInferenceFlatten(self):
model = model_helper.ModelHelper(name="test_model")
model.FlattenToVec("X", "FlatVec")
model.FlattenToVec("empty", "EmptyFlatVec")
workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
self.InferTensorRunAndCompare(model)
# test Flatten with default axis (=1)
model = model_helper.ModelHelper(name="test_model")
model.Flatten("X", "Flat")
model.Flatten("empty", "EmptyFlat")
workspace.FeedBlob("X", np.random.rand(17, 5, 13).astype(np.float32))
workspace.FeedBlob("empty", np.random.rand(0, 2, 3).astype(np.float32))
self.InferTensorRunAndCompare(model)
# test Flatten with axis
model = model_helper.ModelHelper(name="test_model")
x = np.random.randn(17, 5, 13)
for axis in range(x.ndim + 1):
model.Flatten("x", "Flat", axis=axis)
workspace.FeedBlob("x", x)
self.InferTensorRunAndCompare(model)
empty = np.random.randn(0, 5, 13)
for axis in range(empty.ndim + 1):
model.Flatten("empty", "Flat", axis=axis)
workspace.FeedBlob("empty", empty)
self.InferTensorRunAndCompare(model)
def testShapeInferenceReshape(self):
model = model_helper.ModelHelper(name="test_model")
model.Reshape("X", ["Reshaped", "Old_Shape"], shape=[8, 0, -1, 2])
workspace.FeedBlob("X", np.random.rand(4, 26, 32).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferenceUnique(self):
for n in [0, 1]:
model = model_helper.ModelHelper(name="test_model")
model.Unique("X", ["Y"])
model.Unique("X", ["Z", "remap"])
workspace.FeedBlob("X", np.random.rand(n).astype(np.int64))
self.InferTensorRunAndCompare(model)
def testLengthsSum(self):
model = model_helper.ModelHelper(name="test_model")
model.LengthsSum(["X", "length"], ["sum"])
workspace.FeedBlob("X", np.random.rand(6, 32).astype(np.float32))
workspace.FeedBlob("length", np.array([1, 2, 3], dtype=np.int32))
self.InferTensorRunAndCompare(model)
def testConcat(self):
net = core.Net("concat")
net.Concat(["A", "B"], ["C", "splits"], axis=1)
net.Concat(["C", "D"], ["E"], order="NCHW")
net.Concat(["E", "F"], ["G"], add_axis=1, order="NHWC")
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{
'A': [10, 12, 9, 10],
'B': [10, 9, 9, 10],
'D': [10, 2, 9, 10],
'F': [10, 23, 9, 10]
}
)
self.assertEqual(shapes['C'], [10, 21, 9, 10])
self.assertEqual(shapes['splits'], [2])
self.assertEqual(shapes['E'], [10, 23, 9, 10])
self.assertEqual(shapes['G'], [10, 23, 9, 2, 10])
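# Hedged note on the expectations above: along an existing axis, Concat adds
# the sizes (12 + 9 = 21 for 'C'), while add_axis=1 stacks the inputs along
# a brand-new axis, which is where the extra dimension of size 2 in
# shapes['G'] comes from. Numpy analogue of the first case:
def _concat_shape_example():
    A = np.zeros((10, 12, 9, 10))
    B = np.zeros((10, 9, 9, 10))
    return np.concatenate([A, B], axis=1).shape  # (10, 21, 9, 10)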
def testSqueeze(self):
net = core.Net("sq")
net.Squeeze(["data"], ["data_squeezed"], dims=[3, 1])
(shapes, types) = workspace.InferShapesAndTypes(
[net],
{'data': [64, 1, 96, 1, 4]}
)
self.assertEqual(shapes['data_squeezed'], [64, 96, 4])
def testCast(self):
model = model_helper.ModelHelper(name="test_model")
types = [
('bool', np.bool, caffe2_pb2.TensorProto.BOOL),
#('byte', None, caffe2_pb2.TensorProto.BYTE),
('int8', np.int8, caffe2_pb2.TensorProto.INT8),
('uint8', np.uint8, caffe2_pb2.TensorProto.UINT8),
('int16', np.int16, caffe2_pb2.TensorProto.INT16),
('uint16', np.uint16, caffe2_pb2.TensorProto.UINT16),
#('float16', np.float16, caffe2_pb2.TensorProto.FLOAT16),
('int32', np.int32, caffe2_pb2.TensorProto.INT32),
('float', np.float32, caffe2_pb2.TensorProto.FLOAT),
('int64', np.int64, caffe2_pb2.TensorProto.INT64),
('double', np.float64, caffe2_pb2.TensorProto.DOUBLE),
#('string', None, caffe2_pb2.TensorProto.STRING),
]
for (xstr, xnp, _) in types:
xname = 'X%s' % xstr
workspace.FeedBlob(xname, np.random.rand(1).astype(xnp))
for (ystr, _, yc2) in types:
yname = 'Y%s_to_%s' % (xstr, ystr)
model.Cast(xname, yname, to=yc2)
self.InferTensorRunAndCompare(model)
def testShapeInferenceRoiPool(self):
for is_test in [True, False]:
model = model_helper.ModelHelper(name="test_model")
outputs = ['Y'] if is_test else ['Y', 'argmaxes']
model.net.RoIPool(
['X', 'R'], outputs, pooled_h=4, pooled_w=5, is_test=is_test)
workspace.FeedBlob(
"X",
np.random.rand(100, 3, 4, 5).astype(np.float32))
workspace.FeedBlob(
"R",
np.random.rand(2, 5).astype(np.float32))
self.InferTensorRunAndCompare(model)
def testShapeInferencePow(self):
model = model_helper.ModelHelper(name="powtest")
model.Pow("x", 'y', exponent=-1.0)
workspace.FeedBlob('x', np.random.rand(1, 2, 3, 4).astype(np.float32))
self.InferTensorRunAndCompare(model)
def InferTensorRunAndCompare(self, model, expected_uninferred_blobs=None):
        '''
        Runs shape inference and then the model, checking that the
        inferred shapes and types agree with the actual ones.
        'expected_uninferred_blobs' is the list of blobs for which type
        and shape cannot be inferred.
        '''
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
)
        # Run the init net, then create and run the main net
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net, True)
workspace.RunNet(model.Proto().name)
        # ... and then check that the inferred shapes and types match
correct_shapes = {}
correct_types = {}
for b in workspace.Blobs():
arr = workspace.FetchBlob(b)
correct_shapes[b] = arr.shape
            if type(arr) is np.ndarray:
                # Map numpy dtypes to their TensorProto equivalents;
                # BYTE and STRING have no numpy counterpart here.
                np_to_c2_type = {
                    np.dtype('float32'): caffe2_pb2.TensorProto.FLOAT,
                    np.dtype('int32'): caffe2_pb2.TensorProto.INT32,
                    np.dtype('bool'): caffe2_pb2.TensorProto.BOOL,
                    np.dtype('uint8'): caffe2_pb2.TensorProto.UINT8,
                    np.dtype('int8'): caffe2_pb2.TensorProto.INT8,
                    np.dtype('uint16'): caffe2_pb2.TensorProto.UINT16,
                    np.dtype('int16'): caffe2_pb2.TensorProto.INT16,
                    np.dtype('int64'): caffe2_pb2.TensorProto.INT64,
                    np.dtype('float16'): caffe2_pb2.TensorProto.FLOAT16,
                    np.dtype('float64'): caffe2_pb2.TensorProto.DOUBLE,
                }
                correct_types[b] = np_to_c2_type.get(
                    arr.dtype, "unknown {}".format(arr.dtype))
            else:
                correct_types[b] = str(type(arr))
if expected_uninferred_blobs is None:
expected_uninferred_blobs = []
for b in correct_shapes:
# skip blobs for which shape couldn't be inferred
if b in expected_uninferred_blobs:
continue
self.assertTrue(
np.array_equal(
np.array(shapes[b]).astype(np.int32),
np.array(correct_shapes[b]).astype(np.int32)
),
"Shape {} mismatch: {} vs. correct {}".format(
b, shapes[b], correct_shapes[b]
)
)
self.assertFalse(
b not in types and b in correct_types,
"Type for {} not defined".format(b),
)
self.assertEqual(
types[b],
correct_types[b],
"Type {} mismatch: {} vs. {}".format(
b, types[b], correct_types[b],
)
)
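# A minimal standalone sketch (illustration only, not used by the tests
# above) of calling workspace.InferShapesAndTypes with explicit input
# shapes instead of fed blobs; the net and blob names are made up.
def _infer_shapes_example():
    net = core.Net("shape_example")
    net.Flatten("data_in", "data_flat")
    (shapes, types) = workspace.InferShapesAndTypes(
        [net], {'data_in': [4, 5, 6]})
    # Flatten with the default axis=1 collapses trailing dims: [4, 30]
    return shapes['data_flat']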
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
import unittest
class TestSoftmaxOps(hu.HypothesisTestCase):
@given(n=st.sampled_from([2, 4, 71, 103]),
D=st.sampled_from([4, 8, 64, 79, 256, 333]),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
def test_softmax(self, n, D, engine, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
        # Reference implementation of softmax
def label_softmax(X):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
return [probs]
op = core.CreateOperator(
"Softmax",
["X"],
["probs"],
engine=engine
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=label_softmax,
)
@given(n=st.sampled_from([2, 4, 71, 103, 555, 751, 1201]),
D=st.sampled_from([4, 8, 64, 79, 256, 333, 1000]),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
def test_softmax_grad(self, n, D, engine, gc, dc):
        # n = number of examples, D = |labels|
        # Initialize Y and dY, adding 1e-2 for numerical stability
Y = np.random.rand(n, D).astype(np.float32)
dY = np.random.rand(n, D).astype(np.float32)
Y = Y + 1e-2
        # Reference implementation of the softmax gradient
        def label_softmax_grad(Y, dY):
            dX = Y * 0.0
for i in range(n):
d = np.dot(Y[i, :], dY[i, :])
dX[i, :] = Y[i, :] * (dY[i, :] - d)
return [dX]
op = core.CreateOperator(
"SoftmaxGradient",
["Y", "dY"],
["dX"],
engine=engine
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[Y, dY],
reference=label_softmax_grad,
)
@given(axis=st.integers(min_value=1, max_value=4),
engine=st.sampled_from([None, 'CUDNN']),
**hu.gcs)
def test_softmax_axis(self, axis, engine, gc, dc):
np.random.seed(1)
X = np.random.randn(1, 2, 3, 2, 1).astype(np.float32)
X = X + 1e-2
def prod(xs):
p = 1
for x in xs:
p *= x
return p
N = prod(list(X.shape)[:axis])
D = prod(list(X.shape)[axis:])
        # Reference implementation of softmax
def label_softmax(X):
X_ = X.reshape(N, D)
probs = np.zeros((N, D))
rowmax = np.zeros(N)
for i in range(N):
rowmax[i] = max(X_[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X_[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
return [probs.reshape(*X.shape)]
op = core.CreateOperator(
"Softmax",
["X"],
["probs"],
axis=axis,
engine=engine,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=label_softmax,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(2, 10), D=st.integers(4, 16),
only_loss=st.booleans(), **hu.gcs)
def test_softmax_with_loss(self, n, D, gc, only_loss, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
        # Reference implementation of softmax cross-entropy on hard labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = [-np.log(max(probs[i][label[i]], 1e-20))
for i in range(n)]
avgloss = np.sum(label_xent) / float(n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
only_loss=only_loss,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
self.assertGradientChecks(
gc, op, [X, label], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(
n=st.integers(2, 5),
D=st.integers(4, 16),
only_loss=st.booleans(),
label_prob=st.booleans(),
**hu.gcs
)
def test_softmax_with_loss_axis_2(
self, n, D, only_loss, label_prob,
gc, dc
):
np.random.seed(2603)
X = np.random.rand(n, n, D).astype(np.float32)
X = X + 1e-2
if label_prob:
label = np.random.rand(n, n, D).astype(np.float32)
label /= label.sum(axis=2, keepdims=True)
else:
label = (np.random.rand(n, n) * D).astype(np.int32)
        # Reference implementation of cross entropy with soft or hard labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, n, D))
rowmax = np.zeros((n, n))
for i in range(n):
for j in range(n):
rowmax[i, j] = max(X[i, j, ])
# We need to subtract the max to avoid numerical issues
probs[i, j] = X[i, j] - rowmax[i, j]
exps = np.exp(probs[i, j, ])
norm = sum(exps)
probs[i, j, ] = exps / norm
label_xent = 0
for i in range(n):
for j in range(n):
if label_prob:
for k in range(D):
label_xent += (
-np.log(max(probs[i, j, k], 1e-20)) *
label[i, j, k]
)
else:
label_xent += -np.log(max(probs[i, j, label[i, j]], 1e-20))
avgloss = label_xent / float(n * n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
only_loss=only_loss,
label_prob=label_prob,
axis=2,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
self.assertGradientChecks(
gc, op, [X, label], 0, [1], stepsize=1e-4, threshold=1e-2)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(**hu.gcs_gpu_only)
def test_softmax_with_loss_large(self, gc, dc):
np.random.seed(2603)
for n in [32]:
for D in [1000, 2000, 20000]:
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
                # Reference implementation of softmax cross-entropy on hard labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = [-np.log(max(probs[i][label[i]], 1e-20))
for i in range(n)]
avgloss = np.sum(label_xent) / float(n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
@given(n=st.integers(2, 10), D=st.integers(4, 16), **hu.gcs)
def test_softmax_with_loss_label_prob(self, n, D, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = np.random.rand(D, n).astype(np.float32)
# normalize labels to sum to 1
label /= np.sum(label, axis=0)
label = label.transpose()
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent(X, label):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = np.zeros(X.shape)
for i in range(n):
for j in range(D):
label_xent[i][j] = -np.log(
max(probs[i, j], 1e-20)) * label[i, j]
avgloss = np.sum(label_xent) / float(n)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label"],
["probs", "avgloss"],
label_prob=1
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label],
reference=label_softmax_crossent,
)
self.assertGradientChecks(
gc, op, [X, label], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(
n=st.integers(2, 10),
D=st.integers(4, 16),
only_loss=st.booleans(),
**hu.gcs)
def test_softmax_with_loss_weighted(self, n, D, only_loss, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
# Init weights (weight by sample)
weights = np.random.rand(n).astype(np.float32)
        # Reference implementation of weighted softmax cross-entropy on hard labels
def label_softmax_crossent_weighted(X, label, weights):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = [-weights[i] * np.log(max(probs[i][label[i]], 1e-20))
for i in range(n)]
avgloss = np.sum(label_xent) / sum(weights)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label", "weights"],
["probs", "avgloss"],
only_loss=only_loss,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label, weights],
reference=label_softmax_crossent_weighted,
)
self.assertGradientChecks(
gc, op, [X, label, weights], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(2, 10), D=st.integers(4, 16), **hu.gcs)
def test_softmax_with_loss_label_prob_weighted(self, n, D, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
# Initialize label
label = np.random.rand(D, n).astype(np.float32)
# normalize labels to sum to 1
label /= np.sum(label, axis=0)
label = label.transpose()
# Init weights (weight by sample)
weights = np.random.rand(n).astype(np.float32)
# Reference implementation of cross entropy with soft labels
def label_softmax_crossent_weighted(X, label, weights):
probs = np.zeros((n, D))
rowmax = np.zeros(n)
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
label_xent = np.zeros(X.shape)
for i in range(n):
for j in range(D):
label_xent[i][j] = -np.log(
max(probs[i, j], 1e-20)) * label[i, j] * weights[i]
avgloss = np.sum(label_xent) / sum(weights)
return (probs, avgloss)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label", "weights"],
["probs", "avgloss"],
label_prob=1,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, label, weights],
reference=label_softmax_crossent_weighted,
)
self.assertGradientChecks(
gc, op, [X, label, weights], 0, [1], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(2, 5), D=st.integers(2, 4),
weighted=st.booleans(), **hu.gcs)
def test_spatial_softmax_with_loss(self, n, D, weighted, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
W = 18
H = 12
np.random.seed(2603)
X = np.random.rand(n, D, H, W).astype(np.float32)
X = X + 1e-2
        weights = None
        if weighted:
            weights = np.random.rand(n, H, W).astype(np.float32)
# Initialize label. Some of the labels are (-1), i.e "DONT CARE"
label = (np.random.rand(n, H, W) * (D + 1)).astype(np.int32) - 1
def label_softmax_crossent_spatial(X, label, weights=None):
probs = np.zeros((n, D, H, W))
rowmax = np.zeros((n, H, W))
for i in range(n):
for x in range(W):
for y in range(H):
rowmax[i, y, x] = max(X[i, :, y, x])
# We need to subtract the max to avoid numerical issues
probs[i, :, y, x] = X[i, :, y, x] - rowmax[i, y, x]
exps = np.exp(probs[i, :, y, x])
probs[i, :, y, x] = exps / sum(exps)
total_xent = 0.0
total_weight = 0.0
for y in range(H):
for x in range(W):
for i in range(n):
l = label[i, y, x]
if (l != (-1)):
w = 1.0 if weights is None else weights[i, y, x]
total_xent += \
-np.log(max(probs[i, l, y, x], 1e-20)) * w
total_weight += w
print("Total weight {}".format(total_weight))
return (probs, total_xent / total_weight)
op = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X", "label"] + ([] if weights is None else ["weights"]),
["probs", "avgloss"],
)
inputs = [X, label] + ([] if weights is None else [weights])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=label_softmax_crossent_spatial,
)
self.assertGradientChecks(
gc, op, inputs, 0, [1], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(4, 5), D=st.integers(3, 4),
weighted=st.booleans(), **hu.gcs)
def test_spatial_softmax_with_loss_allignore(self, n, D, weighted, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
W = 18
H = 12
np.random.seed(2603)
X = np.random.rand(n, D, H, W).astype(np.float32)
X = X + 1e-2
        weights = None
        if weighted:
            weights = np.random.rand(n, H, W).astype(np.float32)
# Initialize label. All labels as "DONT CARE"
label = np.zeros((n, H, W)).astype(np.int32) - 1
        def label_softmax_crossent_spatial(X, label, weights=None):
            probs = np.zeros((n, D, H, W))
            rowmax = np.zeros((n, H, W))
            for i in range(n):
                for x in range(W):
                    for y in range(H):
                        rowmax[i, y, x] = max(X[i, :, y, x])
                        # We need to subtract the max to avoid numerical issues
                        probs[i, :, y, x] = X[i, :, y, x] - rowmax[i, y, x]
                        exps = np.exp(probs[i, :, y, x])
                        probs[i, :, y, x] = exps / sum(exps)
            # Every label is ignored, so the expected average loss is zero
            return (probs, 0.0)
op = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X", "label"] + ([] if weights is None else ["weights"]),
["probs", "avgloss"],
)
inputs = [X, label] + ([] if weights is None else [weights])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=label_softmax_crossent_spatial,
)
@given(n=st.integers(4, 5), D=st.integers(3, 4),
weighted=st.booleans(), **hu.gcs)
def test_softmax_with_loss_zero_weight(self, n, D, weighted, gc, dc):
# n = number of examples, D = |labels|
# Initialize X and add 1e-2 for numerical stability
np.random.seed(2603)
X = np.random.rand(n, D).astype(np.float32)
X = X + 1e-2
weights = np.zeros(n).astype(np.float32)
# Initialize label
label = (np.random.rand(n) * D).astype(np.int32)
def label_softmax_crossent(X, label, weights=None):
probs = np.zeros((n, D))
rowmax = np.zeros((n))
for i in range(n):
rowmax[i] = max(X[i, ])
# We need to subtract the max to avoid numerical issues
probs[i] = X[i] - rowmax[i]
exps = np.exp(probs[i, ])
norm = sum(exps)
probs[i, ] = exps / norm
return (probs, 0.0)
op = core.CreateOperator(
"SoftmaxWithLoss",
["X", "label", "weights"],
["probs", "avgloss"]
)
inputs = [X, label] + ([] if weights is None else [weights])
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=inputs,
reference=label_softmax_crossent,
)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
def test_compare_cpugpu(self):
'''
        Additional test that checks that CPU and GPU return the same
        values on larger examples. This mainly verifies that the more
        complex GPU implementation is correct.
'''
from caffe2.proto import caffe2_pb2
for _j in range(3):
gpuop = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X_gpu", "label_gpu"],
["probs_gpu", "avgloss_gpu"],
device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
)
cpuop = core.CreateOperator(
"SpatialSoftmaxWithLoss",
["X_cpu", "label_cpu"],
["probs_cpu", "avgloss_cpu"],
device_option=core.DeviceOption(caffe2_pb2.CPU)
)
n = 8
D = 4
W = 64 + int(np.random.rand(1) * 1024)
H = 64 + int(np.random.rand(1) * 1024)
print("W: {} H: {}".format(W, H))
X = np.random.rand(n, D, H, W).astype(np.float32)
X = X + 1e-2
# Initialize label. Some of the labels are (-1), i.e "DONT CARE"
label = (np.random.rand(n, H, W) * (D + 1)).astype(np.int32) - 1
gpu0 = core.DeviceOption(caffe2_pb2.CUDA, 0)
workspace.FeedBlob("X_cpu", X)
workspace.FeedBlob("label_cpu", label)
workspace.FeedBlob("X_gpu", X, device_option=gpu0)
workspace.FeedBlob("label_gpu", label, device_option=gpu0)
workspace.RunOperatorOnce(gpuop)
workspace.RunOperatorOnce(cpuop)
probs_gpu = workspace.FetchBlob("probs_gpu")
probs_cpu = workspace.FetchBlob("probs_cpu")
loss_gpu = workspace.FetchBlob("avgloss_gpu")
loss_cpu = workspace.FetchBlob("avgloss_cpu")
np.testing.assert_allclose(probs_gpu, probs_cpu, rtol=1e-4)
np.testing.assert_allclose(loss_gpu, loss_cpu, rtol=1e-1)
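# Vectorized numpy sketch (illustration only) of the row-wise softmax
# that the per-row reference implementations above compute.
def _softmax_rows(X):
    # Subtract the row max before exponentiating for numerical stability
    shifted = X - X.max(axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=1, keepdims=True)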
if __name__ == "__main__":
    import random
    random.seed(2603)
    unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import assume, given, settings, HealthCheck
import hypothesis.strategies as st
import caffe2.python.hypothesis_test_util as hu
import numpy as np
import unittest
class TestGlu(hu.HypothesisTestCase):
# Suppress filter_too_much health check.
# Reproduce by commenting @settings and uncommenting @seed.
# @seed(302934307671667531413257853548643485645)
@settings(suppress_health_check=[HealthCheck.filter_too_much])
@given(
X=hu.tensor(),
axis=st.integers(min_value=0, max_value=3),
**hu.gcs
)
def test_glu_old(self, X, axis, gc, dc):
def glu_ref(X):
x1, x2 = np.split(X, [X.shape[axis] // 2], axis=axis)
Y = x1 * (1. / (1. + np.exp(-x2)))
return [Y]
# Test only valid tensors.
assume(axis < X.ndim)
assume(X.shape[axis] % 2 == 0)
op = core.CreateOperator("Glu", ["X"], ["Y"], dim=axis)
self.assertReferenceChecks(gc, op, [X], glu_ref)
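# Illustrative numpy sketch (not used by the test) of the GLU
# computation checked above: the input is split in half along `axis`
# and the second half gates the first through a sigmoid.
def _glu(X, axis=-1):
    x1, x2 = np.split(X, 2, axis=axis)
    return x1 * (1. / (1. + np.exp(-x2)))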
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestElementwiseOps(hu.HypothesisTestCase):
@given(n=st.integers(0, 10), m=st.integers(4, 6),
d=st.integers(2, 3), seed=st.integers(0, 1000), **hu.gcs)
def test_div(self, n, m, d, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m, d).astype(np.float32)
Y = np.random.rand(n, m, d).astype(np.float32) + 5.0
def div_op(X, Y):
return [np.divide(X, Y)]
op = core.CreateOperator(
"Div",
["X", "Y"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y],
reference=div_op,
)
self.assertGradientChecks(
gc, op, [X, Y], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
def test_log(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32) + 1.0
def log_op(X):
return [np.log(X)]
op = core.CreateOperator(
"Log",
["X"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=log_op,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(0, 10), m=st.integers(4, 6),
d=st.integers(2, 3), seed=st.integers(0, 1000), **hu.gcs)
def test_powt(self, n, m, d, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m, d).astype(np.float32) + 1.0
Y = np.random.rand(n, m, d).astype(np.float32) + 2.0
def powt_op(X, Y):
return [np.power(X, Y)]
        # Two gradients: dZ/dX = Y * X^(Y-1) and dZ/dY = X^Y * ln(X) = Z * ln(X)
def powt_grad(g_out, outputs, fwd_inputs):
[X, Y] = fwd_inputs
Z = outputs[0]
            return [Y * np.power(X, Y - 1) * g_out, Z * np.log(X) * g_out]
op = core.CreateOperator(
"Pow",
["X", "Y"],
["Z"]
)
self.assertReferenceChecks(device_option=gc,
op=op,
inputs=[X, Y],
reference=powt_op,
output_to_grad="Z",
grad_reference=powt_grad)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
def test_sqr(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32)
def sqr_op(X):
return [np.square(X)]
op = core.CreateOperator(
"Sqr",
["X"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sqr_op,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(
X=hu.tensor(
elements=st.floats(0.02, 1),
# allow empty tensor
min_value=0),
**hu.gcs
)
def test_sqrt(self, X, gc, dc):
def sqrt_op(X):
return [np.sqrt(X)]
op = core.CreateOperator(
"Sqrt",
["X"],
["Y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sqrt_op,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(X=hu.tensor(elements=st.floats(0.05, 1)), **hu.gcs)
def test_sqrt_inplace(self, X, gc, dc):
def sqrt_op(X):
return [np.sqrt(X)]
op = core.CreateOperator(
"Sqrt",
["X"],
["X"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sqrt_op,
)
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
def test_swish(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32)
def swish(X):
return [np.divide(X, (1. + np.exp(-X)))]
op = core.CreateOperator(
"Swish",
["X"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=swish,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
def test_swish_gradient_inplace(self, n, m, gc, dc, seed):
np.random.seed(seed)
def swish(X):
return [np.divide(X, (1. + np.exp(-X)))]
def swish_gradient(X, Y, dY):
return [dY * (Y + np.divide(1. - Y, 1. + np.exp(-X)))]
X = np.random.rand(n, m).astype(np.float32)
Y = swish(X)[0]
dY = np.random.rand(n, m).astype(np.float32)
        op = core.CreateOperator(
            "SwishGradient",
            ["X", "Y", "grad"],
            ["grad"]
        )
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, Y, dY],
reference=swish_gradient,
)
@given(n=st.integers(0, 6), m=st.integers(4, 6),
seed=st.integers(0, 1000), **hu.gcs)
def test_sigmoid(self, n, m, gc, dc, seed):
np.random.seed(seed)
X = np.random.rand(n, m).astype(np.float32)
def sigmoid(X):
return [1. / (1. + np.exp(-X))]
op = core.CreateOperator(
"Sigmoid",
["X"],
["Z"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sigmoid,
)
self.assertGradientChecks(
gc, op, [X], 0, [0], stepsize=1e-4, threshold=1e-2)
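# Derivation sketch (illustration only) for the SwishGradient reference
# above: with s(x) = 1 / (1 + exp(-x)) and y = x * s(x),
#   dy/dx = s(x) + x * s(x) * (1 - s(x)) = y + s(x) * (1 - y),
# which matches dY * (Y + (1 - Y) / (1 + exp(-X))) used in the test.
def _swish_grad(X, dY):
    s = 1. / (1. + np.exp(-X))
    Y = X * s
    return dY * (Y + s * (1. - Y))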
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew, core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
from caffe2.python.model_helper import ModelHelper
class TestLayerNormOp(hu.HypothesisTestCase):
@given(X=hu.tensors(n=1), **hu.gcs)
def test_layer_norm_grad_op(self, X, gc, dc):
X = X[0]
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
axis = np.random.randint(0, len(X.shape))
epsilon = 1e-4
op = core.CreateOperator(
"LayerNormGradient",
["gout", "out", "mean", "stdev", "in"],
["gin"],
axis=axis,
epsilon=epsilon,
)
def layer_norm_ref(X):
left = int(np.prod(X.shape[:axis]))
reshaped = np.reshape(X, [left, -1])
mean = np.mean(reshaped, axis=1).reshape([left, 1])
stdev = np.sqrt(
np.mean(np.square(reshaped), axis=1).reshape([left, 1]) -
np.power(mean, 2) + epsilon
)
norm = (reshaped - mean) / (stdev)
norm = np.reshape(norm, X.shape)
mean = np.reshape(mean, X.shape[:axis] + (1,))
stdev = np.reshape(stdev, X.shape[:axis] + (1,))
return [norm, mean, stdev]
norm, mean, stdev = layer_norm_ref(X)
gout = norm
def layer_norm_grad_ref(gout_full, norm, mean_full, stdev_full, X_full):
left = int(np.prod(X_full.shape[:axis]))
right = int(np.prod(X_full.shape[axis:]))
X = np.reshape(X_full, [left, right])
stdev = np.reshape(stdev_full, [left, 1])
mean = np.reshape(mean_full, [left, 1])
gout = np.reshape(gout_full, [left, right])
dstdev_end = (-1.0) / np.power(stdev, 2.0) \
* np.sum((X - mean) * gout, axis=1).reshape([left, 1])
dmean_end = np.sum(-1.0 / stdev * gout, axis=1).reshape([left, 1])
dx_end = 1.0 / stdev * gout
# stdev block
dmean_stdev = -1.0 * mean / stdev * dstdev_end
dx_stdev = X / (right * stdev) * dstdev_end
# mean block
dmean = dmean_end + dmean_stdev
dxmean = (1.0 / right) * dmean
# final outputs
dx = dx_end + dx_stdev + dxmean
dx = dx.reshape(X_full.shape)
return [dx]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[gout, norm, mean, stdev, X],
reference=layer_norm_grad_ref
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=[gout, norm, mean, stdev, X],
outputs_to_check=[0],
)
@given(X=hu.tensors(n=1), **hu.gcs)
def test_layer_norm_op(self, X, gc, dc):
X = X[0]
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
axis = np.random.randint(0, len(X.shape))
epsilon = 1e-4
op = core.CreateOperator(
"LayerNorm",
["input"],
["output", "mean", "stdev"],
axis=axis,
epsilon=epsilon,
)
def layer_norm_ref(X):
left = int(np.prod(X.shape[:axis]))
reshaped = np.reshape(X, [left, -1])
mean = np.mean(reshaped, axis=1).reshape([left, 1])
stdev = np.sqrt(
np.mean(np.power(reshaped, 2), axis=1).reshape([left, 1]) -
np.power(mean, 2) + epsilon
)
norm = (reshaped - mean) / (stdev)
norm = np.reshape(norm, X.shape)
mean = np.reshape(mean, X.shape[:axis] + (1,))
stdev = np.reshape(stdev, X.shape[:axis] + (1,))
return [norm, mean, stdev]
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=layer_norm_ref
)
self.assertDeviceChecks(
device_options=dc,
op=op,
inputs=[X],
outputs_to_check=[0, 1, 2],
)
@given(X=hu.tensors(n=1), **hu.gcs)
def test_layer_norm_brew_wrapper(self, X, gc, dc):
X = X[0]
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
axis = np.random.randint(0, len(X.shape))
self.ws.create_blob('input').feed(X)
model = ModelHelper(name='test_layer_norm_brew_wrapper')
brew.layer_norm(
model,
'input',
'output',
dim_in=X.shape[axis],
axis=axis,
epsilon=1e-4,
)
self.ws.create_net(model.param_init_net).run()
self.ws.create_net(model.net).run()
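# Minimal numpy sketch (illustration only) of the LayerNorm statistics
# the references above compute: everything left of `axis` is treated as
# the batch, everything from `axis` on is normalized, and the stdev
# uses E[x^2] - E[x]^2 + epsilon under the square root.
def _layer_norm_stats(X, axis, epsilon=1e-4):
    left = int(np.prod(X.shape[:axis]))
    reshaped = np.reshape(X, [left, -1])
    mean = reshaped.mean(axis=1, keepdims=True)
    stdev = np.sqrt(reshaped.var(axis=1, keepdims=True) + epsilon)
    return mean, stdev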
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import hypothesis.strategies as st
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, workspace
from hypothesis import given
from caffe2.proto import caffe2_pb2
class TestONNXWhile(hu.HypothesisTestCase):
@given(
condition=st.booleans(),
max_trip_count=st.integers(0, 100),
save_scopes=st.booleans(),
seed=st.integers(0, 65535),
**hu.gcs_cpu_only)
def test_onnx_while_fibb(
self, condition, max_trip_count, save_scopes, seed, gc, dc):
np.random.seed(seed)
# Create body net
body_net = caffe2_pb2.NetDef()
# Two loop carried dependencies: first and second
body_net.external_input.extend(['i', 'cond', 'first', 'second'])
body_net.external_output.extend(['cond_new', 'second', 'third', 'third'])
add_op = core.CreateOperator(
'Add',
['first', 'second'],
['third'],
)
print3 = core.CreateOperator(
'Print',
['third'],
[],
)
limit_const = core.CreateOperator(
'ConstantFill',
[],
['limit_const'],
shape=[1],
dtype=caffe2_pb2.TensorProto.FLOAT,
value=100.0,
)
cond = core.CreateOperator(
'LT',
['third', 'limit_const'],
['cond_new'],
)
body_net.op.extend([add_op, print3, limit_const, cond])
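        # Loop-carried wiring: the body consumes ['i', 'cond', 'first',
        # 'second'] and produces ['cond_new', 'second', 'third', 'third'],
        # i.e. the new condition, the two carried values for the next
        # iteration, and one scan output (the new Fibonacci number).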
while_op = core.CreateOperator(
'ONNXWhile',
['max_trip_count', 'condition', 'first_init', 'second_init'],
['first_a', 'second_a', 'third_a'],
body=body_net,
has_cond=True,
has_trip_count=True,
save_scopes=save_scopes,
)
condition_arr = np.array(condition).astype(np.bool)
max_trip_count_arr = np.array(max_trip_count).astype(np.int64)
first_init = np.array([1]).astype(np.float32)
second_init = np.array([1]).astype(np.float32)
def ref(max_trip_count, condition, first_init, second_init):
first = 1
second = 1
results = []
if condition:
for _ in range(max_trip_count):
third = first + second
first = second
second = third
results.append(third)
if third > 100:
break
return (first, second, np.array(results).astype(np.float32))
self.assertReferenceChecks(
gc,
while_op,
[max_trip_count_arr, condition_arr, first_init, second_init],
ref,
)
self.assertFalse(workspace.HasBlob("cond_new"))
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
import caffe2.python.hypothesis_test_util as hu
from caffe2.python import core, utils
from caffe2.proto import caffe2_pb2
ROI_CANONICAL_SCALE = 224 # default: 224
ROI_CANONICAL_LEVEL = 4 # default: 4
ROI_MAX_LEVEL = 5 # default: 5
ROI_MIN_LEVEL = 2 # default: 2
RPN_MAX_LEVEL = 6 # default: 6
RPN_MIN_LEVEL = 2 # default: 2
RPN_POST_NMS_TOP_N = 2000 # default: 2000
#
# Should match original Detectron code at
# https://github.com/facebookresearch/Detectron/blob/master/lib/ops/collect_and_distribute_fpn_rpn_proposals.py
#
def boxes_area(boxes):
"""Compute the area of an array of boxes."""
w = (boxes[:, 2] - boxes[:, 0] + 1)
h = (boxes[:, 3] - boxes[:, 1] + 1)
areas = w * h
    assert np.all(areas >= 0), 'Negative areas found'
return areas
def map_rois_to_fpn_levels(rois, k_min, k_max):
"""Determine which FPN level each RoI in a set of RoIs should map to based
on the heuristic in the FPN paper.
"""
# Compute level ids
s = np.sqrt(boxes_area(rois))
s0 = ROI_CANONICAL_SCALE
lvl0 = ROI_CANONICAL_LEVEL
# Eqn.(1) in FPN paper
target_lvls = np.floor(lvl0 + np.log2(s / s0 + 1e-6))
target_lvls = np.clip(target_lvls, k_min, k_max)
return target_lvls
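# Worked example (illustration only): for a square box of side s, the
# sqrt-area heuristic above gives floor(lvl0 + log2(s / s0)), so a box
# with side 112 = s0 / 2 maps to level 3, one below the canonical level;
# very small or large boxes are clipped to [k_min, k_max].
def _example_target_level(side):
    return np.floor(ROI_CANONICAL_LEVEL +
                    np.log2(side / ROI_CANONICAL_SCALE + 1e-6))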
def collect(inputs):
post_nms_topN = RPN_POST_NMS_TOP_N
k_max = RPN_MAX_LEVEL
k_min = RPN_MIN_LEVEL
num_lvls = k_max - k_min + 1
roi_inputs = inputs[:num_lvls]
score_inputs = inputs[num_lvls:]
    # rois are in [[batch_idx, x0, y0, x1, y1], ...] format
# Combine predictions across all levels and retain the top scoring
#
# equivalent to Detectron code
# rois = np.concatenate([blob.data for blob in roi_inputs])
# scores = np.concatenate([blob.data for blob in score_inputs]).squeeze()
rois = np.concatenate(roi_inputs)
scores = np.concatenate(score_inputs).squeeze()
assert rois.shape[0] == scores.shape[0]
inds = np.argsort(-scores)[:post_nms_topN]
rois = rois[inds, :]
return rois
def distribute(rois, _, outputs):
"""To understand the output blob order see return value of
roi_data.fast_rcnn.get_fast_rcnn_blob_names(is_training=False)
"""
# equivalent to Detectron code
# lvl_min = cfg.FPN.ROI_MIN_LEVEL
# lvl_max = cfg.FPN.ROI_MAX_LEVEL
lvl_min = ROI_MIN_LEVEL
lvl_max = ROI_MAX_LEVEL
lvls = map_rois_to_fpn_levels(rois[:, 1:5], lvl_min, lvl_max)
# equivalent to Detectron code
# outputs[0].reshape(rois.shape)
# outputs[0].data[...] = rois
outputs[0] = rois
# Create new roi blobs for each FPN level
# (See: modeling.FPN.add_multilevel_roi_blobs which is similar but annoying
# to generalize to support this particular case.)
rois_idx_order = np.empty((0, ))
for output_idx, lvl in enumerate(range(lvl_min, lvl_max + 1)):
idx_lvl = np.where(lvls == lvl)[0]
blob_roi_level = rois[idx_lvl, :]
# equivalent to Detectron code
# outputs[output_idx + 1].reshape(blob_roi_level.shape)
# outputs[output_idx + 1].data[...] = blob_roi_level
outputs[output_idx + 1] = blob_roi_level
rois_idx_order = np.concatenate((rois_idx_order, idx_lvl))
rois_idx_restore = np.argsort(rois_idx_order)
# equivalent to Detectron code
# py_op_copy_blob(
# rois_idx_restore.astype(np.int32), outputs[-1])
outputs[-1] = rois_idx_restore.astype(np.int32)
def collect_and_distribute_fpn_rpn_ref(*inputs):
assert inputs
num_rpn_lvls = RPN_MAX_LEVEL - RPN_MIN_LEVEL + 1
assert len(inputs) == 2 * num_rpn_lvls
N = inputs[0].shape[0]
for i in range(num_rpn_lvls):
assert len(inputs[i].shape) == 2
assert inputs[i].shape[0] == N
assert inputs[i].shape[1] == 5
for i in range(num_rpn_lvls, 2 * num_rpn_lvls):
assert len(inputs[i].shape) == 1
assert inputs[i].shape[0] == N
num_roi_lvls = ROI_MAX_LEVEL - ROI_MIN_LEVEL + 1
outputs = (num_roi_lvls + 2) * [None]
rois = collect(inputs)
distribute(rois, None, outputs)
return outputs
class TestCollectAndDistributeFpnRpnProposals(hu.HypothesisTestCase):
def run_on_device(self, device_opts):
np.random.seed(0)
proposal_count = 5000
input_names = []
inputs = []
for lvl in range(RPN_MIN_LEVEL, RPN_MAX_LEVEL + 1):
rpn_roi = (
ROI_CANONICAL_SCALE *
np.random.rand(proposal_count, 5).astype(np.float32)
)
for i in range(proposal_count):
# Make RoIs have positive area, since they
                # are in the format [[batch_idx, x0, y0, x1, y1], ...]
rpn_roi[i][3] += rpn_roi[i][1]
rpn_roi[i][4] += rpn_roi[i][2]
input_names.append('rpn_rois_fpn{}'.format(lvl))
inputs.append(rpn_roi)
for lvl in range(RPN_MIN_LEVEL, RPN_MAX_LEVEL + 1):
rpn_roi_score = np.random.rand(proposal_count).astype(np.float32)
input_names.append('rpn_roi_probs_fpn{}'.format(lvl))
inputs.append(rpn_roi_score)
output_names = [
'rois',
]
for lvl in range(ROI_MIN_LEVEL, ROI_MAX_LEVEL + 1):
output_names.append('rois_fpn{}'.format(lvl))
output_names.append('rois_idx_restore')
op = core.CreateOperator(
'CollectAndDistributeFpnRpnProposals',
input_names,
output_names,
arg=[
utils.MakeArgument("roi_canonical_scale", ROI_CANONICAL_SCALE),
utils.MakeArgument("roi_canonical_level", ROI_CANONICAL_LEVEL),
utils.MakeArgument("roi_max_level", ROI_MAX_LEVEL),
utils.MakeArgument("roi_min_level", ROI_MIN_LEVEL),
utils.MakeArgument("rpn_max_level", RPN_MAX_LEVEL),
utils.MakeArgument("rpn_min_level", RPN_MIN_LEVEL),
utils.MakeArgument("post_nms_topN", RPN_POST_NMS_TOP_N),
],
device_option=device_opts)
self.assertReferenceChecks(
device_option=device_opts,
op=op,
inputs=inputs,
reference=collect_and_distribute_fpn_rpn_ref,
)
def test_cpu(self):
device_opts_cpu = caffe2_pb2.DeviceOption()
self.run_on_device(device_opts_cpu)
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import hypothesis.strategies as st
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import numpy as np
class TestKeySplitOps(hu.HypothesisTestCase):
@given(
X=hu.arrays(
dims=[1000],
dtype=np.int64,
elements=st.integers(min_value=0, max_value=100)
),
**hu.gcs_cpu_only
)
def test_key_split_op(self, X, gc, dc):
categorical_limit = max(X) + 1
workspace.ResetWorkspace()
workspace.FeedBlob('X', X)
output_blobs = ['Y_%d' % i for i in range(categorical_limit)]
op = core.CreateOperator(
'KeySplit', ['X'],
output_blobs,
categorical_limit=categorical_limit
)
workspace.RunOperatorOnce(op)
output_vecs = [
workspace.blobs[output_blobs[i]] for i in range(categorical_limit)
]
expected_output_vecs = [[] for _ in range(categorical_limit)]
for i, x in enumerate(X):
expected_output_vecs[x].append(i)
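        # e.g. X = [2, 0, 2] yields Y_0 = [1], Y_1 = [], Y_2 = [0, 2]:
        # each output holds the positions at which its key value occurs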
for i in range(categorical_limit):
np.testing.assert_array_equal(
output_vecs[i],
np.array(expected_output_vecs[i], dtype=np.int32)
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestNormalizeOp(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=1,
max_dim=5,
elements=st.floats(min_value=0.5, max_value=1.0)),
**hu.gcs)
def test_normalize(self, X, gc, dc):
def ref_normalize(X, axis):
x_normed = X / (
np.sqrt((X**2).sum(axis=axis, keepdims=True)) + np.finfo(X.dtype).tiny)
return (x_normed,)
for axis in range(-X.ndim, X.ndim):
op = core.CreateOperator("Normalize", "X", "Y", axis=axis)
self.assertReferenceChecks(
gc,
op,
[X],
functools.partial(ref_normalize, axis=axis))
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
@given(X=hu.tensor(min_dim=1,
max_dim=5,
elements=st.floats(min_value=0.5, max_value=1.0)),
**hu.gcs)
def test_normalize_L1(self, X, gc, dc):
def ref(X, axis):
norm = abs(X).sum(axis=axis, keepdims=True)
return (X / norm,)
for axis in range(-X.ndim, X.ndim):
print('axis: ', axis)
op = core.CreateOperator("NormalizeL1", "X", "Y", axis=axis)
self.assertReferenceChecks(
gc,
op,
[X],
functools.partial(ref, axis=axis))
self.assertDeviceChecks(dc, op, [X], [0])
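# Vectorized sketch (illustration only) of the two normalizations
# checked above: L2 divides by the Euclidean norm along `axis`, L1 by
# the sum of absolute values.
def _normalize(X, axis, ord=2):
    if ord == 2:
        norm = np.sqrt((X ** 2).sum(axis=axis, keepdims=True))
    else:
        norm = np.abs(X).sum(axis=axis, keepdims=True)
    return X / norm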
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestMarginRankingCriterion(hu.HypothesisTestCase):
@given(N=st.integers(min_value=10, max_value=20),
seed=st.integers(min_value=0, max_value=65535),
margin=st.floats(min_value=-0.5, max_value=0.5),
**hu.gcs)
def test_margin_ranking_criterion(self, N, seed, margin, gc, dc):
np.random.seed(seed)
X1 = np.random.randn(N).astype(np.float32)
X2 = np.random.randn(N).astype(np.float32)
Y = np.random.choice([-1, 1], size=N).astype(np.int32)
op = core.CreateOperator(
"MarginRankingCriterion", ["X1", "X2", "Y"], ["loss"],
margin=margin)
def ref_cec(X1, X2, Y):
result = np.maximum(-Y * (X1 - X2) + margin, 0)
return (result, )
inputs = [X1, X2, Y]
# This checks the op implementation against a reference function in
# python.
self.assertReferenceChecks(gc, op, inputs, ref_cec)
# This checks the op implementation over multiple device options (e.g.
# CPU and CUDA). [0] means that the 0-th output is checked.
self.assertDeviceChecks(dc, op, inputs, [0])
        # Nudge points away from the hinge, where the loss is not
        # differentiable, to keep the numeric gradient check stable
X1[np.abs(margin - Y * (X1 - X2)) < 0.1] += 0.1
X2[np.abs(margin - Y * (X1 - X2)) < 0.1] -= 0.1
# Check dX1
self.assertGradientChecks(gc, op, inputs, 0, [0])
# Check dX2
self.assertGradientChecks(gc, op, inputs, 1, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import hypothesis.strategies as st
import caffe2.python.hypothesis_test_util as hu
import numpy as np
import unittest
class TestCeil(hu.HypothesisTestCase):
@given(X=hu.tensor(),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_ceil(self, X, gc, dc, engine):
op = core.CreateOperator("Ceil", ["X"], ["Y"], engine=engine)
def ceil_ref(X):
return (np.ceil(X),)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=ceil_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
## @package download
# Module caffe2.python.models.download
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
import sys
import signal
import re
# Import urllib
try:
import urllib.error as urlliberror
import urllib.request as urllib
HTTPError = urlliberror.HTTPError
URLError = urlliberror.URLError
except ImportError:
import urllib2 as urllib
HTTPError = urllib.HTTPError
URLError = urllib.URLError
# urllib requires more work to deal with a redirect, so not using vanity url
DOWNLOAD_BASE_URL = "https://s3.amazonaws.com/download.caffe2.ai/models/"
DOWNLOAD_COLUMNS = 70
# Handle Ctrl-C so a big download can be interrupted cleanly
def signalHandler(signal, frame):
print("Killing download...")
exit(0)
signal.signal(signal.SIGINT, signalHandler)
def deleteDirectory(top_dir):
for root, dirs, files in os.walk(top_dir, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
os.rmdir(top_dir)
def progressBar(percentage):
full = int(DOWNLOAD_COLUMNS * percentage / 100)
bar = full * "#" + (DOWNLOAD_COLUMNS - full) * " "
sys.stdout.write(u"\u001b[1000D[" + bar + "] " + str(percentage) + "%")
sys.stdout.flush()
def downloadFromURLToFile(url, filename, show_progress=True):
try:
print("Downloading from {url}".format(url=url))
response = urllib.urlopen(url)
size = int(response.info().get('Content-Length').strip())
chunk = min(size, 8192)
print("Writing to {filename}".format(filename=filename))
if show_progress:
downloaded_size = 0
progressBar(0)
with open(filename, "wb") as local_file:
while True:
data_chunk = response.read(chunk)
if not data_chunk:
break
local_file.write(data_chunk)
if show_progress:
downloaded_size += len(data_chunk)
progressBar(int(100 * downloaded_size / size))
print("") # New line to fix for progress bar
except HTTPError as e:
raise Exception("Could not download model. [HTTP Error] {code}: {reason}."
.format(code=e.code, reason=e.reason))
except URLError as e:
raise Exception("Could not download model. [URL Error] {reason}."
.format(reason=e.reason))
def getURLFromName(name, filename):
return "{base_url}{name}/{filename}".format(base_url=DOWNLOAD_BASE_URL,
name=name, filename=filename)
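# Example (the model name is illustrative):
#   getURLFromName('squeezenet', 'predict_net.pb')
#   -> 'https://s3.amazonaws.com/download.caffe2.ai/models/squeezenet/predict_net.pb'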
def downloadModel(model, args):
# Figure out where to store the model
    model_folder = model
dir_path = os.path.dirname(os.path.realpath(__file__))
if args.install:
model_folder = '{dir_path}/{folder}'.format(dir_path=dir_path,
folder=model)
# Check if that folder is already there
if os.path.exists(model_folder) and not os.path.isdir(model_folder):
if not args.force:
raise Exception("Cannot create folder for storing the model,\
there exists a file of the same name.")
else:
print("Overwriting existing file! ({filename})"
.format(filename=model_folder))
os.remove(model_folder)
if os.path.isdir(model_folder):
if not args.force:
response = ""
query = "Model already exists, continue? [y/N] "
try:
response = raw_input(query)
except NameError:
response = input(query)
if response.upper() == 'N' or not response:
print("Cancelling download...")
exit(0)
print("Overwriting existing folder! ({filename})".format(filename=model_folder))
deleteDirectory(model_folder)
# Now we can safely create the folder and download the model
os.makedirs(model_folder)
for f in ['predict_net.pb', 'init_net.pb']:
try:
downloadFromURLToFile(getURLFromName(model, f),
'{folder}/{f}'.format(folder=model_folder,
f=f))
except Exception as e:
print("Abort: {reason}".format(reason=str(e)))
print("Cleaning up...")
deleteDirectory(model_folder)
exit(0)
if args.install:
os.symlink("{folder}/__sym_init__.py".format(folder=dir_path),
"{folder}/__init__.py".format(folder=model_folder))
def validModelName(name):
invalid_names = ['__init__']
if name in invalid_names:
return False
if not re.match("^[/0-9a-zA-Z_-]+$", name):
return False
return True
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Download or install pretrained models.')
parser.add_argument('model', nargs='+',
help='Model to download/install.')
parser.add_argument('-i', '--install', action='store_true',
help='Install the model.')
parser.add_argument('-f', '--force', action='store_true',
help='Force a download/installation.')
args = parser.parse_args()
for model in args.model:
if validModelName(model):
downloadModel(model, args)
else:
print("'{}' is not a valid model name.".format(model))
|
## @package resnet
# Module caffe2.python.models.resnet
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import brew
'''
Utility for creating ResNets
See "Deep Residual Learning for Image Recognition" by He, Zhang et. al. 2015
'''
class ResNetBuilder():
'''
Helper class for constructing residual blocks.
'''
def __init__(self, model, prev_blob, no_bias, is_test, spatial_bn_mom=0.9):
self.model = model
self.comp_count = 0
self.comp_idx = 0
self.prev_blob = prev_blob
self.is_test = is_test
self.spatial_bn_mom = spatial_bn_mom
self.no_bias = 1 if no_bias else 0
def add_conv(self, in_filters, out_filters, kernel, stride=1, pad=0):
self.comp_idx += 1
self.prev_blob = brew.conv(
self.model,
self.prev_blob,
'comp_%d_conv_%d' % (self.comp_count, self.comp_idx),
in_filters,
out_filters,
weight_init=("MSRAFill", {}),
kernel=kernel,
stride=stride,
pad=pad,
no_bias=self.no_bias,
)
return self.prev_blob
def add_relu(self):
self.prev_blob = brew.relu(
self.model,
self.prev_blob,
self.prev_blob, # in-place
)
return self.prev_blob
def add_spatial_bn(self, num_filters):
self.prev_blob = brew.spatial_bn(
self.model,
self.prev_blob,
'comp_%d_spatbn_%d' % (self.comp_count, self.comp_idx),
num_filters,
epsilon=1e-3,
momentum=self.spatial_bn_mom,
is_test=self.is_test,
)
return self.prev_blob
def add_bottleneck(
self,
input_filters, # num of feature maps from preceding layer
base_filters, # num of filters internally in the component
output_filters, # num of feature maps to output
down_sampling=False,
spatial_batch_norm=True,
):
        '''
        Add a "bottleneck" component as described in He et al. (2015),
        Figure 3 (right).
        '''
        self.comp_idx = 0
shortcut_blob = self.prev_blob
# 1x1
self.add_conv(
input_filters,
base_filters,
kernel=1,
stride=1
)
if spatial_batch_norm:
self.add_spatial_bn(base_filters)
self.add_relu()
# 3x3 (note the pad, required for keeping dimensions)
self.add_conv(
base_filters,
base_filters,
kernel=3,
stride=(1 if down_sampling is False else 2),
pad=1
)
if spatial_batch_norm:
self.add_spatial_bn(base_filters)
self.add_relu()
# 1x1
last_conv = self.add_conv(base_filters, output_filters, kernel=1)
if spatial_batch_norm:
last_conv = self.add_spatial_bn(output_filters)
# Summation with input signal (shortcut)
# If we need to increase dimensions (feature maps), need to
# do a projection for the short cut
if (output_filters > input_filters):
shortcut_blob = brew.conv(
self.model,
shortcut_blob,
'shortcut_projection_%d' % self.comp_count,
input_filters,
output_filters,
weight_init=("MSRAFill", {}),
kernel=1,
stride=(1 if down_sampling is False else 2),
no_bias=self.no_bias,
)
if spatial_batch_norm:
shortcut_blob = brew.spatial_bn(
self.model,
shortcut_blob,
'shortcut_projection_%d_spatbn' % self.comp_count,
output_filters,
epsilon=1e-3,
momentum=self.spatial_bn_mom,
is_test=self.is_test,
)
self.prev_blob = brew.sum(
self.model, [shortcut_blob, last_conv],
'comp_%d_sum_%d' % (self.comp_count, self.comp_idx)
)
self.comp_idx += 1
self.add_relu()
        # Keep track of the number of high-level components of this ResNetBuilder
self.comp_count += 1
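    # e.g. add_bottleneck(64, 64, 256) builds one conv2_x block:
    # 1x1/64 -> 3x3/64 -> 1x1/256, with a 1x1 projection on the shortcut
    # whenever the output width exceeds the input width.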
def add_simple_block(
self,
input_filters,
num_filters,
down_sampling=False,
spatial_batch_norm=True
):
self.comp_idx = 0
shortcut_blob = self.prev_blob
# 3x3
self.add_conv(
input_filters,
num_filters,
kernel=3,
stride=(1 if down_sampling is False else 2),
pad=1
)
if spatial_batch_norm:
self.add_spatial_bn(num_filters)
self.add_relu()
last_conv = self.add_conv(num_filters, num_filters, kernel=3, pad=1)
if spatial_batch_norm:
last_conv = self.add_spatial_bn(num_filters)
# Increase of dimensions, need a projection for the shortcut
if (num_filters != input_filters):
shortcut_blob = brew.conv(
self.model,
shortcut_blob,
'shortcut_projection_%d' % self.comp_count,
input_filters,
num_filters,
weight_init=("MSRAFill", {}),
kernel=1,
stride=(1 if down_sampling is False else 2),
no_bias=self.no_bias,
)
if spatial_batch_norm:
shortcut_blob = brew.spatial_bn(
self.model,
shortcut_blob,
'shortcut_projection_%d_spatbn' % self.comp_count,
num_filters,
epsilon=1e-3,
is_test=self.is_test,
)
self.prev_blob = brew.sum(
self.model, [shortcut_blob, last_conv],
'comp_%d_sum_%d' % (self.comp_count, self.comp_idx)
)
self.comp_idx += 1
self.add_relu()
        # Keep track of the number of high-level components of this ResNetBuilder
self.comp_count += 1
# The conv1 and final_avg kernel/stride args provide a basic mechanism for
# adapting resnet50 for different sizes of input images.
def create_resnet50(
model,
data,
num_input_channels,
num_labels,
label=None,
is_test=False,
no_loss=False,
no_bias=0,
conv1_kernel=7,
conv1_stride=2,
final_avg_kernel=7,
):
# conv1 + maxpool
brew.conv(
model,
data,
'conv1',
num_input_channels,
64,
weight_init=("MSRAFill", {}),
kernel=conv1_kernel,
stride=conv1_stride,
pad=3,
no_bias=no_bias
)
brew.spatial_bn(
model,
'conv1',
'conv1_spatbn_relu',
64,
epsilon=1e-3,
momentum=0.1,
is_test=is_test
)
brew.relu(model, 'conv1_spatbn_relu', 'conv1_spatbn_relu')
brew.max_pool(model, 'conv1_spatbn_relu', 'pool1', kernel=3, stride=2)
# Residual blocks...
builder = ResNetBuilder(model, 'pool1', no_bias=no_bias,
is_test=is_test, spatial_bn_mom=0.1)
# conv2_x (ref Table 1 in He et al. (2015))
builder.add_bottleneck(64, 64, 256)
builder.add_bottleneck(256, 64, 256)
builder.add_bottleneck(256, 64, 256)
# conv3_x
builder.add_bottleneck(256, 128, 512, down_sampling=True)
for _ in range(1, 4):
builder.add_bottleneck(512, 128, 512)
# conv4_x
builder.add_bottleneck(512, 256, 1024, down_sampling=True)
for _ in range(1, 6):
builder.add_bottleneck(1024, 256, 1024)
# conv5_x
builder.add_bottleneck(1024, 512, 2048, down_sampling=True)
builder.add_bottleneck(2048, 512, 2048)
builder.add_bottleneck(2048, 512, 2048)
# Final layers
final_avg = brew.average_pool(
model,
builder.prev_blob,
'final_avg',
kernel=final_avg_kernel,
stride=1,
global_pooling=True,
)
# Final dimension of the "image" is reduced to 7x7
last_out = brew.fc(
model, final_avg, 'last_out_L{}'.format(num_labels), 2048, num_labels
)
if no_loss:
return last_out
# If we create model for training, use softmax-with-loss
if (label is not None):
(softmax, loss) = model.SoftmaxWithLoss(
[last_out, label],
["softmax", "loss"],
)
return (softmax, loss)
else:
# For inference, we just return softmax
return brew.softmax(model, last_out, "softmax")
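# Minimal usage sketch (illustration only; not part of the original
# module): build an inference-only ResNet-50 on a fresh ModelHelper.
def example_resnet50_inference(num_labels=1000):
    from caffe2.python.model_helper import ModelHelper
    model = ModelHelper(name="resnet50_example")
    softmax = create_resnet50(
        model, "data", num_input_channels=3,
        num_labels=num_labels, is_test=True)
    return model, softmax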
def create_resnet_32x32(
model, data, num_input_channels, num_groups, num_labels, is_test=False
):
'''
    Create residual net for smaller images (sec 4.2 of He et al. (2015)).
num_groups = 'n' in the paper
'''
# conv1 + maxpool
brew.conv(
model, data, 'conv1', num_input_channels, 16, kernel=3, stride=1
)
brew.spatial_bn(
model, 'conv1', 'conv1_spatbn', 16, epsilon=1e-3, is_test=is_test
)
brew.relu(model, 'conv1_spatbn', 'relu1')
# Number of blocks as described in sec 4.2
filters = [16, 32, 64]
builder = ResNetBuilder(model, 'relu1', no_bias=0, is_test=is_test)
prev_filters = 16
for groupidx in range(0, 3):
for blockidx in range(0, 2 * num_groups):
builder.add_simple_block(
prev_filters if blockidx == 0 else filters[groupidx],
filters[groupidx],
down_sampling=(True if blockidx == 0 and
groupidx > 0 else False))
prev_filters = filters[groupidx]
# Final layers
brew.average_pool(
model, builder.prev_blob, 'final_avg', kernel=8, stride=1
)
brew.fc(model, 'final_avg', 'last_out', 64, num_labels)
softmax = brew.softmax(model, 'last_out', 'softmax')
return softmax
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
from caffe2.proto import caffe2_pb2
def _parseFile(filename):
out_net = caffe2_pb2.NetDef()
# TODO(bwasti): A more robust handler for pathnames.
dir_path = os.path.dirname(__file__)
with open('{dir_path}/{filename}'.format(dir_path=dir_path,
filename=filename), 'rb') as f:
out_net.ParseFromString(f.read())
return out_net
init_net = _parseFile('init_net.pb')
predict_net = _parseFile('predict_net.pb')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import time
from caffe2.python import workspace, cnn, memonger, core
import caffe2.python.models.resnet as resnet
import hypothesis.strategies as st
from hypothesis import given, settings
import caffe2.python.hypothesis_test_util as hu
def has_blob(proto, needle):
for op in proto.op:
for inp in op.input:
if inp == needle:
return True
for outp in op.output:
if outp == needle:
return True
return False
def count_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len(blobs)
def count_shared_blobs(proto):
blobs = set()
for op in proto.op:
blobs = blobs.union(set(op.input)).union(set(op.output))
return len([b for b in blobs if "_shared" in b])
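# e.g. a proto whose ops touch blobs {"x", "y_shared", "z_shared"}
# gives count_blobs(proto) == 3 and count_shared_blobs(proto) == 2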
class ResnetMemongerTest(hu.HypothesisTestCase):
@given(with_shapes=st.booleans(), **hu.gcs_cpu_only)
@settings(max_examples=2, timeout=120)
def test_resnet_shared_grads(self, with_shapes, gc, dc):
model = cnn.CNNModelHelper(
order="NCHW",
name="test",
cudnn_exhaustive_search=True,
)
with core.NameScope("gpu_0"):
data = model.net.AddExternalInput("gpu_0/data")
label = model.net.AddExternalInput("gpu_0/label")
(_softmax, loss) = resnet.create_resnet50(
model,
data,
num_input_channels=3,
num_labels=1000,
label=label,
is_test=False,
)
param_to_grad = model.AddGradientOperators([loss])
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
{'gpu_0/data': [4, 3, 227, 227],
'gpu_0/label': [4]},
)
count_before = count_blobs(model.net.Proto())
optim_proto = memonger.share_grad_blobs(
model.net,
["gpu_0/loss"],
set(model.param_to_grad.values()),
"gpu_0/",
share_activations=True,
dont_share_blobs=set([str(param_to_grad["gpu_0/conv1_w"])]),
blob_shapes=shapes if with_shapes else None,
)
count_after = count_blobs(optim_proto)
self.assertTrue(count_after < count_before)
        # Run the model and compare results. We check that the loss is the
        # same and that the final gradient (conv1_w_grad) is the same.
workspace.RunNetOnce(model.param_init_net)
data = np.random.rand(4, 3, 227, 227).astype(np.float32)
label = (np.random.rand(4) * 1000).astype(np.int32)
workspace.FeedBlob("gpu_0/data", data)
workspace.FeedBlob("gpu_0/label", label)
workspace.RunNetOnce(model.net)
model.net.Proto().type = 'dag'
model.net.Proto().num_workers = 4
loss1 = workspace.FetchBlob("gpu_0/last_out_L1000")
conv1_w_grad = workspace.FetchBlob(param_to_grad["gpu_0/conv1_w"])
workspace.FeedBlob(param_to_grad["gpu_0/conv1_w"], np.array([0.0]))
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob("gpu_0/last_out_L1000")
optim_conv1_w_grad = workspace.FetchBlob(param_to_grad["gpu_0/conv1_w"])
print("before: {} after: {}".format(count_before, count_after))
np.testing.assert_almost_equal(loss1, optimized_loss1)
np.testing.assert_almost_equal(conv1_w_grad, optim_conv1_w_grad)
def test_resnet_forward_only(self):
model = cnn.CNNModelHelper(
order="NCHW",
name="test",
cudnn_exhaustive_search=True,
)
with core.NameScope("gpu_0"):
data = model.net.AddExternalInput("gpu_0/data")
resnet.create_resnet50(
model,
data,
num_input_channels=3,
num_labels=1000,
is_test=True
)
count_before = count_blobs(model.net.Proto())
optim_proto = memonger.optimize_inference_for_dag(
model.net, ["gpu_0/data"], "gpu_0/"
)
count_after = count_blobs(optim_proto)
num_shared_blobs = count_shared_blobs(optim_proto)
# Run model and compare results
workspace.RunNetOnce(model.param_init_net)
data = np.random.rand(4, 3, 227, 227).astype(np.float32)
workspace.FeedBlob("gpu_0/data", data)
workspace.RunNetOnce(model.net)
model.net.Proto().type = 'dag'
model.net.Proto().num_workers = 4
loss1 = workspace.FetchBlob("gpu_0/last_out_L1000")
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob("gpu_0/last_out_L1000")
self.assertTrue(count_after < count_before)
        self.assertTrue(0 < num_shared_blobs < 7)
np.testing.assert_almost_equal(loss1, optimized_loss1)
def test_resnet_forward_only_fast_simplenet(self):
'''
        Test the C++ memonger that only supports simple nets
'''
model = cnn.CNNModelHelper(
order="NCHW",
name="test",
cudnn_exhaustive_search=True,
)
with core.NameScope("gpu_0"):
data = model.net.AddExternalInput("gpu_0/data")
resnet.create_resnet50(
model,
data,
num_input_channels=3,
num_labels=1000,
is_test=True
)
count_before = count_blobs(model.net.Proto())
t = time.time()
optim_proto = memonger.optimize_inference_fast(
model.net.Proto(),
set(["gpu_0/data", "gpu_0/last_out_L1000"]).union(
set(model.net.Proto().external_input))
)
print("Optimization took {} secs".format(time.time() - t))
count_after = count_blobs(optim_proto)
num_shared_blobs = count_shared_blobs(optim_proto)
self.assertTrue(count_after < count_before)
print(count_after, count_before, num_shared_blobs)
        self.assertTrue(0 < num_shared_blobs < 7)
# Run model and compare results
workspace.RunNetOnce(model.param_init_net)
data = np.random.rand(4, 3, 227, 227).astype(np.float32)
workspace.FeedBlob("gpu_0/data", data)
model.net.Proto().type = 'simple'
workspace.RunNetOnce(model.net)
loss1 = workspace.FetchBlob("gpu_0/last_out_L1000")
workspace.RunNetOnce(optim_proto)
optimized_loss1 = workspace.FetchBlob("gpu_0/last_out_L1000")
np.testing.assert_almost_equal(loss1, optimized_loss1)
if __name__ == "__main__":
import unittest
import random
random.seed(2603)
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_gpu_memory_tracking=1'])
unittest.main()
|
## @package translate
# Module caffe2.python.models.seq2seq.translate
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
from future.utils import viewitems
import logging
import numpy as np
import sys
from caffe2.python import core, rnn_cell, workspace
from caffe2.python.models.seq2seq.beam_search import BeamSearchForwardOnly
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
def _weighted_sum(model, values, weight, output_name):
values_weights = zip(values, [weight] * len(values))
values_weights_flattened = [x for v_w in values_weights for x in v_w]
return model.net.WeightedSum(
values_weights_flattened,
output_name,
)
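
# Sketch of the flattening done by _weighted_sum (plain-Python illustration):
# WeightedSum expects its inputs interleaved as [v0, w0, v1, w1, ...], so the
# values are paired with the shared weight blob and the pairs are flattened.
# >>> values = ['p0', 'p1']
# >>> [x for v_w in zip(values, ['w'] * len(values)) for x in v_w]
# ['p0', 'w', 'p1', 'w']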
class Seq2SeqModelCaffe2EnsembleDecoder(object):
def scope(self, scope_name, blob_name):
return (
scope_name + '/' + blob_name
if scope_name is not None
else blob_name
)
def _build_decoder(
self,
model,
step_model,
model_params,
scope,
previous_tokens,
timestep,
fake_seq_lengths,
):
attention_type = model_params['attention']
assert attention_type in ['none', 'regular']
use_attention = (attention_type != 'none')
with core.NameScope(scope):
encoder_embeddings = seq2seq_util.build_embeddings(
model=model,
vocab_size=self.source_vocab_size,
embedding_size=model_params['encoder_embedding_size'],
name='encoder_embeddings',
freeze_embeddings=False,
)
(
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
) = seq2seq_util.build_embedding_encoder(
model=model,
encoder_params=model_params['encoder_type'],
num_decoder_layers=len(model_params['decoder_layer_configs']),
inputs=self.encoder_inputs,
input_lengths=self.encoder_lengths,
vocab_size=self.source_vocab_size,
embeddings=encoder_embeddings,
embedding_size=model_params['encoder_embedding_size'],
use_attention=use_attention,
num_gpus=0,
forward_only=True,
scope=scope,
)
with core.NameScope(scope):
if use_attention:
# [max_source_length, beam_size, encoder_output_dim]
encoder_outputs = model.net.Tile(
encoder_outputs,
'encoder_outputs_tiled',
tiles=self.beam_size,
axis=1,
)
if weighted_encoder_outputs is not None:
weighted_encoder_outputs = model.net.Tile(
weighted_encoder_outputs,
'weighted_encoder_outputs_tiled',
tiles=self.beam_size,
axis=1,
)
decoder_embeddings = seq2seq_util.build_embeddings(
model=model,
vocab_size=self.target_vocab_size,
embedding_size=model_params['decoder_embedding_size'],
name='decoder_embeddings',
freeze_embeddings=False,
)
embedded_tokens_t_prev = step_model.net.Gather(
[decoder_embeddings, previous_tokens],
'embedded_tokens_t_prev',
)
decoder_cells = []
decoder_units_per_layer = []
for i, layer_config in enumerate(model_params['decoder_layer_configs']):
num_units = layer_config['num_units']
decoder_units_per_layer.append(num_units)
if i == 0:
input_size = model_params['decoder_embedding_size']
else:
input_size = (
model_params['decoder_layer_configs'][i - 1]['num_units']
)
cell = rnn_cell.LSTMCell(
forward_only=True,
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
)
decoder_cells.append(cell)
with core.NameScope(scope):
if final_encoder_hidden_states is not None:
for i in range(len(final_encoder_hidden_states)):
if final_encoder_hidden_states[i] is not None:
final_encoder_hidden_states[i] = model.net.Tile(
final_encoder_hidden_states[i],
'final_encoder_hidden_tiled_{}'.format(i),
tiles=self.beam_size,
axis=1,
)
if final_encoder_cell_states is not None:
for i in range(len(final_encoder_cell_states)):
if final_encoder_cell_states[i] is not None:
final_encoder_cell_states[i] = model.net.Tile(
final_encoder_cell_states[i],
'final_encoder_cell_tiled_{}'.format(i),
tiles=self.beam_size,
axis=1,
)
initial_states = \
seq2seq_util.build_initial_rnn_decoder_states(
model=model,
encoder_units_per_layer=encoder_units_per_layer,
decoder_units_per_layer=decoder_units_per_layer,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
use_attention=use_attention,
)
attention_decoder = seq2seq_util.LSTMWithAttentionDecoder(
encoder_outputs=encoder_outputs,
encoder_output_dim=encoder_units_per_layer[-1],
encoder_lengths=None,
vocab_size=self.target_vocab_size,
attention_type=attention_type,
embedding_size=model_params['decoder_embedding_size'],
decoder_num_units=decoder_units_per_layer[-1],
decoder_cells=decoder_cells,
weighted_encoder_outputs=weighted_encoder_outputs,
name=scope,
)
states_prev = step_model.net.AddExternalInputs(*[
'{}/{}_prev'.format(scope, s)
for s in attention_decoder.get_state_names()
])
decoder_outputs, states = attention_decoder.apply(
model=step_model,
input_t=embedded_tokens_t_prev,
seq_lengths=fake_seq_lengths,
states=states_prev,
timestep=timestep,
)
state_configs = [
BeamSearchForwardOnly.StateConfig(
initial_value=initial_state,
state_prev_link=BeamSearchForwardOnly.LinkConfig(
blob=state_prev,
offset=0,
window=1,
),
state_link=BeamSearchForwardOnly.LinkConfig(
blob=state,
offset=1,
window=1,
),
)
for initial_state, state_prev, state in zip(
initial_states,
states_prev,
states,
)
]
with core.NameScope(scope):
decoder_outputs_flattened, _ = step_model.net.Reshape(
[decoder_outputs],
[
'decoder_outputs_flattened',
'decoder_outputs_and_contexts_combination_old_shape',
],
shape=[-1, attention_decoder.get_output_dim()],
)
output_logits = seq2seq_util.output_projection(
model=step_model,
decoder_outputs=decoder_outputs_flattened,
decoder_output_size=attention_decoder.get_output_dim(),
target_vocab_size=self.target_vocab_size,
decoder_softmax_size=model_params['decoder_softmax_size'],
)
# [1, beam_size, target_vocab_size]
output_probs = step_model.net.Softmax(
output_logits,
'output_probs',
)
output_log_probs = step_model.net.Log(
output_probs,
'output_log_probs',
)
if use_attention:
attention_weights = attention_decoder.get_attention_weights()
else:
attention_weights = step_model.net.ConstantFill(
[self.encoder_inputs],
'zero_attention_weights_tmp_1',
value=0.0,
)
attention_weights = step_model.net.Transpose(
attention_weights,
'zero_attention_weights_tmp_2',
)
attention_weights = step_model.net.Tile(
attention_weights,
'zero_attention_weights_tmp',
tiles=self.beam_size,
axis=0,
)
return (
state_configs,
output_log_probs,
attention_weights,
)
def build_word_rewards(self, vocab_size, word_reward, unk_reward):
word_rewards = np.full([vocab_size], word_reward, dtype=np.float32)
word_rewards[seq2seq_util.PAD_ID] = 0
word_rewards[seq2seq_util.GO_ID] = 0
word_rewards[seq2seq_util.EOS_ID] = 0
word_rewards[seq2seq_util.UNK_ID] = word_reward + unk_reward
return word_rewards
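    # Worked example (illustration only): with vocab_size=6, word_reward=-0.5
    # and unk_reward=-1.0 the vector is [0, 0, 0, -1.5, -0.5, -0.5]:
    # PAD/GO/EOS get 0, UNK gets word_reward + unk_reward, and every other
    # token gets word_reward.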
def __init__(
self,
translate_params,
):
self.models = translate_params['ensemble_models']
decoding_params = translate_params['decoding_params']
self.beam_size = decoding_params['beam_size']
assert len(self.models) > 0
source_vocab = self.models[0]['source_vocab']
target_vocab = self.models[0]['target_vocab']
for model in self.models:
assert model['source_vocab'] == source_vocab
assert model['target_vocab'] == target_vocab
self.source_vocab_size = len(source_vocab)
self.target_vocab_size = len(target_vocab)
self.decoder_scope_names = [
'model{}'.format(i) for i in range(len(self.models))
]
self.model = Seq2SeqModelHelper(init_params=True)
self.encoder_inputs = self.model.net.AddExternalInput('encoder_inputs')
self.encoder_lengths = self.model.net.AddExternalInput(
'encoder_lengths'
)
self.max_output_seq_len = self.model.net.AddExternalInput(
'max_output_seq_len'
)
fake_seq_lengths = self.model.param_init_net.ConstantFill(
[],
'fake_seq_lengths',
shape=[self.beam_size],
value=100000,
dtype=core.DataType.INT32,
)
beam_decoder = BeamSearchForwardOnly(
beam_size=self.beam_size,
model=self.model,
go_token_id=seq2seq_util.GO_ID,
eos_token_id=seq2seq_util.EOS_ID,
)
step_model = beam_decoder.get_step_model()
state_configs = []
output_log_probs = []
attention_weights = []
for model, scope_name in zip(
self.models,
self.decoder_scope_names,
):
(
state_configs_per_decoder,
output_log_probs_per_decoder,
attention_weights_per_decoder,
) = self._build_decoder(
model=self.model,
step_model=step_model,
model_params=model['model_params'],
scope=scope_name,
previous_tokens=beam_decoder.get_previous_tokens(),
timestep=beam_decoder.get_timestep(),
fake_seq_lengths=fake_seq_lengths,
)
state_configs.extend(state_configs_per_decoder)
output_log_probs.append(output_log_probs_per_decoder)
if attention_weights_per_decoder is not None:
attention_weights.append(attention_weights_per_decoder)
assert len(attention_weights) > 0
num_decoders_with_attention_blob = (
self.model.param_init_net.ConstantFill(
[],
'num_decoders_with_attention_blob',
value=1 / float(len(attention_weights)),
shape=[1],
)
)
# [beam_size, encoder_length, 1]
attention_weights_average = _weighted_sum(
model=step_model,
values=attention_weights,
weight=num_decoders_with_attention_blob,
output_name='attention_weights_average',
)
num_decoders_blob = self.model.param_init_net.ConstantFill(
[],
'num_decoders_blob',
value=1 / float(len(output_log_probs)),
shape=[1],
)
# [beam_size, target_vocab_size]
output_log_probs_average = _weighted_sum(
model=step_model,
values=output_log_probs,
weight=num_decoders_blob,
output_name='output_log_probs_average',
)
word_rewards = self.model.param_init_net.ConstantFill(
[],
'word_rewards',
shape=[self.target_vocab_size],
value=0.0,
dtype=core.DataType.FLOAT,
)
(
self.output_token_beam_list,
self.output_prev_index_beam_list,
self.output_score_beam_list,
self.output_attention_weights_beam_list,
) = beam_decoder.apply(
inputs=self.encoder_inputs,
length=self.max_output_seq_len,
log_probs=output_log_probs_average,
attentions=attention_weights_average,
state_configs=state_configs,
data_dependencies=[],
word_rewards=word_rewards,
)
workspace.RunNetOnce(self.model.param_init_net)
workspace.FeedBlob(
'word_rewards',
self.build_word_rewards(
vocab_size=self.target_vocab_size,
word_reward=translate_params['decoding_params']['word_reward'],
unk_reward=translate_params['decoding_params']['unk_reward'],
)
)
workspace.CreateNet(
self.model.net,
input_blobs=[
str(self.encoder_inputs),
str(self.encoder_lengths),
str(self.max_output_seq_len),
],
)
        logger.info('Params created:')
for param in self.model.params:
logger.info(param)
def load_models(self):
db_reader = 'reader'
for model, scope_name in zip(
self.models,
self.decoder_scope_names,
):
params_for_current_model = [
param
for param in self.model.GetAllParams()
if str(param).startswith(scope_name)
]
assert workspace.RunOperatorOnce(core.CreateOperator(
'CreateDB',
[], [db_reader],
db=model['model_file'],
db_type='minidb')
), 'Failed to create db {}'.format(model['model_file'])
assert workspace.RunOperatorOnce(core.CreateOperator(
'Load',
[db_reader],
params_for_current_model,
load_all=1,
add_prefix=scope_name + '/',
strip_prefix='gpu_0/',
))
logger.info('Model {} is loaded from a checkpoint {}'.format(
scope_name,
model['model_file'],
))
def decode(self, numberized_input, max_output_seq_len):
workspace.FeedBlob(
self.encoder_inputs,
np.array([
[token_id] for token_id in reversed(numberized_input)
]).astype(dtype=np.int32),
)
workspace.FeedBlob(
self.encoder_lengths,
np.array([len(numberized_input)]).astype(dtype=np.int32),
)
workspace.FeedBlob(
self.max_output_seq_len,
np.array([max_output_seq_len]).astype(dtype=np.int64),
)
workspace.RunNet(self.model.net)
num_steps = max_output_seq_len
score_beam_list = workspace.FetchBlob(self.output_score_beam_list)
token_beam_list = (
workspace.FetchBlob(self.output_token_beam_list)
)
prev_index_beam_list = (
workspace.FetchBlob(self.output_prev_index_beam_list)
)
attention_weights_beam_list = (
workspace.FetchBlob(self.output_attention_weights_beam_list)
)
best_indices = (num_steps, 0)
for i in range(num_steps + 1):
for hyp_index in range(self.beam_size):
if (
(
token_beam_list[i][hyp_index][0] ==
seq2seq_util.EOS_ID or
i == num_steps
) and
(
score_beam_list[i][hyp_index][0] >
score_beam_list[best_indices[0]][best_indices[1]][0]
)
):
best_indices = (i, hyp_index)
i, hyp_index = best_indices
output = []
attention_weights_per_token = []
best_score = -score_beam_list[i][hyp_index][0]
while i > 0:
output.append(token_beam_list[i][hyp_index][0])
attention_weights_per_token.append(
attention_weights_beam_list[i][hyp_index]
)
hyp_index = prev_index_beam_list[i][hyp_index][0]
i -= 1
attention_weights_per_token = reversed(attention_weights_per_token)
        # encoder_inputs are reversed (see the FeedBlob call in decode above)
attention_weights_per_token = [
list(reversed(attention_weights))[:len(numberized_input)]
for attention_weights in attention_weights_per_token
]
output = list(reversed(output))
return output, attention_weights_per_token, best_score
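
# A toy illustration (not part of the original module) of the backtracking
# loop in decode() above: starting from the best final position, follow the
# prev-index pointers back to step 1 and reverse the collected tokens.
def _example_beam_backtrack():
    # Shapes mimic the fetched blobs: [num_steps + 1][beam_size][1], with a
    # beam of size 1 for brevity.
    token_beam_list = [[[0]], [[5]], [[7]], [[2]]]
    prev_index_beam_list = [[[0]], [[0]], [[0]], [[0]]]
    i, hyp_index, output = 3, 0, []
    while i > 0:
        output.append(token_beam_list[i][hyp_index][0])
        hyp_index = prev_index_beam_list[i][hyp_index][0]
        i -= 1
    assert list(reversed(output)) == [5, 7, 2]
    return list(reversed(output))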
def run_seq2seq_beam_decoder(args, model_params, decoding_params):
source_vocab = seq2seq_util.gen_vocab(
args.source_corpus,
args.unk_threshold,
)
logger.info('Source vocab size {}'.format(len(source_vocab)))
target_vocab = seq2seq_util.gen_vocab(
args.target_corpus,
args.unk_threshold,
)
inversed_target_vocab = {v: k for (k, v) in viewitems(target_vocab)}
logger.info('Target vocab size {}'.format(len(target_vocab)))
decoder = Seq2SeqModelCaffe2EnsembleDecoder(
translate_params=dict(
ensemble_models=[dict(
source_vocab=source_vocab,
target_vocab=target_vocab,
model_params=model_params,
model_file=args.checkpoint,
)],
decoding_params=decoding_params,
),
)
decoder.load_models()
for line in sys.stdin:
numerized_source_sentence = seq2seq_util.get_numberized_sentence(
line,
source_vocab,
)
translation, alignment, _ = decoder.decode(
numerized_source_sentence,
2 * len(numerized_source_sentence) + 5,
)
print(' '.join([inversed_target_vocab[tid] for tid in translation]))
def main():
parser = argparse.ArgumentParser(
description='Caffe2: Seq2Seq Translation',
)
parser.add_argument('--source-corpus', type=str, default=None,
help='Path to source corpus in a text file format. Each '
'line in the file should contain a single sentence',
required=True)
parser.add_argument('--target-corpus', type=str, default=None,
help='Path to target corpus in a text file format',
required=True)
    parser.add_argument('--unk-threshold', type=int, default=50,
                        help='Frequency threshold: tokens occurring this many '
                        'times or fewer are mapped to the unknown token')
parser.add_argument('--use-bidirectional-encoder', action='store_true',
help='Set flag to use bidirectional recurrent network '
'in encoder')
parser.add_argument('--use-attention', action='store_true',
help='Set flag to use seq2seq with attention model')
parser.add_argument('--encoder-cell-num-units', type=int, default=512,
help='Number of cell units per encoder layer')
parser.add_argument('--encoder-num-layers', type=int, default=2,
                        help='Number of encoder layers')
parser.add_argument('--decoder-cell-num-units', type=int, default=512,
help='Number of cell units in the decoder layer')
parser.add_argument('--decoder-num-layers', type=int, default=2,
                        help='Number of decoder layers')
parser.add_argument('--encoder-embedding-size', type=int, default=256,
help='Size of embedding in the encoder layer')
parser.add_argument('--decoder-embedding-size', type=int, default=512,
help='Size of embedding in the decoder layer')
parser.add_argument('--decoder-softmax-size', type=int, default=None,
help='Size of softmax layer in the decoder')
parser.add_argument('--beam-size', type=int, default=6,
help='Size of beam for the decoder')
    parser.add_argument('--word-reward', type=float, default=0.0,
                        help='Reward added for each generated word')
    parser.add_argument('--unk-reward', type=float, default=0.0,
                        help='Additional reward for each generated UNK token. '
                        'Typically should be negative.')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to checkpoint', required=True)
args = parser.parse_args()
encoder_layer_configs = [
dict(
num_units=args.encoder_cell_num_units,
),
] * args.encoder_num_layers
if args.use_bidirectional_encoder:
assert args.encoder_cell_num_units % 2 == 0
        # Integer division: num_units must stay an int under true division
        encoder_layer_configs[0]['num_units'] //= 2
decoder_layer_configs = [
dict(
num_units=args.decoder_cell_num_units,
),
] * args.decoder_num_layers
run_seq2seq_beam_decoder(
args,
model_params=dict(
attention=('regular' if args.use_attention else 'none'),
decoder_layer_configs=decoder_layer_configs,
encoder_type=dict(
encoder_layer_configs=encoder_layer_configs,
use_bidirectional_encoder=args.use_bidirectional_encoder,
),
encoder_embedding_size=args.encoder_embedding_size,
decoder_embedding_size=args.decoder_embedding_size,
decoder_softmax_size=args.decoder_softmax_size,
),
decoding_params=dict(
beam_size=args.beam_size,
word_reward=args.word_reward,
unk_reward=args.unk_reward,
),
)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import os
import tempfile
from caffe2.python import test_util, workspace
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
from caffe2.python.models.seq2seq.train import Seq2SeqModelCaffe2
from caffe2.python.models.seq2seq.translate import (
Seq2SeqModelCaffe2EnsembleDecoder,
)
class Seq2SeqBeamSearchTest(test_util.TestCase):
def _build_seq2seq_model(
self,
model_params,
tmp_dir,
source_vocab_size=20,
target_vocab_size=20,
num_gpus=0,
batch_size=2,
):
training_params = dict(
model_params,
batch_size=batch_size,
optimizer_params=dict(
learning_rate=0.1,
),
max_gradient_norm=1.0,
)
model_obj = Seq2SeqModelCaffe2(
training_params,
source_vocab_size,
target_vocab_size,
num_gpus,
)
model_obj.initialize_from_scratch()
checkpoint_path_prefix = os.path.join(tmp_dir, 'checkpoint')
checkpoint_path = model_obj.save(
checkpoint_path_prefix=checkpoint_path_prefix,
current_step=0,
)
return model_obj, checkpoint_path
def _run_compare_train_inference(self, model_params):
tmp_dir = tempfile.mkdtemp()
model_obj, checkpoint_path = self._build_seq2seq_model(
model_params,
tmp_dir=tmp_dir,
source_vocab_size=20,
target_vocab_size=20,
num_gpus=0,
batch_size=2,
)
assert model_obj is not None
translate_params = dict(
ensemble_models=[dict(
source_vocab={i: str(i) for i in range(20)},
target_vocab={i: str(i) for i in range(20)},
model_params=model_params,
model_file=checkpoint_path,
)],
decoding_params=dict(
beam_size=3,
word_reward=0,
unk_reward=0,
),
)
beam_decoder_model = Seq2SeqModelCaffe2EnsembleDecoder(translate_params)
beam_decoder_model.load_models()
encoder_lengths = 5
decoder_lengths = 7
for _ in range(3):
            encoder_inputs = np.random.randint(
                low=3,  # after GO_ID (1) and EOS_ID (2)
                high=20,  # exclusive upper bound: tokens 3..19
                size=encoder_lengths,
            )
targets, _, beam_model_score = beam_decoder_model.decode(
encoder_inputs,
decoder_lengths,
)
targets_2, _, beam_model_score = beam_decoder_model.decode(
encoder_inputs,
decoder_lengths,
)
self.assertEqual(targets, targets_2)
workspace.FeedBlob(
'encoder_inputs',
np.array(
[list(reversed(encoder_inputs))]
).transpose().astype(dtype=np.int32))
workspace.FeedBlob(
'encoder_lengths',
np.array([len(encoder_inputs)]).astype(dtype=np.int32),
)
decoder_inputs = [seq2seq_util.GO_ID] + targets[:-1]
workspace.FeedBlob(
'decoder_inputs',
np.array([decoder_inputs]).transpose().astype(dtype=np.int32),
)
workspace.FeedBlob(
'decoder_lengths',
np.array([len(decoder_inputs)]).astype(dtype=np.int32),
)
workspace.FeedBlob(
'targets',
np.array([targets]).transpose().astype(dtype=np.int32),
)
workspace.FeedBlob(
'target_weights',
np.array([[1.0] * len(targets)]).astype(dtype=np.float32),
)
workspace.RunNet(model_obj.forward_net)
train_model_score = workspace.FetchBlob('total_loss_scalar')
np.testing.assert_almost_equal(
beam_model_score,
train_model_score,
decimal=4,
)
def test_attention(self):
model_params = dict(
attention='regular',
decoder_layer_configs=[
dict(
num_units=32,
),
],
encoder_type=dict(
encoder_layer_configs=[
dict(
num_units=16,
),
],
use_bidirectional_encoder=True,
),
encoder_embedding_size=8,
decoder_embedding_size=8,
decoder_softmax_size=None,
)
self._run_compare_train_inference(model_params)
def test_2layer_attention(self):
model_params = dict(
attention='regular',
decoder_layer_configs=[
dict(
num_units=32,
),
dict(
num_units=32,
),
],
encoder_type=dict(
encoder_layer_configs=[
dict(
num_units=16,
),
dict(
num_units=32,
),
],
use_bidirectional_encoder=True,
),
encoder_embedding_size=8,
decoder_embedding_size=8,
decoder_softmax_size=None,
)
self._run_compare_train_inference(model_params)
def test_multi_decoder(self):
model_params = dict(
attention='regular',
decoder_layer_configs=[
dict(
num_units=32,
),
dict(
num_units=32,
),
dict(
num_units=32,
),
],
encoder_type=dict(
encoder_layer_configs=[
dict(
num_units=32,
),
],
use_bidirectional_encoder=False,
),
encoder_embedding_size=8,
decoder_embedding_size=8,
decoder_softmax_size=None,
)
self._run_compare_train_inference(model_params)
|
## @package seq2seq_model_helper
# Module caffe2.python.models.seq2seq.seq2seq_model_helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import scope
from caffe2.python.model_helper import ModelHelper
class Seq2SeqModelHelper(ModelHelper):
def __init__(self, init_params=True, **kwargs):
arg_scope = {
'use_cudnn': kwargs.pop('use_cudnn', True),
'cudnn_exhaustive_search': kwargs.pop('cudnn_exhaustive_search', False),
'order': 'NHWC',
}
if kwargs.get('ws_nbytes_limit', None):
arg_scope['ws_nbytes_limit'] = kwargs.pop('ws_nbytes_limit')
super(Seq2SeqModelHelper, self).__init__(
init_params=init_params,
arg_scope=arg_scope,
**kwargs
)
self.non_trainable_params = []
def AddParam(self, name, init=None, init_value=None, trainable=True):
"""Adds a parameter to the model's net and it's initializer if needed
Args:
init: a tuple (<initialization_op_name>, <initialization_op_kwargs>)
init_value: int, float or str. Can be used instead of `init` as a
simple constant initializer
trainable: bool, whether to compute gradient for this param or not
"""
if init_value is not None:
assert init is None
assert type(init_value) in [int, float, str]
init = ('ConstantFill', dict(
shape=[1],
value=init_value,
))
if self.init_params:
param = self.param_init_net.__getattr__(init[0])(
[],
name,
**init[1]
)
else:
param = self.net.AddExternalInput(name)
if trainable:
self.params.append(param)
else:
self.non_trainable_params.append(param)
return param
def GetNonTrainableParams(self, namescope=None):
'''
        Returns the non-trainable params in the current namescope
'''
if namescope is None:
namescope = scope.CurrentNameScope()
else:
if not namescope.endswith(scope._NAMESCOPE_SEPARATOR):
namescope += scope._NAMESCOPE_SEPARATOR
if namescope == '':
return self.non_trainable_params[:]
else:
return [
p for p in self.non_trainable_params
if p.GetNameScope() == namescope
]
def GetAllParams(self, namescope=None):
return (
self.GetParams(namescope) +
self.GetComputedParams(namescope) +
self.GetNonTrainableParams(namescope)
)
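
# A short usage sketch (illustrative, mirroring the tests in this package):
# constant-initialized params via init_value, or an explicit
# (op_name, op_kwargs) initializer via init.
def _example_add_param():
    m = Seq2SeqModelHelper(name='example')
    lr = m.AddParam('learning_rate', init_value=0.1, trainable=False)
    w = m.AddParam('w', init=('GaussianFill', dict(shape=[4, 4], std=0.01)))
    return lr, w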
|
## @package seq2seq_util
# Module caffe2.python.models.seq2seq.seq2seq_util
"""Utility functions for building Seq2Seq models with Caffe2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import collections
from future.utils import viewitems
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import attention, core, rnn_cell, brew
PAD_ID = 0
PAD = '<PAD>'
GO_ID = 1
GO = '<GO>'
EOS_ID = 2
EOS = '<EOS>'
UNK_ID = 3
UNK = '<UNK>'
def gen_vocab(corpus, unk_threshold):
vocab = collections.defaultdict(lambda: len(vocab))
freqs = collections.defaultdict(lambda: 0)
    # Add the special tokens first so their IDs match the constants above
vocab[PAD]
vocab[GO]
vocab[EOS]
vocab[UNK]
with open(corpus) as f:
for sentence in f:
tokens = sentence.strip().split()
for token in tokens:
freqs[token] += 1
for token, freq in viewitems(freqs):
if freq > unk_threshold:
vocab[token]
return vocab
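
# Note on the defaultdict trick above (illustration only): looking up a
# missing key assigns it the next consecutive ID, so merely evaluating
# vocab[PAD], vocab[GO], vocab[EOS], vocab[UNK] pins the special tokens
# to IDs 0..3 before any corpus token is added.
# >>> v = collections.defaultdict(lambda: len(v))
# >>> v['<PAD>'], v['<GO>'], v['new_token']
# (0, 1, 2)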
def get_numberized_sentence(sentence, vocab):
numerized_sentence = []
for token in sentence.strip().split():
if token in vocab:
numerized_sentence.append(vocab[token])
else:
numerized_sentence.append(vocab[UNK])
return numerized_sentence
def rnn_unidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=None,
):
""" Unidirectional LSTM encoder."""
with core.NameScope(scope):
initial_cell_state = model.param_init_net.ConstantFill(
[],
'initial_cell_state',
shape=[num_units],
value=0.0,
)
initial_hidden_state = model.param_init_net.ConstantFill(
[],
'initial_hidden_state',
shape=[num_units],
value=0.0,
)
cell = rnn_cell.LSTMCell(
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
name=(scope + '/' if scope else '') + 'lstm',
forward_only=forward_only,
)
dropout_ratio = (
None if dropout_keep_prob is None else (1.0 - dropout_keep_prob)
)
if dropout_ratio is not None:
cell = rnn_cell.DropoutCell(
internal_cell=cell,
dropout_ratio=dropout_ratio,
name=(scope + '/' if scope else '') + 'dropout',
forward_only=forward_only,
is_test=False,
)
outputs_with_grads = []
if return_sequence_output:
outputs_with_grads.append(0)
if return_final_state:
outputs_with_grads.extend([1, 3])
outputs, (_, final_hidden_state, _, final_cell_state) = (
cell.apply_over_sequence(
model=model,
inputs=inputs,
seq_lengths=input_lengths,
initial_states=(initial_hidden_state, initial_cell_state),
outputs_with_grads=outputs_with_grads,
)
)
return outputs, final_hidden_state, final_cell_state
def rnn_bidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=None,
):
outputs_fw, final_hidden_fw, final_cell_fw = rnn_unidirectional_layer(
model,
inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=(scope + '/' if scope else '') + 'fw',
)
with core.NameScope(scope):
reversed_inputs = model.net.ReversePackedSegs(
[inputs, input_lengths],
['reversed_inputs'],
)
outputs_bw, final_hidden_bw, final_cell_bw = rnn_unidirectional_layer(
model,
reversed_inputs,
input_lengths,
input_size,
num_units,
dropout_keep_prob,
forward_only,
return_sequence_output,
return_final_state,
scope=(scope + '/' if scope else '') + 'bw',
)
with core.NameScope(scope):
outputs_bw = model.net.ReversePackedSegs(
[outputs_bw, input_lengths],
['outputs_bw'],
)
# Concatenate forward and backward results
if return_sequence_output:
with core.NameScope(scope):
outputs, _ = model.net.Concat(
[outputs_fw, outputs_bw],
['outputs', 'outputs_dim'],
axis=2,
)
else:
outputs = None
if return_final_state:
with core.NameScope(scope):
final_hidden_state, _ = model.net.Concat(
[final_hidden_fw, final_hidden_bw],
['final_hidden_state', 'final_hidden_state_dim'],
axis=2,
)
final_cell_state, _ = model.net.Concat(
[final_cell_fw, final_cell_bw],
['final_cell_state', 'final_cell_state_dim'],
axis=2,
)
else:
final_hidden_state = None
final_cell_state = None
return outputs, final_hidden_state, final_cell_state
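
# Plain-numpy illustration (not part of the original module) of the
# ReversePackedSegs semantics the bidirectional layer relies on: each
# sequence is reversed only up to its own length; padding stays in place.
def _example_reverse_packed_segs():
    import numpy as np
    inputs = np.arange(6, dtype=np.float32).reshape(3, 2, 1)  # [T=3, B=2, D=1]
    lengths = np.array([3, 2], dtype=np.int32)
    reversed_inputs = inputs.copy()
    for b, l in enumerate(lengths):
        reversed_inputs[:l, b] = inputs[:l, b][::-1]
    return reversed_inputs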
def build_embeddings(
model,
vocab_size,
embedding_size,
name,
freeze_embeddings,
):
embeddings = model.param_init_net.GaussianFill(
[],
name,
shape=[vocab_size, embedding_size],
std=0.1,
)
if not freeze_embeddings:
model.params.append(embeddings)
return embeddings
def get_layer_scope(scope, layer_type, i):
prefix = (scope + '/' if scope else '') + layer_type
return '{}/layer{}'.format(prefix, i)
def build_embedding_encoder(
model,
encoder_params,
num_decoder_layers,
inputs,
input_lengths,
vocab_size,
embeddings,
embedding_size,
use_attention,
num_gpus=0,
forward_only=False,
scope=None,
):
with core.NameScope(scope or ''):
if num_gpus == 0:
embedded_encoder_inputs = model.net.Gather(
[embeddings, inputs],
['embedded_encoder_inputs'],
)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
embedded_encoder_inputs_cpu = model.net.Gather(
[embeddings, inputs],
['embedded_encoder_inputs_cpu'],
)
embedded_encoder_inputs = model.CopyCPUToGPU(
embedded_encoder_inputs_cpu,
'embedded_encoder_inputs',
)
layer_inputs = embedded_encoder_inputs
layer_input_size = embedding_size
encoder_units_per_layer = []
final_encoder_hidden_states = []
final_encoder_cell_states = []
num_encoder_layers = len(encoder_params['encoder_layer_configs'])
use_bidirectional_encoder = encoder_params.get(
'use_bidirectional_encoder',
False,
)
for i, layer_config in enumerate(encoder_params['encoder_layer_configs']):
if use_bidirectional_encoder and i == 0:
layer_func = rnn_bidirectional_layer
output_dims = 2 * layer_config['num_units']
else:
layer_func = rnn_unidirectional_layer
output_dims = layer_config['num_units']
encoder_units_per_layer.append(output_dims)
is_final_layer = (i == num_encoder_layers - 1)
dropout_keep_prob = layer_config.get(
'dropout_keep_prob',
None,
)
return_final_state = i >= (num_encoder_layers - num_decoder_layers)
(
layer_outputs,
final_layer_hidden_state,
final_layer_cell_state,
) = layer_func(
model=model,
inputs=layer_inputs,
input_lengths=input_lengths,
input_size=layer_input_size,
num_units=layer_config['num_units'],
dropout_keep_prob=dropout_keep_prob,
forward_only=forward_only,
return_sequence_output=(not is_final_layer) or use_attention,
return_final_state=return_final_state,
scope=get_layer_scope(scope, 'encoder', i),
)
if not is_final_layer:
layer_inputs = layer_outputs
layer_input_size = output_dims
final_encoder_hidden_states.append(final_layer_hidden_state)
final_encoder_cell_states.append(final_layer_cell_state)
encoder_outputs = layer_outputs
weighted_encoder_outputs = None
return (
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
)
class LSTMWithAttentionDecoder(object):
def scope(self, name):
return self.name + '/' + name if self.name is not None else name
def _get_attention_type(self, attention_type_as_string):
if attention_type_as_string == 'regular':
return attention.AttentionType.Regular
elif attention_type_as_string == 'recurrent':
return attention.AttentionType.Recurrent
else:
assert False, 'Unknown type ' + attention_type_as_string
def __init__(
self,
encoder_outputs,
encoder_output_dim,
encoder_lengths,
vocab_size,
attention_type,
embedding_size,
decoder_num_units,
decoder_cells,
residual_output_layers=None,
name=None,
weighted_encoder_outputs=None,
):
self.name = name
self.num_layers = len(decoder_cells)
if attention_type == 'none':
self.cell = rnn_cell.MultiRNNCell(
decoder_cells,
name=self.scope('decoder'),
residual_output_layers=residual_output_layers,
)
self.use_attention = False
self.decoder_output_dim = decoder_num_units
self.output_indices = self.cell.output_indices
else:
decoder_cell = rnn_cell.MultiRNNCell(
decoder_cells,
name=self.scope('decoder'),
residual_output_layers=residual_output_layers,
)
self.cell = rnn_cell.AttentionCell(
encoder_output_dim=encoder_output_dim,
encoder_outputs=encoder_outputs,
encoder_lengths=encoder_lengths,
decoder_cell=decoder_cell,
decoder_state_dim=decoder_num_units,
name=self.scope('attention_decoder'),
attention_type=self._get_attention_type(attention_type),
weighted_encoder_outputs=weighted_encoder_outputs,
attention_memory_optimization=True,
)
self.use_attention = True
self.decoder_output_dim = decoder_num_units + encoder_output_dim
self.output_indices = decoder_cell.output_indices
self.output_indices.append(2 * self.num_layers)
def get_state_names(self):
return self.cell.get_state_names()
def get_outputs_with_grads(self):
# sequence (all) output locations are at twice their state index
return [2 * i for i in self.output_indices]
def get_output_dim(self):
return self.decoder_output_dim
def get_attention_weights(self):
assert self.use_attention
# [batch_size, encoder_length, 1]
return self.cell.get_attention_weights()
def apply(
self,
model,
input_t,
seq_lengths,
states,
timestep,
):
return self.cell.apply(
model=model,
input_t=input_t,
seq_lengths=seq_lengths,
states=states,
timestep=timestep,
)
def apply_over_sequence(
self,
model,
inputs,
seq_lengths,
initial_states,
):
return self.cell.apply_over_sequence(
model=model,
inputs=inputs,
seq_lengths=seq_lengths,
initial_states=initial_states,
outputs_with_grads=self.get_outputs_with_grads(),
)
def build_initial_rnn_decoder_states(
model,
encoder_units_per_layer,
decoder_units_per_layer,
final_encoder_hidden_states,
final_encoder_cell_states,
use_attention,
):
num_encoder_layers = len(encoder_units_per_layer)
num_decoder_layers = len(decoder_units_per_layer)
if num_encoder_layers > num_decoder_layers:
offset = num_encoder_layers - num_decoder_layers
else:
offset = 0
initial_states = []
for i, decoder_num_units in enumerate(decoder_units_per_layer):
if (
final_encoder_hidden_states and
len(final_encoder_hidden_states) > (i + offset)
):
final_encoder_hidden_state = final_encoder_hidden_states[i + offset]
else:
final_encoder_hidden_state = None
if final_encoder_hidden_state is None:
decoder_initial_hidden_state = model.param_init_net.ConstantFill(
[],
'decoder_initial_hidden_state_{}'.format(i),
shape=[decoder_num_units],
value=0.0,
)
model.params.append(decoder_initial_hidden_state)
elif decoder_num_units != encoder_units_per_layer[i + offset]:
decoder_initial_hidden_state = brew.fc(
model,
final_encoder_hidden_state,
'decoder_initial_hidden_state_{}'.format(i),
encoder_units_per_layer[i + offset],
decoder_num_units,
axis=2,
)
else:
decoder_initial_hidden_state = final_encoder_hidden_state
initial_states.append(decoder_initial_hidden_state)
if (
final_encoder_cell_states and
len(final_encoder_cell_states) > (i + offset)
):
final_encoder_cell_state = final_encoder_cell_states[i + offset]
else:
final_encoder_cell_state = None
if final_encoder_cell_state is None:
decoder_initial_cell_state = model.param_init_net.ConstantFill(
[],
'decoder_initial_cell_state_{}'.format(i),
shape=[decoder_num_units],
value=0.0,
)
model.params.append(decoder_initial_cell_state)
elif decoder_num_units != encoder_units_per_layer[i + offset]:
decoder_initial_cell_state = brew.fc(
model,
final_encoder_cell_state,
'decoder_initial_cell_state_{}'.format(i),
encoder_units_per_layer[i + offset],
decoder_num_units,
axis=2,
)
else:
decoder_initial_cell_state = final_encoder_cell_state
initial_states.append(decoder_initial_cell_state)
if use_attention:
initial_attention_weighted_encoder_context = (
model.param_init_net.ConstantFill(
[],
'initial_attention_weighted_encoder_context',
shape=[encoder_units_per_layer[-1]],
value=0.0,
)
)
model.params.append(initial_attention_weighted_encoder_context)
initial_states.append(initial_attention_weighted_encoder_context)
return initial_states
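
# A tiny worked example (illustration only) of the layer alignment above:
# with 3 encoder layers and 2 decoder layers, offset = 3 - 2 = 1, so decoder
# layer i is seeded from encoder layer i + 1, i.e. the topmost encoder
# layers; a brew.fc projection is inserted whenever the unit counts differ.
# >>> encoder_units_per_layer = [128, 256, 256]
# >>> decoder_units_per_layer = [256, 256]
# >>> # decoder layer 0 <- encoder layer 1, decoder layer 1 <- encoder layer 2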
def build_embedding_decoder(
model,
decoder_layer_configs,
inputs,
input_lengths,
encoder_lengths,
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
vocab_size,
embeddings,
embedding_size,
attention_type,
forward_only,
num_gpus=0,
scope=None,
):
with core.NameScope(scope or ''):
if num_gpus == 0:
embedded_decoder_inputs = model.net.Gather(
[embeddings, inputs],
['embedded_decoder_inputs'],
)
else:
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
embedded_decoder_inputs_cpu = model.net.Gather(
[embeddings, inputs],
['embedded_decoder_inputs_cpu'],
)
embedded_decoder_inputs = model.CopyCPUToGPU(
embedded_decoder_inputs_cpu,
'embedded_decoder_inputs',
)
decoder_cells = []
decoder_units_per_layer = []
for i, layer_config in enumerate(decoder_layer_configs):
num_units = layer_config['num_units']
decoder_units_per_layer.append(num_units)
if i == 0:
input_size = embedding_size
else:
input_size = decoder_cells[-1].get_output_dim()
cell = rnn_cell.LSTMCell(
forward_only=forward_only,
input_size=input_size,
hidden_size=num_units,
forget_bias=0.0,
memory_optimization=False,
)
dropout_keep_prob = layer_config.get('dropout_keep_prob', None)
if dropout_keep_prob is not None:
                dropout_ratio = 1.0 - dropout_keep_prob
cell = rnn_cell.DropoutCell(
internal_cell=cell,
dropout_ratio=dropout_ratio,
forward_only=forward_only,
is_test=False,
name=get_layer_scope(scope, 'decoder_dropout', i),
)
decoder_cells.append(cell)
states = build_initial_rnn_decoder_states(
model=model,
encoder_units_per_layer=encoder_units_per_layer,
decoder_units_per_layer=decoder_units_per_layer,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
use_attention=(attention_type != 'none'),
)
attention_decoder = LSTMWithAttentionDecoder(
encoder_outputs=encoder_outputs,
encoder_output_dim=encoder_units_per_layer[-1],
encoder_lengths=encoder_lengths,
vocab_size=vocab_size,
attention_type=attention_type,
embedding_size=embedding_size,
decoder_num_units=decoder_units_per_layer[-1],
decoder_cells=decoder_cells,
weighted_encoder_outputs=weighted_encoder_outputs,
name=scope,
)
decoder_outputs, _ = attention_decoder.apply_over_sequence(
model=model,
inputs=embedded_decoder_inputs,
seq_lengths=input_lengths,
initial_states=states,
)
        # We apply softmax over the whole flattened sequence:
        # (max_length in the batch * batch_size) x decoder output size.
        # -1 is used because max_length is not known at net-construction time.
decoder_outputs_flattened, _ = model.net.Reshape(
[decoder_outputs],
[
'decoder_outputs_flattened',
'decoder_outputs_and_contexts_combination_old_shape',
],
shape=[-1, attention_decoder.get_output_dim()],
)
decoder_outputs = decoder_outputs_flattened
decoder_output_dim = attention_decoder.get_output_dim()
return (decoder_outputs, decoder_output_dim)
def output_projection(
model,
decoder_outputs,
decoder_output_size,
target_vocab_size,
decoder_softmax_size,
):
if decoder_softmax_size is not None:
decoder_outputs = brew.fc(
model,
decoder_outputs,
'decoder_outputs_scaled',
dim_in=decoder_output_size,
dim_out=decoder_softmax_size,
)
decoder_output_size = decoder_softmax_size
output_projection_w = model.param_init_net.XavierFill(
[],
'output_projection_w',
shape=[target_vocab_size, decoder_output_size],
)
output_projection_b = model.param_init_net.XavierFill(
[],
'output_projection_b',
shape=[target_vocab_size],
)
model.params.extend([
output_projection_w,
output_projection_b,
])
output_logits = model.net.FC(
[
decoder_outputs,
output_projection_w,
output_projection_b,
],
['output_logits'],
)
return output_logits
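
# Shape sketch (illustration only): with decoder_outputs of shape [N, D],
# where N is the number of flattened sequence positions, the FC above
# computes output_logits = decoder_outputs @ w.T + b with shape
# [N, target_vocab_size].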
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.models.seq2seq import seq2seq_model_helper
from caffe2.python import scope, test_util
class Seq2SeqModelHelperTest(test_util.TestCase):
    def testConstructor(self):
model_name = 'TestModel'
m = seq2seq_model_helper.Seq2SeqModelHelper(name=model_name)
self.assertEqual(m.name, model_name)
self.assertEqual(m.init_params, True)
self.assertEqual(m.arg_scope, {
'use_cudnn': True,
'cudnn_exhaustive_search': False,
'order': 'NHWC'
})
def testAddParam(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
param_name = 'test_param'
param = m.AddParam(param_name, init_value=1)
self.assertEqual(str(param), param_name)
def testGetNonTrainableParams(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
m.AddParam('test_param1', init_value=1, trainable=True)
p2 = m.AddParam('test_param2', init_value=2, trainable=False)
self.assertEqual(
m.GetNonTrainableParams(),
[p2]
)
with scope.NameScope('A', reset=True):
p3 = m.AddParam('test_param3', init_value=3, trainable=False)
self.assertEqual(
m.GetNonTrainableParams(),
[p3]
)
self.assertEqual(
m.GetNonTrainableParams(),
[p2, p3]
)
def testGetAllParams(self):
m = seq2seq_model_helper.Seq2SeqModelHelper()
p1 = m.AddParam('test_param1', init_value=1, trainable=True)
p2 = m.AddParam('test_param2', init_value=2, trainable=False)
self.assertEqual(
m.GetAllParams(),
[p1, p2]
)
if __name__ == "__main__":
import unittest
import random
random.seed(2221)
unittest.main()
|
## @package beam_search
# Module caffe2.python.models.seq2seq.beam_search
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import namedtuple
from caffe2.python import core
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
class BeamSearchForwardOnly(object):
"""
Class generalizing forward beam search for seq2seq models.
Also provides types to specify the recurrent structure of decoding:
StateConfig:
        initial_value: blob providing the value of the state at the first step
state_prev_link: LinkConfig describing how recurrent step receives
input from global state blob in each step
state_link: LinkConfig describing how step writes (produces new state)
to global state blob in each step
LinkConfig:
blob: blob connecting global state blob to step application
offset: offset from beginning of global blob for link in time dimension
window: width of global blob to read/write in time dimension
"""
LinkConfig = namedtuple('LinkConfig', ['blob', 'offset', 'window'])
StateConfig = namedtuple(
'StateConfig',
['initial_value', 'state_prev_link', 'state_link'],
)
def __init__(
self,
beam_size,
model,
eos_token_id,
go_token_id=seq2seq_util.GO_ID,
post_eos_penalty=None,
):
self.beam_size = beam_size
self.model = model
self.step_model = Seq2SeqModelHelper(
name='step_model',
param_model=self.model,
)
self.go_token_id = go_token_id
self.eos_token_id = eos_token_id
self.post_eos_penalty = post_eos_penalty
(
self.timestep,
self.scores_t_prev,
self.tokens_t_prev,
self.hypo_t_prev,
self.attention_t_prev,
) = self.step_model.net.AddExternalInputs(
'timestep',
'scores_t_prev',
'tokens_t_prev',
'hypo_t_prev',
'attention_t_prev',
)
tokens_t_prev_int32 = self.step_model.net.Cast(
self.tokens_t_prev,
'tokens_t_prev_int32',
to=core.DataType.INT32,
)
self.tokens_t_prev_int32_flattened, _ = self.step_model.net.Reshape(
[tokens_t_prev_int32],
[tokens_t_prev_int32, 'input_t_int32_old_shape'],
shape=[1, -1],
)
def get_step_model(self):
return self.step_model
def get_previous_tokens(self):
return self.tokens_t_prev_int32_flattened
def get_timestep(self):
return self.timestep
# TODO: make attentions a generic state
# data_dependencies is a list of blobs that the operator should wait for
# before beginning execution. This ensures that ops are run in the correct
    # order when the RecurrentNetwork op is embedded in a DAGNet, for example.
def apply(
self,
inputs,
length,
log_probs,
attentions,
state_configs,
data_dependencies,
word_rewards=None,
possible_translation_tokens=None,
go_token_id=None,
):
ZERO = self.model.param_init_net.ConstantFill(
[],
'ZERO',
shape=[1],
value=0,
dtype=core.DataType.INT32,
)
on_initial_step = self.step_model.net.EQ(
[ZERO, self.timestep],
'on_initial_step',
)
if self.post_eos_penalty is not None:
eos_token = self.model.param_init_net.ConstantFill(
[],
'eos_token',
shape=[self.beam_size],
value=self.eos_token_id,
dtype=core.DataType.INT32,
)
finished_penalty = self.model.param_init_net.ConstantFill(
[],
'finished_penalty',
shape=[1],
value=float(self.post_eos_penalty),
dtype=core.DataType.FLOAT,
)
ZERO_FLOAT = self.model.param_init_net.ConstantFill(
[],
'ZERO_FLOAT',
shape=[1],
value=0.0,
dtype=core.DataType.FLOAT,
)
finished_penalty = self.step_model.net.Conditional(
[on_initial_step, ZERO_FLOAT, finished_penalty],
'possible_finished_penalty',
)
tokens_t_flat = self.step_model.net.FlattenToVec(
self.tokens_t_prev,
'tokens_t_flat',
)
tokens_t_flat_int = self.step_model.net.Cast(
tokens_t_flat,
'tokens_t_flat_int',
to=core.DataType.INT32,
)
predecessor_is_eos = self.step_model.net.EQ(
[tokens_t_flat_int, eos_token],
'predecessor_is_eos',
)
predecessor_is_eos_float = self.step_model.net.Cast(
predecessor_is_eos,
'predecessor_is_eos_float',
to=core.DataType.FLOAT,
)
predecessor_is_eos_penalty = self.step_model.net.Mul(
[predecessor_is_eos_float, finished_penalty],
'predecessor_is_eos_penalty',
broadcast=1,
)
log_probs = self.step_model.net.Add(
[log_probs, predecessor_is_eos_penalty],
'log_probs_penalized',
broadcast=1,
axis=0,
)
# [beam_size, beam_size]
best_scores_per_hypo, best_tokens_per_hypo = self.step_model.net.TopK(
log_probs,
['best_scores_per_hypo', 'best_tokens_per_hypo_indices'],
k=self.beam_size,
)
if possible_translation_tokens:
# [beam_size, beam_size]
best_tokens_per_hypo = self.step_model.net.Gather(
[possible_translation_tokens, best_tokens_per_hypo],
['best_tokens_per_hypo']
)
# [beam_size]
scores_t_prev_squeezed, _ = self.step_model.net.Reshape(
self.scores_t_prev,
['scores_t_prev_squeezed', 'scores_t_prev_old_shape'],
shape=[self.beam_size],
)
# [beam_size, beam_size]
output_scores = self.step_model.net.Add(
[best_scores_per_hypo, scores_t_prev_squeezed],
'output_scores',
broadcast=1,
axis=0,
)
if word_rewards is not None:
# [beam_size, beam_size]
word_rewards_for_best_tokens_per_hypo = self.step_model.net.Gather(
[word_rewards, best_tokens_per_hypo],
'word_rewards_for_best_tokens_per_hypo',
)
# [beam_size, beam_size]
output_scores = self.step_model.net.Add(
[output_scores, word_rewards_for_best_tokens_per_hypo],
'output_scores',
)
# [beam_size * beam_size]
output_scores_flattened, _ = self.step_model.net.Reshape(
[output_scores],
[output_scores, 'output_scores_old_shape'],
shape=[-1],
)
MINUS_ONE_INT32 = self.model.param_init_net.ConstantFill(
[],
'MINUS_ONE_INT32',
value=-1,
shape=[1],
dtype=core.DataType.INT32,
)
BEAM_SIZE = self.model.param_init_net.ConstantFill(
[],
'beam_size',
shape=[1],
value=self.beam_size,
dtype=core.DataType.INT32,
)
# current_beam_size (predecessor states from previous step)
# is 1 on first step (so we just need beam_size scores),
# and beam_size subsequently (so we need all beam_size * beam_size
# scores)
slice_end = self.step_model.net.Conditional(
[on_initial_step, BEAM_SIZE, MINUS_ONE_INT32],
['slice_end'],
)
# [current_beam_size * beam_size]
output_scores_flattened_slice = self.step_model.net.Slice(
[output_scores_flattened, ZERO, slice_end],
'output_scores_flattened_slice',
)
# [1, current_beam_size * beam_size]
output_scores_flattened_slice, _ = self.step_model.net.Reshape(
output_scores_flattened_slice,
[
output_scores_flattened_slice,
'output_scores_flattened_slice_old_shape',
],
shape=[1, -1],
)
# [1, beam_size]
scores_t, best_indices = self.step_model.net.TopK(
output_scores_flattened_slice,
['scores_t', 'best_indices'],
k=self.beam_size,
)
BEAM_SIZE_64 = self.model.param_init_net.Cast(
BEAM_SIZE,
'BEAM_SIZE_64',
to=core.DataType.INT64,
)
# [1, beam_size]
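        # best_indices indexes the flattened (hypothesis, token) grid, so
        # integer division by beam_size recovers the predecessor hypothesis
        # index; the Gather over best_tokens_per_hypo below recovers the token.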
hypo_t_int32 = self.step_model.net.Div(
[best_indices, BEAM_SIZE_64],
'hypo_t_int32',
broadcast=1,
)
hypo_t = self.step_model.net.Cast(
hypo_t_int32,
'hypo_t',
to=core.DataType.FLOAT,
)
# [beam_size, encoder_length, 1]
attention_t = self.step_model.net.Gather(
[attentions, hypo_t_int32],
'attention_t',
)
# [1, beam_size, encoder_length]
attention_t, _ = self.step_model.net.Reshape(
attention_t,
[attention_t, 'attention_t_old_shape'],
shape=[1, self.beam_size, -1],
)
# [beam_size * beam_size]
best_tokens_per_hypo_flatten, _ = self.step_model.net.Reshape(
best_tokens_per_hypo,
[
'best_tokens_per_hypo_flatten',
'best_tokens_per_hypo_old_shape',
],
shape=[-1],
)
tokens_t_int32 = self.step_model.net.Gather(
[best_tokens_per_hypo_flatten, best_indices],
'tokens_t_int32',
)
tokens_t = self.step_model.net.Cast(
tokens_t_int32,
'tokens_t',
to=core.DataType.FLOAT,
)
def choose_state_per_hypo(state_config):
state_flattened, _ = self.step_model.net.Reshape(
state_config.state_link.blob,
[
state_config.state_link.blob,
state_config.state_link.blob + '_old_shape',
],
shape=[self.beam_size, -1],
)
state_chosen_per_hypo = self.step_model.net.Gather(
[state_flattened, hypo_t_int32],
str(state_config.state_link.blob) + '_chosen_per_hypo',
)
return self.StateConfig(
initial_value=state_config.initial_value,
state_prev_link=state_config.state_prev_link,
state_link=self.LinkConfig(
blob=state_chosen_per_hypo,
offset=state_config.state_link.offset,
window=state_config.state_link.window,
)
)
state_configs = [choose_state_per_hypo(c) for c in state_configs]
initial_scores = self.model.param_init_net.ConstantFill(
[],
'initial_scores',
shape=[1],
value=0.0,
dtype=core.DataType.FLOAT,
)
if go_token_id:
initial_tokens = self.model.net.Copy(
[go_token_id],
'initial_tokens',
)
else:
initial_tokens = self.model.param_init_net.ConstantFill(
[],
'initial_tokens',
shape=[1],
value=float(self.go_token_id),
dtype=core.DataType.FLOAT,
)
initial_hypo = self.model.param_init_net.ConstantFill(
[],
'initial_hypo',
shape=[1],
value=0.0,
dtype=core.DataType.FLOAT,
)
encoder_inputs_flattened, _ = self.model.net.Reshape(
inputs,
['encoder_inputs_flattened', 'encoder_inputs_old_shape'],
shape=[-1],
)
init_attention = self.model.net.ConstantFill(
encoder_inputs_flattened,
'init_attention',
value=0.0,
dtype=core.DataType.FLOAT,
)
state_configs = state_configs + [
self.StateConfig(
initial_value=initial_scores,
state_prev_link=self.LinkConfig(self.scores_t_prev, 0, 1),
state_link=self.LinkConfig(scores_t, 1, 1),
),
self.StateConfig(
initial_value=initial_tokens,
state_prev_link=self.LinkConfig(self.tokens_t_prev, 0, 1),
state_link=self.LinkConfig(tokens_t, 1, 1),
),
self.StateConfig(
initial_value=initial_hypo,
state_prev_link=self.LinkConfig(self.hypo_t_prev, 0, 1),
state_link=self.LinkConfig(hypo_t, 1, 1),
),
self.StateConfig(
initial_value=init_attention,
state_prev_link=self.LinkConfig(self.attention_t_prev, 0, 1),
state_link=self.LinkConfig(attention_t, 1, 1),
),
]
fake_input = self.model.net.ConstantFill(
length,
'beam_search_fake_input',
input_as_shape=True,
extra_shape=[self.beam_size, 1],
value=0.0,
dtype=core.DataType.FLOAT,
)
all_inputs = (
[fake_input] +
self.step_model.params +
[state_config.initial_value for state_config in state_configs] +
data_dependencies
)
forward_links = []
recurrent_states = []
for state_config in state_configs:
state_name = str(state_config.state_prev_link.blob) + '_states'
recurrent_states.append(state_name)
forward_links.append((
state_config.state_prev_link.blob,
state_name,
state_config.state_prev_link.offset,
state_config.state_prev_link.window,
))
forward_links.append((
state_config.state_link.blob,
state_name,
state_config.state_link.offset,
state_config.state_link.window,
))
link_internal, link_external, link_offset, link_window = (
zip(*forward_links)
)
all_outputs = [
str(s) + '_all'
for s in [scores_t, tokens_t, hypo_t, attention_t]
]
results = self.model.net.RecurrentNetwork(
all_inputs,
all_outputs + ['step_workspaces'],
param=[all_inputs.index(p) for p in self.step_model.params],
alias_src=[
str(s) + '_states'
for s in [
self.scores_t_prev,
self.tokens_t_prev,
self.hypo_t_prev,
self.attention_t_prev,
]
],
alias_dst=all_outputs,
alias_offset=[0] * 4,
recurrent_states=recurrent_states,
initial_recurrent_state_ids=[
all_inputs.index(state_config.initial_value)
for state_config in state_configs
],
link_internal=[str(l) for l in link_internal],
link_external=[str(l) for l in link_external],
link_offset=link_offset,
link_window=link_window,
backward_link_internal=[],
backward_link_external=[],
backward_link_offset=[],
step_net=self.step_model.net.Proto(),
timestep=str(self.timestep),
outputs_with_grads=[],
enable_rnn_executor=1,
rnn_executor_debug=0
)
score_t_all, tokens_t_all, hypo_t_all, attention_t_all = results[:4]
output_token_beam_list = self.model.net.Cast(
tokens_t_all,
'output_token_beam_list',
to=core.DataType.INT32,
)
output_prev_index_beam_list = self.model.net.Cast(
hypo_t_all,
'output_prev_index_beam_list',
to=core.DataType.INT32,
)
output_score_beam_list = self.model.net.Alias(
score_t_all,
'output_score_beam_list',
)
output_attention_weights_beam_list = self.model.net.Alias(
attention_t_all,
'output_attention_weights_beam_list',
)
return (
output_token_beam_list,
output_prev_index_beam_list,
output_score_beam_list,
output_attention_weights_beam_list,
)
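
# A minimal sketch (an assumption for illustration) of how a caller wires one
# recurrent decoder state into the beam search, mirroring the pattern in
# translate.py: the step net reads the previous state at offset 0 and writes
# the new state at offset 1, both with a window of 1.
def _example_state_config(initial_state, state_prev_blob, state_blob):
    return BeamSearchForwardOnly.StateConfig(
        initial_value=initial_state,
        state_prev_link=BeamSearchForwardOnly.LinkConfig(
            blob=state_prev_blob, offset=0, window=1,
        ),
        state_link=BeamSearchForwardOnly.LinkConfig(
            blob=state_blob, offset=1, window=1,
        ),
    )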
|
## @package train
# Module caffe2.python.models.seq2seq.train
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import logging
import math
import numpy as np
import random
import time
import sys
import os
import caffe2.proto.caffe2_pb2 as caffe2_pb2
from caffe2.python import core, workspace, data_parallel_model
import caffe2.python.models.seq2seq.seq2seq_util as seq2seq_util
from caffe2.python.models.seq2seq.seq2seq_model_helper import Seq2SeqModelHelper
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler(sys.stderr))
Batch = collections.namedtuple('Batch', [
'encoder_inputs',
'encoder_lengths',
'decoder_inputs',
'decoder_lengths',
'targets',
'target_weights',
])
def prepare_batch(batch):
encoder_lengths = [len(entry[0]) for entry in batch]
max_encoder_length = max(encoder_lengths)
decoder_lengths = []
max_decoder_length = max([len(entry[1]) for entry in batch])
batch_encoder_inputs = []
batch_decoder_inputs = []
batch_targets = []
batch_target_weights = []
for source_seq, target_seq in batch:
encoder_pads = (
[seq2seq_util.PAD_ID] * (max_encoder_length - len(source_seq))
)
batch_encoder_inputs.append(
list(reversed(source_seq)) + encoder_pads
)
decoder_pads = (
[seq2seq_util.PAD_ID] * (max_decoder_length - len(target_seq))
)
target_seq_with_go_token = [seq2seq_util.GO_ID] + target_seq
decoder_lengths.append(len(target_seq_with_go_token))
batch_decoder_inputs.append(target_seq_with_go_token + decoder_pads)
target_seq_with_eos = target_seq + [seq2seq_util.EOS_ID]
targets = target_seq_with_eos + decoder_pads
batch_targets.append(targets)
if len(source_seq) + len(target_seq) == 0:
target_weights = [0] * len(targets)
else:
target_weights = [
1 if target != seq2seq_util.PAD_ID else 0
for target in targets
]
batch_target_weights.append(target_weights)
return Batch(
encoder_inputs=np.array(
batch_encoder_inputs,
dtype=np.int32,
).transpose(),
encoder_lengths=np.array(encoder_lengths, dtype=np.int32),
decoder_inputs=np.array(
batch_decoder_inputs,
dtype=np.int32,
).transpose(),
decoder_lengths=np.array(decoder_lengths, dtype=np.int32),
targets=np.array(
batch_targets,
dtype=np.int32,
).transpose(),
target_weights=np.array(
batch_target_weights,
dtype=np.float32,
).transpose(),
)
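# Hedged usage sketch of prepare_batch; the token ids below are hypothetical,
# not drawn from any corpus. Every returned array is time-major, i.e. shaped
# (max_length, batch_size):
#
#   batch = [([3, 4, 5], [6, 7]), ([8, 9], [10])]
#   b = prepare_batch(batch)
#   # b.encoder_inputs.shape == (3, 2)  sources reversed, then padded
#   # b.decoder_inputs.shape == (3, 2)  GO_ID prepended, then padded
#   # b.targets.shape        == (3, 2)  EOS_ID appended, then padded
#   # b.target_weights zeroes out the PAD_ID positions of targets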
class Seq2SeqModelCaffe2(object):
def _build_model(
self,
init_params,
):
model = Seq2SeqModelHelper(init_params=init_params)
self._build_shared(model)
self._build_embeddings(model)
forward_model = Seq2SeqModelHelper(init_params=init_params)
self._build_shared(forward_model)
self._build_embeddings(forward_model)
if self.num_gpus == 0:
loss_blobs = self.model_build_fun(model)
model.AddGradientOperators(loss_blobs)
self.norm_clipped_grad_update(
model,
scope='norm_clipped_grad_update'
)
self.forward_model_build_fun(forward_model)
else:
assert (self.batch_size % self.num_gpus) == 0
data_parallel_model.Parallelize_GPU(
forward_model,
input_builder_fun=lambda m: None,
forward_pass_builder_fun=self.forward_model_build_fun,
param_update_builder_fun=None,
devices=list(range(self.num_gpus)),
)
def clipped_grad_update_bound(model):
self.norm_clipped_grad_update(
model,
scope='norm_clipped_grad_update',
)
data_parallel_model.Parallelize_GPU(
model,
input_builder_fun=lambda m: None,
forward_pass_builder_fun=self.model_build_fun,
param_update_builder_fun=clipped_grad_update_bound,
devices=list(range(self.num_gpus)),
)
self.norm_clipped_sparse_grad_update(
model,
scope='norm_clipped_sparse_grad_update',
)
self.model = model
self.forward_net = forward_model.net
def _build_shared(self, model):
optimizer_params = self.model_params['optimizer_params']
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
self.learning_rate = model.AddParam(
name='learning_rate',
init_value=float(optimizer_params['learning_rate']),
trainable=False,
)
self.global_step = model.AddParam(
name='global_step',
init_value=0,
trainable=False,
)
self.start_time = model.AddParam(
name='start_time',
init_value=time.time(),
trainable=False,
)
def _build_embeddings(self, model):
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
sqrt3 = math.sqrt(3)
self.encoder_embeddings = model.param_init_net.UniformFill(
[],
'encoder_embeddings',
shape=[
self.source_vocab_size,
self.model_params['encoder_embedding_size'],
],
min=-sqrt3,
max=sqrt3,
)
model.params.append(self.encoder_embeddings)
self.decoder_embeddings = model.param_init_net.UniformFill(
[],
'decoder_embeddings',
shape=[
self.target_vocab_size,
self.model_params['decoder_embedding_size'],
],
min=-sqrt3,
max=sqrt3,
)
model.params.append(self.decoder_embeddings)
def model_build_fun(self, model, forward_only=False, loss_scale=None):
encoder_inputs = model.net.AddExternalInput(
workspace.GetNameScope() + 'encoder_inputs',
)
encoder_lengths = model.net.AddExternalInput(
workspace.GetNameScope() + 'encoder_lengths',
)
decoder_inputs = model.net.AddExternalInput(
workspace.GetNameScope() + 'decoder_inputs',
)
decoder_lengths = model.net.AddExternalInput(
workspace.GetNameScope() + 'decoder_lengths',
)
targets = model.net.AddExternalInput(
workspace.GetNameScope() + 'targets',
)
target_weights = model.net.AddExternalInput(
workspace.GetNameScope() + 'target_weights',
)
attention_type = self.model_params['attention']
assert attention_type in ['none', 'regular', 'dot']
(
encoder_outputs,
weighted_encoder_outputs,
final_encoder_hidden_states,
final_encoder_cell_states,
encoder_units_per_layer,
) = seq2seq_util.build_embedding_encoder(
model=model,
encoder_params=self.encoder_params,
num_decoder_layers=len(self.model_params['decoder_layer_configs']),
inputs=encoder_inputs,
input_lengths=encoder_lengths,
vocab_size=self.source_vocab_size,
embeddings=self.encoder_embeddings,
embedding_size=self.model_params['encoder_embedding_size'],
use_attention=(attention_type != 'none'),
num_gpus=self.num_gpus,
)
(
decoder_outputs,
decoder_output_size,
) = seq2seq_util.build_embedding_decoder(
model,
decoder_layer_configs=self.model_params['decoder_layer_configs'],
inputs=decoder_inputs,
input_lengths=decoder_lengths,
encoder_lengths=encoder_lengths,
encoder_outputs=encoder_outputs,
weighted_encoder_outputs=weighted_encoder_outputs,
final_encoder_hidden_states=final_encoder_hidden_states,
final_encoder_cell_states=final_encoder_cell_states,
encoder_units_per_layer=encoder_units_per_layer,
vocab_size=self.target_vocab_size,
embeddings=self.decoder_embeddings,
embedding_size=self.model_params['decoder_embedding_size'],
attention_type=attention_type,
            forward_only=forward_only,
num_gpus=self.num_gpus,
)
output_logits = seq2seq_util.output_projection(
model=model,
decoder_outputs=decoder_outputs,
decoder_output_size=decoder_output_size,
target_vocab_size=self.target_vocab_size,
decoder_softmax_size=self.model_params['decoder_softmax_size'],
)
targets, _ = model.net.Reshape(
[targets],
['targets', 'targets_old_shape'],
shape=[-1],
)
target_weights, _ = model.net.Reshape(
[target_weights],
['target_weights', 'target_weights_old_shape'],
shape=[-1],
)
_, loss_per_word = model.net.SoftmaxWithLoss(
[output_logits, targets, target_weights],
['OutputProbs_INVALID', 'loss_per_word'],
only_loss=True,
)
num_words = model.net.SumElements(
[target_weights],
'num_words',
)
total_loss_scalar = model.net.Mul(
[loss_per_word, num_words],
'total_loss_scalar',
)
total_loss_scalar_weighted = model.net.Scale(
[total_loss_scalar],
'total_loss_scalar_weighted',
scale=1.0 / self.batch_size,
)
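        # Note on the loss assembly above: SoftmaxWithLoss with target_weights
        # returns the average loss over non-PAD tokens, so multiplying by
        # num_words (the weight sum) recovers the total loss, and scaling by
        # 1 / batch_size yields the mean loss per sentence.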
return [total_loss_scalar_weighted]
def forward_model_build_fun(self, model, loss_scale=None):
return self.model_build_fun(
model=model,
forward_only=True,
loss_scale=loss_scale
)
def _calc_norm_ratio(self, model, params, scope, ONE):
with core.NameScope(scope):
grad_squared_sums = []
for i, param in enumerate(params):
logger.info(param)
grad = (
model.param_to_grad[param]
if not isinstance(
model.param_to_grad[param],
core.GradientSlice,
) else model.param_to_grad[param].values
)
grad_squared = model.net.Sqr(
[grad],
'grad_{}_squared'.format(i),
)
grad_squared_sum = model.net.SumElements(
grad_squared,
'grad_{}_squared_sum'.format(i),
)
grad_squared_sums.append(grad_squared_sum)
grad_squared_full_sum = model.net.Sum(
grad_squared_sums,
'grad_squared_full_sum',
)
global_norm = model.net.Pow(
grad_squared_full_sum,
'global_norm',
exponent=0.5,
)
clip_norm = model.param_init_net.ConstantFill(
[],
'clip_norm',
shape=[],
value=float(self.model_params['max_gradient_norm']),
)
max_norm = model.net.Max(
[global_norm, clip_norm],
'max_norm',
)
norm_ratio = model.net.Div(
[clip_norm, max_norm],
'norm_ratio',
)
return norm_ratio
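    # Worked example for the ratio above (illustrative numbers only): with
    # max_gradient_norm = 1.0 and a global gradient norm of 4.0,
    # norm_ratio = clip_norm / max(global_norm, clip_norm) = 0.25, so every
    # gradient is scaled down 4x; when the global norm is already below the
    # threshold, max_norm == clip_norm and norm_ratio == 1.0, leaving the
    # gradients untouched.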
def _apply_norm_ratio(
self, norm_ratio, model, params, learning_rate, scope, ONE
):
for param in params:
param_grad = model.param_to_grad[param]
nlr = model.net.Negative(
[learning_rate],
'negative_learning_rate',
)
with core.NameScope(scope):
update_coeff = model.net.Mul(
[nlr, norm_ratio],
'update_coeff',
broadcast=1,
)
if isinstance(param_grad, core.GradientSlice):
param_grad_values = param_grad.values
model.net.ScatterWeightedSum(
[
param,
ONE,
param_grad.indices,
param_grad_values,
update_coeff,
],
param,
)
else:
model.net.WeightedSum(
[
param,
ONE,
param_grad,
update_coeff,
],
param,
)
def norm_clipped_grad_update(self, model, scope):
if self.num_gpus == 0:
learning_rate = self.learning_rate
else:
learning_rate = model.CopyCPUToGPU(self.learning_rate, 'LR')
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if not isinstance(
model.param_to_grad[param],
core.GradientSlice,
):
params.append(param)
ONE = model.param_init_net.ConstantFill(
[],
'ONE',
shape=[1],
value=1.0,
)
logger.info('Dense trainable variables: ')
norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
self._apply_norm_ratio(
norm_ratio, model, params, learning_rate, scope, ONE
)
def norm_clipped_sparse_grad_update(self, model, scope):
learning_rate = self.learning_rate
params = []
for param in model.GetParams(top_scope=True):
if param in model.param_to_grad:
if isinstance(
model.param_to_grad[param],
core.GradientSlice,
):
params.append(param)
ONE = model.param_init_net.ConstantFill(
[],
'ONE',
shape=[1],
value=1.0,
)
logger.info('Sparse trainable variables: ')
norm_ratio = self._calc_norm_ratio(model, params, scope, ONE)
self._apply_norm_ratio(
norm_ratio, model, params, learning_rate, scope, ONE
)
def total_loss_scalar(self):
if self.num_gpus == 0:
return workspace.FetchBlob('total_loss_scalar')
else:
total_loss = 0
for i in range(self.num_gpus):
name = 'gpu_{}/total_loss_scalar'.format(i)
gpu_loss = workspace.FetchBlob(name)
total_loss += gpu_loss
return total_loss
def _init_model(self):
workspace.RunNetOnce(self.model.param_init_net)
def create_net(net):
workspace.CreateNet(
net,
input_blobs=[str(i) for i in net.external_inputs],
)
create_net(self.model.net)
create_net(self.forward_net)
def __init__(
self,
model_params,
source_vocab_size,
target_vocab_size,
num_gpus=1,
num_cpus=1,
):
self.model_params = model_params
self.encoder_type = 'rnn'
self.encoder_params = model_params['encoder_type']
self.source_vocab_size = source_vocab_size
self.target_vocab_size = target_vocab_size
self.num_gpus = num_gpus
self.num_cpus = num_cpus
self.batch_size = model_params['batch_size']
workspace.GlobalInit([
'caffe2',
# NOTE: modify log level for debugging purposes
'--caffe2_log_level=0',
# NOTE: modify log level for debugging purposes
'--v=0',
# Fail gracefully if one of the threads fails
'--caffe2_handle_executor_threads_exceptions=1',
'--caffe2_mkl_num_threads=' + str(self.num_cpus),
])
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
workspace.ResetWorkspace()
def initialize_from_scratch(self):
logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Start')
self._build_model(init_params=True)
self._init_model()
logger.info('Initializing Seq2SeqModelCaffe2 from scratch: Finish')
def get_current_step(self):
return workspace.FetchBlob(self.global_step)[0]
def inc_current_step(self):
workspace.FeedBlob(
self.global_step,
np.array([self.get_current_step() + 1]),
)
def step(
self,
batch,
forward_only
):
if self.num_gpus < 1:
batch_obj = prepare_batch(batch)
for batch_obj_name, batch_obj_value in zip(
Batch._fields,
batch_obj,
):
workspace.FeedBlob(batch_obj_name, batch_obj_value)
else:
for i in range(self.num_gpus):
gpu_batch = batch[i::self.num_gpus]
batch_obj = prepare_batch(gpu_batch)
for batch_obj_name, batch_obj_value in zip(
Batch._fields,
batch_obj,
):
name = 'gpu_{}/{}'.format(i, batch_obj_name)
if batch_obj_name in ['encoder_inputs', 'decoder_inputs']:
dev = core.DeviceOption(caffe2_pb2.CPU)
else:
dev = core.DeviceOption(caffe2_pb2.CUDA, i)
workspace.FeedBlob(name, batch_obj_value, device_option=dev)
if forward_only:
workspace.RunNet(self.forward_net)
else:
workspace.RunNet(self.model.net)
self.inc_current_step()
return self.total_loss_scalar()
def save(self, checkpoint_path_prefix, current_step):
checkpoint_path = '{0}-{1}'.format(
checkpoint_path_prefix,
current_step,
)
assert workspace.RunOperatorOnce(core.CreateOperator(
'Save',
self.model.GetAllParams(),
[],
absolute_path=True,
db=checkpoint_path,
db_type='minidb',
))
checkpoint_config_path = os.path.join(
os.path.dirname(checkpoint_path_prefix),
'checkpoint',
)
with open(checkpoint_config_path, 'w') as checkpoint_config_file:
checkpoint_config_file.write(
'model_checkpoint_path: "' + checkpoint_path + '"\n'
'all_model_checkpoint_paths: "' + checkpoint_path + '"\n'
)
logger.info('Saved checkpoint file to ' + checkpoint_path)
return checkpoint_path
def gen_batches(source_corpus, target_corpus, source_vocab, target_vocab,
batch_size, max_length):
with open(source_corpus) as source, open(target_corpus) as target:
parallel_sentences = []
for source_sentence, target_sentence in zip(source, target):
numerized_source_sentence = seq2seq_util.get_numberized_sentence(
source_sentence,
source_vocab,
)
numerized_target_sentence = seq2seq_util.get_numberized_sentence(
target_sentence,
target_vocab,
)
if (
len(numerized_source_sentence) > 0 and
len(numerized_target_sentence) > 0 and
(
max_length is None or (
len(numerized_source_sentence) <= max_length and
len(numerized_target_sentence) <= max_length
)
)
):
parallel_sentences.append((
numerized_source_sentence,
numerized_target_sentence,
))
parallel_sentences.sort(key=lambda s_t: (len(s_t[0]), len(s_t[1])))
batches, batch = [], []
for sentence_pair in parallel_sentences:
batch.append(sentence_pair)
if len(batch) >= batch_size:
batches.append(batch)
batch = []
if len(batch) > 0:
while len(batch) < batch_size:
batch.append(batch[-1])
assert len(batch) == batch_size
batches.append(batch)
random.shuffle(batches)
return batches
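# Note on gen_batches: sentence pairs are sorted by (source, target) length so
# each batch holds similarly sized sequences and wastes little padding, and a
# short final batch is filled out by repeating its last pair, which slightly
# over-weights that pair but keeps every batch exactly batch_size long.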
def run_seq2seq_model(args, model_params=None):
source_vocab = seq2seq_util.gen_vocab(
args.source_corpus,
args.unk_threshold,
)
target_vocab = seq2seq_util.gen_vocab(
args.target_corpus,
args.unk_threshold,
)
logger.info('Source vocab size {}'.format(len(source_vocab)))
logger.info('Target vocab size {}'.format(len(target_vocab)))
batches = gen_batches(args.source_corpus, args.target_corpus, source_vocab,
target_vocab, model_params['batch_size'],
args.max_length)
logger.info('Number of training batches {}'.format(len(batches)))
batches_eval = gen_batches(args.source_corpus_eval, args.target_corpus_eval,
source_vocab, target_vocab,
model_params['batch_size'], args.max_length)
logger.info('Number of eval batches {}'.format(len(batches_eval)))
with Seq2SeqModelCaffe2(
model_params=model_params,
source_vocab_size=len(source_vocab),
target_vocab_size=len(target_vocab),
num_gpus=args.num_gpus,
num_cpus=20,
) as model_obj:
model_obj.initialize_from_scratch()
for i in range(args.epochs):
logger.info('Epoch {}'.format(i))
total_loss = 0
for batch in batches:
total_loss += model_obj.step(
batch=batch,
forward_only=False,
)
logger.info('\ttraining loss {}'.format(total_loss))
total_loss = 0
for batch in batches_eval:
total_loss += model_obj.step(
batch=batch,
forward_only=True,
)
logger.info('\teval loss {}'.format(total_loss))
if args.checkpoint is not None:
model_obj.save(args.checkpoint, i)
def main():
random.seed(31415)
parser = argparse.ArgumentParser(
description='Caffe2: Seq2Seq Training'
)
parser.add_argument('--source-corpus', type=str, default=None,
help='Path to source corpus in a text file format. Each '
'line in the file should contain a single sentence',
required=True)
parser.add_argument('--target-corpus', type=str, default=None,
help='Path to target corpus in a text file format',
required=True)
parser.add_argument('--max-length', type=int, default=None,
help='Maximal lengths of train and eval sentences')
parser.add_argument('--unk-threshold', type=int, default=50,
                        help='Frequency threshold under which a token is '
                             'labeled as the unknown token')
parser.add_argument('--batch-size', type=int, default=32,
help='Training batch size')
parser.add_argument('--epochs', type=int, default=10,
help='Number of iterations over training data')
parser.add_argument('--learning-rate', type=float, default=0.5,
help='Learning rate')
    parser.add_argument('--max-gradient-norm', type=float, default=1.0,
                        help='Max global norm of gradients at the end of each '
                             'backward pass. Gradients are clipped to this '
                             'norm.')
parser.add_argument('--num-gpus', type=int, default=0,
help='Number of GPUs for data parallel model')
parser.add_argument('--use-bidirectional-encoder', action='store_true',
                        help='Set flag to use a bidirectional recurrent '
                             'network for the first layer of the encoder')
parser.add_argument('--use-attention', action='store_true',
help='Set flag to use seq2seq with attention model')
parser.add_argument('--source-corpus-eval', type=str, default=None,
help='Path to source corpus for evaluation in a text '
'file format', required=True)
parser.add_argument('--target-corpus-eval', type=str, default=None,
help='Path to target corpus for evaluation in a text '
'file format', required=True)
parser.add_argument('--encoder-cell-num-units', type=int, default=512,
help='Number of cell units per encoder layer')
parser.add_argument('--encoder-num-layers', type=int, default=2,
                        help='Number of encoder layers')
parser.add_argument('--decoder-cell-num-units', type=int, default=512,
                        help='Number of cell units per decoder layer')
parser.add_argument('--decoder-num-layers', type=int, default=2,
                        help='Number of decoder layers')
parser.add_argument('--encoder-embedding-size', type=int, default=256,
help='Size of embedding in the encoder layer')
parser.add_argument('--decoder-embedding-size', type=int, default=512,
help='Size of embedding in the decoder layer')
parser.add_argument('--decoder-softmax-size', type=int, default=None,
help='Size of softmax layer in the decoder')
parser.add_argument('--checkpoint', type=str, default=None,
help='Path to checkpoint')
args = parser.parse_args()
    # Build one dict per layer: multiplying a single-element list would alias
    # one shared dict across all layers.
    encoder_layer_configs = [
        dict(num_units=args.encoder_cell_num_units)
        for _ in range(args.encoder_num_layers)
    ]
    if args.use_bidirectional_encoder:
        assert args.encoder_cell_num_units % 2 == 0
        # Integer division keeps num_units an int under true division.
        encoder_layer_configs[0]['num_units'] //= 2
    decoder_layer_configs = [
        dict(num_units=args.decoder_cell_num_units)
        for _ in range(args.decoder_num_layers)
    ]
run_seq2seq_model(args, model_params=dict(
attention=('regular' if args.use_attention else 'none'),
decoder_layer_configs=decoder_layer_configs,
encoder_type=dict(
encoder_layer_configs=encoder_layer_configs,
use_bidirectional_encoder=args.use_bidirectional_encoder,
),
batch_size=args.batch_size,
optimizer_params=dict(
learning_rate=args.learning_rate,
),
encoder_embedding_size=args.encoder_embedding_size,
decoder_embedding_size=args.decoder_embedding_size,
decoder_softmax_size=args.decoder_softmax_size,
max_gradient_norm=args.max_gradient_norm,
))
if __name__ == '__main__':
main()
|
## @package formatter
# Module caffe2.python.docs.formatter
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.docs.parser import Parser
class Formatter(object):
def __init__(self):
self.content = ""
def clone(self):
return self.__class__()
def dump(self):
return self.content
def parseAndAdd(self, text):
text = Parser(text, self).parse()
self.addRaw(text)
    def addRaw(self, text):
        raise NotImplementedError('Not yet implemented.')
    def addLine(self, text):
        raise NotImplementedError('Not yet implemented.')
    def addLinebreak(self):
        raise NotImplementedError('Not yet implemented.')
    def addHeader(self, text):
        raise NotImplementedError('Not yet implemented.')
    def addEmphasis(self, text):
        raise NotImplementedError('Not yet implemented.')
    def addList(self, textList):
        raise NotImplementedError('Not yet implemented.')
    def addLink(self, text, url):
        raise NotImplementedError('Not yet implemented.')
    def addCode(self, text):
        raise NotImplementedError('Not yet implemented.')
    def addCodeLink(self, text):
        raise NotImplementedError('Not yet implemented.')
    def addTable(self, table):
        raise NotImplementedError('Not yet implemented.')
    def addBreak(self):
        raise NotImplementedError('Not yet implemented.')
class Markdown(Formatter):
def addRaw(self, text):
self.content += "{text}".format(text=text)
def addLine(self, text, new_line=False):
self.content += "{line}{text}\n".format(line=('\n' if new_line else ''),
text=text)
def addLinebreak(self):
self.content += "\n"
def addHeader(self, text, h=1):
self.addLine("{header} {text}".format(header=h * '#', text=text), True)
def addEmphasis(self, text, s=1):
self.addRaw("{stars}{text}{stars}".format(stars=s * '*', text=text))
def addList(self, textList):
for text in textList:
self.addLine("- {text}".format(text=text), True)
self.addLinebreak()
def addLink(self, text, url):
self.addRaw("[{text}]({url})".format(text=text, url=url))
def addCodeLink(self, path, options=None):
self.addRaw("({path})".format(path=path))
def addCode(self, text, inline=False):
        if inline:
self.content += "`{text}`".format(text=text)
else:
self.addRaw("\n\n```\n{text}```\n\n".format(text=text))
def addTable(self, table, noTitle=False):
self.addLinebreak()
assert(len(table) > 1)
if noTitle:
table.insert(0, [' ' for i in range(len(table[0]))])
self.addLine(' | '.join(table[0]))
self.addLine(' | '.join(['----' for i in range(len(table[0]))]))
for row in table[1:]:
self.addLine(' | '.join(row))
self.addLinebreak()
def addBreak(self):
self.addLine('\n---\n', True)
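# Hedged usage sketch of the Markdown formatter; the trailing comments show
# the fragments each call appends, per the methods above:
#
#   f = Markdown()
#   f.addHeader('Ops', h=2)           # "\n## Ops\n"
#   f.addList(['first', 'second'])    # "\n- first\n\n- second\n\n"
#   f.addCode('x = 1\n')              # fenced block wrapped in blank lines
#   print(f.dump())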
|
## @package parser
# Module caffe2.python.docs.parser
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
class Parser(object):
    # List of tuples (regex_str, lambda(regex_match, formatter)).
    # If a lambda returns True, the regex is re-applied to the merged
    # replacement text; otherwise it only runs on text that has not been
    # parsed yet.
    regexes = [
        # Code blocks of various formats
        (r'````(.+?)````',
         lambda m, f: f.addCode(m.group(1))),
        (r'```(.+?)```',
         lambda m, f: f.addCode(m.group(1))),
        (r'((( {2})+)(\S.*)(\n\s*\n|\n))+',
         lambda m, f: f.addCode(m.group(0))),
        (r'([^\.])\n',
         lambda m, f: f.addRaw('{c} '.format(c=m.group(1))) or True),
        (r'`(.+?)`',
         lambda m, f: f.addCode(m.group(1), True)),
        # Make links clickable
        (r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]'
         r'|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
         lambda m, f: f.addLink(m.group(0), m.group(0))),
        (r'\*\*(.+?)\*\*',
         lambda m, f: f.addEmphasis(m.group(1), 2)),
        (r'\*(.+?)\*',
         lambda m, f: f.addEmphasis(m.group(1), 1)),
    ]
def __init__(self, text, formatter):
self.text = text
self.lines = []
self.formatter = formatter
def parseText(self):
UNPARSED = 0
PARSED = 1
parsed_block = [(UNPARSED, self.text)]
for regex, func in self.regexes:
index = 0
while index < len(parsed_block):
label, text = parsed_block[index]
# Already been parsed
if (label == PARSED):
index += 1
continue
match = re.search(regex, text)
if match:
parsed_block.pop(index)
start = match.start(0)
end = match.end(0)
f = self.formatter.clone()
merge = func(match, f)
if merge:
merged = text[:start] + f.dump() + text[end:]
parsed_block.insert(index, (UNPARSED, merged))
else:
if text[:start]:
parsed_block.insert(index,
(UNPARSED, text[:start]))
index += 1
parsed_block.insert(index, (PARSED, f.dump()))
index += 1
if text[end:]:
parsed_block.insert(index,
(UNPARSED, text[end:]))
else:
index += 1
self.lines += [i for _, i in parsed_block]
self.text = ' '.join(self.lines)
def parse(self):
self.parseText()
return self.text
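# Hedged usage sketch: Parser walks the regexes over raw text, handing each
# match to a clone of the formatter, so for example
#
#   from caffe2.python.docs.formatter import Markdown
#   text = Parser('see **this** and `that`', Markdown()).parse()
#
# returns the input with the emphasis and inline-code spans re-emitted
# through the Markdown formatter.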
|
## @package generator
# Module caffe2.python.docs.generator
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
from caffe2.python import core, workspace
from caffe2.python.docs.formatter import Markdown
from future.utils import viewitems, viewvalues
OpSchema = workspace.C.OpSchema
class DocUploader(object):
def __init__(self):
pass
def upload(self, text):
pass
class DocGenerator(object):
def __init__(self, formatter, uploader):
self.formatter = formatter
self.uploader = uploader
self.content_body = ""
def create_body(self):
pass
def update(self):
self.uploader.upload(self.content_body)
class OpDocGenerator(DocGenerator):
def getOperatorDoc(self, name, schema, priority):
return OperatorDoc(name, schema, priority)
def getOperatorEngine(self, name):
return OperatorEngine(name)
def getOperators(self):
# map: op_name -> operator
self.operators = {}
# map: op_name -> [engine, engine]
self.engines = {}
def filePriority(x):
if x == "caffe2/caffe2/operators":
return 0
if 'contrib' in x.split('/'):
return 2
if 'experiments' in x.split('/'):
return 3
return 1
for name in core._GetRegisteredOperators():
schema = OpSchema.get(name)
if schema:
priority = filePriority(os.path.dirname(schema.file))
operator = self.getOperatorDoc(name, schema, priority)
self.operators[name] = operator
# Engine
elif name.find("_ENGINE_") != -1:
engine = self.getOperatorEngine(name)
if engine.base_op_name in self.engines:
self.engines[engine.base_op_name].append(engine)
else:
self.engines[engine.base_op_name] = [engine]
# No schema
else:
priority = 4
self.operators[name] = self.getOperatorDoc(name, schema, priority)
for name, engines in viewitems(self.engines):
if name in self.operators:
self.operators[name].addEngines(engines)
# Generate a sorted list of operators
return sorted(
viewvalues(self.operators),
key=lambda op: (op.priority, op.name)
)
def createBody(self):
operators = self.getOperators()
for operator in operators:
operator.generateSchema(self.formatter)
self.content_body += self.formatter.dump()
class OperatorEngine(object):
def __init__(self, name):
self.op_name = name
self.base_op_name, self.engine = name.split("_ENGINE_", 1)
def getDeviceImpl(self):
deviceImplList = []
for device, impl in [('CPU', OpSchema.get_cpu_impl(self.op_name)),
('CUDA', OpSchema.get_cuda_impl(self.op_name))]:
if not impl:
continue
deviceImplList.append((device, impl))
return deviceImplList
def generateDoc(self, formatter):
for device, impl in self.getDeviceImpl():
formatter.addLine(
'{engine} on {device}: {impl}'.format(engine=self.engine,
device=device,
impl=impl))
class OperatorDoc(object):
def __init__(self, name, schema, priority):
self.name = name
self.schema = schema
self.priority = priority
print("Gathering docs for {}...".format(self.name))
self.engines = []
def addEngines(self, engines):
self.engines = engines
def generateDoc(self, formatter):
if self.schema.doc:
formatter.parseAndAdd(self.schema.doc)
formatter.addLinebreak()
else:
formatter.addLine("No documentation yet.")
def generateTable(self, formatter, tuples, title_row, title):
if tuples:
if title:
formatter.addHeader(title, 3)
table = []
if title_row:
table = [title_row]
for name, doc in tuples:
table.append([name, doc or ''])
            formatter.addTable(table, noTitle=(title_row is None))
def generateInterface(self, formatter):
def makeDesc(title, args):
f = formatter.clone()
f.addEmphasis(title, 1)
out = [(f.dump(), '')]
for arg in args:
f = formatter.clone()
if isinstance(arg, tuple):
name = arg[0]
if len(arg) > 1:
description = arg[1] or ''
else:
description = ''
else:
name = arg.name
description = arg.description or ''
f.addCode(name, inline=True)
out.append((f.dump(), description or ''))
return out
tuples = []
if self.schema.args:
tuples += makeDesc('Arguments', self.schema.args)
if self.schema.input_desc:
tuples += makeDesc('Inputs', self.schema.input_desc)
if self.schema.output_desc:
tuples += makeDesc('Outputs', self.schema.output_desc)
self.generateTable(formatter, tuples, None, 'Interface')
print("Generated interface for {}".format(self.name))
def generateCodeLink(self, formatter):
formatter.addHeader("Code", 3)
formatter.addLinebreak()
formatter.addCodeLink(self.schema.file)
def getInfo(self, formatter, name, impl):
pass
def generateDevices(self, formatter):
formatter.addHeader("Devices", 3)
devices = [
self.getInfo(formatter,
'CPU', OpSchema.get_cpu_impl(self.name)),
self.getInfo(formatter,
'GPU', OpSchema.get_cuda_impl(self.name)),
]
formatter.addList([i for i in devices if i])
def generateEngines(self, formatter):
if not len(self.engines):
return
formatter.addHeader("Engines", 3)
for engine in self.engines:
engine.generateDoc(formatter)
def generateSchema(self, formatter):
formatter.addHeader(self.name, 2)
if self.schema:
self.generateDoc(formatter)
self.generateInterface(formatter)
self.generateCodeLink(formatter)
self.generateDevices(formatter)
self.generateEngines(formatter)
formatter.addBreak()
else:
formatter.addLine("No schema documented yet.")
self.generateDevices(formatter)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Operators catalog generator.")
parser.add_argument('catalog_path', type=str,
help='operators-catalogue.md to write out to')
args = parser.parse_args()
with open(args.catalog_path, 'w') as fp:
ops = OpDocGenerator(Markdown(), DocUploader())
ops.createBody()
fp.write(ops.content_body)
|
## @package github
# Module caffe2.python.docs.github
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import os
from caffe2.python.docs.formatter import Markdown
from caffe2.python.docs.generator import OpDocGenerator, DocUploader
from caffe2.python.docs.generator import OperatorDoc, OperatorEngine
class GHOpDocUploader(DocUploader):
def __init__(self):
pass
def upload(self, content_body):
print(content_body)
class GHMarkdown(Markdown):
def addHeader(self, text, h=1):
self.addLine("\n{header} {text}\n".format(header=h * '#', text=text), True)
def addDocHeader(self):
self.addLine("---")
self.addLine("docid: operators-catalog")
self.addLine("title: Operators Catalog")
self.addLine("layout: operators")
self.addLine("permalink: /docs/operators-catalogue.html")
self.addLine("---")
self.addLine("* TOC")
self.addLine("{:toc}")
def addTable(self, table, noTitle=False):
self.addLinebreak()
assert(len(table) > 1)
self.addLine(' | '.join(['----------' for i in range(len(table[0]))]))
self.addLine(' | '.join(table[0]))
for row in table[1:]:
self.addLine(' | '.join(row))
def addTableHTML(self, table, noTitle=False):
self.addRaw("<table>")
for row in table:
self.addRaw("<tr>")
for cell in row:
self.addRaw("<td>")
self.addLine("{cell}".format(cell=cell))
self.addRaw("</td>")
self.addRaw("</tr>")
self.addRaw("</table>")
def getCodeLink(formatter, schema):
formatter = formatter.clone()
path = os.path.join("caffe2", os.path.relpath(schema.file, "caffe2"))
schemaLink = ('https://github.com/caffe2/caffe2/blob/master/{path}'
.format(path=path))
formatter.addLink('{path}'.format(path=path), schemaLink)
return formatter.dump()
class GHOperatorEngine(OperatorEngine):
def generateDoc(self, formatter):
for device, _ in self.getDeviceImpl():
formatter.addCode('{engine}'.format(engine=self.engine), True)
if device:
formatter.addRaw(' on ')
formatter.addEmphasis("{device}".format(device=device), 1)
class GHOperatorDoc(OperatorDoc):
def generateCodeLink(self, formatter):
formatter.addHeader("Code", 3)
formatter.addLinebreak()
formatter.addRaw(getCodeLink(formatter, self.schema))
def getInfo(self, formatter, name, impl):
formatter = formatter.clone()
if impl:
formatter.addEmphasis('{name}'.format(name=name), 1)
formatter.addRaw(' ')
formatter.addCode('{impl}'.format(impl=impl), True)
return formatter.dump()
def generateSchema(self, formatter):
formatter.addHeader(self.name, 2)
if self.schema:
self.generateDoc(formatter)
self.generateInterface(formatter)
self.generateCodeLink(formatter)
formatter.addBreak()
else:
formatter.addLine("No schema documented yet.")
class GHOpDocGenerator(OpDocGenerator):
def getOperatorDoc(self, name, schema, priority):
return GHOperatorDoc(name, schema, priority)
def getOperatorEngine(self, name):
return GHOperatorEngine(name)
def createBody(self):
self.formatter.addDocHeader()
operators = self.getOperators()
for operator in operators:
operator.generateSchema(self.formatter)
self.content_body += self.formatter.dump()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Operators catalog generator.")
parser.add_argument('catalog_path', type=str,
help='operators-catalogue.md to write out to')
args = parser.parse_args()
with open(args.catalog_path, 'w') as fp:
        ops = GHOpDocGenerator(GHMarkdown(), GHOpDocUploader())
ops.createBody()
fp.write(ops.content_body)
print("Updated {}!".format(args.catalog_path))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeNormForBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to compute norms for
certain blobs.
Args:
blobs: list of blobs to compute norm for
logging_frequency: frequency for printing norms to logs
p: type of norm. Currently it supports p=1 or p=2
        compute_averaged_norm: whether to compute the raw norm or the
            averaged norm (averaged_norm = norm / size)
"""
def __init__(self, blobs, logging_frequency, p=2, compute_averaged_norm=False):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._p = p
self._compute_averaged_norm = compute_averaged_norm
self._field_name_suffix = '_l{}_norm'.format(p)
if compute_averaged_norm:
self._field_name_suffix = '_averaged' + self._field_name_suffix
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None):
p = self._p
compute_averaged_norm = self._compute_averaged_norm
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
if not net.BlobIsDefined(blob):
raise Exception('blob {0} is not defined in net {1}'.format(
blob, net.Name()))
norm_name = net.NextScopedBlob(prefix=blob + self._field_name_suffix)
norm = net.LpNorm(blob, norm_name, p=p, average=compute_averaged_norm)
if self._logging_frequency >= 1:
net.Print(norm, [], every_n=self._logging_frequency)
output_field_name = str(blob) + self._field_name_suffix
            output_scalar = schema.Scalar((np.float64, (1,)), norm)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
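# Hedged usage sketch, mirroring how the other NetModifiers in this package
# are exercised in their tests ('fc1_w' is a hypothetical blob name):
#
#   net_modifier = ComputeNormForBlobs(
#       blobs=['fc1_w'],
#       logging_frequency=10,
#       p=2,
#   )
#   net_modifier(model.net)
#   # after running the net, the norm lives in a blob whose name is derived
#   # from 'fc1_w' + '_l2_norm' via NextScopedBlob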
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.gradient_clipping import GradientClipping
import numpy as np
class GradientClippingTest(unittest.TestCase):
def test_gradient_clipping(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (3 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 17)
def test_gradient_clipping_l1_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l1_norm',
clip_threshold=0.1,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (2 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 15)
def test_gradient_clipping_using_param_norm(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
use_parameter_norm=True,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (5 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 21)
def test_gradient_clipping_compute_norm_ratio(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
sigm = model.net.Sigmoid(fc2, 'sigm')
sq = model.net.SquaredL2Distance([sigm, 'label'], 'sq')
loss = model.net.SumElements(sq, 'loss')
grad_map = model.AddGradientOperators([loss])
grad_map_for_param = {key: grad_map[key] for key in ['fc1_w', 'fc2_w']}
net_modifier = GradientClipping(
grad_clip_method='by_norm',
clip_norm_type='l2_norm',
clip_threshold=0.1,
use_parameter_norm=True,
compute_norm_ratio=True,
)
net_modifier(model.net, grad_map=grad_map_for_param)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.FeedBlob('label', np.random.rand(10, 1).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
# 5 forward ops + 6 backward ops + 2 * (6 gradient clipping ops)
self.assertEqual(len(model.net.Proto().op), 23)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python import brew, model_helper, workspace
from caffe2.python.modeling.initializers import (
Initializer, PseudoFP16Initializer)
class InitializerTest(unittest.TestCase):
def test_fc_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# no operator name set, will use default
fc2 = brew.fc(model, fc1, "fc2", dim_in=1, dim_out=1,
WeightInitializer=Initializer)
        # operator name and custom initializer class both set
fc3 = brew.fc(model, fc2, "fc3", dim_in=1, dim_out=1,
WeightInitializer=Initializer,
weight_init=("ConstantFill", {}),
)
# operator name set, no initializer class set
fc4 = brew.fc(model, fc3, "fc4", dim_in=1, dim_out=1,
WeightInitializer=None,
weight_init=("ConstantFill", {})
)
@unittest.skipIf(not workspace.has_gpu_support, 'No GPU support')
def test_fc_fp16_initializer(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1)
# default operator, PseudoFP16Initializer
fc2 = brew.fc(model, fc1, "fc2", dim_in=1, dim_out=1,
WeightInitializer=PseudoFP16Initializer
)
# specified operator, PseudoFP16Initializer
fc3 = brew.fc(model, fc2, "fc3", dim_in=1, dim_out=1,
weight_init=("ConstantFill", {}),
WeightInitializer=PseudoFP16Initializer
)
def test_fc_external_initializer(self):
model = model_helper.ModelHelper(name="test", init_params=False)
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=1, dim_out=1) # noqa
self.assertEqual(len(model.net.Proto().op), 1)
self.assertEqual(len(model.param_init_net.Proto().op), 0)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeHistogramForBlobs(NetModifier):
"""
    This class modifies the net passed in by adding ops to compute histograms
    for certain blobs.
Args:
blobs: list of blobs to compute histogram for
        logging_frequency: frequency for printing histograms to logs
lower_bound: left boundary of histogram values
upper_bound: right boundary of histogram values
num_buckets: number of buckets to use in [lower_bound, upper_bound)
accumulate: boolean to output accumulate or per-batch histogram
"""
def __init__(self, blobs, logging_frequency, num_buckets=30,
lower_bound=0.0, upper_bound=1.0, accumulate=False):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._accumulate = accumulate
if self._accumulate:
self._field_name_suffix = '_acc_normalized_hist'
else:
self._field_name_suffix = '_curr_normalized_hist'
self._num_buckets = int(num_buckets)
assert self._num_buckets > 0, (
"num_buckets need to be greater than 0, got {}".format(num_buckets))
self._lower_bound = float(lower_bound)
self._upper_bound = float(upper_bound)
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
if not net.BlobIsDefined(blob):
raise Exception('blob {0} is not defined in net {1}'.format(
blob, net.Name()))
blob_float = net.Cast(blob, net.NextScopedBlob(prefix=blob +
'_float'), to=core.DataType.FLOAT)
curr_hist, acc_hist = net.AccumulateHistogram(
[blob_float],
[net.NextScopedBlob(prefix=blob + '_curr_hist'),
net.NextScopedBlob(prefix=blob + '_acc_hist')],
num_buckets=self._num_buckets,
lower_bound=self._lower_bound,
upper_bound=self._upper_bound)
if self._accumulate:
hist = net.Cast(
acc_hist,
net.NextScopedBlob(prefix=blob + '_cast_hist'),
to=core.DataType.FLOAT)
else:
hist = net.Cast(
curr_hist,
net.NextScopedBlob(prefix=blob + '_cast_hist'),
to=core.DataType.FLOAT)
normalized_hist = net.NormalizeL1(
hist,
net.NextScopedBlob(prefix=blob + self._field_name_suffix)
)
if self._logging_frequency >= 1:
net.Print(normalized_hist, [], every_n=self._logging_frequency)
output_field_name = str(blob) + self._field_name_suffix
output_scalar = schema.Scalar((np.float32, (self._num_buckets + 2,)),
normalized_hist)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
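# Hedged usage sketch ('fc1_w' is a hypothetical blob name). Per the output
# schema above, the normalized histogram has num_buckets + 2 bins; the two
# extras catch values outside [lower_bound, upper_bound):
#
#   net_modifier = ComputeHistogramForBlobs(
#       blobs=['fc1_w'],
#       logging_frequency=10,
#       num_buckets=30,
#       lower_bound=0.0,
#       upper_bound=1.0,
#   )
#   net_modifier(model.net)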
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import brew, model_helper, scope
from caffe2.python.modeling.parameter_sharing import (
ParameterSharing,
parameter_sharing_context,
)
from caffe2.python.modeling.initializers import (
Initializer
)
import unittest
class ParameterSharingTest(unittest.TestCase):
    def test_parameter_sharing_default_scopes(self):
        # Test no sharing default scopes
        param_1 = parameter_sharing_context.get_parameter_name('w')
        self.assertEqual(param_1, 'w')
        with scope.NameScope('scope'):
            param_2 = parameter_sharing_context.get_parameter_name('w')
            self.assertEqual(param_2, 'scope/w')
            with scope.NameScope('scope_2'):
                param_3 = parameter_sharing_context.get_parameter_name('w')
                self.assertEqual(param_3, 'scope/scope_2/w')
    def test_parameter_sharing_nested_scopes(self):
        # Test parameter sharing
        with scope.NameScope('global_scope'):
            with ParameterSharing({'model_b': 'model_a'}):
                param_global = parameter_sharing_context.get_parameter_name('w')
                self.assertEqual(param_global, 'global_scope/w')
                # This scope is overridden to match 'model_a'
                with scope.NameScope('model_b'):
                    with ParameterSharing({'shared_scope': ''}):
                        param_4 = parameter_sharing_context.get_parameter_name(
                            'w')
                        self.assertEqual(param_4, 'global_scope/model_a/w')
                        with scope.NameScope('shared_scope'):
                            param_5 = parameter_sharing_context.\
                                get_parameter_name('w')
                            self.assertEqual(param_5, 'global_scope/model_a/w')
                # This scope is not remapped, so nothing is shared
                with scope.NameScope('model_c'):
                    with ParameterSharing({'shared_scope': ''}):
                        param_4 = parameter_sharing_context.get_parameter_name(
                            'w')
                        self.assertEqual(param_4, 'global_scope/model_c/w')
                        with scope.NameScope('shared_scope'):
                            param_5 = parameter_sharing_context.\
                                get_parameter_name('w')
                            self.assertEqual(param_5, 'global_scope/model_c/w')
    def test_parameter_sharing_subscopes(self):
        # Sharing only one of the subscopes
        with ParameterSharing({'global_scope/b': 'global_scope/a'}):
            with scope.NameScope('global_scope'):
                param_6 = parameter_sharing_context.get_parameter_name('w')
                self.assertEqual(param_6, 'global_scope/w')
                with scope.NameScope('a'):
                    param_7 = parameter_sharing_context.get_parameter_name('w')
                    self.assertEqual(param_7, 'global_scope/a/w')
                with scope.NameScope('b'):
                    param_8 = parameter_sharing_context.get_parameter_name('w')
                    self.assertEqual(param_8, 'global_scope/a/w')
                with scope.NameScope('c'):
                    param_9 = parameter_sharing_context.get_parameter_name('w')
                    self.assertEqual(param_9, 'global_scope/c/w')
def test_create_param(self):
model = model_helper.ModelHelper(name="test")
# Test no sharing default scopes
p1 = model.create_param(
'w',
shape=[2],
initializer=Initializer("ConstantFill")
)
with scope.NameScope('some_global_scope'):
p2 = model.create_param(
'w',
shape=[2],
initializer=Initializer("ConstantFill")
)
self.assertNotEqual(model.get_param_info(p1), None)
self.assertNotEqual(model.get_param_info(p2), None)
self.assertNotEqual(model.get_param_info(p1), model.get_param_info(p2))
model.Validate()
def test_deep_hierarchy(self):
model = model_helper.ModelHelper(name="test")
with ParameterSharing({'a': 'b'}):
with scope.NameScope('a'):
with ParameterSharing({'c': 'd'}):
with scope.NameScope('c'):
with ParameterSharing({'e': 'f'}):
with scope.NameScope('e'):
p = model.create_param(
'w',
shape=[2],
initializer=Initializer("ConstantFill")
)
self.assertNotEqual(model.get_param_info(p), None)
def test_parameter_sharing_brew(self):
# Test no sharing default scopes
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=16, dim_out=16)
# Shared params are expected to share the same shape and fail if it's
# not true
with self.assertRaises(AssertionError):
_ = brew.fc(model, data, "fc1", dim_in=2, dim_out=2) # noqa
output_blobs = set()
with scope.NameScope('some_global_scope'):
with scope.NameScope('model_a'):
output_blobs.add(str(brew.fc(model, fc1, 'output', 16, 16)))
with ParameterSharing({'model_b': 'model_a'}),\
scope.NameScope('model_b'):
with ParameterSharing({'shared_1': '', 'shared_2': ''}):
# All params in DenseLayers from shared_1, shared_2 and
# model_a are shared and will be pointing to:
# [some_global_scope/model_a/output_W,
# some_global_scope/model_a/output_b]
with scope.NameScope('shared_1'):
output_blobs.add(
str(brew.fc(model, fc1, 'output', 16, 16)))
with scope.NameScope('shared_2'):
output_blobs.add(
str(brew.fc(model, fc1, 'output', 16, 16)))
# Params of this layer are not shared with anyone unless
# there is some explicit sharing with model_a/unshared (not
# in this example).
# Names of the blobs are
# [some_global_scope/model_a/unshared/output_W,
# some_global_scope/model_a/unshared/output_b]
with scope.NameScope('unshared'):
output_blobs.add(
str(brew.fc(model, fc1, 'output', 16, 16)))
self.assertEqual(len(model._parameters_info), 6)
self.assertEqual(len(output_blobs), 4)
self.assertEqual(sorted(model._parameters_info.keys()), [
'fc1_b',
'fc1_w',
'some_global_scope/model_a/output_b',
'some_global_scope/model_a/output_w',
'some_global_scope/model_a/unshared/output_b',
'some_global_scope/model_a/unshared/output_w',
])
model.Validate()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.modeling.net_modifier import NetModifier
import numpy as np
class ComputeStatisticsForBlobs(NetModifier):
"""
This class modifies the net passed in by adding ops to compute statistics
for certain blobs. For each blob in the list, its min, max, mean and standard
deviation will be computed.
Args:
        blobs: list of blobs to compute statistics for
        logging_frequency: frequency for printing statistics to logs
"""
def __init__(self, blobs, logging_frequency):
self._blobs = blobs
self._logging_frequency = logging_frequency
self._field_name_suffix = '_summary'
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None):
for blob_name in self._blobs:
blob = core.BlobReference(blob_name)
if not net.BlobIsDefined(blob):
raise Exception('blob {0} is not defined in net {1}'.format(
blob, net.Name()))
cast_blob = net.Cast(blob, to=core.DataType.FLOAT)
stats_name = net.NextScopedBlob(prefix=blob + self._field_name_suffix)
stats = net.Summarize(cast_blob, stats_name, to_file=0)
net.Print(stats, [], every_n=self._logging_frequency)
output_field_name = str(blob) + self._field_name_suffix
            output_scalar = schema.Scalar((np.float64, (1,)), stats)
if net.output_record() is None:
net.set_output_record(
schema.Struct((output_field_name, output_scalar))
)
else:
net.AppendOutputRecordField(
output_field_name,
output_scalar)
def field_name_suffix(self):
return self._field_name_suffix
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.core import DataType, BlobReference, ScopedBlobReference
from caffe2.python.modeling.parameter_info import ParameterInfo
import six
class Initializer(object):
'''
    This class abstracts out parameter creation. One can come up with a new
    Initializer in order to implement more complex parameter initialization
    logic.
'''
def __init__(self, operator_name=None, **kwargs):
self.operator_name = operator_name
self.operator_kwargs = kwargs
def update(self, operator_name, kwargs):
if self.operator_name is not None:
raise Exception("Operator name overwrites are not allowed")
self.operator_name = operator_name
self.operator_kwargs = kwargs
def create_param(self, param_name, init_net, shape):
param = init_net.__getattr__(self.operator_name)(
[], param_name, shape=shape, **self.operator_kwargs)
return ParameterInfo(
param_id=None,
param=param,
shape=shape,
)
class ExternalInitializer(object):
'''
This class is used in cases when the parameter should not be initialized by
the initializer, but rather provided in the workspace when param_init_net is
executed.
    The current version does not do any real sanity checks on the parameter.
'''
def create_param(self, param_name, init_net, shape):
if isinstance(param_name, BlobReference):
param = BlobReference(str(param_name), init_net)
elif isinstance(param_name, six.string_types):
param = ScopedBlobReference(param_name, init_net)
else:
raise "Unsupported type for param_name"
# TODO(amalevich): Add operator that will check param in the workspace
return ParameterInfo(
param_id=None,
param=param,
shape=shape,
)
class PseudoFP16Initializer(Initializer):
'''
Used in cases when the parameter should be used at half (16-bit) precision
for compute purposes (i.e. on the forward and backward pass) but
needs to be stored and optimized at single (32-bit) precision so tiny
gradients with small learning rates don't underflow FP16 precision.
A 32-bit copy of the 16-bit blob is stored in the ParameterInfo.
This is helpful for mixed-precision training, see
https://arxiv.org/abs/1710.03740 for details.
'''
def update(self, operator_name, kwargs):
if self.operator_name is not None:
raise Exception("Operator name overwrites are not allowed")
self.operator_name = operator_name
self.operator_kwargs = kwargs
def create_param(self, param_name, init_net, shape):
# create master fp32 copy
param_fp32 = init_net.__getattr__(self.operator_name)(
[], param_name + "_fp32", shape=shape,
**self.operator_kwargs)
# cast to fp16 copy
param = init_net.FloatToHalf(
param_fp32, param_name)
return ParameterInfo(
param_id=None,
param=param,
shape=shape,
blob_copy={DataType.FLOAT: param_fp32}
)
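# Sketch of the blob layout produced above, for a hypothetical parameter
# named 'w': create_param writes the float32 master copy to 'w_fp32' in
# init_net, casts it to the float16 blob 'w' that the forward pass consumes,
# and records the master under blob_copy[DataType.FLOAT] so an optimizer can
# keep updating it at full precision.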
class ReversePseudoFP16Initializer(Initializer):
'''
Like PseudoFP16Initializer above, except the primary blob is taken to
be the 32-bit precision parameter, and the 16-bit version of the blob
is stored in blob_copy instead.
'''
def update(self, operator_name, kwargs):
if self.operator_name is not None:
raise Exception("Operator name overwrites are not allowed")
self.operator_name = operator_name
self.operator_kwargs = kwargs
def create_param(self, param_name, init_net, shape):
# create master fp32 copy
param_fp32 = init_net.__getattr__(self.operator_name)(
[], param_name, shape=shape,
**self.operator_kwargs)
# cast to fp16 copy
param_fp16 = init_net.FloatToHalf(
param_fp32, param_name + "_fp16")
return ParameterInfo(
param_id=None,
param=param_fp32,
shape=shape,
blob_copy={DataType.FLOAT16: param_fp16}
)
def update_initializer(initializer_class,
operator_name_and_kwargs,
default_operator_name_and_kwargs):
'''
A helper function to convert from operator_name_and_kwargs to new
object of type initializer_class. This function serves two purposes:
1. Support for custom initialization operators being passed in
2. Allow user to specify a custom Initializer without overwriting
default operators used for initialization
If initializer_class is None, creates a default initializer using
the Initializer class and operator_name_and_kwargs provided
If operator_name_and_kwargs is None, uses default_operator_name_and_kwargs
returns an instantiated Initializer object
'''
def get_initializer_args():
return (
operator_name_and_kwargs or
default_operator_name_and_kwargs
)
if initializer_class is not None:
init = initializer_class(get_initializer_args()[0],
**get_initializer_args()[1])
else:
init = Initializer(
get_initializer_args()[0],
**get_initializer_args()[1]
)
return init
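# Hedged usage sketch of update_initializer; the XavierFill/ConstantFill
# choices are illustrative, not mandated by this module:
#
#   init = update_initializer(
#       None,                              # no custom Initializer class
#       ('XavierFill', {}),                # caller-supplied operator
#       ('ConstantFill', {'value': 0.0}),  # fallback default
#   )
#   # init is an Initializer that runs XavierFill in param_init_net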
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.proto import caffe2_pb2
from caffe2.python.optimizer import get_param_device
from caffe2.python.modeling.net_modifier import NetModifier
import logging
logger = logging.getLogger(__name__)
class GradientClipping(NetModifier):
L1_NORM = 'l1_norm'
L2_NORM = 'l2_norm'
BY_NORM = 'by_norm'
GRAD_CLIP_METHODS = [BY_NORM]
CLIP_GRADIENT_NORM_TYPES = [L2_NORM, L1_NORM]
def __init__(self, grad_clip_method, clip_norm_type, clip_threshold,
use_parameter_norm=False, compute_norm_ratio=False):
"""
        Clips gradients to avoid exploding or vanishing gradient magnitudes.
Args:
grad_clip_method: ways to clip the gradients
clip_norm_type: type of norm used in the necessary computation
clip_threshold: threshold used to determine whether to clip
use_parameter_norm: a boolean to indicate whether to incorporate
the norm of the parameter
compute_norm_ratio: a boolean to compute the ratio between gradient norm
and parameter norm explicitly for debugging purpose
"""
assert grad_clip_method in self.GRAD_CLIP_METHODS, (
"This method of clipping, {}, has not been implemented.".format(
                grad_clip_method))
assert clip_norm_type in self.CLIP_GRADIENT_NORM_TYPES, (
"This method of clipping, {}, has not been implemented.".format(
clip_norm_type))
self.grad_clip_method = grad_clip_method
self.clip_norm_type = clip_norm_type
self.clip_threshold = float(clip_threshold)
self.use_parameter_norm = use_parameter_norm
self.compute_norm_ratio = compute_norm_ratio
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None):
assert grad_map is not None
CPU = core.DeviceOption(caffe2_pb2.CPU)
for param, grad in grad_map.items():
            # Sparse gradients are not clipped for now;
            # further implementation is needed to enable this.
if isinstance(grad, core.GradientSlice):
continue
device = get_param_device(
param,
grad_map[str(param)],
param_to_device=blob_to_device,
default_device=CPU,
)
with core.DeviceScope(device):
if self.grad_clip_method == self.BY_NORM:
if self.clip_norm_type == self.L2_NORM:
p = 2
elif self.clip_norm_type == self.L1_NORM:
p = 1
grad_norm = net.LpNorm(
[grad],
net.NextScopedBlob(prefix=str(grad) + '_l{}_norm'.format(p)),
p=p,
)
if p == 2:
grad_norm = net.Pow([grad_norm], exponent=0.5)
op_inputs = [grad, grad_norm]
if self.use_parameter_norm:
param_norm = net.LpNorm(
[param],
net.NextScopedBlob(
prefix=str(param) + '_l{}_norm'.format(p)),
p=p,
)
if p == 2:
param_norm = net.Pow([param_norm], exponent=0.5)
op_inputs.append(param_norm)
if self.compute_norm_ratio:
net.Div(
[grad_norm, param_norm],
[net.NextScopedBlob(
prefix=str(param) + '_norm_ratio')]
)
net.ClipTensorByScaling(
op_inputs,
[grad],
threshold=self.clip_threshold,
)
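# A minimal usage sketch (not part of the original module). It assumes `model`
# is a ModelHelper whose net already computes a blob named 'loss'; all names
# here are illustrative.
def _example_gradient_clipping(model):
    grad_map = model.AddGradientOperators(['loss'])
    clipper = GradientClipping(
        grad_clip_method=GradientClipping.BY_NORM,
        clip_norm_type=GradientClipping.L2_NORM,
        clip_threshold=1.0,
    )
    # NetModifier instances are callable; this appends the clipping ops
    # to model.net in place.
    clipper(model.net, grad_map=grad_map)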
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import six
class NetModifier(six.with_metaclass(abc.ABCMeta, object)):
"""
An abstraction class for supporting modifying a generated net.
Inherited classes should implement the modify_net method where
related operators are added to the net.
Example usage:
modifier = SomeNetModifier(opts)
modifier(net)
"""
def __init__(self):
pass
@abc.abstractmethod
def modify_net(self, net, init_net=None, grad_map=None, blob_to_device=None):
pass
def __call__(self, net, init_net=None, grad_map=None, blob_to_device=None):
self.modify_net(
net,
init_net=init_net,
grad_map=grad_map,
blob_to_device=blob_to_device)
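# A minimal concrete sketch (illustrative, not part of the original module):
# a modifier that does nothing but count how many times it is applied. A real
# subclass would append operators to `net` inside modify_net.
class _ExampleCountingModifier(NetModifier):
    def __init__(self):
        super(_ExampleCountingModifier, self).__init__()
        self.num_calls = 0
    def modify_net(self, net, init_net=None, grad_map=None,
                   blob_to_device=None):
        self.num_calls += 1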
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_statistics_for_blobs import (
ComputeStatisticsForBlobs
)
import numpy as np
class ComputeStatisticsForBlobsTest(unittest.TestCase):
def test_compute_statistics_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeStatisticsForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_summary = workspace.FetchBlob('fc1_w_summary')
# std is unbiased here
stats_ref = np.array([fc1_w.flatten().min(), fc1_w.flatten().max(),
fc1_w.flatten().mean(), fc1_w.flatten().std(ddof=1)])
self.assertAlmostEqual(np.linalg.norm(stats_ref - fc1_w_summary), 0,
delta=1e-5)
self.assertEqual(fc1_w_summary.size, 4)
self.assertEqual(len(model.net.Proto().op), 8)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_histogram_for_blobs import (
ComputeHistogramForBlobs
)
import numpy as np
class ComputeHistogramForBlobsTest(unittest.TestCase):
def histogram(self, X, lower_bound=0.0, upper_bound=1.0, num_buckets=20):
        assert X.ndim == 2, ('this test assumes a 2D array, but X.ndim is {0}'.
                             format(X.ndim))
N, M = X.shape
hist = np.zeros((num_buckets + 2, ), dtype=np.int32)
segment = (upper_bound - lower_bound) / num_buckets
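        # Bucket 0 holds values below lower_bound, bucket num_buckets + 1 holds
        # values at or above upper_bound; buckets 1..num_buckets evenly
        # partition [lower_bound, upper_bound).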
Y = np.zeros((N, M), dtype=np.int32)
Y[X < lower_bound] = 0
Y[X >= upper_bound] = num_buckets + 1
Y[(X >= lower_bound) & (X < upper_bound)] = \
((X[(X >= lower_bound) & (X < upper_bound)] - lower_bound) /
segment + 1).astype(np.int32)
for i in range(Y.shape[0]):
for j in range(Y.shape[1]):
hist[Y[i][j]] += 1
cur_hist = hist.astype(np.float32) / (N * M)
acc_hist = cur_hist
return [cur_hist, acc_hist]
def test_compute_histogram_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
num_buckets = 20
lower_bound = 0.2
upper_bound = 0.8
accumulate = False
net_modifier = ComputeHistogramForBlobs(blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
num_buckets=num_buckets,
lower_bound=lower_bound,
upper_bound=upper_bound,
accumulate=accumulate)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_curr_normalized_hist = workspace.FetchBlob('fc1_w_curr_normalized_hist')
cur_hist, acc_hist = self.histogram(fc1_w,
lower_bound=lower_bound,
upper_bound=upper_bound,
num_buckets=num_buckets)
self.assertEqual(fc1_w_curr_normalized_hist.size, num_buckets + 2)
self.assertAlmostEqual(np.linalg.norm(
fc1_w_curr_normalized_hist - cur_hist), 0.0, delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 12)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import scope
import contextlib
import logging
logger = logging.getLogger(__name__)
class ParameterSharingContext(object):
"""
    This class manages a scope-driven way of sharing parameters across
    different NameScopes.
"""
def __init__(self):
self._scope_overrides = {}
self._contexts = []
def _resolve_scope_overrides(self, candidate_scope):
"""
        Recursively resolves all scope overrides, i.e. multiple steps of
override can be used.
For example, if one provides following scope overrides:
{'scope_b': 'scope_a'} and within 'scope_b' - {'shared_child': ''},
then name 'w' will get resolved to the following blobs depending on the
namescope:
a. 'scope_a' -> 'scope_a/w'
b. 'scope_b' -> 'scope_a/w'
c. 'scope_c' -> 'scope_c/w'
        d. 'scope_b/shared_child' -> 'scope_a/w'
        e. 'scope_b/unshared_child' -> 'scope_a/unshared_child/w'
"""
best_scope = candidate_scope
best_scope_idx = 0
sub_scopes = candidate_scope.split(scope._NAMESCOPE_SEPARATOR)
cur_scope = ''
for idx, sub_scope in enumerate(sub_scopes):
cur_scope = cur_scope + sub_scope + scope._NAMESCOPE_SEPARATOR
if cur_scope in self._scope_overrides:
best_scope = self._scope_overrides[cur_scope]
best_scope_idx = idx
if best_scope == candidate_scope:
return candidate_scope
else:
return (self._resolve_scope_overrides(best_scope) +
scope._NAMESCOPE_SEPARATOR.join(
sub_scopes[best_scope_idx + 1:]))
def get_parameter_name(self, name):
candidate_scope = scope.CurrentNameScope()
best_scope = self._resolve_scope_overrides(candidate_scope)
if best_scope != candidate_scope:
logger.info("Overwiting scope {0} with scope {1}".format(
candidate_scope, best_scope))
return best_scope + name
def add_scope_overrides(self, shared_scopes):
self._contexts.append(shared_scopes)
self._scope_overrides.update(shared_scopes)
def pop(self):
assert len(self._contexts) > 0
self._contexts.pop()
self._scope_overrides = {}
for x in self._contexts:
self._scope_overrides.update(x)
parameter_sharing_context = ParameterSharingContext()
def _normalize_namescope(namescope):
if namescope and namescope[-1] != scope._NAMESCOPE_SEPARATOR:
return namescope + scope._NAMESCOPE_SEPARATOR
else:
return namescope
@contextlib.contextmanager
def ParameterSharing(shared_scopes):
"""
Helper function for sharing scopes.
    All the parameters within shared_scopes will be remapped with respect
    to CurrentNameScope().
    I.e. if one calls ParameterSharing with {'scope_b': 'scope_a'} from the
    scope 'some_global_scope', it effectively means that all parameters from
    'some_global_scope/scope_b' will be shared with the parameters from
    'some_global_scope/scope_a'.
"""
assert isinstance(shared_scopes, dict)
shared_scope_overrides = {}
current_scope = scope.CurrentNameScope()
for k, v in shared_scopes.items():
assert not v.startswith(k), (
"Illegal override for parameter sharing. {} is prefix of {}".
format(k, v))
k = current_scope + k
v = current_scope + v
# Normalize all the scopes, so scope_a and scope_a/ are equivalent
k = _normalize_namescope(k)
v = _normalize_namescope(v)
shared_scope_overrides[k] = v
try:
parameter_sharing_context.add_scope_overrides(shared_scope_overrides)
yield
finally:
parameter_sharing_context.pop()
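# A minimal usage sketch (not part of the original module); the scope and blob
# names are illustrative. Inside 'scope_b', parameter names resolve to
# 'scope_a' because of the override installed by ParameterSharing.
def _example_parameter_sharing():
    with scope.NameScope('global'):
        with ParameterSharing({'scope_b': 'scope_a'}):
            with scope.NameScope('scope_b'):
                # Resolves to 'global/scope_a/w' rather than 'global/scope_b/w'.
                return parameter_sharing_context.get_parameter_name('w')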
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
import numpy as np
class ParameterTags(object):
BIAS = 'BIAS'
WEIGHT = 'WEIGHT'
COMPUTED_PARAM = 'COMPUTED_PARAM'
class ParameterType(object):
DENSE = 'dense'
SPARSE = 'sparse'
class ParameterInfo(object):
def __init__(
self, param_id, param, key=None, shape=None, length=None,
grad=None, blob_copy=None):
assert isinstance(param, core.BlobReference)
self.param_id = param_id
self.name = str(param)
self.blob = param
self.key = key
self.shape = shape
self.size = None if shape is None else np.prod(shape)
self.length = max(1, length if length is not None else 1)
self.grad = grad
self._cloned_init_net = None
# Optionally store equivalent copies of the blob
# in different precisions (i.e. half and float copies)
# stored as a dict of TensorProto.DataType -> BlobReference
self.blob_copy = blob_copy
# each param_info can have its own optimizer. It can be set within
# OptimizerContext (caffe2/python/optimizer.py)
self._optimizer = None
def grad_type(self):
# self.grad could be None for model parallelism with parameter server
if self.grad is None:
return
return (
ParameterType.SPARSE if isinstance(self.grad, core.GradientSlice)
else ParameterType.DENSE)
@property
def parameter(self):
return self.blob
@property
def optimizer(self):
return self._optimizer
@optimizer.setter
def optimizer(self, value):
assert self._optimizer is None, "optimizer has already been set"
self._optimizer = value
def __str__(self):
return self.name
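# A minimal sketch (not part of the original module); the blob name 'fc_w' is
# illustrative. ParameterInfo bundles a BlobReference with bookkeeping such as
# shape, size, and an optionally attached optimizer.
def _example_parameter_info():
    info = ParameterInfo(
        param_id=0,
        param=core.BlobReference('fc_w'),
        shape=(4, 2),
    )
    # size is derived from shape; grad_type() is None while grad is unset.
    assert info.size == 8
    assert info.grad_type() is None
    return info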
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python import workspace, brew, model_helper
from caffe2.python.modeling.compute_norm_for_blobs import ComputeNormForBlobs
import numpy as np
class ComputeNormForBlobsTest(unittest.TestCase):
def test_compute_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm')
self.assertEqual(fc1_w_l2_norm.size, 1)
self.assertAlmostEqual(fc1_w_l2_norm[0],
np.linalg.norm(fc1_w)**2,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 6)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
def test_compute_averaged_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
compute_averaged_norm=True,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l2_averaged_norm = workspace.FetchBlob('fc1_w_averaged_l2_norm')
self.assertEqual(fc1_w_l2_averaged_norm.size, 1)
self.assertAlmostEqual(fc1_w_l2_averaged_norm[0],
np.linalg.norm(fc1_w)**2 / fc1_w.size,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 6)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
def test_compute_norm_for_blobs_no_print(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=-1,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l2_norm = workspace.FetchBlob('fc1_w_l2_norm')
self.assertEqual(fc1_w_l2_norm.size, 1)
self.assertAlmostEqual(fc1_w_l2_norm[0],
np.linalg.norm(fc1_w)**2,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 4)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
def test_compute_l1_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
p=1,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_l1_norm = workspace.FetchBlob('fc1_w_l1_norm')
self.assertEqual(fc1_w_l1_norm.size, 1)
self.assertAlmostEqual(fc1_w_l1_norm[0],
np.sum(np.abs(fc1_w)),
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 6)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
def test_compute_l1_averaged_norm_for_blobs(self):
model = model_helper.ModelHelper(name="test")
data = model.net.AddExternalInput("data")
fc1 = brew.fc(model, data, "fc1", dim_in=4, dim_out=2)
# no operator name set, will use default
brew.fc(model, fc1, "fc2", dim_in=2, dim_out=1)
net_modifier = ComputeNormForBlobs(
blobs=['fc1_w', 'fc2_w'],
logging_frequency=10,
p=1,
compute_averaged_norm=True,
)
net_modifier(model.net)
workspace.FeedBlob('data', np.random.rand(10, 4).astype(np.float32))
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
fc1_w = workspace.FetchBlob('fc1_w')
fc1_w_averaged_l1_norm = workspace.FetchBlob('fc1_w_averaged_l1_norm')
self.assertEqual(fc1_w_averaged_l1_norm.size, 1)
self.assertAlmostEqual(fc1_w_averaged_l1_norm[0],
np.sum(np.abs(fc1_w)) / fc1_w.size,
delta=1e-5)
self.assertEqual(len(model.net.Proto().op), 6)
assert 'fc1_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
assert 'fc2_w' + net_modifier.field_name_suffix() in\
model.net.output_record().field_blobs(),\
model.net.output_record().field_blobs()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import copy
from caffe2.proto import caffe2_pb2
from caffe2.python import core
def rewrite_init_net_simple(net):
for op in net.op:
op.device_option.device_type = caffe2_pb2.MKLDNN
def last_producer(ops, blob):
for (i, op) in reversed(list(enumerate(ops))):
if blob in op.output:
return i
raise ValueError("Failed to find last producer of blob, %s", blob)
def rewrite_run_net_simple(net):
    # Simple rewrite for now - assume the entire graph can be executed
    # with MKL, so insert a copy op for external_input[0] and copy ops
    # for every external output.
def mkl_tmp(name):
return "{}__MKL__".format(name)
input_blob = net.external_input[0]
if input_blob != net.op[0].input[0]:
raise Exception(
"Input blob: {} is not consumed by first op: {}".format(
input_blob, net.op[0]))
# Modify input/outputs to point to copied MKL blobs.
copy_input_op = core.CreateOperator(
"CopyCPUToMKL", input_blob, mkl_tmp(input_blob))
net.op[0].input[0] = mkl_tmp(input_blob)
copy_output_ops = [
core.CreateOperator("CopyMKLToCPU", mkl_tmp(output_blob), output_blob)
for output_blob in net.external_output]
for output_blob in net.external_output:
last_producer_idx = last_producer(net.op, output_blob)
renamed_outputs = [blob if blob != output_blob else mkl_tmp(blob)
for blob in net.op[last_producer_idx].output]
net.op[last_producer_idx].output[:] = renamed_outputs
# Rename any subsequent consumers of an output blob.
for op in net.op[last_producer_idx + 1:]:
renamed_input = [blob if blob != output_blob else mkl_tmp(blob)
for blob in op.input]
op.input[:] = renamed_input
ops = [copy_input_op] + net.op[:] + copy_output_ops
del net.op[:]
net.op.extend(ops)
for op in net.op:
op.device_option.MergeFrom(
core.DeviceOption(device_type=caffe2_pb2.MKLDNN))
op.engine = ""
def rewrite_model_helper_simple(model):
model = copy.deepcopy(model)
# All parameter initialization should run on MKL
rewrite_init_net_simple(model.param_init_net.Proto())
rewrite_run_net_simple(model.net.Proto())
return model
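# A minimal usage sketch (not part of the original module). It assumes `model`
# is a ModelHelper whose entire graph is MKL-compatible, as the simple rewrite
# above requires; the original model is left untouched.
def _example_rewrite(model):
    mkl_model = rewrite_model_helper_simple(model)
    # mkl_model is a deep copy whose init and run nets target MKLDNN, with
    # CPU<->MKL copy ops inserted at the external input/output boundary.
    return mkl_model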
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLConcatTest(hu.HypothesisTestCase):
@given(
batch_size=st.integers(1, 10),
channel_splits=st.lists(st.integers(1, 10), min_size=1, max_size=3),
height=st.integers(1, 10),
width=st.integers(1, 10),
**mu.gcs
)
def test_mkl_concat(
self, batch_size, channel_splits, height, width, gc, dc
):
Xs = [
np.random.rand(batch_size, channel,
height, width).astype(np.float32)
for channel in channel_splits
]
op = core.CreateOperator(
"Concat",
["X_{}".format(i) for i in range(len(Xs))],
["concat_result", "split_info"],
order="NCHW",
)
self.assertDeviceChecks(dc, op, Xs, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLReluTest(hu.HypothesisTestCase):
@given(size=st.integers(8, 20),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_relu(self, size, input_channels, batch_size, inplace, gc, dc):
op = core.CreateOperator(
"Relu",
["X"],
["Y"] if not inplace else ["X"],
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
import copy
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
from caffe2.python import workspace, brew
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl.rewrite_graph as rewrite_graph
def deterministic_io(model):
model = copy.deepcopy(model)
for i, op in enumerate(model.InitProto().op):
op.device_option.random_seed = i + 1
if not model.Proto().external_output:
model.Proto().external_output.extend([model.Proto().op[-1].output[0]])
return model
def simple_fc():
model = ModelHelper(name="r")
brew.fc(model, "data", "fc", 10, 10)
return model, [(1, 10)]
def double_matmul():
model = ModelHelper(name="r")
fc0 = brew.fc(model, "data", "fc0", 10, 10)
fc1 = brew.fc(model, fc0, "fc1", 10, 10)
model.Proto().external_output[:] = [str(fc0), str(fc1)]
return model, [(1, 10)]
def simple_relu():
model = ModelHelper(name="r")
brew.relu(model, "data", "fc")
return model, [(1, 10)]
def simple_mlp():
model = ModelHelper(name="r")
brew.relu(
model,
brew.fc(
model,
brew.relu(
model,
brew.fc(
model,
"data",
"fc1",
10,
10),
"rl1"),
"fc2",
10,
10),
"rl2")
return model, [(1, 10)]
def simple_cnn():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
brew.conv(
model, "data", 'conv1', 3, 16, kernel=3, stride=1
)
brew.spatial_bn(
model, 'conv1', 'conv1_spatbn', 16, epsilon=1e-3
)
brew.relu(model, 'conv1_spatbn', 'relu1')
return model, [(1, 3, 32, 32)]
def alexnet():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
conv1 = brew.conv(
model,
"data",
"conv1",
3,
64,
11, ('XavierFill', {}), ('ConstantFill', {}),
stride=4,
pad=2
)
relu1 = brew.relu(model, conv1, "conv1")
pool1 = brew.max_pool(model, relu1, "pool1", kernel=3, stride=2, pad=0,
legacy_pad=3)
lrn1 = brew.lrn(
model, pool1, "pool1_lrn", size=5, alpha=1.0e-4, beta=0.75, bias=1.0)
conv2 = brew.conv(
model,
lrn1,
"conv2",
64,
192,
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
relu2 = brew.relu(model, conv2, "conv2")
pool2 = brew.max_pool(model, relu2, "pool2", kernel=3, stride=2)
lrn2 = brew.lrn(
model, pool2, "pool2_lrn", size=5, alpha=1.0e-4, beta=0.75, bias=1.0)
conv3 = brew.conv(
model,
lrn2,
"conv3",
192,
384,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = brew.relu(model, conv3, "conv3")
conv4 = brew.conv(
model,
relu3,
"conv4",
384,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = brew.relu(model, conv4, "conv4")
conv5 = brew.conv(
model,
relu4,
"conv5",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = brew.relu(model, conv5, "conv5")
pool5 = brew.max_pool(model, relu5, "pool5", kernel=3, stride=2)
fc6 = brew.fc(
model,
pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = brew.relu(model, fc6, "fc6")
fc7 = brew.fc(
model, relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = brew.relu(model, fc7, "fc7")
drop7 = brew.dropout(model, relu7, "fc7_dropout", is_test=1, ratio=0.5)
fc8 = brew.fc(
model, drop7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
relu8 = brew.relu(model, fc8, "fc8")
_ = brew.dropout(model, relu8, "fc8_dropout", is_test=1, ratio=0.5)
return model, [(1, 3, 224, 224)]
def simple_resnet():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
resnet.create_resnet_32x32(
model, "data", num_input_channels=1, num_groups=1, num_labels=5,
is_test=True)
return model, [(1, 1, 32, 32)]
def complex_resnet():
model = ModelHelper(name="r", arg_scope={"order": "NCHW", "is_test": True})
resnet.create_resnet50(
model, "data", num_input_channels=1, num_labels=5, is_test=True,
no_loss=True)
return model, [(1, 1, 224, 224)]
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLRewriteTest(hu.HypothesisTestCase):
@given(gen=st.sampled_from([simple_relu, simple_fc,
simple_mlp, simple_cnn]))
def test_mkl_simple_rewrite(self, gen):
cpu_model, (shape,) = gen()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
X = np.random.randn(*shape).astype(np.float32)
def run(model):
self.ws.run(model.InitProto())
self.ws.create_blob(model.Proto().external_input[0]).feed(X)
self.ws.run(model.Proto())
return self.ws.blobs[model.Proto().external_output[0]].fetch()
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
def test_mkl_resnet_rewrite(self):
cpu_model, (shape,) = complex_resnet()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
np.random.seed(1701)
X = np.random.randn(*shape).astype(np.float32)
def run(model):
self.ws.run(model.InitProto())
self.ws.create_blob(model.Proto().external_input[0]).feed(X)
self.ws.run(model.Proto())
return self.ws.blobs[model.Proto().external_output[0]].fetch()
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
def test_mkl_multi_output_rewrite(self):
cpu_model, shapes = double_matmul()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
np.random.seed(1701)
Xs = [np.random.randn(*shape).astype(np.float32) for shape in shapes]
def run(model):
self.ws.run(model.InitProto())
for (name, X) in zip(model.Proto().external_input, Xs):
self.ws.create_blob(name).feed(X)
print(model.Proto())
self.ws.run(model.Proto())
return [self.ws.blobs[name].fetch()
for name in model.Proto().external_output]
run(mkl_model)
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
def test_mkl_alexnet_rewrite(self):
cpu_model, (shape,) = alexnet()
cpu_model = deterministic_io(cpu_model)
mkl_model = rewrite_graph.rewrite_model_helper_simple(cpu_model)
np.random.seed(1701)
X = np.random.randn(*shape).astype(np.float32)
def run(model):
self.ws.run(model.InitProto())
self.ws.create_blob(model.Proto().external_input[0]).feed(X)
self.ws.run(model.Proto())
return self.ws.blobs[model.Proto().external_output[0]].fetch()
np.testing.assert_allclose(run(cpu_model), run(mkl_model),
atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
import unittest
unittest.main()
|
## @package convnet_benchmarks
# Module caffe2.python.convnet_benchmarks
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
"""
Benchmark for common convnets.
Speed on Titan X, with 10 warmup steps and 10 main steps and with different
versions of cudnn, is as follows (time reported below is per-batch time,
forward / forward+backward):
CuDNN V3 CuDNN v4
AlexNet 32.5 / 108.0 27.4 / 90.1
OverFeat 113.0 / 342.3 91.7 / 276.5
Inception 134.5 / 485.8 125.7 / 450.6
VGG (batch 64) 200.8 / 650.0 164.1 / 551.7
Speed on Inception with varied batch sizes and CuDNN v4 is as follows:
Batch Size Speed per batch Speed per image
16 22.8 / 72.7 1.43 / 4.54
32 38.0 / 127.5 1.19 / 3.98
64 67.2 / 233.6 1.05 / 3.65
128 125.7 / 450.6 0.98 / 3.52
Speed on Tesla M40, with 10 warmup steps and 10 main steps and with cudnn
v4, is as follows:
AlexNet 68.4 / 218.1
OverFeat 210.5 / 630.3
Inception 300.2 / 1122.2
VGG (batch 64) 405.8 / 1327.7
(Note that these numbers involve a "full" backprop, i.e. the gradient
with respect to the input image is also computed.)
To get the numbers, simply run:
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL --forward_only True
done
for MODEL in AlexNet OverFeat Inception; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 128 --model $MODEL
done
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size 64 --model VGGA
for BS in 16 32 64 128; do
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception --forward_only True
PYTHONPATH=../gen:$PYTHONPATH python convnet_benchmarks.py \
--batch_size $BS --model Inception
done
Note that VGG needs to be run at batch 64 due to memory limit on the backward
pass.
"""
import argparse
from caffe2.python import brew, cnn, workspace
from caffe2.python.model_helper import ModelHelper
from caffe2.python.models import resnet
import numpy as np
def MLP(order, cudnn_ws, mkl):
model = ModelHelper(name="benchmark")
d = 256
depth = 20
width = 3
for i in range(depth):
for j in range(width):
current = "fc_{}_{}".format(i, j) if i > 0 else "data"
next_ = "fc_{}_{}".format(i + 1, j)
brew.fc(
model,
current, next_,
dim_in=d, dim_out=d,
weight_init=('XavierFill', {}),
bias_init=('XavierFill', {}))
brew.sum(model, ["fc_{}_{}".format(depth, j) for j in range(width)], ["sum"])
brew.fc(model, "sum", "last",
dim_in=d, dim_out=1000,
weight_init=('XavierFill', {}),
bias_init=('XavierFill', {}))
xent = model.LabelCrossEntropy(["last", "label"], "xent")
if not mkl:
model.AveragedLoss(xent, "loss")
return model, d
def ResNet50(order, cudnn_ws, mkl):
my_arg_scope = {'order': order, 'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': str(cudnn_ws)}
model = ModelHelper(name="alexnet", arg_scope=my_arg_scope)
resnet.create_resnet50(model, "data", 3, 1000, is_test=True,
final_avg_kernel=14)
return model, 448
def AlexNet(order, cudnn_ws, mkl):
my_arg_scope = {'order': order, 'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': str(cudnn_ws)}
model = ModelHelper(name="alexnet", arg_scope=my_arg_scope)
conv1 = brew.conv(
model,
"data",
"conv1",
3,
64,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4,
pad=2
)
relu1 = brew.relu(model, conv1, "conv1")
pool1 = brew.max_pool(model, relu1, "pool1", kernel=3, stride=2)
conv2 = brew.conv(
model,
pool1,
"conv2",
64,
192,
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
relu2 = brew.relu(model, conv2, "conv2")
pool2 = brew.max_pool(model, relu2, "pool2", kernel=3, stride=2)
conv3 = brew.conv(
model,
pool2,
"conv3",
192,
384,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = brew.relu(model, conv3, "conv3")
conv4 = brew.conv(
model,
relu3,
"conv4",
384,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = brew.relu(model, conv4, "conv4")
conv5 = brew.conv(
model,
relu4,
"conv5",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = brew.relu(model, conv5, "conv5")
pool5 = brew.max_pool(model, relu5, "pool5", kernel=3, stride=2)
fc6 = brew.fc(
model, pool5, "fc6", 256 * 6 * 6, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = brew.relu(model, fc6, "fc6")
fc7 = brew.fc(
model, relu6, "fc7", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = brew.relu(model, fc7, "fc7")
fc8 = brew.fc(
model, relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = brew.softmax(model, fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
if not mkl:
loss = model.AveragedLoss(xent, "loss")
return model, 224
def OverFeat(order, cudnn_ws, mkl):
my_arg_scope = {'order': order, 'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': str(cudnn_ws)}
model = ModelHelper(name='overfeat', arg_scope=my_arg_scope)
conv1 = brew.conv(
model,
"data",
"conv1",
3,
96,
11,
('XavierFill', {}),
('ConstantFill', {}),
stride=4
)
relu1 = brew.relu(model, conv1, "conv1")
pool1 = brew.max_pool(model, relu1, "pool1", kernel=2, stride=2)
conv2 = brew.conv(
model, pool1, "conv2", 96, 256, 5, ('XavierFill', {}), ('ConstantFill', {})
)
relu2 = brew.relu(model, conv2, "conv2")
pool2 = brew.max_pool(model, relu2, "pool2", kernel=2, stride=2)
conv3 = brew.conv(
model,
pool2,
"conv3",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = brew.relu(model, conv3, "conv3")
conv4 = brew.conv(
model,
relu3,
"conv4",
512,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = brew.relu(model, conv4, "conv4")
conv5 = brew.conv(
model,
relu4,
"conv5",
1024,
1024,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = brew.relu(model, conv5, "conv5")
pool5 = brew.max_pool(model, relu5, "pool5", kernel=2, stride=2)
fc6 = brew.fc(
model, pool5, "fc6", 1024 * 6 * 6, 3072, ('XavierFill', {}),
('ConstantFill', {})
)
relu6 = brew.relu(model, fc6, "fc6")
fc7 = brew.fc(
model, relu6, "fc7", 3072, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relu7 = brew.relu(model, fc7, "fc7")
fc8 = brew.fc(
model, relu7, "fc8", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = brew.softmax(model, fc8, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
if not mkl:
loss = model.AveragedLoss(xent, "loss")
return model, 231
def VGGA(order, cudnn_ws, mkl):
my_arg_scope = {'order': order, 'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': str(cudnn_ws)}
model = ModelHelper(name='vgg-a', arg_scope=my_arg_scope)
conv1 = brew.conv(
model,
"data",
"conv1",
3,
64,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu1 = brew.relu(model, conv1, "conv1")
pool1 = brew.max_pool(model, relu1, "pool1", kernel=2, stride=2)
conv2 = brew.conv(
model,
pool1,
"conv2",
64,
128,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = brew.relu(model, conv2, "conv2")
pool2 = brew.max_pool(model, relu2, "pool2", kernel=2, stride=2)
conv3 = brew.conv(
model,
pool2,
"conv3",
128,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu3 = brew.relu(model, conv3, "conv3")
conv4 = brew.conv(
model,
relu3,
"conv4",
256,
256,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu4 = brew.relu(model, conv4, "conv4")
pool4 = brew.max_pool(model, relu4, "pool4", kernel=2, stride=2)
conv5 = brew.conv(
model,
pool4,
"conv5",
256,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu5 = brew.relu(model, conv5, "conv5")
conv6 = brew.conv(
model,
relu5,
"conv6",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu6 = brew.relu(model, conv6, "conv6")
pool6 = brew.max_pool(model, relu6, "pool6", kernel=2, stride=2)
conv7 = brew.conv(
model,
pool6,
"conv7",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu7 = brew.relu(model, conv7, "conv7")
conv8 = brew.conv(
model,
relu7,
"conv8",
512,
512,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu8 = brew.relu(model, conv8, "conv8")
pool8 = brew.max_pool(model, relu8, "pool8", kernel=2, stride=2)
fcix = brew.fc(
model, pool8, "fcix", 512 * 7 * 7, 4096, ('XavierFill', {}),
('ConstantFill', {})
)
reluix = brew.relu(model, fcix, "fcix")
fcx = brew.fc(
model, reluix, "fcx", 4096, 4096, ('XavierFill', {}), ('ConstantFill', {})
)
relux = brew.relu(model, fcx, "fcx")
fcxi = brew.fc(
model, relux, "fcxi", 4096, 1000, ('XavierFill', {}), ('ConstantFill', {})
)
pred = brew.softmax(model, fcxi, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
if not mkl:
loss = model.AveragedLoss(xent, "loss")
return model, 231
def _InceptionModule(
model, input_blob, input_depth, output_name, conv1_depth, conv3_depths,
conv5_depths, pool_depth
):
# path 1: 1x1 conv
conv1 = brew.conv(
model, input_blob, output_name + ":conv1", input_depth, conv1_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
conv1 = brew.relu(model, conv1, conv1)
# path 2: 1x1 conv + 3x3 conv
conv3_reduce = brew.conv(
model, input_blob, output_name + ":conv3_reduce", input_depth,
conv3_depths[0], 1, ('XavierFill', {}), ('ConstantFill', {})
)
conv3_reduce = brew.relu(model, conv3_reduce, conv3_reduce)
conv3 = brew.conv(
model,
conv3_reduce,
output_name + ":conv3",
conv3_depths[0],
conv3_depths[1],
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
conv3 = brew.relu(model, conv3, conv3)
# path 3: 1x1 conv + 5x5 conv
conv5_reduce = brew.conv(
model, input_blob, output_name + ":conv5_reduce", input_depth,
conv5_depths[0], 1, ('XavierFill', {}), ('ConstantFill', {})
)
conv5_reduce = brew.relu(model, conv5_reduce, conv5_reduce)
conv5 = brew.conv(
model,
conv5_reduce,
output_name + ":conv5",
conv5_depths[0],
conv5_depths[1],
5,
('XavierFill', {}),
('ConstantFill', {}),
pad=2
)
conv5 = brew.relu(model, conv5, conv5)
# path 4: pool + 1x1 conv
pool = brew.max_pool(
model,
input_blob,
output_name + ":pool",
kernel=3,
stride=1,
pad=1
)
pool_proj = brew.conv(
model, pool, output_name + ":pool_proj", input_depth, pool_depth, 1,
('XavierFill', {}), ('ConstantFill', {})
)
pool_proj = brew.relu(model, pool_proj, pool_proj)
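    # The concatenated output depth is the sum of the four path depths:
    # conv1_depth + conv3_depths[1] + conv5_depths[1] + pool_depth.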
output = brew.concat(model, [conv1, conv3, conv5, pool_proj], output_name)
return output
def Inception(order, cudnn_ws, mkl):
my_arg_scope = {'order': order, 'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': str(cudnn_ws)}
model = ModelHelper(name="inception", arg_scope=my_arg_scope)
conv1 = brew.conv(
model,
"data",
"conv1",
3,
64,
7,
('XavierFill', {}),
('ConstantFill', {}),
stride=2,
pad=3
)
relu1 = brew.relu(model, conv1, "conv1")
pool1 = brew.max_pool(model, relu1, "pool1", kernel=3, stride=2, pad=1)
conv2a = brew.conv(
model, pool1, "conv2a", 64, 64, 1,
('XavierFill', {}), ('ConstantFill', {})
)
conv2a = brew.relu(model, conv2a, conv2a)
conv2 = brew.conv(
model,
conv2a,
"conv2",
64,
192,
3,
('XavierFill', {}),
('ConstantFill', {}),
pad=1
)
relu2 = brew.relu(model, conv2, "conv2")
pool2 = brew.max_pool(model, relu2, "pool2", kernel=3, stride=2, pad=1)
# Inception modules
inc3 = _InceptionModule(
model, pool2, 192, "inc3", 64, [96, 128], [16, 32], 32
)
inc4 = _InceptionModule(
model, inc3, 256, "inc4", 128, [128, 192], [32, 96], 64
)
pool5 = brew.max_pool(model, inc4, "pool5", kernel=3, stride=2, pad=1)
inc5 = _InceptionModule(
model, pool5, 480, "inc5", 192, [96, 208], [16, 48], 64
)
inc6 = _InceptionModule(
model, inc5, 512, "inc6", 160, [112, 224], [24, 64], 64
)
inc7 = _InceptionModule(
model, inc6, 512, "inc7", 128, [128, 256], [24, 64], 64
)
inc8 = _InceptionModule(
model, inc7, 512, "inc8", 112, [144, 288], [32, 64], 64
)
inc9 = _InceptionModule(
model, inc8, 528, "inc9", 256, [160, 320], [32, 128], 128
)
pool9 = brew.max_pool(model, inc9, "pool9", kernel=3, stride=2, pad=1)
inc10 = _InceptionModule(
model, pool9, 832, "inc10", 256, [160, 320], [32, 128], 128
)
inc11 = _InceptionModule(
model, inc10, 832, "inc11", 384, [192, 384], [48, 128], 128
)
pool11 = brew.average_pool(model, inc11, "pool11", kernel=7, stride=1)
fc = brew.fc(
model, pool11, "fc", 1024, 1000,
('XavierFill', {}), ('ConstantFill', {})
)
# It seems that Soumith's benchmark does not have softmax on top
# for Inception. We will add it anyway so we can have a proper
# backward pass.
pred = brew.softmax(model, fc, "pred")
xent = model.LabelCrossEntropy([pred, "label"], "xent")
if not mkl:
loss = model.AveragedLoss(xent, "loss")
return model, 224
def AddParameterUpdate(model):
""" Simple plain SGD update -- not tuned to actually train the models """
ITER = brew.iter(model, "iter")
LR = model.LearningRate(
ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999)
ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
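    # base_lr is negative, so param <- 1.0 * param + LR * param_grad steps
    # against the gradient (plain SGD via WeightedSum).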
for param in model.params:
param_grad = model.param_to_grad[param]
model.WeightedSum([param, ONE, param_grad, LR], param)
def Benchmark(model_gen, arg):
model, input_size = model_gen(arg.order, arg.cudnn_ws, arg.mkl)
model.Proto().type = arg.net_type
model.Proto().num_workers = arg.num_workers
# In order to be able to run everything without feeding more stuff, let's
# add the data and label blobs to the parameter initialization net as well.
if arg.order == "NCHW":
input_shape = [arg.batch_size, 3, input_size, input_size]
else:
input_shape = [arg.batch_size, input_size, input_size, 3]
if arg.model == "MLP":
input_shape = [arg.batch_size, input_size]
model.param_init_net.GaussianFill(
[],
"data",
shape=input_shape,
mean=0.0,
std=1.0
)
    # MKL doesn't support int labels, so feed them with numpy instead.
if arg.mkl:
label = np.random.randint(low=0, high=1000, size=(arg.batch_size,)).astype(np.int32)
workspace.FeedBlob("label", label)
else:
model.param_init_net.UniformIntFill(
[],
"label",
shape=[arg.batch_size, ],
min=0,
max=999
)
if arg.forward_only:
print('{}: running forward only.'.format(arg.model))
else:
if arg.mkl:
print(
'==WARNING==\n'
'forward-backward not supported yet in MKL, so exiting'
)
print('{}: running forward-backward.'.format(arg.model))
model.AddGradientOperators(["loss"])
AddParameterUpdate(model)
if arg.order == 'NHWC':
print(
'==WARNING==\n'
'NHWC order with CuDNN may not be supported yet, so I might\n'
'exit suddenly.'
)
if not arg.cpu:
if arg.mkl:
model.param_init_net.RunAllOnMKL()
model.net.RunAllOnMKL()
else:
model.param_init_net.RunAllOnGPU()
model.net.RunAllOnGPU()
if arg.engine:
for op in model.net.Proto().op:
op.engine = arg.engine
if arg.dump_model:
# Writes out the pbtxt for benchmarks on e.g. Android
with open(
"{0}_init_batch_{1}.pbtxt".format(arg.model, arg.batch_size), "w"
) as fid:
fid.write(str(model.param_init_net.Proto()))
with open("{0}.pbtxt".format(arg.model, arg.batch_size), "w") as fid:
fid.write(str(model.net.Proto()))
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
workspace.BenchmarkNet(
model.net.Proto().name, arg.warmup_iterations, arg.iterations,
arg.layer_wise_benchmark)
def GetArgumentParser():
parser = argparse.ArgumentParser(description="Caffe2 benchmark.")
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size."
)
parser.add_argument("--model", type=str, help="The model to benchmark.")
parser.add_argument(
"--order",
type=str,
default="NCHW",
help="The order to evaluate."
)
parser.add_argument(
"--cudnn_ws",
type=int,
help="The cudnn workspace size."
)
parser.add_argument(
"--iterations",
type=int,
default=10,
help="Number of iterations to run the network."
)
parser.add_argument(
"--warmup_iterations",
type=int,
default=10,
help="Number of warm-up iterations before benchmarking."
)
parser.add_argument(
"--forward_only",
action='store_true',
help="If set, only run the forward pass."
)
parser.add_argument(
"--layer_wise_benchmark",
action='store_true',
help="If True, run the layer-wise benchmark as well."
)
parser.add_argument(
"--cpu",
action='store_true',
help="If True, run testing on CPU instead of GPU."
)
parser.add_argument(
"--mkl",
action='store_true',
help="If True, run testing on CPU-MKL instead of GPU."
)
parser.add_argument(
"--engine",
type=str,
default="",
help="If set, blindly prefer the given engine(s) for every op.")
parser.add_argument(
"--dump_model",
action='store_true',
help="If True, dump the model prototxts to disk."
)
parser.add_argument("--net_type", type=str, default="simple")
parser.add_argument("--num_workers", type=int, default=2)
parser.add_argument("--use-nvtx", default=False, action='store_true')
parser.add_argument("--htrace_span_log_path", type=str)
return parser
if __name__ == '__main__':
args, extra_args = GetArgumentParser().parse_known_args()
if (
not args.batch_size or not args.model or not args.order
):
GetArgumentParser().print_help()
else:
workspace.GlobalInit(
['caffe2', '--caffe2_log_level=0'] + extra_args +
(['--caffe2_use_nvtx'] if args.use_nvtx else []) +
(['--caffe2_htrace_span_log_path=' + args.htrace_span_log_path]
if args.htrace_span_log_path else []))
model_map = {
'AlexNet': AlexNet,
'OverFeat': OverFeat,
'VGGA': VGGA,
'Inception': Inception,
'ResNet50': ResNet50,
'MLP': MLP,
}
Benchmark(model_map[args.model], args)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLConvTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(8, 20),
input_channels=st.integers(1, 16),
output_channels=st.integers(1, 16),
batch_size=st.integers(1, 3),
use_bias=st.booleans(),
group=st.integers(1, 8),
**mu.gcs)
def test_mkl_convolution(self, stride, pad, kernel, size,
input_channels, output_channels,
batch_size, use_bias, group, gc, dc):
op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
group=group
)
X = np.random.rand(
batch_size, input_channels * group, size, size).astype(np.float32) - 0.5
w = np.random.rand(
output_channels * group, input_channels, kernel, kernel) \
.astype(np.float32) - 0.5
b = np.random.rand(output_channels * group).astype(np.float32) - 0.5
inputs = [X, w, b] if use_bias else [X, w]
self.assertDeviceChecks(dc, op, inputs, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLElementwiseAddTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_elementwise_add(self,
size,
input_channels,
batch_size,
inplace,
gc,
dc):
op = core.CreateOperator(
"Add",
["X0", "X1"],
["X0" if inplace else "Y"],
)
Xs = [np.random.rand(batch_size, input_channels, size, size).astype(
np.float32) for _ in range(2)]
self.assertDeviceChecks(dc, op, Xs, [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLElementwiseSumTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
inputs=st.integers(1, 3),
inplace=st.booleans(),
**mu.gcs)
def test_mkl_elementwise_sum(self,
size,
input_channels,
batch_size,
inputs,
inplace,
gc,
dc):
op = core.CreateOperator(
"Sum",
["X_{}".format(i) for i in range(inputs)],
["X_0" if inplace else "Y"],
)
Xs = [np.random.rand(batch_size, input_channels, size, size).astype(
np.float32) for _ in range(inputs)]
self.assertDeviceChecks(dc, op, Xs, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testReLUSpeed(self):
X = np.random.randn(128, 4096).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run relu.
net.Relu("X", "Y")
net.Relu("X_mkl", "Y_mkl", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-10,
rtol=1e-10)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
# The returned runtime is the time of
# [whole_net, cpu_op, mkl_op]
# so we will assume that the MKL one runs faster than the CPU one.
# Note(Yangqing): in fact, it seems that in optimized mode, this is
# not always guaranteed - MKL runs slower than the Eigen vectorized
# version, so I am turning this assertion off.
#self.assertTrue(runtime[1] >= runtime[2])
print("Relu CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 27, 27).astype(np.float32) - 0.5
W = np.random.rand(192, 256, 3, 3).astype(np.float32) - 0.5
b = np.random.rand(192).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
        # Makes sure that we can run conv.
net.Conv(["X", "W", "b"], "Y", pad=1, stride=1, kernel=3)
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "Y_mkl",
pad=1, stride=1, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("Conv CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testSpatialBNTestingSpeed(self):
input_channel = 10
X = np.random.rand(1, input_channel, 100, 100).astype(np.float32) - 0.5
scale = np.random.rand(input_channel).astype(np.float32) + 0.5
bias = np.random.rand(input_channel).astype(np.float32) - 0.5
mean = np.random.randn(input_channel).astype(np.float32)
var = np.random.rand(input_channel).astype(np.float32) + 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("scale", scale)
workspace.FeedBlob("bias", bias)
workspace.FeedBlob("mean", mean)
workspace.FeedBlob("var", var)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("scale_mkl", scale, device_option=mkl_do)
workspace.FeedBlob("bias_mkl", bias, device_option=mkl_do)
workspace.FeedBlob("mean_mkl", mean, device_option=mkl_do)
workspace.FeedBlob("var_mkl", var, device_option=mkl_do)
net = core.Net("test")
        # Makes sure that we can run SpatialBN in test mode.
        net.SpatialBN(["X", "scale", "bias", "mean", "var"], "Y", order="NCHW",
                      is_test=True,
                      epsilon=1e-5)
        net.SpatialBN(["X_mkl", "scale_mkl", "bias_mkl", "mean_mkl", "var_mkl"],
                      "Y_mkl", order="NCHW",
                      is_test=True,
                      epsilon=1e-5, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testSpatialBNTrainingSpeed(self):
input_channel = 10
X = np.random.rand(1, input_channel, 100, 100).astype(np.float32) - 0.5
scale = np.random.rand(input_channel).astype(np.float32) + 0.5
bias = np.random.rand(input_channel).astype(np.float32) - 0.5
mean = np.random.randn(input_channel).astype(np.float32)
var = np.random.rand(input_channel).astype(np.float32) + 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("scale", scale)
workspace.FeedBlob("bias", bias)
workspace.FeedBlob("mean", mean)
workspace.FeedBlob("var", var)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("scale_mkl", scale, device_option=mkl_do)
workspace.FeedBlob("bias_mkl", bias, device_option=mkl_do)
workspace.FeedBlob("mean_mkl", mean, device_option=mkl_do)
workspace.FeedBlob("var_mkl", var, device_option=mkl_do)
net = core.Net("test")
        # Makes sure that we can run SpatialBN in training mode.
        net.SpatialBN(["X", "scale", "bias", "mean", "var"],
                      ["Y", "mean", "var", "saved_mean", "saved_var"],
                      order="NCHW",
                      is_test=False,
                      epsilon=1e-5)
        net.SpatialBN(["X_mkl", "scale_mkl", "bias_mkl", "mean_mkl", "var_mkl"],
                      ["Y_mkl", "mean_mkl", "var_mkl",
                       "saved_mean_mkl", "saved_var_mkl"],
                      order="NCHW",
                      is_test=False,
                      epsilon=1e-5,
                      device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
np.testing.assert_allclose(
workspace.FetchBlob("mean"),
workspace.FetchBlob("mean_mkl"),
atol=1e-2,
rtol=1e-2)
np.testing.assert_allclose(
workspace.FetchBlob("var"),
workspace.FetchBlob("var_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
import caffe2.proto.caffe2_pb2 as pb2
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKCopyTest(hu.HypothesisTestCase):
@given(width=st.integers(7, 9),
height=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
**mu.gcs)
def test_mkl_copy(self,
width,
height,
input_channels,
batch_size,
gc, dc):
X = np.random.rand(
batch_size, input_channels, width, height).astype(np.float32)
self.ws.create_blob("X").feed(X, pb2.DeviceOption())
self.ws.run(core.CreateOperator(
"CopyCPUToMKL",
["X"],
["X_MKL"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
self.ws.run(core.CreateOperator(
"CopyMKLToCPU",
["X_MKL"],
["X_copy"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
np.testing.assert_array_equal(X, self.ws.blobs["X_copy"].fetch())
@given(n=st.sampled_from([0, 10]))
def test_mkl_zero_copy(self, n):
shape = (0, n)
X = np.zeros(shape=shape).astype(np.float32)
self.ws.create_blob("X").feed(X, pb2.DeviceOption())
self.ws.run(core.CreateOperator(
"CopyCPUToMKL",
["X"],
["X_MKL"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
self.ws.run(core.CreateOperator(
"CopyMKLToCPU",
["X_MKL"],
["X_copy"],
device_option=pb2.DeviceOption(device_type=pb2.MKLDNN)
))
np.testing.assert_equal(shape, self.ws.blobs["X_copy"].fetch().shape)
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLLRNTest(hu.HypothesisTestCase):
@given(input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
im_size=st.integers(1, 10),
order=st.sampled_from(["NCHW"]),
**mu.gcs)
def test_mkl_LRN(self, input_channels,
batch_size, im_size, order,
gc, dc):
op = core.CreateOperator(
"LRN",
["X"],
["Y", "Y_scale"],
size=5,
alpha=0.001,
beta=0.75,
bias=2.0,
order=order,
)
X = np.random.rand(
batch_size, input_channels, im_size, im_size).astype(np.float32)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings, assume
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLPoolTest(hu.HypothesisTestCase):
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(3, 5),
size=st.integers(7, 9),
input_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
method=st.sampled_from(["MaxPool", "AveragePool"]),
**mu.gcs)
@settings(max_examples=2, timeout=100)
def test_mkl_pooling(self, stride, pad, kernel, size,
input_channels, batch_size,
method, gc, dc):
assume(pad < kernel)
op = core.CreateOperator(
method,
["X"],
["Y"],
stride=stride,
pad=pad,
kernel=kernel,
)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFcTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5),
k=st.integers(1, 5), **mu.gcs)
def test_mkl_fc(self, n, m, k, gc, dc):
X = np.random.rand(m, k).astype(np.float32) - 0.5
W = np.random.rand(n, k).astype(np.float32) - 0.5
b = np.random.rand(n).astype(np.float32) - 0.5
op = core.CreateOperator(
'FC',
['X', 'W', 'b'],
["Y"]
)
self.assertDeviceChecks(dc, op, [X, W, b], [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSigmoidTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 5), m=st.integers(1, 5), inplace=st.booleans(),
**mu.gcs)
def test_mkl_sigmoid(self, n, m, inplace, gc, dc):
X = np.random.rand(m, n).astype(np.float32)
op = core.CreateOperator(
"Sigmoid",
["X"],
["Y" if not inplace else "X"]
)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 2, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run LRN.
net.LRN("X", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW")
net.LRN("X_mkl", ["Y_mkl", "Y_Scale_mkl"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("LRN CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvReluLRNSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 3, 224, 224).astype(np.float32) - 0.5
W = np.random.rand(64, 3, 11, 11).astype(np.float32) - 0.5
b = np.random.rand(64).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=11)
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl",
pad=1, stride=1, kernel=11, device_option=mkl_do)
net.Relu("C", "R")
net.Relu("C_mkl", "R_mkl", device_option=mkl_do)
net.LRN("R", ["Y", "Y_Scale"], size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW")
net.LRN("R_mkl", ["Y_mkl", "Y_Scale_mkl"],size=5, alpha=0.001, beta=0.75, bias=2.0, order="NCHW", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testMaxPoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run max pooling.
net.MaxPool("X", "Y", stride=2, kernel=3)
net.MaxPool("X_mkl", "Y_mkl",
stride=2, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("Maxpooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testAveragePoolingSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 64, 224, 224).astype(np.float32)
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run average pooling.
net.AveragePool("X", "Y", stride=2, kernel=3)
net.AveragePool("X_mkl", "Y_mkl",
stride=2, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("Averagepooling CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvReluMaxPoolSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 3, 224, 224).astype(np.float32) - 0.5
W = np.random.rand(64, 3, 11, 11).astype(np.float32) - 0.5
b = np.random.rand(64).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=11)
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl",
pad=1, stride=1, kernel=11, device_option=mkl_do)
net.Relu("C", "R")
net.Relu("C_mkl", "R_mkl", device_option=mkl_do)
net.AveragePool("R", "Y", stride=2, kernel=3)
net.AveragePool("R_mkl", "Y_mkl",
stride=2, kernel=3, device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLSpatialBNTest(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
#order=st.sampled_from(["NCHW", "NHWC"]),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_test_mode(self, size, input_channels,
batch_size, seed, order, epsilon, gc, dc):
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y"],
order=order,
is_test=True,
epsilon=epsilon,
)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
#order=st.sampled_from(["NCHW", "NHWC"]),
order=st.sampled_from(["NCHW"]),
epsilon=st.floats(1e-5, 1e-2),
**mu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
["Y", "running_mean", "running_var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
)
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
# Note: it seems that the running mean and var do not pass the device
# test, suggesting that the semantics are a bit different. Only
# checking the output and saved mean and var at this stage.
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
[0, 3, 4])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import cnn, core, workspace, test_util
@unittest.skipIf(not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn.")
class TestMKLBasic(test_util.TestCase):
def testFCSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 6, 6).astype(np.float32) - 0.5
#X = np.random.rand(32, 256*6*6).astype(np.float32) - 0.5
W = np.random.rand(4096, 9216).astype(np.float32) - 0.5
b = np.random.rand(4096).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
net = core.Net("test")
# Makes sure that we can run FC.
net.FC(["X", "W", "b"], "Y")
net.FC(["X_mkl", "W_mkl", "b_mkl"], "Y_mkl", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
print("FC CPU runtime {}, MKL runtime {}.".format(runtime[1], runtime[2]))
def testConvReluMaxPoolFcSpeed(self):
# We randomly select a shape to test the speed. Intentionally we
# test a batch size of 1 since this may be the most frequent use
# case for MKL during deployment time.
X = np.random.rand(1, 256, 13, 13).astype(np.float32) - 0.5
W = np.random.rand(256, 256, 3, 3).astype(np.float32) - 0.5
b = np.random.rand(256).astype(np.float32) - 0.5
w_fc = np.random.rand(4096, 9216).astype(np.float32) - 0.5
b_fc = np.random.rand(4096).astype(np.float32) - 0.5
mkl_do = core.DeviceOption(caffe2_pb2.MKLDNN)
# Makes sure that feed works.
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.FeedBlob("w_fc", w_fc)
workspace.FeedBlob("b_fc", b_fc)
workspace.FeedBlob("X_mkl", X, device_option=mkl_do)
workspace.FeedBlob("W_mkl", W, device_option=mkl_do)
workspace.FeedBlob("b_mkl", b, device_option=mkl_do)
workspace.FeedBlob("w_fc_mkl", w_fc, device_option=mkl_do)
workspace.FeedBlob("b_fc_mkl", b_fc, device_option=mkl_do)
net = core.Net("test")
net.Conv(["X", "W", "b"], "C", pad=1, stride=1, kernel=3)
net.Relu("C", "R")
net.MaxPool("R", "P", stride=2, kernel=3)
net.FC(["P","w_fc", "b_fc"], "Y")
net.Conv(["X_mkl", "W_mkl", "b_mkl"], "C_mkl",
pad=1, stride=1, kernel=3, device_option=mkl_do)
net.Relu("C_mkl", "R_mkl", device_option=mkl_do)
net.MaxPool("R_mkl", "P_mkl",
stride=2, kernel=3, device_option=mkl_do)
net.FC(["P_mkl","w_fc_mkl", "b_fc_mkl"], "Y_mkl", device_option=mkl_do)
workspace.CreateNet(net)
workspace.RunNet(net)
# makes sure that the results are good.
np.testing.assert_allclose(
workspace.FetchBlob("Y"),
workspace.FetchBlob("Y_mkl"),
atol=1e-2,
rtol=1e-2)
runtime = workspace.BenchmarkNet(net.Proto().name, 1, 100, True)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given, settings
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(not workspace.C.has_mkldnn,
"Skipping as we do not have mkldnn.")
class MKLFillTest(hu.HypothesisTestCase):
@given(n=st.integers(1, 4), c=st.integers(1, 4),
h=st.integers(1, 4), w=st.integers(1, 4),
filler=st.sampled_from(
["XavierFill", "ConstantFill", "GaussianFill", "MSRAFill"]
),
seed=st.integers(5, 10),
**mu.gcs_cpu_mkl)
def test_mkl_fill(self, n, c, h, w, filler, seed, gc, dc):
op = core.CreateOperator(
filler,
[],
["Y"],
shape=[n, c, h, w],
)
for d in dc:
d.random_seed = seed
self.assertDeviceChecks(dc, op, [], [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import hypothesis.strategies as st
from hypothesis import given
import numpy as np
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.mkl_test_util as mu
@unittest.skipIf(
not workspace.C.has_mkldnn, "Skipping as we do not have mkldnn."
)
class MKLSqueezeTest(hu.HypothesisTestCase):
@given(
squeeze_dims=st.lists(st.integers(0, 3), min_size=1, max_size=3),
inplace=st.booleans(),
**mu.gcs
)
def test_mkl_squeeze(self, squeeze_dims, inplace, gc, dc):
shape = [
1 if dim in squeeze_dims else np.random.randint(1, 5)
for dim in range(4)
]
X = np.random.rand(*shape).astype(np.float32)
op = core.CreateOperator(
"Squeeze", "X", "X" if inplace else "Y", dims=squeeze_dims
)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
## @package lmdb_create_example
# Module caffe2.python.examples.lmdb_create_example
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import numpy as np
import lmdb
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, model_helper
'''
Simple example to create an lmdb database of random image data and labels.
This can be used as a skeleton to write your own data import.
It also runs a dummy model with Caffe2 that reads the data back and
validates that the checksum is the same.
'''
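# Example usage (illustrative; the output path is arbitrary):
#   python lmdb_create_example.py --output_file /tmp/example_lmdb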
def create_db(output_file):
print(">>> Write database...")
LMDB_MAP_SIZE = 1 << 40 # MODIFY
env = lmdb.open(output_file, map_size=LMDB_MAP_SIZE)
checksum = 0
with env.begin(write=True) as txn:
for j in range(0, 128):
# MODIFY: add your own data reader / creator
label = j % 10
width = 64
height = 32
img_data = np.random.rand(3, width, height)
# ...
# Create TensorProtos
tensor_protos = caffe2_pb2.TensorProtos()
img_tensor = tensor_protos.protos.add()
img_tensor.dims.extend(img_data.shape)
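# data_type 1 is caffe2_pb2.TensorProto.FLOAT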
img_tensor.data_type = 1
flatten_img = img_data.reshape(np.prod(img_data.shape))
img_tensor.float_data.extend(flatten_img)
label_tensor = tensor_protos.protos.add()
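# data_type 2 is caffe2_pb2.TensorProto.INT32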
label_tensor.data_type = 2
label_tensor.int32_data.append(label)
txn.put(
'{}'.format(j).encode('ascii'),
tensor_protos.SerializeToString()
)
checksum += np.sum(img_data) * label
if (j % 16 == 0):
print("Inserted {} rows".format(j))
print("Checksum/write: {}".format(int(checksum)))
return checksum
def read_db_with_caffe2(db_file, expected_checksum):
print(">>> Read database...")
model = model_helper.ModelHelper(name="lmdbtest")
batch_size = 32
data, label = model.TensorProtosDBInput(
[], ["data", "label"], batch_size=batch_size,
db=db_file, db_type="lmdb")
checksum = 0
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
for _ in range(0, 4):
workspace.RunNet(model.net.Proto().name)
img_datas = workspace.FetchBlob("data")
labels = workspace.FetchBlob("label")
for j in range(batch_size):
checksum += np.sum(img_datas[j, :]) * labels[j]
print("Checksum/read: {}".format(int(checksum)))
assert np.abs(expected_checksum - checksum) < 0.1, \
"Read/write checksums don't match"
def main():
parser = argparse.ArgumentParser(
description="Example LMDB creation"
)
parser.add_argument("--output_file", type=str, default=None,
help="Path to write the database to",
required=True)
args = parser.parse_args()
checksum = create_db(args.output_file)
# For testing reading:
read_db_with_caffe2(args.output_file, checksum)
if __name__ == '__main__':
main()
|
# Module caffe2.python.examples.resnet50_trainer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import logging
import numpy as np
import time
import os
from caffe2.python import core, workspace, experiment_util, data_parallel_model
from caffe2.python import data_parallel_model_utils, dyndep, optimizer
from caffe2.python import timeout_guard, model_helper, brew
from caffe2.proto import caffe2_pb2
import caffe2.python.models.resnet as resnet
from caffe2.python.modeling.initializers import Initializer, PseudoFP16Initializer
import caffe2.python.predictor.predictor_exporter as pred_exp
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.python.predictor_constants import predictor_constants as predictor_constants
'''
Parallelized multi-GPU distributed trainer for Resnet 50. Can be used to train
on imagenet data, for example.
To run the trainer in single-machine multi-gpu mode, set num_shards = 1.
To run the trainer in multi-machine multi-gpu mode with M machines,
run the same program on all machines, specifying num_shards = M, and
shard_id = a unique integer in the set [0, M-1].
For rendezvous (the trainer processes have to know about each other),
you can either use a directory path that is visible to all processes
(e.g. NFS directory), or use a Redis instance. Use the former by
passing the `file_store_path` argument. Use the latter by passing the
`redis_host` and `redis_port` arguments.
'''
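# Example invocations (illustrative; all flags are defined in main() below):
#   single machine with 2 GPUs:
#     python resnet50_trainer.py --train_data /path/to/train_lmdb --num_gpus 2
#   two machines, file-store rendezvous over a shared directory:
#     python resnet50_trainer.py --train_data /path/to/train_lmdb \
#         --num_shards 2 --shard_id 0 --run_id run0 \
#         --file_store_path /mnt/shared/rendezvous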
logging.basicConfig()
log = logging.getLogger("resnet50_trainer")
log.setLevel(logging.DEBUG)
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:file_store_handler_ops')
dyndep.InitOpsLibrary('@/caffe2/caffe2/distributed:redis_store_handler_ops')
def AddImageInput(model, reader, batch_size, img_size, dtype, is_test):
'''
The image input operator loads image and label data from the reader and
applies transformations to the images (random cropping, mirroring, ...).
'''
data, label = brew.image_input(
model,
reader, ["data", "label"],
batch_size=batch_size,
output_type=dtype,
use_gpu_transform=(model._device_type == 1),
use_caffe_datum=True,
mean=128.,
std=128.,
scale=256,
crop=img_size,
mirror=1,
is_test=is_test,
)
data = model.StopGradient(data, data)
def AddNullInput(model, reader, batch_size, img_size, dtype):
'''
The null input function uses a gaussian fill operator to emulate real image
input. A label blob is hardcoded to a single value. This is useful if you
want to test compute throughput or don't have a dataset available.
'''
suffix = "_fp16" if dtype == "float16" else ""
model.param_init_net.GaussianFill(
[],
["data" + suffix],
shape=[batch_size, 3, img_size, img_size],
)
if dtype == "float16":
model.param_init_net.FloatToHalf("data" + suffix, "data")
model.param_init_net.ConstantFill(
[],
["label"],
shape=[batch_size],
value=1,
dtype=core.DataType.INT32,
)
def SaveModel(args, train_model, epoch):
prefix = "[]_{}".format(train_model._device_prefix, train_model._devices[0])
predictor_export_meta = pred_exp.PredictorExportMeta(
predict_net=train_model.net.Proto(),
parameters=data_parallel_model.GetCheckpointParams(train_model),
inputs=[prefix + "/data"],
outputs=[prefix + "/softmax"],
shapes={
prefix + "/softmax": (1, args.num_labels),
prefix + "/data": (args.num_channels, args.image_size, args.image_size)
}
)
# save the train_model for the current epoch
model_path = "%s/%s_%d.mdl" % (
args.file_store_path,
args.save_model_name,
epoch,
)
# set db_type to be "minidb" instead of "log_file_db", which breaks
# the serialization in save_to_db. Need to switch back to log_file_db
# after migration
pred_exp.save_to_db(
db_type="minidb",
db_destination=model_path,
predictor_export_meta=predictor_export_meta,
)
def LoadModel(path, model):
'''
Load pretrained model from file
'''
log.info("Loading path: {}".format(path))
meta_net_def = pred_exp.load_from_db(path, 'minidb')
init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.GLOBAL_INIT_NET_TYPE))
predict_init_net = core.Net(pred_utils.GetNet(
meta_net_def, predictor_constants.PREDICT_INIT_NET_TYPE))
predict_init_net.RunAllOnGPU()
init_net.RunAllOnGPU()
assert workspace.RunNetOnce(predict_init_net)
assert workspace.RunNetOnce(init_net)
# Hack: fix iteration counter which is in CUDA context after load model
itercnt = workspace.FetchBlob("optimizer_iteration")
workspace.FeedBlob(
"optimizer_iteration",
itercnt,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0)
)
def RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog,
):
'''
Run one epoch of the trainer.
TODO: add checkpointing here.
'''
# TODO: add loading from checkpoint
log.info("Starting epoch {}/{}".format(epoch, args.num_epochs))
epoch_iters = int(args.epoch_size / total_batch_size / num_shards)
for i in range(epoch_iters):
# This timeout is required (temporarily) since CUDA-NCCL
# operators might deadlock when synchronizing between GPUs.
timeout = 600.0 if i == 0 else 60.0
with timeout_guard.CompleteInTimeOrDie(timeout):
t1 = time.time()
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
fmt = "Finished iteration {}/{} of epoch {} ({:.2f} images/sec)"
log.info(fmt.format(i + 1, epoch_iters, epoch, total_batch_size / dt))
prefix = "{}_{}".format(
train_model._device_prefix,
train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
train_fmt = "Training loss: {}, accuracy: {}"
log.info(train_fmt.format(loss, accuracy))
num_images = epoch * epoch_iters * total_batch_size
prefix = "{}_{}".format(train_model._device_prefix, train_model._devices[0])
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
learning_rate = workspace.FetchBlob(
data_parallel_model.GetLearningRateBlobNames(train_model)[0]
)
test_accuracy = 0
if (test_model is not None):
# Run 100 iters of testing
ntests = 0
for _ in range(0, 100):
workspace.RunNet(test_model.net.Proto().name)
for g in test_model._devices:
test_accuracy += np.asscalar(workspace.FetchBlob(
"{}_{}".format(test_model._device_prefix, g) + '/accuracy'
))
ntests += 1
test_accuracy /= ntests
else:
test_accuracy = (-1)
explog.log(
input_count=num_images,
batch_count=(i + epoch * epoch_iters),
additional_values={
'accuracy': accuracy,
'loss': loss,
'learning_rate': learning_rate,
'epoch': epoch,
'test_accuracy': test_accuracy,
}
)
assert loss < 40, "Exploded gradients :("
# TODO: add checkpointing
return epoch + 1
def Train(args):
# Either use specified device list or generate one
if args.gpus is not None:
gpus = [int(x) for x in args.gpus.split(',')]
num_gpus = len(gpus)
else:
gpus = list(range(args.num_gpus))
num_gpus = args.num_gpus
log.info("Running on GPUs: {}".format(gpus))
# Verify valid batch size
total_batch_size = args.batch_size
batch_per_device = total_batch_size // num_gpus
assert \
total_batch_size % num_gpus == 0, \
"Number of GPUs must divide batch size"
# Round down epoch size to closest multiple of batch size across machines
global_batch_size = total_batch_size * args.num_shards
epoch_iters = int(args.epoch_size / global_batch_size)
assert \
epoch_iters > 0, \
"Epoch size must be larger than batch size times shard count"
args.epoch_size = epoch_iters * global_batch_size
log.info("Using epoch size: {}".format(args.epoch_size))
# Create ModelHelper object
train_arg_scope = {
'order': 'NCHW',
'use_cudnn': True,
'cudnn_exhaustive_search': True,
'ws_nbytes_limit': (args.cudnn_workspace_limit_mb * 1024 * 1024),
}
train_model = model_helper.ModelHelper(
name="resnet50", arg_scope=train_arg_scope
)
num_shards = args.num_shards
shard_id = args.shard_id
# Expect interfaces to be comma separated.
# Use of multiple network interfaces is not yet complete,
# so simply use the first one in the list.
interfaces = args.distributed_interfaces.split(",")
# Rendezvous using MPI when run with mpirun
if os.getenv("OMPI_COMM_WORLD_SIZE") is not None:
num_shards = int(os.getenv("OMPI_COMM_WORLD_SIZE", 1))
shard_id = int(os.getenv("OMPI_COMM_WORLD_RANK", 0))
if num_shards > 1:
rendezvous = dict(
kv_handler=None,
num_shards=num_shards,
shard_id=shard_id,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
mpi_rendezvous=True,
exit_nets=None)
elif num_shards > 1:
# Create rendezvous for distributed computation
store_handler = "store_handler"
if args.redis_host is not None:
# Use Redis for rendezvous if Redis host is specified
workspace.RunOperatorOnce(
core.CreateOperator(
"RedisStoreHandlerCreate", [], [store_handler],
host=args.redis_host,
port=args.redis_port,
prefix=args.run_id,
)
)
else:
# Use filesystem for rendezvous otherwise
workspace.RunOperatorOnce(
core.CreateOperator(
"FileStoreHandlerCreate", [], [store_handler],
path=args.file_store_path,
prefix=args.run_id,
)
)
rendezvous = dict(
kv_handler=store_handler,
shard_id=shard_id,
num_shards=num_shards,
engine="GLOO",
transport=args.distributed_transport,
interface=interfaces[0],
exit_nets=None)
else:
rendezvous = None
# Model building functions
def create_resnet50_model_ops(model, loss_scale):
initializer = (PseudoFP16Initializer if args.dtype == 'float16'
else Initializer)
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=initializer,
BiasInitializer=initializer,
enable_tensor_core=args.enable_tensor_core,
float16_compute=args.float16_compute):
pred = resnet.create_resnet50(
model,
"data",
num_input_channels=args.num_channels,
num_labels=args.num_labels,
no_bias=True,
no_loss=True,
)
if args.dtype == 'float16':
pred = model.net.HalfToFloat(pred, pred + '_fp32')
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy")
return [loss]
def add_optimizer(model):
stepsz = int(30 * args.epoch_size / total_batch_size / num_shards)
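# stepsz is the number of iterations in 30 epochs, so the "step" LR
# policy below decays the learning rate by gamma every 30 epochs.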
if args.float16_compute:
# TODO: merge with multi-precision optimizer
opt = optimizer.build_fp16_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
weight_decay=args.weight_decay, # weight decay included
policy="step",
stepsize=stepsz,
gamma=0.1
)
else:
optimizer.add_weight_decay(model, args.weight_decay)
opt = optimizer.build_multi_precision_sgd(
model,
args.base_learning_rate,
momentum=0.9,
nesterov=1,
policy="step",
stepsize=stepsz,
gamma=0.1
)
return opt
# Define add_image_input function.
# Depends on the "train_data" argument.
# Note that the reader will be shared between all GPUs.
if args.train_data == "null":
def add_image_input(model):
AddNullInput(
model,
None,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
)
else:
reader = train_model.CreateDB(
"reader",
db=args.train_data,
db_type=args.db_type,
num_shards=num_shards,
shard_id=shard_id,
)
def add_image_input(model):
AddImageInput(
model,
reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=False,
)
def add_post_sync_ops(model):
"""Add ops applied after initial parameter sync."""
for param_info in model.GetOptimizationParamInfo(model.GetParams()):
if param_info.blob_copy is not None:
model.param_init_net.HalfToFloat(
param_info.blob,
param_info.blob_copy[core.DataType.FLOAT]
)
# Create parallelized model
data_parallel_model.Parallelize(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnet50_model_ops,
optimizer_builder_fun=add_optimizer,
post_sync_builder_fun=add_post_sync_ops,
devices=gpus,
rendezvous=rendezvous,
optimize_gradient_memory=False,
cpu_device=args.use_cpu,
shared_model=args.use_cpu,
combine_spatial_bn=args.use_cpu,
)
if args.model_parallel:
# Shift half of the activations to another GPU
assert workspace.NumCudaDevices() >= 2 * args.num_gpus
activations = data_parallel_model_utils.GetActivationBlobs(train_model)
data_parallel_model_utils.ShiftActivationDevices(
train_model,
activations=activations[len(activations) // 2:],
shifts={g: args.num_gpus + g for g in range(args.num_gpus)},
)
data_parallel_model.OptimizeGradientMemory(train_model, {}, set(), False)
workspace.RunNetOnce(train_model.param_init_net)
workspace.CreateNet(train_model.net)
# Add test model, if specified
test_model = None
if (args.test_data is not None):
log.info("----- Create test net ----")
test_arg_scope = {
'order': "NCHW",
'use_cudnn': True,
'cudnn_exhaustive_search': True,
}
test_model = model_helper.ModelHelper(
name="resnet50_test", arg_scope=test_arg_scope, init_params=False
)
test_reader = test_model.CreateDB(
"test_reader",
db=args.test_data,
db_type=args.db_type,
)
def test_input_fn(model):
AddImageInput(
model,
test_reader,
batch_size=batch_per_device,
img_size=args.image_size,
dtype=args.dtype,
is_test=True,
)
data_parallel_model.Parallelize(
test_model,
input_builder_fun=test_input_fn,
forward_pass_builder_fun=create_resnet50_model_ops,
post_sync_builder_fun=add_post_sync_ops,
param_update_builder_fun=None,
devices=gpus,
cpu_device=args.use_cpu,
)
workspace.RunNetOnce(test_model.param_init_net)
workspace.CreateNet(test_model.net)
epoch = 0
# load the pre-trained model and reset epoch
if args.load_model_path is not None:
LoadModel(args.load_model_path, train_model)
# Sync the model params
data_parallel_model.FinalizeAfterCheckpoint(train_model)
# reset epoch. load_model_path should end with *_X.mdl,
# where X is the epoch number
last_str = args.load_model_path.split('_')[-1]
if last_str.endswith('.mdl'):
epoch = int(last_str[:-4])
log.info("Reset epoch to {}".format(epoch))
else:
log.warning("The format of load_model_path doesn't match!")
expname = "resnet50_gpu%d_b%d_L%d_lr%.2f_v2" % (
args.num_gpus,
total_batch_size,
args.num_labels,
args.base_learning_rate,
)
explog = experiment_util.ModelTrainerLog(expname, args)
# Run the training one epoch a time
while epoch < args.num_epochs:
epoch = RunEpoch(
args,
epoch,
train_model,
test_model,
total_batch_size,
num_shards,
expname,
explog
)
# Save the model for each epoch
SaveModel(args, train_model, epoch)
model_path = "%s/%s_" % (
args.file_store_path,
args.save_model_name
)
# remove the saved model from the previous epoch if it exists
if os.path.isfile(model_path + str(epoch - 1) + ".mdl"):
os.remove(model_path + str(epoch - 1) + ".mdl")
def main():
# TODO: use argv
parser = argparse.ArgumentParser(
description="Caffe2: Resnet-50 training"
)
parser.add_argument("--train_data", type=str, default=None, required=True,
help="Path to training data (or 'null' to simulate)")
parser.add_argument("--test_data", type=str, default=None,
help="Path to test data")
parser.add_argument("--db_type", type=str, default="lmdb",
help="Database type (such as lmdb or leveldb)")
parser.add_argument("--gpus", type=str,
help="Comma separated list of GPU devices to use")
parser.add_argument("--num_gpus", type=int, default=1,
help="Number of GPU devices (instead of --gpus)")
parser.add_argument("--model_parallel", type=bool, default=False,
help="Split model over 2 x num_gpus")
parser.add_argument("--num_channels", type=int, default=3,
help="Number of color channels")
parser.add_argument("--image_size", type=int, default=227,
help="Input image size (to crop to)")
parser.add_argument("--num_labels", type=int, default=1000,
help="Number of labels")
parser.add_argument("--batch_size", type=int, default=32,
help="Batch size, total over all GPUs")
parser.add_argument("--epoch_size", type=int, default=1500000,
help="Number of images/epoch, total over all machines")
parser.add_argument("--num_epochs", type=int, default=1000,
help="Num epochs.")
parser.add_argument("--base_learning_rate", type=float, default=0.1,
help="Initial learning rate.")
parser.add_argument("--weight_decay", type=float, default=1e-4,
help="Weight decay (L2 regularization)")
parser.add_argument("--cudnn_workspace_limit_mb", type=int, default=64,
help="CuDNN workspace limit in MBs")
parser.add_argument("--num_shards", type=int, default=1,
help="Number of machines in distributed run")
parser.add_argument("--shard_id", type=int, default=0,
help="Shard id.")
parser.add_argument("--run_id", type=str,
help="Unique run identifier (e.g. uuid)")
parser.add_argument("--redis_host", type=str,
help="Host of Redis server (for rendezvous)")
parser.add_argument("--redis_port", type=int, default=6379,
help="Port of Redis server (for rendezvous)")
parser.add_argument("--file_store_path", type=str, default="/tmp",
help="Path to directory to use for rendezvous")
parser.add_argument("--save_model_name", type=str, default="resnet50_model",
help="Save the trained model to a given name")
parser.add_argument("--load_model_path", type=str, default=None,
help="Load previously saved model to continue training")
parser.add_argument("--use_cpu", type=bool, default=False,
help="Use CPU instead of GPU")
parser.add_argument('--dtype', default='float',
choices=['float', 'float16'],
help='Data type used for training')
parser.add_argument('--float16_compute', action='store_true',
help="Use float 16 compute, if available")
parser.add_argument('--enable_tensor_core', action='store_true',
help='Enable Tensor Core math for Conv and FC ops')
parser.add_argument("--distributed_transport", type=str, default="tcp",
help="Transport to use for distributed run [tcp|ibverbs]")
parser.add_argument("--distributed_interfaces", type=str, default="",
help="Network interfaces to use for distributed run")
args = parser.parse_args()
Train(args)
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
|
## @package char_rnn
# Module caffe2.python.examples.char_rnn
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace, model_helper, utils, brew
from caffe2.python.rnn_cell import LSTM
from caffe2.proto import caffe2_pb2
from caffe2.python.optimizer import build_sgd
import argparse
import logging
import numpy as np
from datetime import datetime
'''
This script takes a text file as input and uses a recurrent neural network
to learn to predict the next character in a sequence.
'''
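# Example usage (illustrative; the input file is any plain-text corpus):
#   python char_rnn.py --train_data input.txt --batch_size 32 --gpu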
logging.basicConfig()
log = logging.getLogger("char_rnn")
log.setLevel(logging.DEBUG)
# The mutable default set() here is intentional: it persists across calls,
# accumulating created net names like a global variable would.
def CreateNetOnce(net, created_names=set()): # noqa
name = net.Name()
if name not in created_names:
created_names.add(name)
workspace.CreateNet(net)
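# Example (illustrative): calling CreateNetOnce(net) twice is safe; the
# second call is a no-op because the net's name is already in created_names.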
class CharRNN(object):
def __init__(self, args):
self.seq_length = args.seq_length
self.batch_size = args.batch_size
self.iters_to_report = args.iters_to_report
self.hidden_size = args.hidden_size
with open(args.train_data) as f:
self.text = f.read()
self.vocab = list(set(self.text))
self.char_to_idx = {ch: idx for idx, ch in enumerate(self.vocab)}
self.idx_to_char = {idx: ch for idx, ch in enumerate(self.vocab)}
self.D = len(self.char_to_idx)
print("Input has {} characters. Total input size: {}".format(
len(self.vocab), len(self.text)))
def CreateModel(self):
log.debug("Start training")
model = model_helper.ModelHelper(name="char_rnn")
input_blob, seq_lengths, hidden_init, cell_init, target = \
model.net.AddExternalInputs(
'input_blob',
'seq_lengths',
'hidden_init',
'cell_init',
'target',
)
hidden_output_all, self.hidden_output, _, self.cell_state = LSTM(
model, input_blob, seq_lengths, (hidden_init, cell_init),
self.D, self.hidden_size, scope="LSTM")
output = brew.fc(
model,
hidden_output_all,
None,
dim_in=self.hidden_size,
dim_out=self.D,
axis=2
)
# axis is 2 as first two are T (time) and N (batch size).
# We treat them as one big batch of size T * N
softmax = model.net.Softmax(output, 'softmax', axis=2)
softmax_reshaped, _ = model.net.Reshape(
softmax, ['softmax_reshaped', '_'], shape=[-1, self.D])
# Create a copy of the current net. We will use it on the forward
# pass where we don't need loss and backward operators
self.forward_net = core.Net(model.net.Proto())
xent = model.net.LabelCrossEntropy([softmax_reshaped, target], 'xent')
# Loss is averaged both across the batch and through time.
# That's why the learning rate below is multiplied by self.seq_length.
loss = model.net.AveragedLoss(xent, 'loss')
model.AddGradientOperators([loss])
# Use the build_sgd function to build an optimizer.
build_sgd(
model,
base_learning_rate=0.1 * self.seq_length,
policy="step",
stepsize=1,
gamma=0.9999
)
self.model = model
self.predictions = softmax
self.loss = loss
self.prepare_state = core.Net("prepare_state")
self.prepare_state.Copy(self.hidden_output, hidden_init)
self.prepare_state.Copy(self.cell_state, cell_init)
def _idx_at_pos(self, pos):
return self.char_to_idx[self.text[pos]]
def TrainModel(self):
log.debug("Training model")
workspace.RunNetOnce(self.model.param_init_net)
# Initialize with the loss of a uniform predictor (the same probability
# for each character).
smooth_loss = -np.log(1.0 / self.D) * self.seq_length
last_n_iter = 0
last_n_loss = 0.0
num_iter = 0
N = len(self.text)
# We split text into batch_size pieces. Each piece will be used only
# by a corresponding batch during the training process
text_block_positions = np.zeros(self.batch_size, dtype=np.int32)
text_block_size = N // self.batch_size
text_block_starts = list(range(0, N, text_block_size))
text_block_sizes = [text_block_size] * self.batch_size
text_block_sizes[self.batch_size - 1] += N % self.batch_size
assert sum(text_block_sizes) == N
# Writing to output states which will be copied to input
# states within the loop below
workspace.FeedBlob(self.hidden_output, np.zeros(
[1, self.batch_size, self.hidden_size], dtype=np.float32
))
workspace.FeedBlob(self.cell_state, np.zeros(
[1, self.batch_size, self.hidden_size], dtype=np.float32
))
workspace.CreateNet(self.prepare_state)
# We iterate over the text in a loop many times. Each time we pick a
# seq_length segment and feed it to the LSTM as a sequence.
last_time = datetime.now()
progress = 0
while True:
workspace.FeedBlob(
"seq_lengths",
np.array([self.seq_length] * self.batch_size,
dtype=np.int32)
)
workspace.RunNet(self.prepare_state.Name())
input = np.zeros(
[self.seq_length, self.batch_size, self.D]
).astype(np.float32)
target = np.zeros(
[self.seq_length * self.batch_size]
).astype(np.int32)
for e in range(self.batch_size):
for i in range(self.seq_length):
pos = text_block_starts[e] + text_block_positions[e]
input[i][e][self._idx_at_pos(pos)] = 1
target[i * self.batch_size + e] =\
self._idx_at_pos((pos + 1) % N)
text_block_positions[e] = (
text_block_positions[e] + 1) % text_block_sizes[e]
progress += 1
workspace.FeedBlob('input_blob', input)
workspace.FeedBlob('target', target)
CreateNetOnce(self.model.net)
workspace.RunNet(self.model.net.Name())
num_iter += 1
last_n_iter += 1
if num_iter % self.iters_to_report == 0:
new_time = datetime.now()
print("Characters Per Second: {}". format(
int(progress / (new_time - last_time).total_seconds())
))
print("Iterations Per Second: {}". format(
int(self.iters_to_report /
(new_time - last_time).total_seconds())
))
last_time = new_time
progress = 0
print("{} Iteration {} {}".
format('-' * 10, num_iter, '-' * 10))
loss = workspace.FetchBlob(self.loss) * self.seq_length
smooth_loss = 0.999 * smooth_loss + 0.001 * loss
last_n_loss += loss
if num_iter % self.iters_to_report == 0:
self.GenerateText(500, np.random.choice(self.vocab))
log.debug("Loss since last report: {}"
.format(last_n_loss / last_n_iter))
log.debug("Smooth loss: {}".format(smooth_loss))
last_n_loss = 0.0
last_n_iter = 0
def GenerateText(self, num_characters, ch):
# Given a starting symbol we feed a fake sequence of size 1 to
# our RNN num_characters times. After each step we use the output
# probabilities to pick the next character to feed to the network.
# The sampled character also becomes part of the output.
CreateNetOnce(self.forward_net)
text = '' + ch
for _i in range(num_characters):
workspace.FeedBlob(
"seq_lengths", np.array([1] * self.batch_size, dtype=np.int32))
workspace.RunNet(self.prepare_state.Name())
input = np.zeros([1, self.batch_size, self.D]).astype(np.float32)
input[0][0][self.char_to_idx[ch]] = 1
workspace.FeedBlob("input_blob", input)
workspace.RunNet(self.forward_net.Name())
p = workspace.FetchBlob(self.predictions)
next_idx = np.random.choice(self.D, p=p[0][0])
ch = self.idx_to_char[next_idx]
text += ch
print(text)
@utils.debug
def main():
parser = argparse.ArgumentParser(
description="Caffe2: Char RNN Training"
)
parser.add_argument("--train_data", type=str, default=None,
help="Path to training data in a text file format",
required=True)
parser.add_argument("--seq_length", type=int, default=25,
help="One training example sequence length")
parser.add_argument("--batch_size", type=int, default=1,
help="Training batch size")
parser.add_argument("--iters_to_report", type=int, default=500,
help="How often to report loss and generate text")
parser.add_argument("--hidden_size", type=int, default=100,
help="Dimension of the hidden representation")
parser.add_argument("--gpu", action="store_true",
help="If set, training is going to use GPU 0")
args = parser.parse_args()
device = core.DeviceOption(
caffe2_pb2.CUDA if args.gpu else caffe2_pb2.CPU, 0)
with core.DeviceScope(device):
model = CharRNN(args)
model.CreateModel()
model.TrainModel()
if __name__ == '__main__':
workspace.GlobalInit(['caffe2', '--caffe2_log_level=2'])
main()
|
## @package predictor_py_utils
# Module caffe2.python.predictor.predictor_py_utils
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope
def create_predict_net(predictor_export_meta):
"""
Return the input prediction net.
"""
# Construct a new net to clear the existing settings.
net = core.Net(predictor_export_meta.predict_net.name or "predict")
net.Proto().op.extend(predictor_export_meta.predict_net.op)
net.Proto().external_input.extend(
predictor_export_meta.inputs + predictor_export_meta.parameters)
net.Proto().external_output.extend(predictor_export_meta.outputs)
net.Proto().arg.extend(predictor_export_meta.predict_net.arg)
if predictor_export_meta.net_type is not None:
net.Proto().type = predictor_export_meta.net_type
if predictor_export_meta.num_workers is not None:
net.Proto().num_workers = predictor_export_meta.num_workers
return net.Proto()
def create_predict_init_net(ws, predictor_export_meta):
"""
Return an initialization net that zero-fills all the input and
output blobs, using the shapes from the provided workspace. This is
necessary as there is no shape inference functionality in Caffe2.
"""
net = core.Net("predict-init")
def zero_fill(blob):
shape = predictor_export_meta.shapes.get(blob)
if shape is None:
if blob not in ws.blobs:
raise Exception(
"{} not in workspace but needed for shape: {}".format(
blob, ws.blobs))
shape = ws.blobs[blob].fetch().shape
# Explicitly null-out the scope so users (e.g. PredictorGPU)
# can control (at a Net-global level) the DeviceOption of
# these filling operators.
with scope.EmptyDeviceScope():
net.ConstantFill([], blob, shape=shape, value=0.0)
external_blobs = predictor_export_meta.inputs + \
predictor_export_meta.outputs
for blob in external_blobs:
zero_fill(blob)
net.Proto().external_input.extend(external_blobs)
if predictor_export_meta.extra_init_net:
net.AppendNet(predictor_export_meta.extra_init_net)
# Add the model_id in the predict_net to the init_net
AddModelIdArg(predictor_export_meta, net.Proto())
return net.Proto()
def get_comp_name(string, name):
if name:
return string + '_' + name
return string
def _ProtoMapGet(field, key):
'''
Given the key, get the value of the repeated field.
Helper function for protobuf, which doesn't have a map construct.
'''
for v in field:
if (v.key == key):
return v.value
return None
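# Example (illustrative): meta_net_def.nets is a repeated (key, value)
# message, so _ProtoMapGet(meta_net_def.nets, "some_net_name") returns the
# NetDef stored under that key, or None if absent.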
def GetPlan(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetPlanOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.plans, key)
def GetBlobs(meta_net_def, key):
blobs = _ProtoMapGet(meta_net_def.blobs, key)
if blobs is None:
return []
return blobs
def GetNet(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetNetOriginal(meta_net_def, key):
return _ProtoMapGet(meta_net_def.nets, key)
def GetApplicationSpecificInfo(meta_net_def, key):
return _ProtoMapGet(meta_net_def.applicationSpecificInfo, key)
def AddBlobs(meta_net_def, blob_name, blob_def):
blobs = _ProtoMapGet(meta_net_def.blobs, blob_name)
if blobs is None:
blobs = meta_net_def.blobs.add()
blobs.key = blob_name
blobs = blobs.value
for blob in blob_def:
blobs.append(blob)
def AddPlan(meta_net_def, plan_name, plan_def):
meta_net_def.plans.add(key=plan_name, value=plan_def)
def AddNet(meta_net_def, net_name, net_def):
meta_net_def.nets.add(key=net_name, value=net_def)
def GetArgumentByName(net_def, arg_name):
for arg in net_def.arg:
if arg.name == arg_name:
return arg
return None
def AddModelIdArg(meta_net_def, net_def):
"""Takes the model_id from the predict_net of meta_net_def (if it is
populated) and adds it to the net_def passed in. This is intended to be
called on init_nets, as their model_id is not populated by default, but
should be the same as that of the predict_net
"""
# Get model_id from the predict_net, assuming it's an integer
model_id = GetArgumentByName(meta_net_def.predict_net, "model_id")
if model_id is None:
return
model_id = model_id.i
# If there's another model_id on the net, replace it with the new one
old_id = GetArgumentByName(net_def, "model_id")
if old_id is not None:
old_id.i = model_id
return
# Add as an integer argument, this is also assumed above
arg = net_def.arg.add()
arg.name = "model_id"
arg.i = model_id
|
## @package predictor_exporter
# Module caffe2.python.predictor.predictor_exporter
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.proto import metanet_pb2
from caffe2.python import workspace, core, scope
from caffe2.python.predictor_constants import predictor_constants
import caffe2.python.predictor.serde as serde
import caffe2.python.predictor.predictor_py_utils as utils
from builtins import bytes
import collections
def get_predictor_exporter_helper(submodelNetName):
""" constracting stub for the PredictorExportMeta
Only used to construct names to subfields,
such as calling to predict_net_name
Args:
submodelNetName - name of the model
"""
stub_net = core.Net(submodelNetName)
pred_meta = PredictorExportMeta(predict_net=stub_net,
parameters=[],
inputs=[],
outputs=[],
shapes=None,
name=submodelNetName,
extra_init_net=None)
return pred_meta
class PredictorExportMeta(collections.namedtuple(
'PredictorExportMeta',
'predict_net, parameters, inputs, outputs, shapes, name, \
extra_init_net, net_type, num_workers')):
"""
Metadata to be used for serializing a net.
parameters, inputs, outputs could be either BlobReference or blob's names
predict_net can be either core.Net, NetDef, PlanDef or object
Override the named tuple to provide optional name parameter.
name will be used to identify multiple prediction nets.
net_type is the type field in caffe2 NetDef - can be 'simple', 'dag', etc.
num_workers specifies for net type 'dag' how many threads should run ops
"""
def __new__(
cls,
predict_net,
parameters,
inputs,
outputs,
shapes=None,
name="",
extra_init_net=None,
net_type=None,
num_workers=None,
):
inputs = [str(i) for i in inputs]
outputs = [str(o) for o in outputs]
assert len(set(inputs)) == len(inputs), (
"All inputs to the predictor should be unique")
parameters = [str(p) for p in parameters]
assert set(parameters).isdisjoint(inputs), (
"Parameters and inputs are required to be disjoint. "
"Intersection: {}".format(set(parameters).intersection(inputs)))
assert set(parameters).isdisjoint(outputs), (
"Parameters and outputs are required to be disjoint. "
"Intersection: {}".format(set(parameters).intersection(outputs)))
shapes = shapes or {}
if isinstance(predict_net, (core.Net, core.Plan)):
predict_net = predict_net.Proto()
assert isinstance(predict_net, (caffe2_pb2.NetDef, caffe2_pb2.PlanDef))
return super(PredictorExportMeta, cls).__new__(
cls, predict_net, parameters, inputs, outputs, shapes, name,
extra_init_net, net_type, num_workers)
def inputs_name(self):
return utils.get_comp_name(predictor_constants.INPUTS_BLOB_TYPE,
self.name)
def outputs_name(self):
return utils.get_comp_name(predictor_constants.OUTPUTS_BLOB_TYPE,
self.name)
def parameters_name(self):
return utils.get_comp_name(predictor_constants.PARAMETERS_BLOB_TYPE,
self.name)
def global_init_name(self):
return utils.get_comp_name(predictor_constants.GLOBAL_INIT_NET_TYPE,
self.name)
def predict_init_name(self):
return utils.get_comp_name(predictor_constants.PREDICT_INIT_NET_TYPE,
self.name)
def predict_net_name(self):
return utils.get_comp_name(predictor_constants.PREDICT_NET_TYPE,
self.name)
def train_init_plan_name(self):
return utils.get_comp_name(predictor_constants.TRAIN_INIT_PLAN_TYPE,
self.name)
def train_plan_name(self):
return utils.get_comp_name(predictor_constants.TRAIN_PLAN_TYPE,
self.name)
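# Example (illustrative sketch; the net and blob names are hypothetical):
#   pem = PredictorExportMeta(
#       predict_net=my_net,            # core.Net or caffe2_pb2.NetDef
#       parameters=["fc_w", "fc_b"],
#       inputs=["data"],
#       outputs=["softmax"],
#   )
#   save_to_db("minidb", "/tmp/model.minidb", pem)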
def prepare_prediction_net(filename, db_type, device_option=None):
'''
Helper function which loads all required blobs from the db
and returns prediction net ready to be used
'''
metanet_def = load_from_db(filename, db_type, device_option)
global_init_net = utils.GetNet(
metanet_def, predictor_constants.GLOBAL_INIT_NET_TYPE)
workspace.RunNetOnce(global_init_net)
predict_init_net = utils.GetNet(
metanet_def, predictor_constants.PREDICT_INIT_NET_TYPE)
workspace.RunNetOnce(predict_init_net)
predict_net = core.Net(
utils.GetNet(metanet_def, predictor_constants.PREDICT_NET_TYPE))
workspace.CreateNet(predict_net)
return predict_net
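# Example (illustrative): loading the model saved above for inference:
#   net = prepare_prediction_net("/tmp/model.minidb", "minidb")
#   workspace.FeedBlob("data", X)  # X is the input ndarray
#   workspace.RunNet(net)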
def _global_init_net(predictor_export_meta):
net = core.Net("global-init")
net.Load(
[predictor_constants.PREDICTOR_DBREADER],
predictor_export_meta.parameters)
net.Proto().external_input.extend([predictor_constants.PREDICTOR_DBREADER])
net.Proto().external_output.extend(predictor_export_meta.parameters)
# Add the model_id in the predict_net to the global_init_net
utils.AddModelIdArg(predictor_export_meta, net.Proto())
return net.Proto()
def get_meta_net_def(predictor_export_meta, ws=None):
"""
"""
ws = ws or workspace.C.Workspace.current
meta_net_def = metanet_pb2.MetaNetDef()
# Predict net is the core network that we use.
utils.AddNet(meta_net_def, predictor_export_meta.predict_init_name(),
utils.create_predict_init_net(ws, predictor_export_meta))
utils.AddNet(meta_net_def, predictor_export_meta.global_init_name(),
_global_init_net(predictor_export_meta))
utils.AddNet(meta_net_def, predictor_export_meta.predict_net_name(),
utils.create_predict_net(predictor_export_meta))
utils.AddBlobs(meta_net_def, predictor_export_meta.parameters_name(),
predictor_export_meta.parameters)
utils.AddBlobs(meta_net_def, predictor_export_meta.inputs_name(),
predictor_export_meta.inputs)
utils.AddBlobs(meta_net_def, predictor_export_meta.outputs_name(),
predictor_export_meta.outputs)
return meta_net_def
def set_model_info(meta_net_def, project_str, model_class_str, version):
assert isinstance(meta_net_def, metanet_pb2.MetaNetDef)
meta_net_def.modelInfo.project = project_str
meta_net_def.modelInfo.modelClass = model_class_str
meta_net_def.modelInfo.version = version
def save_to_db(db_type, db_destination, predictor_export_meta):
meta_net_def = get_meta_net_def(predictor_export_meta)
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
workspace.FeedBlob(
predictor_constants.META_NET_DEF,
serde.serialize_protobuf_struct(meta_net_def)
)
blobs_to_save = [predictor_constants.META_NET_DEF] + \
predictor_export_meta.parameters
op = core.CreateOperator(
"Save",
blobs_to_save, [],
absolute_path=True,
db=db_destination, db_type=db_type)
workspace.RunOperatorOnce(op)
def load_from_db(filename, db_type, device_option=None):
# global_init_net in meta_net_def will load parameters from
# predictor_constants.PREDICTOR_DBREADER
create_db = core.CreateOperator(
'CreateDB', [],
[core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
db=filename, db_type=db_type)
assert workspace.RunOperatorOnce(create_db), (
'Failed to create db {}'.format(filename))
# predictor_constants.META_NET_DEF is always stored before the parameters
load_meta_net_def = core.CreateOperator(
'Load',
[core.BlobReference(predictor_constants.PREDICTOR_DBREADER)],
[core.BlobReference(predictor_constants.META_NET_DEF)])
assert workspace.RunOperatorOnce(load_meta_net_def)
blob = workspace.FetchBlob(predictor_constants.META_NET_DEF)
meta_net_def = serde.deserialize_protobuf_struct(
blob if isinstance(blob, bytes)
else str(blob).encode('utf-8'),
metanet_pb2.MetaNetDef)
if device_option is None:
device_option = scope.CurrentDeviceScope()
if device_option is not None:
# Set the device options of all loaded blobs
for kv in meta_net_def.nets:
net = kv.value
for op in net.op:
op.device_option.CopyFrom(device_option)
return meta_net_def
|
## @package serde
# Module caffe2.python.predictor.serde
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def serialize_protobuf_struct(protobuf_struct):
return protobuf_struct.SerializeToString()
def deserialize_protobuf_struct(serialized_protobuf, struct_type):
deser = struct_type()
deser.ParseFromString(serialized_protobuf)
return deser
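# A small round-trip sketch (caffe2_pb2 is imported locally here because this
# module itself has no proto imports; the net name is illustrative):
def _example_serde_round_trip():
    from caffe2.proto import caffe2_pb2
    net = caffe2_pb2.NetDef()
    net.name = 'example'
    blob = serialize_protobuf_struct(net)
    restored = deserialize_protobuf_struct(blob, caffe2_pb2.NetDef)
    assert restored.name == 'example'
    return restored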
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.test_util import TestCase
from caffe2.python import workspace, brew
from caffe2.python.model_helper import ModelHelper
from caffe2.python.predictor import mobile_exporter
import numpy as np
class TestMobileExporter(TestCase):
def test_mobile_exporter(self):
model = ModelHelper(name="mobile_exporter_test_model")
# Test LeNet
brew.conv(model, 'data', 'conv1', dim_in=1, dim_out=20, kernel=5)
brew.max_pool(model, 'conv1', 'pool1', kernel=2, stride=2)
brew.conv(model, 'pool1', 'conv2', dim_in=20, dim_out=50, kernel=5)
brew.max_pool(model, 'conv2', 'pool2', kernel=2, stride=2)
brew.fc(model, 'pool2', 'fc3', dim_in=50 * 4 * 4, dim_out=500)
brew.relu(model, 'fc3', 'fc3')
brew.fc(model, 'fc3', 'pred', 500, 10)
brew.softmax(model, 'pred', 'out')
# Create our mobile exportable networks
workspace.RunNetOnce(model.param_init_net)
init_net, predict_net = mobile_exporter.Export(
workspace, model.net, model.params
)
# Populate the workspace with data
np_data = np.random.rand(1, 1, 28, 28).astype(np.float32)
workspace.FeedBlob("data", np_data)
workspace.CreateNet(model.net)
workspace.RunNet(model.net)
ref_out = workspace.FetchBlob("out")
# Clear the workspace
workspace.ResetWorkspace()
# Populate the workspace with data
workspace.RunNetOnce(init_net)
# Fake "data" is populated by init_net, we have to replace it
workspace.FeedBlob("data", np_data)
# Overwrite the old net
workspace.CreateNet(predict_net, True)
workspace.RunNet(predict_net.name)
manual_run_out = workspace.FetchBlob("out")
np.testing.assert_allclose(
ref_out, manual_run_out, atol=1e-10, rtol=1e-10
)
# Clear the workspace
workspace.ResetWorkspace()
# Predictor interface test (simulates writing to disk)
predictor = workspace.Predictor(
init_net.SerializeToString(), predict_net.SerializeToString()
)
# Output is a vector of outputs but we only care about the first and only result
predictor_out = predictor.run([np_data])
assert len(predictor_out) == 1
predictor_out = predictor_out[0]
np.testing.assert_allclose(
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
def test_mobile_exporter_datatypes(self):
model = ModelHelper(name="mobile_exporter_test_model")
model.Copy("data_int", "out")
model.params.append("data_int")
# Create our mobile exportable networks
workspace.RunNetOnce(model.param_init_net)
np_data_int = np.random.randint(100, size=(1, 1, 28, 28), dtype=np.int32)
workspace.FeedBlob("data_int", np_data_int)
init_net, predict_net = mobile_exporter.Export(
workspace, model.net, model.params
)
workspace.CreateNet(model.net)
workspace.RunNet(model.net)
ref_out = workspace.FetchBlob("out")
# Clear the workspace
workspace.ResetWorkspace()
# Populate the workspace with data
workspace.RunNetOnce(init_net)
# Overwrite the old net
workspace.CreateNet(predict_net, True)
workspace.RunNet(predict_net.name)
manual_run_out = workspace.FetchBlob("out")
np.testing.assert_allclose(
ref_out, manual_run_out, atol=1e-10, rtol=1e-10
)
# Clear the workspace
workspace.ResetWorkspace()
# Predictor interface test (simulates writing to disk)
predictor = workspace.Predictor(
init_net.SerializeToString(), predict_net.SerializeToString()
)
# Output is a vector of outputs but we only care about the first and only result
predictor_out = predictor.run([])
assert len(predictor_out) == 1
predictor_out = predictor_out[0]
np.testing.assert_allclose(
ref_out, predictor_out, atol=1e-10, rtol=1e-10
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tempfile
import unittest
import numpy as np
from caffe2.python import cnn, workspace, core
from future.utils import viewitems
from caffe2.python.predictor_constants import predictor_constants as pc
import caffe2.python.predictor.predictor_exporter as pe
import caffe2.python.predictor.predictor_py_utils as pred_utils
from caffe2.proto import caffe2_pb2, metanet_pb2
class MetaNetDefTest(unittest.TestCase):
def test_minimal(self):
'''
Tests that a NetsMap message can be created with a NetDef message
'''
# This calls the constructor for a metanet_pb2.NetsMap
metanet_pb2.NetsMap(key="test_key", value=caffe2_pb2.NetDef())
def test_adding_net(self):
'''
Tests that NetDefs can be added to MetaNetDefs
'''
meta_net_def = metanet_pb2.MetaNetDef()
net_def = caffe2_pb2.NetDef()
meta_net_def.nets.add(key="test_key", value=net_def)
class PredictorExporterTest(unittest.TestCase):
def _create_model(self):
m = cnn.CNNModelHelper()
m.FC("data", "y",
dim_in=5, dim_out=10,
weight_init=m.XavierInit,
bias_init=m.XavierInit)
return m
def setUp(self):
np.random.seed(1)
m = self._create_model()
self.predictor_export_meta = pe.PredictorExportMeta(
predict_net=m.net.Proto(),
parameters=[str(b) for b in m.params],
inputs=["data"],
outputs=["y"],
shapes={"y": (1, 10), "data": (1, 5)},
)
workspace.RunNetOnce(m.param_init_net)
self.params = {
param: workspace.FetchBlob(param)
for param in self.predictor_export_meta.parameters}
# Reset the workspace, to ensure net creation proceeds as expected.
workspace.ResetWorkspace()
def test_meta_constructor(self):
'''
Test that passing net itself instead of proto works
'''
m = self._create_model()
pe.PredictorExportMeta(
predict_net=m.net,
parameters=m.params,
inputs=["data"],
outputs=["y"],
shapes={"y": (1, 10), "data": (1, 5)},
)
def test_param_intersection(self):
'''
Test that passes intersecting parameters and input/output blobs
'''
m = self._create_model()
with self.assertRaises(Exception):
pe.PredictorExportMeta(
predict_net=m.net,
parameters=m.params,
inputs=["data"] + m.params,
outputs=["y"],
shapes={"y": (1, 10), "data": (1, 5)},
)
with self.assertRaises(Exception):
pe.PredictorExportMeta(
predict_net=m.net,
parameters=m.params,
inputs=["data"],
outputs=["y"] + m.params,
shapes={"y": (1, 10), "data": (1, 5)},
)
def test_meta_net_def_net_runs(self):
for param, value in viewitems(self.params):
workspace.FeedBlob(param, value)
extra_init_net = core.Net('extra_init')
extra_init_net.ConstantFill('data', 'data', value=1.0)
pem = pe.PredictorExportMeta(
predict_net=self.predictor_export_meta.predict_net,
parameters=self.predictor_export_meta.parameters,
inputs=self.predictor_export_meta.inputs,
outputs=self.predictor_export_meta.outputs,
shapes=self.predictor_export_meta.shapes,
extra_init_net=extra_init_net,
net_type='dag',
)
db_type = 'minidb'
db_file = tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(db_type))
pe.save_to_db(
db_type=db_type,
db_destination=db_file.name,
predictor_export_meta=pem)
workspace.ResetWorkspace()
meta_net_def = pe.load_from_db(
db_type=db_type,
filename=db_file.name,
)
self.assertTrue("data" not in workspace.Blobs())
self.assertTrue("y" not in workspace.Blobs())
init_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_INIT_NET_TYPE)
        # Zero-fills the external input blobs and runs extra_init_net
workspace.RunNetOnce(init_net)
self.assertTrue("data" in workspace.Blobs())
self.assertTrue("y" in workspace.Blobs())
print(workspace.FetchBlob("data"))
np.testing.assert_array_equal(
workspace.FetchBlob("data"), np.ones(shape=(1, 5)))
np.testing.assert_array_equal(
workspace.FetchBlob("y"), np.zeros(shape=(1, 10)))
# Load parameters from DB
global_init_net = pred_utils.GetNet(meta_net_def,
pc.GLOBAL_INIT_NET_TYPE)
workspace.RunNetOnce(global_init_net)
# Run the net with a reshaped input and verify we are
# producing good numbers (with our custom implementation)
workspace.FeedBlob("data", np.random.randn(2, 5).astype(np.float32))
predict_net = pred_utils.GetNet(meta_net_def, pc.PREDICT_NET_TYPE)
self.assertEqual(predict_net.type, 'dag')
workspace.RunNetOnce(predict_net)
np.testing.assert_array_almost_equal(
workspace.FetchBlob("y"),
workspace.FetchBlob("data").dot(self.params["y_w"].T) +
self.params["y_b"])
def test_load_device_scope(self):
for param, value in self.params.items():
workspace.FeedBlob(param, value)
pem = pe.PredictorExportMeta(
predict_net=self.predictor_export_meta.predict_net,
parameters=self.predictor_export_meta.parameters,
inputs=self.predictor_export_meta.inputs,
outputs=self.predictor_export_meta.outputs,
shapes=self.predictor_export_meta.shapes,
net_type='dag',
)
db_type = 'minidb'
db_file = tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(db_type))
pe.save_to_db(
db_type=db_type,
db_destination=db_file.name,
predictor_export_meta=pem)
workspace.ResetWorkspace()
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU, 1)):
meta_net_def = pe.load_from_db(
db_type=db_type,
filename=db_file.name,
)
init_net = core.Net(pred_utils.GetNet(meta_net_def,
pc.GLOBAL_INIT_NET_TYPE))
predict_init_net = core.Net(pred_utils.GetNet(
meta_net_def, pc.PREDICT_INIT_NET_TYPE))
# check device options
for op in list(init_net.Proto().op) + list(predict_init_net.Proto().op):
self.assertEqual(1, op.device_option.cuda_gpu_id)
self.assertEqual(caffe2_pb2.CPU, op.device_option.device_type)
def test_db_fails_without_params(self):
with self.assertRaises(Exception):
for db_type in ["minidb"]:
db_file = tempfile.NamedTemporaryFile(
delete=False, suffix=".{}".format(db_type))
pe.save_to_db(
db_type=db_type,
db_destination=db_file.name,
predictor_export_meta=self.predictor_export_meta)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
from caffe2.python import workspace, core
from caffe2.proto import caffe2_pb2
class TestPredictor(unittest.TestCase):
def setUp(self):
np.random.seed(1)
self.predict_net = self._predict_net
self.init_net = self._init_net
@property
def _predict_net(self):
net = caffe2_pb2.NetDef()
net.name = 'test-predict-net'
net.external_input[:] = ['A', 'B']
net.external_output[:] = ['C']
net.op.extend([
core.CreateOperator(
'MatMul',
['A', 'B'],
['C'],
)
])
return net.SerializeToString()
@property
def _init_net(self):
net = caffe2_pb2.NetDef()
net.name = 'test-init-net'
net.external_output[:] = ['A', 'B']
net.op.extend([
core.CreateOperator(
'GivenTensorFill',
[],
['A'],
shape=(2, 3),
values=np.zeros((2, 3), np.float32).flatten().tolist(),
),
core.CreateOperator(
'GivenTensorFill',
[],
['B'],
shape=(3, 4),
values=np.zeros((3, 4), np.float32).flatten().tolist(),
),
])
return net.SerializeToString()
def test_run(self):
A = np.ones((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run([A, B])
self.assertEqual(len(outputs), 1)
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
def test_run_map(self):
A = np.zeros((2, 3), np.float32)
B = np.ones((3, 4), np.float32)
predictor = workspace.Predictor(self.init_net, self.predict_net)
outputs = predictor.run({
'B': B,
})
self.assertEqual(len(outputs), 1)
np.testing.assert_almost_equal(np.dot(A, B), outputs[0])
|
## @package mobile_exporter
# Module caffe2.python.mobile_exporter
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, utils
from caffe2.proto import caffe2_pb2
import numpy as np
def add_tensor(net, name, blob):
    ''' Create an operator that materializes the tensor 'blob' and append
    it to 'net'; running the net puts the blob into the workspace.
    uint8 is stored as a single-element array of strings.
    '''
kTypeNameMapper = {
np.dtype('float32'): "GivenTensorFill",
np.dtype('int32'): "GivenTensorIntFill",
np.dtype('int64'): "GivenTensorInt64Fill",
np.dtype('uint8'): "GivenTensorStringFill",
}
shape = blob.shape
values = blob
    # pass the uint8 array as a single string to save storage;
    # storing uint8_t element-wise has a large overhead for now
    if blob.dtype == np.dtype('uint8'):
        shape = [1]
        values = [blob.tobytes()]  # str(blob.data) is not Python 3 safe
op = core.CreateOperator(
kTypeNameMapper[blob.dtype],
[], [name],
arg=[
utils.MakeArgument("shape", shape),
utils.MakeArgument("values", values),
]
)
net.op.extend([op])
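# A minimal sketch of add_tensor in isolation (the blob name 'fc_w' is
# illustrative): it appends a GivenTensorFill-style op to a NetDef.
def _example_add_tensor():
    init_net = caffe2_pb2.NetDef()
    add_tensor(init_net, 'fc_w', np.ones((3, 2), dtype=np.float32))
    # init_net.op[0].type is 'GivenTensorFill', carrying shape/values args
    return init_net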
def Export(workspace, net, params):
"""Returns init_net and predict_net suitable for writing to disk
and loading into a Predictor"""
proto = net if isinstance(net, caffe2_pb2.NetDef) else net.Proto()
predict_net = caffe2_pb2.NetDef()
predict_net.CopyFrom(proto)
init_net = caffe2_pb2.NetDef()
# Populate the init_net.
ssa, blob_versions = core.get_ssa(net)
inputs = []
for versioned_inputs, _ in ssa:
inputs += [name for name, _ in versioned_inputs]
input_blobs = [blob_name for blob_name, version in
blob_versions.items()
if version == 0 and blob_name not in params]
# Blobs that are never used as an input to another layer,
# i.e. strictly output blobs.
output_blobs = [blob_name for blob_name, version in
blob_versions.items()
if version != 0 and blob_name not in inputs]
for blob_ref in params:
blob_name = str(blob_ref)
blob = workspace.FetchBlob(blob_name)
add_tensor(init_net, blob_name, blob)
# We have to make sure the blob exists in the namespace
# and we can do so with fake data. (Which is immediately overwritten
# by any typical usage)
for blob_name in input_blobs:
init_net.op.extend(
[
core.CreateOperator(
"GivenTensorFill", [], [blob_name],
arg=[
utils.MakeArgument("shape", [1, 1]),
utils.MakeArgument("values", [0.0])
]
)
]
)
# Now we make input/output_blobs line up with what Predictor expects.
del predict_net.external_input[:]
predict_net.external_input.extend(input_blobs)
# For populating weights
predict_net.external_input.extend(proto.external_input)
# Ensure the output is also consistent with what we want
del predict_net.external_output[:]
predict_net.external_output.extend(output_blobs)
return init_net, predict_net
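# Sketch of the intended flow (assumes the caller's workspace already holds
# the parameter blobs, e.g. after running a param_init_net):
#   init_net, predict_net = Export(workspace, model.net, model.params)
#   predictor = workspace.Predictor(init_net.SerializeToString(),
#                                   predict_net.SerializeToString())
#   outputs = predictor.run([input_array])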
|
## @package elementwise_linear
# Module caffe2.python.helpers.elementwise_linear
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.modeling.parameter_info import ParameterTags
def _elementwise_linear(
model, op_call, blob_in, blob_out, dim,
weight_init=None, bias_init=None, **kwargs
):
"""Elementwise_Linear"""
weight_init = weight_init or ('ConstantFill', {'value': 1.0})
bias_init = bias_init or ('ConstantFill', {'value': 0.0})
blob_out = blob_out or model.net.NextName()
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=[dim],
**weight_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
return op_call([blob_in, weight, bias], blob_out, **kwargs)
def elementwise_linear(model, *args, **kwargs):
return _elementwise_linear(
model, model.net.ElementwiseLinear, *args, **kwargs)
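# Usage sketch (assumes a ModelHelper-like `model`; blob names are
# illustrative). ElementwiseLinear scales and shifts along the last axis,
# i.e. y[i][j] = w[j] * x[i][j] + b[j]:
#   elementwise_linear(model, 'x', 'y', dim=64)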
|
## @package fc
# Module caffe2.python.helpers.fc
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
def _FC_or_packed_FC(
model, op_call, blob_in, blob_out, dim_in, dim_out, weight_init=None,
bias_init=None, WeightInitializer=None, BiasInitializer=None,
enable_tensor_core=False, float16_compute=False, **kwargs
):
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
if not model.init_params:
WeightInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
blob_out = blob_out or model.net.NextName()
bias_tags = [ParameterTags.BIAS]
if 'freeze_bias' in kwargs:
bias_tags.append(ParameterTags.COMPUTED_PARAM)
weight = model.create_param(
param_name=blob_out + '_w',
shape=[dim_out, dim_in],
initializer=WeightInitializer,
tags=ParameterTags.WEIGHT
)
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
tags=bias_tags
)
# enable TensorCore by setting appropriate engine
if enable_tensor_core:
kwargs['engine'] = 'TENSORCORE'
# Enable float 16 compute kernel (relevant for CUDA)
if float16_compute:
kwargs['float16_compute'] = True
return op_call([blob_in, weight, bias], blob_out, **kwargs)
def fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.FC, *args, **kwargs)
def packed_fc(model, *args, **kwargs):
return _FC_or_packed_FC(model, model.net.PackedFC, *args, **kwargs)
def fc_decomp(
model, blob_in, blob_out, dim_in, dim_out,
rank_approx=5, weight_init=None, bias_init=None,
WeightInitializer=None, BiasInitializer=None, **kwargs
):
"""FC_Decomp version
Here we assume that the rank of original input is bigger than 5.
"""
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
blob_out = blob_out or model.net.NextName()
u = model.create_param(
param_name=blob_out + '_u',
shape=[dim_out, rank_approx],
initializer=WeightInitializer,
)
v = model.create_param(
param_name=blob_out + '_v',
shape=[dim_in, rank_approx],
initializer=WeightInitializer,
)
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
)
return model.net.FC_Decomp([blob_in, u, v, bias], blob_out, **kwargs)
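# For reference, this factors the dim_out x dim_in weight matrix as
# W ~ u * v^T, with u of shape [dim_out, rank_approx] and v of shape
# [dim_in, rank_approx], cutting the parameter count from dim_out * dim_in
# down to rank_approx * (dim_out + dim_in) when rank_approx is small.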
def fc_prune(
model, blob_in, blob_out, dim_in, dim_out,
weight_init=None, bias_init=None, mask_init=None,
threshold=0.00001, need_compress_rate=False,
comp_lb=0.05,
**kwargs
):
"""FC_Prune version
Runnable so far. Great!:)
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
mask_init = mask_init if mask_init else ('ConstantFill', {})
blob_out = blob_out or model.net.NextName()
compress_rate = blob_out + '_compress_rate'
if model.init_params:
compress_lb = model.param_init_net.ConstantFill(
[],
blob_out + '_lb',
shape=[1],
value=comp_lb
)
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=[dim_out, dim_in],
**weight_init[1]
)
mask = model.param_init_net.ConstantFill(
[],
blob_out + '_m',
shape=[dim_out, dim_in],
value=1.0
)
ag_dw = model.param_init_net.__getattr__(mask_init[0])(
[],
blob_out + '_ag_dw',
shape=[dim_out, dim_in],
**mask_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim_out, ],
**bias_init[1]
)
mask_seq = model.param_init_net.__getattr__(mask_init[0])(
[],
blob_out + '_mask_seq',
shape=[dim_out, dim_in],
**mask_init[1]
)
thres = model.param_init_net.ConstantFill(
[],
blob_out + '_thres',
shape=[1],
value=threshold
)
else:
compress_lb = core.ScopedBlobReference(
blob_out + '_lb', model.param_init_net)
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
mask = core.ScopedBlobReference(
blob_out + '_m', model.param_init_net)
ag_dw = core.ScopedBlobReference(
blob_out + '_ag_dw', model.param_init_net)
mask_seq = core.ScopedBlobReference(
blob_out + '_mask_seq', model.param_init_net)
thres = core.ScopedBlobReference(
blob_out + '_thres', model.param_init_net)
model.AddParameter(weight)
model.AddParameter(bias)
if need_compress_rate:
return model.net.FC_Prune([blob_in, weight, mask, bias, ag_dw, mask_seq,
thres, compress_lb],
[blob_out, compress_rate], **kwargs)
else:
return model.net.FC_Prune([blob_in, weight, mask,
bias, ag_dw, mask_seq,
thres, compress_lb],
blob_out, **kwargs)
def fc_sparse(
model, blob_in, blob_out, w_csr, iw, jw, bias,
**kwargs
):
"""FC_Sparse: Only takes in alocated weights"""
if not (w_csr and iw and jw and bias):
print("Warning...")
model.AddParameter(w_csr)
model.AddParameter(iw)
model.AddParameter(jw)
model.AddParameter(bias)
return model.net.FC_Sparse([blob_in, w_csr, iw, jw, bias],
blob_out, **kwargs)
|
## @package algebra
# Module caffe2.python.helpers.algebra
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def transpose(model, blob_in, blob_out, use_cudnn=False, **kwargs):
"""Transpose."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.Transpose(blob_in, blob_out, **kwargs)
def sum(model, blob_in, blob_out, **kwargs):
"""Sum"""
return model.net.Sum(blob_in, blob_out, **kwargs)
def batch_mat_mul(model, blob_in, blob_out,
enable_tensor_core=False, **kwargs):
if enable_tensor_core:
kwargs['engine'] = 'TENSORCORE'
return model.net.BatchMatMul(blob_in, blob_out, **kwargs)
|
## @package tools
# Module caffe2.python.helpers.tools
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def image_input(
model, blob_in, blob_out, order="NCHW", use_gpu_transform=False, **kwargs
):
assert 'is_test' in kwargs, "Argument 'is_test' is required"
if order == "NCHW":
        if use_gpu_transform:
            kwargs['use_gpu_transform'] = 1 if use_gpu_transform else 0
            # GPU transform will handle NHWC -> NCHW
            outputs = model.net.ImageInput(blob_in, blob_out, **kwargs)
else:
outputs = model.net.ImageInput(
blob_in, [blob_out[0] + '_nhwc'] + blob_out[1:], **kwargs
)
outputs_list = list(outputs)
outputs_list[0] = model.net.NHWC2NCHW(outputs_list[0], blob_out[0])
outputs = tuple(outputs_list)
else:
outputs = model.net.ImageInput(blob_in, blob_out, **kwargs)
return outputs
def video_input(model, blob_in, blob_out, **kwargs):
# size of outputs can vary depending on kwargs
outputs = model.net.VideoInput(blob_in, blob_out, **kwargs)
return outputs
|
## @package pooling
# Module caffe2.python.helpers.pooling
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def max_pool(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs):
"""Max pooling"""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.MaxPool(blob_in, blob_out, order=order, **kwargs)
def average_pool(model, blob_in, blob_out, use_cudnn=False, order="NCHW",
**kwargs):
"""Average pooling"""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.AveragePool(
blob_in,
blob_out,
order=order,
**kwargs
)
def max_pool_with_index(model, blob_in, blob_out, order="NCHW", **kwargs):
"""Max pooling with an explicit index of max position"""
return model.net.MaxPoolWithIndex(
blob_in,
[blob_out, blob_out + "_index"],
order=order,
**kwargs
)[0]
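# For reference, the spatial output size of these pooling ops follows the
# usual sliding-window arithmetic:
#   out = floor((in + 2 * pad - kernel) / stride) + 1
# For example, kernel=2, stride=2, pad=0 halves a 28x28 map to 14x14.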
|
## @package array_helpers
# Module caffe2.python.helpers.array_helpers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def concat(model, blobs_in, blob_out, **kwargs):
"""Depth Concat."""
if kwargs.get('order') and kwargs.get('axis'):
# The backend throws an error if both are given
kwargs.pop('order')
return model.net.Concat(
blobs_in,
[blob_out, "_" + blob_out + "_concat_dims"],
**kwargs
)[0]
def depth_concat(model, blobs_in, blob_out, **kwargs):
"""The old depth concat function - we should move to use concat."""
print("DepthConcat is deprecated. use Concat instead.")
return concat(blobs_in, blob_out, **kwargs)
|
## @package nonlinearity
# Module caffe2.python.helpers.nonlinearity
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
def prelu(model, blob_in, blob_out, num_channels=1, slope_init=None,
**kwargs):
"""PRelu"""
slope_init = (
slope_init if slope_init else ('ConstantFill', {'value': 0.25}))
if model.init_params:
slope = model.param_init_net.__getattr__(slope_init[0])(
[],
blob_out + '_slope',
shape=[num_channels],
**slope_init[1]
)
else:
slope = core.ScopedBlobReference(
blob_out + '_slope', model.param_init_net)
model.AddParameter(slope)
return model.net.PRelu([blob_in, slope], [blob_out])
def relu(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs):
"""Relu."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.Relu(blob_in, blob_out, order=order, **kwargs)
def tanh(model, blob_in, blob_out, use_cudnn=False, order="NCHW", **kwargs):
"""Tanh."""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
return model.net.Tanh(blob_in, blob_out, order=order, **kwargs)
|
## @package train
# Module caffe2.python.helpers.train
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope
from caffe2.proto import caffe2_pb2
def _get_weights(model, namescope=None):
if namescope is None:
namescope = scope.CurrentNameScope()
if namescope == '':
return model.weights[:]
else:
return [w for w in model.weights if w.GetNameScope() == namescope]
def iter(model, blob_out, **kwargs):
if 'device_option' in kwargs:
del kwargs['device_option']
model.param_init_net.ConstantFill(
[],
blob_out,
shape=[1],
value=0,
dtype=core.DataType.INT64,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0),
**kwargs
)
return model.net.Iter(blob_out, blob_out, **kwargs)
def accuracy(model, blob_in, blob_out, **kwargs):
dev = kwargs['device_option'] if 'device_option' in kwargs \
else scope.CurrentDeviceScope()
is_cpu = dev is None or dev.device_type == caffe2_pb2.CPU
# We support top_k > 1 only on CPU
if not is_cpu and 'top_k' in kwargs and kwargs['top_k'] > 1:
pred_host = model.net.CopyGPUToCPU(blob_in[0], blob_in[0] + "_host")
label_host = model.net.CopyGPUToCPU(blob_in[1], blob_in[1] + "_host")
# Now use the Host version of the accuracy op
model.net.Accuracy(
[pred_host, label_host],
blob_out,
device_option=core.DeviceOption(caffe2_pb2.CPU, 0),
**kwargs
)
else:
        model.net.Accuracy(blob_in, blob_out, **kwargs)
def add_weight_decay(model, weight_decay):
"""Adds a decay to weights in the model.
This is a form of L2 regularization.
Args:
weight_decay: strength of the regularization
"""
if weight_decay <= 0.0:
return
wd = model.param_init_net.ConstantFill(
[], 'wd', shape=[1], value=weight_decay
)
ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
for param in _get_weights(model):
# Equivalent to: grad += wd * param
grad = model.param_to_grad[param]
model.net.WeightedSum(
[grad, ONE, param, wd],
grad,
)
|
## @package control_ops
# Module caffe2.python.helpers.control_ops
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.control_ops_util import add_if_op, add_while_op
def cond(model, cond_blob, external_blobs, then_model, else_model=None):
"""Condition"""
add_if_op(
model.net,
cond_blob,
external_blobs,
then_model.net,
else_model.net if else_model else None)
def loop(model, cond_blob, external_blobs, loop_model, cond_model=None):
"""Loop"""
add_while_op(
model.net,
cond_blob,
external_blobs,
loop_model.net,
cond_model.net if cond_model else None)
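# Usage sketch (assumes ModelHelper-like models whose nets read/write the
# listed external blobs; all names here are illustrative):
#   cond(model, 'use_fast_path', ['x', 'y'], then_model, else_model)
#   loop(model, 'keep_going', ['i', 'acc'], body_model)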
|
## @package dropout
# Module caffe2.python.helpers.dropout
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
def dropout(model, blob_in, blob_out, use_cudnn=False, **kwargs):
"""dropout"""
if use_cudnn:
kwargs['engine'] = 'CUDNN'
else:
kwargs['engine'] = 'DEFAULT'
assert 'is_test' in kwargs, "Argument 'is_test' is required"
return model.net.Dropout(
blob_in, [blob_out, "_" + blob_out + "_mask"], **kwargs)[0]
|
## @package conv
# Module caffe2.python.helpers.conv
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from caffe2.python.modeling import initializers
from caffe2.python.modeling.parameter_info import ParameterTags
def _ConvBase(
model,
is_nd,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
float16_compute=False,
**kwargs
):
kernels = []
if is_nd:
if not isinstance(kernel, list):
kernels = [kernel]
else:
kernels = kernel
else:
if isinstance(kernel, list):
            assert len(kernel) == 2, "Conv supports only a 2D kernel."
kernels = kernel
else:
kernels = [kernel] * 2
requested_engine = kwargs.get('engine')
if requested_engine is not None:
if use_cudnn and requested_engine != 'CUDNN':
raise ValueError(
'When use_cudnn=True, the only engine you can specify is '
'"CUDNN"')
elif not use_cudnn and requested_engine == 'CUDNN':
raise ValueError(
'When use_cudnn=False, the only engine you can specify is '
'""')
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
use_bias =\
False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
blob_out = blob_out or model.net.NextName()
weight_shape = [dim_out]
if order == "NCHW":
weight_shape.append(int(dim_in / group))
weight_shape.extend(kernels)
else:
weight_shape.extend(kernels)
weight_shape.append(int(dim_in / group))
WeightInitializer = initializers.update_initializer(
WeightInitializer, weight_init, ("XavierFill", {})
)
BiasInitializer = initializers.update_initializer(
BiasInitializer, bias_init, ("ConstantFill", {})
)
if not model.init_params:
WeightInitializer = initializers.ExternalInitializer()
BiasInitializer = initializers.ExternalInitializer()
weight = model.create_param(
param_name=blob_out + '_w',
shape=weight_shape,
initializer=WeightInitializer,
tags=ParameterTags.WEIGHT
)
if use_bias:
bias = model.create_param(
param_name=blob_out + '_b',
shape=[dim_out, ],
initializer=BiasInitializer,
tags=ParameterTags.BIAS
)
if use_bias:
inputs = [blob_in, weight, bias]
else:
inputs = [blob_in, weight]
if transform_inputs is not None:
transform_inputs(model, blob_out, inputs)
# Enable float 16 compute kernel (relevant for CUDA)
if float16_compute:
kwargs['float16_compute'] = True
# For the operator, we no longer need to provide the no_bias field
# because it can automatically figure this out from the number of
# inputs.
if 'no_bias' in kwargs:
del kwargs['no_bias']
if group != 1:
kwargs['group'] = group
if is_nd:
return model.net.Conv(
inputs,
blob_out,
kernels=kernels,
order=order,
**kwargs)
else:
if isinstance(kernel, list):
return model.net.Conv(
inputs,
blob_out,
kernel_h=kernel[0],
kernel_w=kernel[1],
order=order,
**kwargs)
else:
return model.net.Conv(
inputs,
blob_out,
kernel=kernel,
order=order,
**kwargs)
def conv_nd(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
order="NCHW",
**kwargs
):
"""N-dimensional convolution for inputs with NCHW storage order.
"""
assert order == "NCHW", "ConvNd only supported for NCHW storage."
return _ConvBase(model, True, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, order=order, **kwargs)
def conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
WeightInitializer=None,
BiasInitializer=None,
group=1,
transform_inputs=None,
**kwargs
):
"""2-dimensional convolution.
"""
return _ConvBase(model, False, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init, bias_init, WeightInitializer, BiasInitializer,
group, transform_inputs, **kwargs)
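# For example, conv(model, 'data', 'conv1', dim_in=3, dim_out=16, kernel=3)
# creates 'conv1_w' with shape [16, 3, 3, 3] (NCHW order) plus 'conv1_b'
# with shape [16], and emits a Conv op with kernel=3.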
def conv_transpose(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""ConvTranspose.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
blob_out = blob_out or model.net.NextName()
weight_shape = (
[dim_in, dim_out, kernel, kernel]
if order == "NCHW" else [dim_in, kernel, kernel, dim_out]
)
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_w',
shape=weight_shape,
**weight_init[1]
)
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_b',
shape=[dim_out, ],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_w', model.param_init_net)
bias = core.ScopedBlobReference(
blob_out + '_b', model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
model.AddParameter(bias, ParameterTags.BIAS)
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
return model.net.ConvTranspose(
[blob_in, weight, bias],
blob_out,
kernel=kernel,
order=order,
**kwargs
)
def group_conv(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
**kwargs
):
"""Group Convolution.
This is essentially the same as Conv with a group argument passed in.
We specialize this for backward interface compatibility.
"""
return conv(model, blob_in, blob_out, dim_in, dim_out, kernel,
weight_init=weight_init, bias_init=bias_init,
group=group, **kwargs)
def group_conv_deprecated(
model,
blob_in,
blob_out,
dim_in,
dim_out,
kernel,
weight_init=None,
bias_init=None,
group=1,
use_cudnn=False,
order="NCHW",
cudnn_exhaustive_search=False,
ws_nbytes_limit=None,
**kwargs
):
"""GroupConvolution's deprecated interface.
This is used to simulate a group convolution via split and concat. You
should always use the new group convolution in your new code.
"""
weight_init = weight_init if weight_init else ('XavierFill', {})
bias_init = bias_init if bias_init else ('ConstantFill', {})
use_bias = False if ("no_bias" in kwargs and kwargs["no_bias"]) else True
if use_cudnn:
kwargs['engine'] = 'CUDNN'
kwargs['exhaustive_search'] = cudnn_exhaustive_search
if ws_nbytes_limit:
kwargs['ws_nbytes_limit'] = ws_nbytes_limit
if dim_in % group:
raise ValueError("dim_in should be divisible by group.")
if dim_out % group:
raise ValueError("dim_out should be divisible by group.")
splitted_blobs = model.net.DepthSplit(
blob_in,
['_' + blob_out + '_gconv_split_' + str(i) for i in range(group)],
dimensions=[int(dim_in / group) for i in range(group)],
order=order
)
weight_shape = (
[dim_out / group, dim_in / group, kernel, kernel]
if order == "NCHW" else
[dim_out / group, kernel, kernel, dim_in / group]
)
# Make sure that the shapes are of int format. Especially for py3 where
# int division gives float output.
weight_shape = [int(v) for v in weight_shape]
conv_blobs = []
for i in range(group):
if model.init_params:
weight = model.param_init_net.__getattr__(weight_init[0])(
[],
blob_out + '_gconv_%d_w' % i,
shape=weight_shape,
**weight_init[1]
)
if use_bias:
bias = model.param_init_net.__getattr__(bias_init[0])(
[],
blob_out + '_gconv_%d_b' % i,
shape=[int(dim_out / group)],
**bias_init[1]
)
else:
weight = core.ScopedBlobReference(
blob_out + '_gconv_%d_w' % i, model.param_init_net)
if use_bias:
bias = core.ScopedBlobReference(
blob_out + '_gconv_%d_b' % i, model.param_init_net)
model.AddParameter(weight, ParameterTags.WEIGHT)
if use_bias:
model.AddParameter(bias, ParameterTags.BIAS)
if use_bias:
inputs = [weight, bias]
else:
inputs = [weight]
if 'no_bias' in kwargs:
del kwargs['no_bias']
conv_blobs.append(
splitted_blobs[i].Conv(
inputs,
blob_out + '_gconv_%d' % i,
kernel=kernel,
order=order,
**kwargs
)
)
concat, concat_dims = model.net.Concat(
conv_blobs,
[blob_out,
"_" + blob_out + "_concat_dims"],
order=order
)
return concat
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import contextlib
import copy
import threading
_threadlocal_scope = threading.local()
@contextlib.contextmanager
def arg_scope(single_helper_or_list, **kwargs):
global _threadlocal_scope
if not isinstance(single_helper_or_list, list):
assert callable(single_helper_or_list), \
"arg_scope is only supporting single or a list of helper functions."
single_helper_or_list = [single_helper_or_list]
old_scope = copy.deepcopy(get_current_scope())
for helper in single_helper_or_list:
assert callable(helper), \
"arg_scope is only supporting a list of callable helper functions."
helper_key = helper.__name__
if helper_key not in old_scope:
_threadlocal_scope.current_scope[helper_key] = {}
_threadlocal_scope.current_scope[helper_key].update(kwargs)
yield
_threadlocal_scope.current_scope = old_scope
def get_current_scope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "current_scope"):
_threadlocal_scope.current_scope = {}
return _threadlocal_scope.current_scope
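# A self-contained sketch of how arg_scope threads default kwargs to helpers
# by function name (`my_helper` is illustrative, not a real caffe2 helper):
def _example_arg_scope():
    def my_helper(model, **kwargs):
        return kwargs
    with arg_scope(my_helper, order="NCHW"):
        # Inside the scope, helpers can look up their defaults by name:
        defaults = get_current_scope().get("my_helper", {})
        assert defaults == {"order": "NCHW"}
    # On exit, the previous scope is restored.
    return defaults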
|