from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
import time
def build_net(net_name, cross_socket):
net = core.Net(net_name)
net.Proto().type = "async_scheduling"
numa_device_option = caffe2_pb2.DeviceOption()
numa_device_option.device_type = caffe2_pb2.CPU
numa_device_option.numa_node_id = 0
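    # The fill below is pinned to NUMA node 0; with cross_socket=True the
    # Copy op's output is placed on node 1 instead, so the benchmark measures
    # the extra cost of moving the tensor across the socket interconnect.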
net.XavierFill([], net_name + "/input_blob", shape=[1024, 1024],
device_option=numa_device_option)
if cross_socket:
numa_device_option.numa_node_id = 1
net.Copy(net_name + "/input_blob", net_name + "/output_blob",
device_option=numa_device_option)
return net
def main():
assert workspace.IsNUMAEnabled() and workspace.GetNumNUMANodes() >= 2
single_net = build_net("single_net", False)
cross_net = build_net("cross_net", True)
workspace.CreateNet(single_net)
workspace.CreateNet(cross_net)
for _ in range(4):
t = time.time()
workspace.RunNet(single_net.Name(), 5000)
print("Single socket time:", time.time() - t)
t = time.time()
workspace.RunNet(cross_net.Name(), 5000)
print("Cross socket time:", time.time() - t)
if __name__ == '__main__':
core.GlobalInit(["caffe2", "--caffe2_cpu_numa_enabled=1"])
main()
# ---------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import brew, core, model_helper, rnn_cell
import caffe2.python.workspace as ws
class TestObservers(unittest.TestCase):
def setUp(self):
core.GlobalInit(["python", "caffe2"])
ws.ResetWorkspace()
self.model = model_helper.ModelHelper()
brew.fc(self.model, "data", "y",
dim_in=4, dim_out=2,
weight_init=('ConstantFill', dict(value=1.0)),
bias_init=('ConstantFill', dict(value=0.0)),
axis=0)
ws.FeedBlob("data", np.zeros([4], dtype='float32'))
ws.RunNetOnce(self.model.param_init_net)
ws.CreateNet(self.model.net)
def testObserver(self):
ob = self.model.net.AddObserver("TimeObserver")
ws.RunNet(self.model.net)
print(ob.average_time())
num = self.model.net.NumObservers()
self.model.net.RemoveObserver(ob)
assert(self.model.net.NumObservers() + 1 == num)
@given(
num_layers=st.integers(1, 4),
forward_only=st.booleans()
)
def test_observer_rnn_executor(self, num_layers, forward_only):
'''
        Test that the RNN executor produces the same results as the
        non-executor path (i.e. running step nets as a sequence of simple nets).
'''
Tseq = [2, 3, 4]
batch_size = 10
input_dim = 3
hidden_dim = 3
run_cnt = [0] * len(Tseq)
avg_time = [0] * len(Tseq)
for j in range(len(Tseq)):
T = Tseq[j]
ws.ResetWorkspace()
ws.FeedBlob(
"seq_lengths",
np.array([T] * batch_size, dtype=np.int32)
)
ws.FeedBlob("target", np.random.rand(
T, batch_size, hidden_dim).astype(np.float32))
ws.FeedBlob("hidden_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
ws.FeedBlob("cell_init", np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
model = model_helper.ModelHelper(name="lstm")
model.net.AddExternalInputs(["input"])
init_blobs = []
for i in range(num_layers):
hidden_init, cell_init = model.net.AddExternalInputs(
"hidden_init_{}".format(i),
"cell_init_{}".format(i)
)
init_blobs.extend([hidden_init, cell_init])
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob="input",
seq_lengths="seq_lengths",
initial_states=init_blobs,
dim_in=input_dim,
dim_out=[hidden_dim] * num_layers,
drop_states=True,
forward_only=forward_only,
return_last_layer_only=True,
)
loss = model.AveragedLoss(
model.SquaredL2Distance([output, "target"], "dist"),
"loss"
)
# Add gradient ops
if not forward_only:
model.AddGradientOperators([loss])
# init
for init_blob in init_blobs:
ws.FeedBlob(init_blob, np.zeros(
[1, batch_size, hidden_dim], dtype=np.float32
))
ws.RunNetOnce(model.param_init_net)
# Run with executor
self.enable_rnn_executor(model.net, 1, forward_only)
np.random.seed(10022015)
input_shape = [T, batch_size, input_dim]
ws.FeedBlob(
"input",
np.random.rand(*input_shape).astype(np.float32)
)
ws.FeedBlob(
"target",
np.random.rand(
T,
batch_size,
hidden_dim
).astype(np.float32)
)
ws.CreateNet(model.net, overwrite=True)
time_ob = model.net.AddObserver("TimeObserver")
run_cnt_ob = model.net.AddObserver("RunCountObserver")
ws.RunNet(model.net)
avg_time[j] = time_ob.average_time()
run_cnt[j] = int(''.join(x for x in run_cnt_ob.debug_info() if x.isdigit()))
model.net.RemoveObserver(time_ob)
model.net.RemoveObserver(run_cnt_ob)
print(avg_time)
print(run_cnt)
self.assertTrue(run_cnt[1] > run_cnt[0] and run_cnt[2] > run_cnt[1])
self.assertEqual(run_cnt[1] - run_cnt[0], run_cnt[2] - run_cnt[1])
def enable_rnn_executor(self, net, value, forward_only):
num_found = 0
for op in net.Proto().op:
if op.type.startswith("RecurrentNetwork"):
for arg in op.arg:
if arg.name == 'enable_rnn_executor':
arg.i = value
num_found += 1
        # Sanity check: if someone renames the enable_rnn_executor
        # argument, the loop above silently matches nothing, so fail
        # the test loudly here instead.
self.assertEqual(1 if forward_only else 2, num_found)
# ---------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import unittest
from caffe2.python import core, workspace, tt_core
import caffe2.python.hypothesis_test_util as hu
class TestTTSVD(hu.HypothesisTestCase):
def test_full_tt_svd(self):
size = 256
np.random.seed(1234)
X = np.expand_dims(
np.random.rand(size).astype(np.float32), axis=0)
W = np.random.rand(size, size).astype(np.float32)
b = np.zeros(size).astype(np.float32)
inp_sizes = [4, 4, 4, 4]
out_sizes = [4, 4, 4, 4]
op_fc = core.CreateOperator(
"FC",
["X", "W", "b"],
["Y"],
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("W", W)
workspace.FeedBlob("b", b)
workspace.RunOperatorOnce(op_fc)
Y_fc = workspace.FetchBlob("Y").flatten()
# Testing TT-decomposition with high ranks
full_tt_ranks = [1, 16, 256, 16, 1]
full_cores = tt_core.matrix_to_tt(W, inp_sizes, out_sizes,
full_tt_ranks)
full_op_tt = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=full_tt_ranks,
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("b", b)
workspace.FeedBlob("cores", full_cores)
workspace.RunOperatorOnce(full_op_tt)
Y_full_tt = workspace.FetchBlob("Y").flatten()
assert(len(Y_fc) == len(Y_full_tt))
self.assertAlmostEquals(np.linalg.norm(Y_fc - Y_full_tt), 0, delta=1e-3)
# Testing TT-decomposition with minimal ranks
sparse_tt_ranks = [1, 1, 1, 1, 1]
sparse_cores = tt_core.matrix_to_tt(W, inp_sizes, out_sizes,
sparse_tt_ranks)
sparse_op_tt = core.CreateOperator(
"TT",
["X", "b", "cores"],
["Y"],
inp_sizes=inp_sizes,
out_sizes=out_sizes,
tt_ranks=sparse_tt_ranks,
)
workspace.FeedBlob("X", X)
workspace.FeedBlob("b", b)
workspace.FeedBlob("cores", sparse_cores)
workspace.RunOperatorOnce(sparse_op_tt)
Y_sparse_tt = workspace.FetchBlob("Y").flatten()
assert(len(Y_fc) == len(Y_sparse_tt))
self.assertAlmostEquals(np.linalg.norm(Y_fc - Y_sparse_tt),
39.974, delta=1e-3)
if __name__ == '__main__':
unittest.main()
# ---------------------------------------------------------------------------
## @package lstm_benchmark
# Module caffe2.python.lstm_benchmark
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from caffe2.python import workspace, core, utils, rnn_cell, model_helper
from caffe2.python import recurrent
import argparse
import numpy as np
import time
import logging
logging.basicConfig()
log = logging.getLogger("lstm_bench")
log.setLevel(logging.DEBUG)
def generate_data(T, shape, num_labels, fixed_shape):
'''
Fill a queue with input data
'''
log.info("Generating T={} sequence batches".format(T))
generate_input_init_net = core.Net('generate_input_init')
queue = generate_input_init_net.CreateBlobsQueue(
[], "inputqueue", num_blobs=1, capacity=T,
)
label_queue = generate_input_init_net.CreateBlobsQueue(
[], "labelqueue", num_blobs=1, capacity=T,
)
workspace.RunNetOnce(generate_input_init_net)
generate_input_net = core.Net('generate_input')
generate_input_net.EnqueueBlobs([queue, "scratch"], ["scratch"])
generate_input_net.EnqueueBlobs([label_queue, "label_scr"], ["label_scr"])
np.random.seed(2603)
entry_counts = []
for t in range(T):
if (t % (max(10, T // 10)) == 0):
print("Generating data {}/{}".format(t, T))
# Randomize the seqlength
random_shape = (
[np.random.randint(1, shape[0])] + shape[1:]
if t > 0 and not fixed_shape else shape
)
X = np.random.rand(*random_shape).astype(np.float32)
batch_size = random_shape[1]
L = num_labels * batch_size
labels = (np.random.rand(random_shape[0]) * L).astype(np.int32)
workspace.FeedBlob("scratch", X)
workspace.FeedBlob("label_scr", labels)
workspace.RunNetOnce(generate_input_net.Proto())
entry_counts.append(random_shape[0] * random_shape[1])
log.info("Finished data generation")
return queue, label_queue, entry_counts
def create_model(args, queue, label_queue, input_shape):
model = model_helper.ModelHelper(name="LSTM_bench")
seq_lengths, target = \
model.net.AddExternalInputs(
'seq_lengths',
'target',
)
input_blob = model.net.DequeueBlobs(queue, "input_data")
labels = model.net.DequeueBlobs(label_queue, "label")
init_blobs = []
if args.implementation in ["own", "static", "static_dag"]:
T = None
if "static" in args.implementation:
assert args.fixed_shape, \
"Random input length is not static RNN compatible"
T = args.seq_length
print("Using static RNN of size {}".format(T))
for i in range(args.num_layers):
hidden_init, cell_init = model.net.AddExternalInputs(
"hidden_init_{}".format(i),
"cell_init_{}".format(i)
)
init_blobs.extend([hidden_init, cell_init])
output, last_hidden, _, last_state = rnn_cell.LSTM(
model=model,
input_blob=input_blob,
seq_lengths=seq_lengths,
initial_states=init_blobs,
dim_in=args.input_dim,
dim_out=[args.hidden_dim] * args.num_layers,
scope="lstm1",
memory_optimization=args.memory_optimization,
forward_only=args.forward_only,
drop_states=True,
return_last_layer_only=True,
static_rnn_unroll_size=T,
)
if "dag" in args.implementation:
print("Using DAG net type")
model.net.Proto().type = 'dag'
model.net.Proto().num_workers = 4
elif args.implementation == "cudnn":
# We need to feed a placeholder input so that RecurrentInitOp
# can infer the dimensions.
init_blobs = model.net.AddExternalInputs("hidden_init", "cell_init")
model.param_init_net.ConstantFill([], input_blob, shape=input_shape)
output, last_hidden, _ = rnn_cell.cudnn_LSTM(
model=model,
input_blob=input_blob,
initial_states=init_blobs,
dim_in=args.input_dim,
dim_out=args.hidden_dim,
scope="cudnnlstm",
num_layers=args.num_layers,
)
else:
assert False, "Unknown implementation"
weights = model.net.UniformFill(labels, "weights")
softmax, loss = model.net.SoftmaxWithLoss(
[model.Flatten(output), labels, weights],
['softmax', 'loss'],
)
if not args.forward_only:
model.AddGradientOperators([loss])
# carry states over
for init_blob in init_blobs:
model.net.Copy(last_hidden, init_blob)
sz = args.hidden_dim
if args.implementation == "cudnn":
sz *= args.num_layers
workspace.FeedBlob(init_blob, np.zeros(
[1, args.batch_size, sz], dtype=np.float32
))
if args.rnn_executor:
for op in model.net.Proto().op:
if op.type.startswith('RecurrentNetwork'):
recurrent.set_rnn_executor_config(
op,
num_threads=args.rnn_executor_num_threads,
max_cuda_streams=args.rnn_executor_max_cuda_streams,
)
return model, output
def Caffe2LSTM(args):
T = args.data_size // args.batch_size
input_blob_shape = [args.seq_length, args.batch_size, args.input_dim]
queue, label_queue, entry_counts = generate_data(T // args.seq_length,
input_blob_shape,
args.hidden_dim,
args.fixed_shape)
workspace.FeedBlob(
"seq_lengths",
np.array([args.seq_length] * args.batch_size, dtype=np.int32)
)
model, output = create_model(args, queue, label_queue, input_blob_shape)
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
start_time = time.time()
num_iters = T // args.seq_length
total_iters = 0
# Run the Benchmark
log.info("------ Warming up ------")
workspace.RunNet(model.net.Proto().name)
if (args.gpu):
log.info("Memory stats:")
stats = utils.GetGPUMemoryUsageStats()
log.info("GPU memory:\t{} MB".format(stats['max_total'] / 1024 / 1024))
log.info("------ Starting benchmark ------")
start_time = time.time()
last_time = time.time()
for iteration in range(1, num_iters, args.iters_to_report):
iters_once = min(args.iters_to_report, num_iters - iteration)
total_iters += iters_once
workspace.RunNet(model.net.Proto().name, iters_once)
new_time = time.time()
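        # `// 100 / 10` truncates entries-per-second to thousands with one
        # decimal place, e.g. 123456 EPS is reported as 123.4k.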
log.info(
"Iter: {} / {}. Entries Per Second: {}k.".format(
iteration,
num_iters,
np.sum(entry_counts[iteration:iteration + iters_once]) /
(new_time - last_time) // 100 / 10,
)
)
last_time = new_time
log.info("Done. Total EPS excluding 1st iteration: {}k {}".format(
np.sum(entry_counts[1:]) / (time.time() - start_time) // 100 / 10,
" (with RNN executor)" if args.rnn_executor else "",
))
if (args.gpu):
log.info("Memory stats:")
stats = utils.GetGPUMemoryUsageStats()
log.info("GPU memory:\t{} MB".format(stats['max_total'] / 1024 / 1024))
if (stats['max_total'] != stats['total']):
log.warning(
"Max usage differs from current total usage: {} > {}".
format(stats['max_total'], stats['total'])
)
log.warning("This means that costly deallocations occured.")
return time.time() - start_time
@utils.debug
def Benchmark(args):
return Caffe2LSTM(args)
def GetArgumentParser():
parser = argparse.ArgumentParser(description="LSTM benchmark.")
parser.add_argument(
"--hidden_dim",
type=int,
default=800,
help="Hidden dimension",
)
parser.add_argument(
"--input_dim",
type=int,
default=40,
help="Input dimension",
)
parser.add_argument(
"--batch_size",
type=int,
default=128,
help="The batch size."
)
parser.add_argument(
"--seq_length",
type=int,
default=20,
help="Max sequence length"
)
parser.add_argument(
"--data_size",
type=int,
default=1000000,
help="Number of data points to generate"
)
parser.add_argument(
"--iters_to_report",
type=int,
default=20,
help="Number of iteration to report progress"
)
parser.add_argument(
"--gpu",
action="store_true",
help="Run all on GPU",
)
parser.add_argument(
"--implementation",
type=str,
default="own",
help="'cudnn', 'own', 'static' or 'static_dag'",
)
parser.add_argument(
"--fixed_shape",
action="store_true",
help=("Whether to randomize shape of input batches. "
"Static RNN requires fixed shape"),
)
parser.add_argument(
"--memory_optimization",
action="store_true",
help="Whether to use memory optimized LSTM or not",
)
parser.add_argument(
"--forward_only",
action="store_true",
help="Whether to run only forward pass"
)
parser.add_argument(
"--num_layers",
type=int,
default=1,
help="Number of LSTM layers. All output dimensions are going to be"
"of hidden_dim size",
)
parser.add_argument(
"--rnn_executor",
action="store_true",
help="Whether to use RNN executor"
)
parser.add_argument(
"--rnn_executor_num_threads",
type=int,
default=None,
help="Number of threads used by CPU RNN Executor"
)
parser.add_argument(
"--rnn_executor_max_cuda_streams",
type=int,
default=None,
help="Maximum number of CUDA streams used by RNN executor on GPU"
)
return parser
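# Example invocation (a sketch; all flags are defined in GetArgumentParser
# above, and unrecognized --caffe2_* flags are forwarded to GlobalInit):
#
#   python lstm_benchmark.py --batch_size=64 --seq_length=20 \
#       --implementation=own --fixed_shape --forward_only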
if __name__ == '__main__':
args, extra_args = GetArgumentParser().parse_known_args()
rnn_executor_opt = 1 if args.rnn_executor else 0
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
'--caffe2_print_blob_sizes_at_exit=0',
'--caffe2_rnn_executor={}'.format(rnn_executor_opt),
'--caffe2_gpu_memory_tracking=1'] + extra_args)
device = core.DeviceOption(
caffe2_pb2.CUDA if args.gpu else caffe2_pb2.CPU, 4)
with core.DeviceScope(device):
Benchmark(args)
# ---------------------------------------------------------------------------
## @package tt_core
# Module caffe2.python.tt_core
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
"""
The following methods are various utility methods for using the Tensor-Train
decomposition, or TT-decomposition introduced by I. V. Oseledets (2011) in his
paper (http://epubs.siam.org/doi/abs/10.1137/090752286).
Broadly speaking, these methods are used to replace fully connected layers in
neural networks with Tensor-Train layers introduced by A. Novikov et. al. (2015)
in their paper (http://arxiv.org/abs/1509.06569). More details about each of
the methods are provided in each respective docstring.
"""
def init_tt_cores(inp_sizes, out_sizes, tt_ranks, seed=1234):
"""
Initialize randomized orthogonalized TT-cores.
This method should be used when a TT-layer is trained from scratch. The
sizes of each of the cores are specified by the inp_sizes and out_sizes, and
the respective tt_ranks will dictate the ranks of each of the cores. Note
that a larger set of tt_ranks will result in slower computation but will
result in more accurate approximations. The size of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
    Note that the following relationship between the input lengths is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
Args:
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
seed: integer to seed the random number generator
Returns:
        cores: One-dimensional list of cores concatenated along an axis
"""
np.random.seed(seed)
    # Assert that the sizes of each input are correct
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dims (" + \
str(len(out_sizes)) + ")."
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Initialize the cores array
cores_len = np.sum(
inp_sizes * out_sizes * tt_ranks[1:] * tt_ranks[:-1])
cores = np.zeros(cores_len)
cores_idx = 0
rv = 1
# Compute the full list of cores by computing each individual one
for i in range(inp_sizes.shape[0]):
shape = [tt_ranks[i],
inp_sizes[i],
out_sizes[i],
tt_ranks[i + 1]]
# Precompute the shape of each core
tall_shape = (np.prod(shape[:3]), shape[3])
# Randomly initialize the current core using a normal distribution
curr_core = np.dot(rv, np.random.normal(
0, 1, size=(shape[0], np.prod(shape[1:]))))
curr_core = curr_core.reshape(tall_shape)
# Orthogonalize the initialized current core and append to cores list
if i < inp_sizes.shape[0] - 1:
curr_core, rv = np.linalg.qr(curr_core)
cores[cores_idx:cores_idx +
curr_core.size] = curr_core.flatten()
cores_idx += curr_core.size
    # Normalize the cores with a Glorot-style scaling trick
    glorot_style = (np.prod(inp_sizes) *
                    np.prod(tt_ranks))**(1.0 / inp_sizes.shape[0])
    return (0.1 / glorot_style) * np.array(cores).astype(np.float32)
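def _example_init_tt_cores():
    """
    Illustrative sketch (a hypothetical helper, not part of the original
    module): initialize cores for a 256x256 layer factored as 4x4x4x4 ->
    4x4x4x4 with ranks [1, 4, 4, 4, 1]. The concatenated core vector has
    length sum_i r_i * m_i * n_i * r_{i+1} = 64 + 256 + 256 + 64 = 640.
    """
    cores = init_tt_cores([4, 4, 4, 4], [4, 4, 4, 4], [1, 4, 4, 4, 1])
    assert cores.shape == (640,)
    return cores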
def matrix_to_tt(W, inp_sizes, out_sizes, tt_ranks):
"""
Convert a matrix into the TT-format.
This method will consume a 2D weight matrix such as those used in fully
connected layers in a neural network and will compute the TT-decomposition
of the weight matrix and return the TT-cores of the resulting computation.
This method should be used when converting a trained, fully connected layer,
into a TT-layer for increased speed and decreased parameter size. The size
of the ith core is:
tt_ranks[i] * inp_sizes[i] * out_sizes[i] * tt_ranks[i + 1].
    Note that the following relationship between the input lengths is expected:
len(inp_sizes) == len(out_sizes) == len(tt_ranks) - 1.
We also require that np.prod(inp_sizes) == W.shape[0] and that
np.prod(out_sizes) == W.shape[1].
Args:
W: two-dimensional weight matrix numpy array representing a fully
connected layer to be converted to TT-format; note that the weight
           matrix is transposed before being decomposed because we want to emulate the
X * W^T operation that the FC layer performs.
inp_sizes: list of the input dimensions of the respective cores
out_sizes: list of the output dimensions of the respective cores
tt_ranks: list of the ranks of the respective cores
Returns:
        new_cores: One-dimensional list of cores concatenated along an axis
"""
    # Assert that the sizes of each input are correct
assert(len(inp_sizes) == len(out_sizes)), \
"The number of input dimensions (" + str(len(inp_sizes)) + \
") must be equal to the number of output dimensions (" + \
str(len(out_sizes)) + ")."
assert(len(tt_ranks) == len(inp_sizes) + 1), \
"The number of tt-ranks (" + str(len(tt_ranks)) + ") must be " + \
"one more than the number of input and output dimensions (" + \
str(len(out_sizes)) + ")."
assert(W.shape[0] == np.prod(inp_sizes)), \
"The product of the input sizes (" + str(np.prod(inp_sizes)) + \
") must be equal to first dimension of W (" + str(W.shape[0]) + ")."
assert(W.shape[1] == np.prod(out_sizes)), \
"The product of the output sizes (" + str(np.prod(out_sizes)) + \
") must be equal to second dimension of W (" + str(W.shape[1]) + ")."
# W is transposed so that the multiplication X * W^T can be computed, just
# as it is in the FC layer.
W = W.transpose()
# Convert to numpy arrays
inp_sizes = np.array(inp_sizes)
out_sizes = np.array(out_sizes)
tt_ranks = np.array(tt_ranks)
# Copy the original weight matrix in order to permute and reshape the weight
# matrix. In addition, the inp_sizes and out_sizes are combined to a single
# sizes array to use the tt_svd helper method, which only consumes a single
# sizes array.
W_copy = W.copy()
total_inp_size = inp_sizes.size
W_copy = np.reshape(W_copy, np.concatenate((inp_sizes, out_sizes)))
order = np.repeat(np.arange(0, total_inp_size), 2) + \
np.tile([0, total_inp_size], total_inp_size)
W_copy = np.transpose(W_copy, axes=order)
W_copy = np.reshape(W_copy, inp_sizes * out_sizes)
# Use helper method to convert the W matrix copy into the preliminary
# cores array.
cores = tt_svd(W_copy, inp_sizes * out_sizes, tt_ranks)
# Permute the dimensions of each of the cores to be compatible with the
# TT-layer.
new_cores = np.zeros(cores.shape).astype(np.float32)
idx = 0
for i in range(len(inp_sizes)):
shape = (tt_ranks[i], inp_sizes[i], out_sizes[i], tt_ranks[i + 1])
current_core = cores[idx:idx + np.prod(shape)].reshape(shape)
current_core = current_core.transpose((1, 3, 0, 2))
new_cores[new_cores.shape[0] - idx - np.prod(shape):
new_cores.shape[0] - idx] \
= current_core.flatten()
idx += np.prod(shape)
return new_cores
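def _example_matrix_to_tt():
    """
    Illustrative sketch (a hypothetical helper, not part of the original
    module): decompose a random 256x256 weight matrix with modest TT-ranks.
    The flat core vector has the same length as in _example_init_tt_cores
    above, i.e. 640 entries.
    """
    W = np.random.rand(256, 256).astype(np.float32)
    cores = matrix_to_tt(W, [4, 4, 4, 4], [4, 4, 4, 4], [1, 4, 4, 4, 1])
    assert cores.shape == (640,)
    return cores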
def tt_svd(W, sizes, tt_ranks):
"""
Helper method for the matrix_to_tt() method performing the TT-SVD
decomposition.
Uses the TT-decomposition algorithm to convert a matrix to TT-format using
multiple reduced SVD operations.
Args:
W: two-dimensional weight matrix representing a fully connected layer to
be converted to TT-format preprocessed by the matrix_to_tt() method.
sizes: list of the dimensions of each of the cores
tt_ranks: list of the ranks of the respective cores
Returns:
        cores: One-dimensional list of cores concatenated along an axis
"""
assert(len(tt_ranks) == len(sizes) + 1)
C = W.copy()
total_size = sizes.size
core = np.zeros(np.sum(tt_ranks[:-1] * sizes * tt_ranks[1:]),
dtype='float32')
# Compute iterative reduced SVD operations and store each resulting U matrix
# as an individual core.
pos = 0
for i in range(0, total_size - 1):
shape = tt_ranks[i] * sizes[i]
C = np.reshape(C, [shape, -1])
U, S, V = np.linalg.svd(C, full_matrices=False)
U = U[:, 0:tt_ranks[i + 1]]
S = S[0:tt_ranks[i + 1]]
V = V[0:tt_ranks[i + 1], :]
core[pos:pos + tt_ranks[i] * sizes[i] * tt_ranks[i + 1]] = U.ravel()
pos += tt_ranks[i] * sizes[i] * tt_ranks[i + 1]
C = np.dot(np.diag(S), V)
core[pos:pos + tt_ranks[total_size - 1] *
sizes[total_size - 1] * tt_ranks[total_size]] = C.ravel()
return core
# TODO(Surya) Write a method to convert an entire network where all fully
# connected layers are replaced by a TT layer.
def fc_net_to_tt_net(net):
pass
# ---------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, scope
from caffe2.python.modeling.parameter_sharing import (
ParameterSharing,
)
from caffe2.python.layer_test_util import LayersTestCase
class ParameterSharingTest(LayersTestCase):
def test_layer_parameter_name(self):
output_dims = 2
with scope.NameScope('global_scope'):
fc1_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w, 'global_scope/fc/w')
self.assertEquals(fc1_output(), 'global_scope/fc/output')
with scope.NameScope('nested_scope'):
fc2_output = self.model.FC(
fc1_output,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/nested_scope/fc/w')
self.assertEquals(fc2_output(),
'global_scope/nested_scope/fc/output')
fc3_output = self.model.FC(
fc1_output,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/nested_scope/fc_auto_0/w')
self.assertEquals(fc3_output(),
'global_scope/nested_scope/fc_auto_0/output')
def test_layer_shared_parameter_name_different_namescopes(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'scope_1': 'scope_0'}):
with scope.NameScope('scope_0'):
fc1_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/scope_0/fc/w')
self.assertEquals(fc1_output(),
'global_scope/scope_0/fc/output')
with scope.NameScope('scope_1'):
fc2_output = self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/scope_0/fc/w')
self.assertEquals(fc2_output(),
'global_scope/scope_1/fc/output')
def test_layer_shared_parameter_name_within_same_namescope(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'fc_auto_0': 'fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/fc/w')
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/fc/w')
def test_layer_shared_parameter_name_within_same_namescope_customized_name(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'new_fc': 'shared_fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='shared_fc'
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/shared_fc/w')
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='new_fc'
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/shared_fc/w')
def test_layer_shared_parameter_name_different_shapes(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'fc_auto_0': 'fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims
)
self.assertEquals(self.model.layers[-1].w,
'global_scope/fc/w')
with self.assertRaisesRegexp(ValueError, 'Got inconsistent shapes .*'):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims + 1
)
def test_layer_duplicated_parameter_init(self):
output_dims = 2
with scope.NameScope('global_scope'):
with ParameterSharing({'new_fc': 'shared_fc'}):
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='shared_fc'
)
self.model.FC(
self.model.input_feature_schema.float_features,
output_dims,
name='new_fc'
)
train_init_net = core.Net('train_init_net')
train_net = core.Net('train_net')
for layer in self.model.layers:
layer.add_operators(train_net, train_init_net)
op_outputs = []
for op in train_init_net._net.op:
op_outputs.extend(op.output)
# only fill these parameter blobs once
self.assertEquals(
sorted(op_outputs),
['global_scope/shared_fc/b', 'global_scope/shared_fc/w']
)
# ---------------------------------------------------------------------------
## @package workspace
# Module caffe2.python.workspace
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
from google.protobuf.message import Message
from multiprocessing import Process
import os
from collections import defaultdict
import logging
import numpy as np
from past.builtins import basestring
import shutil
import socket
import tempfile
from caffe2.proto import caffe2_pb2
from caffe2.python import scope, utils
import caffe2.python._import_c_extension as C
logger = logging.getLogger(__name__)
Blobs = C.blobs
CreateBlob = C.create_blob
CurrentWorkspace = C.current_workspace
DeserializeBlob = C.deserialize_blob
GlobalInit = C.global_init
HasBlob = C.has_blob
RegisteredOperators = C.registered_operators
SerializeBlob = C.serialize_blob
SwitchWorkspace = C.switch_workspace
RootFolder = C.root_folder
Workspaces = C.workspaces
BenchmarkNet = C.benchmark_net
GetStats = C.get_stats
operator_tracebacks = defaultdict(dict)
is_asan = C.is_asan
has_gpu_support = C.has_gpu_support
if has_gpu_support:
NumCudaDevices = C.num_cuda_devices
GetCUDAVersion = C.get_cuda_version
GetCuDNNVersion = C.get_cudnn_version
def GetCudaPeerAccessPattern():
return np.asarray(C.get_cuda_peer_access_pattern())
GetDeviceProperties = C.get_device_properties
else:
NumCudaDevices = lambda: 0 # noqa
    GetCUDAVersion = lambda: 0 # noqa
    GetCuDNNVersion = lambda: 0 # noqa
GetCudaPeerAccessPattern = lambda: np.array([]) # noqa
GetDeviceProperties = lambda x: None # noqa
IsNUMAEnabled = C.is_numa_enabled
GetNumNUMANodes = C.get_num_numa_nodes
GetBlobNUMANode = C.get_blob_numa_node
def _GetFreeFlaskPort():
"""Get a free flask port."""
# We will prefer to use 5000. If not, we will then pick a random port.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    result = sock.connect_ex(('127.0.0.1', 5000))
    sock.close()
    if result != 0:
        # Nothing accepted the connection, so port 5000 is free to use.
        return 5000
else:
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
# Race condition: between the interval we close the socket and actually
# start a mint process, another process might have occupied the port. We
# don't do much here as this is mostly for convenience in research
# rather than 24x7 service.
return port
def StartMint(root_folder=None, port=None):
"""Start a mint instance.
TODO(Yangqing): this does not work well under ipython yet. According to
https://github.com/ipython/ipython/issues/5862
writing up some fix is a todo item.
"""
from caffe2.python.mint import app
if root_folder is None:
# Get the root folder from the current workspace
root_folder = C.root_folder()
if port is None:
port = _GetFreeFlaskPort()
process = Process(
target=app.main,
args=(
['-p', str(port), '-r', root_folder],
)
)
process.start()
print('Mint running at http://{}:{}'.format(socket.getfqdn(), port))
return process
def StringifyProto(obj):
"""Stringify a protocol buffer object.
Inputs:
obj: a protocol buffer object, or a Pycaffe2 object that has a Proto()
function.
Outputs:
string: the output protobuf string.
Raises:
      ValueError: if the passed in object is neither a string, a protobuf
        Message, nor an object with a Proto() method.
"""
if isinstance(obj, basestring):
return obj
else:
if isinstance(obj, Message):
# First, see if this object is a protocol buffer, which we can
# simply serialize with the SerializeToString() call.
return obj.SerializeToString()
elif hasattr(obj, 'Proto'):
return obj.Proto().SerializeToString()
else:
raise ValueError("Unexpected argument to StringifyProto of type " +
type(obj).__name__)
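# Sketch of the accepted inputs (names are illustrative): a core.Net exposes
# Proto(), so both forms below serialize to the same bytes:
#
#   from caffe2.python import core
#   net = core.Net("demo")
#   assert StringifyProto(net) == StringifyProto(net.Proto())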
def ResetWorkspace(root_folder=None):
if root_folder is None:
# Reset the workspace, but keep the current root folder setting.
return C.reset_workspace(C.root_folder())
else:
if not os.path.exists(root_folder):
os.makedirs(root_folder)
return C.reset_workspace(root_folder)
def CreateNet(net, overwrite=False, input_blobs=None):
if input_blobs is None:
input_blobs = []
for input_blob in input_blobs:
C.create_blob(input_blob)
return CallWithExceptionIntercept(
C.create_net,
C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net), overwrite,
)
def Predictor(init_net, predict_net):
return C.Predictor(StringifyProto(init_net), StringifyProto(predict_net))
def GetOperatorCost(operator, blobs):
return C.get_operator_cost(StringifyProto(operator), blobs)
def RunOperatorOnce(operator):
return C.run_operator_once(StringifyProto(operator))
def RunOperatorsOnce(operators):
for op in operators:
success = RunOperatorOnce(op)
if not success:
return False
return True
def CallWithExceptionIntercept(func, op_id_fetcher, net_name, *args, **kwargs):
try:
return func(*args, **kwargs)
except Exception:
op_id = op_id_fetcher()
net_tracebacks = operator_tracebacks.get(net_name, None)
print('Original python traceback for operator {} in network `{}` in '
'exception above (most recent call last):'.format(
op_id, net_name))
if net_tracebacks and op_id in net_tracebacks:
tb = net_tracebacks[op_id]
for line in reversed(tb):
print(' File "{}", line {}, in {}'.format(
line[0], line[1], line[2]))
raise
def RunNetOnce(net):
return CallWithExceptionIntercept(
C.run_net_once,
C.Workspace.current._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net),
)
def RunNet(name, num_iter=1, allow_fail=False):
"""Runs a given net.
Inputs:
name: the name of the net, or a reference to the net.
num_iter: number of iterations to run
allow_fail: if True, does not assert on net exec failure but returns False
Returns:
      True on success. On failure an exception is raised, unless allow_fail
      is True, in which case False is returned.
"""
return CallWithExceptionIntercept(
C.run_net,
C.Workspace.current._last_failed_op_net_position,
GetNetName(name),
StringifyNetName(name), num_iter, allow_fail,
)
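# A minimal create-and-run sketch (assuming GlobalInit has been called;
# the net and blob names are illustrative):
#
#   from caffe2.python import core
#   net = core.Net("demo")
#   net.ConstantFill([], "ones", shape=[2, 2], value=1.0)
#   CreateNet(net)
#   RunNet(net.Name(), num_iter=10)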
def RunPlan(plan_or_step):
# TODO(jiayq): refactor core.py/workspace.py to avoid circular deps
import caffe2.python.core as core
if isinstance(plan_or_step, core.ExecutionStep):
plan_or_step = core.Plan(plan_or_step)
return C.run_plan(StringifyProto(plan_or_step))
def InferShapesAndTypes(nets, blob_dimensions=None):
"""Infers the shapes and types for the specified nets.
Inputs:
nets: the list of nets
blob_dimensions (optional): a dictionary of blobs and their dimensions.
If not specified, the workspace blobs are used.
Returns:
A tuple of (shapes, types) dictionaries keyed by blob name.
"""
net_protos = [StringifyProto(n.Proto()) for n in nets]
if blob_dimensions is None:
blobdesc_prototxt = C.infer_shapes_and_types_from_workspace(net_protos)
else:
blobdesc_prototxt = C.infer_shapes_and_types_from_map(
net_protos, blob_dimensions
)
blobdesc_proto = caffe2_pb2.TensorShapes()
blobdesc_proto.ParseFromString(blobdesc_prototxt)
shapes = {}
types = {}
for ts in blobdesc_proto.shapes:
if not ts.unknown_shape:
shapes[ts.name] = list(ts.dims)
types[ts.name] = ts.data_type
return (shapes, types)
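# Sketch (net and blob names are illustrative): infer from explicit input
# dimensions instead of the current workspace contents:
#
#   shapes, types = InferShapesAndTypes([net], blob_dimensions={"data": [64, 4]})
#   shapes["fc/output"]  # e.g. [64, 2]; types maps to TensorProto.DataType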
def _StringifyName(name, expected_type):
if isinstance(name, basestring):
return name
assert type(name).__name__ == expected_type, \
"Expected a string or %s" % expected_type
return str(name)
def StringifyBlobName(name):
return _StringifyName(name, "BlobReference")
def StringifyNetName(name):
return _StringifyName(name, "Net")
def GetNetName(net):
if isinstance(net, basestring):
return net
if type(net).__name__ == "Net":
return net.Name()
if isinstance(net, caffe2_pb2.NetDef):
return net.name
raise Exception("Not a Net object: {}".format(str(net)))
def FeedBlob(name, arr, device_option=None):
"""Feeds a blob into the workspace.
Inputs:
name: the name of the blob.
arr: either a TensorProto object or a numpy array object to be fed into
the workspace.
device_option (optional): the device option to feed the data with.
Returns:
True or False, stating whether the feed is successful.
"""
if type(arr) is caffe2_pb2.TensorProto:
arr = utils.Caffe2TensorToNumpyArray(arr)
if type(arr) is np.ndarray and arr.dtype.kind in 'SU':
# Plain NumPy strings are weird, let's use objects instead
arr = arr.astype(np.object)
if device_option is None:
device_option = scope.CurrentDeviceScope()
if device_option and device_option.device_type == caffe2_pb2.CUDA:
if arr.dtype == np.dtype('float64'):
logger.warning(
"CUDA operators do not support 64-bit doubles, " +
"please use arr.astype(np.float32) or np.int32 for ints." +
" Blob: {}".format(name) +
" type: {}".format(str(arr.dtype))
)
name = StringifyBlobName(name)
if device_option is not None:
return C.feed_blob(name, arr, StringifyProto(device_option))
else:
return C.feed_blob(name, arr)
def FetchBlobs(names):
"""Fetches a list of blobs from the workspace.
Inputs:
names: list of names of blobs - strings or BlobReferences
Returns:
list of fetched blobs
"""
return [FetchBlob(name) for name in names]
def FetchBlob(name):
"""Fetches a blob from the workspace.
Inputs:
name: the name of the blob - a string or a BlobReference
Returns:
Fetched blob (numpy array or string) if successful
"""
return C.fetch_blob(StringifyBlobName(name))
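# A feed/fetch round trip (a sketch; assumes the workspace is initialized):
#
#   FeedBlob("x", np.arange(6, dtype=np.float32).reshape(2, 3))
#   x = FetchBlob("x")
#   assert x.shape == (2, 3)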
def ApplyTransform(transform_key, net):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
Returns:
Transformed NetDef protobuf object.
"""
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
def ApplyTransformIfFaster(transform_key, net, init_net, **kwargs):
"""Apply a Transform to a NetDef protobuf object, and returns the new
transformed NetDef, only if it runs faster than the original.
The runs are performed on the current active workspace (gWorkspace).
You should initialize that workspace before making a call to this function.
Inputs:
transform_key: the name of the transform, as it is stored in the registry
net: a NetDef protobuf object
init_net: The net to initialize the workspace.
warmup_runs (optional):
Determines how many times the net is run before testing.
Will be 5 by default.
main_runs (optional):
Determines how many times the net is run during testing.
Will be 10 by default.
improvement_threshold (optional):
Determines the factor which the new net needs to be faster
in order to replace the old. Will be 1.01 by default.
Returns:
Either a Transformed NetDef protobuf object, or the original netdef.
"""
    warmup_runs = kwargs.get('warmup_runs', 5)
    main_runs = kwargs.get('main_runs', 10)
    improvement_threshold = kwargs.get('improvement_threshold', 1.01)
transformed_net = caffe2_pb2.NetDef()
transformed_str = C.apply_transform_if_faster(
str(transform_key).encode('utf-8'),
net.SerializeToString(),
init_net.SerializeToString(),
warmup_runs,
main_runs,
float(improvement_threshold),
)
transformed_net.ParseFromString(transformed_str)
return transformed_net
def GetNameScope():
"""Return the current namescope string. To be used to fetch blobs"""
return scope.CurrentNameScope()
class _BlobDict(object):
"""Provides python dict compatible way to do fetching and feeding"""
def __getitem__(self, key):
return FetchBlob(key)
def __setitem__(self, key, value):
return FeedBlob(key, value)
def __len__(self):
return len(C.blobs())
def __iter__(self):
return C.blobs().__iter__()
def __contains__(self, item):
return C.has_blob(item)
blobs = _BlobDict()
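# Dict-style access through the `blobs` instance above (a sketch):
#
#   blobs["x"] = np.zeros(4, dtype=np.float32)   # same as FeedBlob("x", ...)
#   arr = blobs["x"]                             # same as FetchBlob("x")
#   assert "x" in blobs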
################################################################################
# Utilities for immediate mode
#
# Caffe2's immediate mode implements the following behavior: between the two
# function calls StartImmediate() and StopImmediate(), for any operator that is
# called through CreateOperator(), we will also run that operator in a workspace
# that is specific to the immediate mode. The user is explicitly expected to
# make sure that these ops have proper inputs and outputs, i.e. one should not
# run an op where an external input is not created or fed.
#
# Users can use FeedImmediate() and FetchImmediate() to interact with blobs
# in the immediate workspace.
#
# Once StopImmediate() is called, all contents in the immediate workspace is
# freed up so one can continue using normal runs.
#
# The immediate mode is solely for debugging purposes and support will be very
# sparse.
################################################################################
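# A minimal immediate-mode session (a sketch; the operator and blob names
# are illustrative, and `core` would need importing at call time):
#
#   from caffe2.python import core
#   StartImmediate(i_know=True)
#   op = core.CreateOperator("ConstantFill", [], ["x"], shape=[2], value=1.0)
#   RunOperatorImmediate(op)
#   print(FetchImmediate("x"))
#   StopImmediate()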
_immediate_mode = False
_immediate_workspace_name = "_CAFFE2_IMMEDIATE"
_immediate_root_folder = ''
def IsImmediate():
return _immediate_mode
@contextlib.contextmanager
def WorkspaceGuard(workspace_name):
    current = CurrentWorkspace()
    SwitchWorkspace(workspace_name, True)
    try:
        yield
    finally:
        # Switch back even if the guarded block raises.
        SwitchWorkspace(current)
def StartImmediate(i_know=False):
global _immediate_mode
global _immediate_root_folder
if IsImmediate():
# already in immediate mode. We will kill the previous one
# and start from fresh.
StopImmediate()
_immediate_mode = True
with WorkspaceGuard(_immediate_workspace_name):
_immediate_root_folder = tempfile.mkdtemp()
ResetWorkspace(_immediate_root_folder)
if i_know:
# if the user doesn't want to see the warning message, sure...
return
print("""
Enabling immediate mode in caffe2 python is an EXTREMELY EXPERIMENTAL
feature and may very easily go wrong. This is because Caffe2 uses a
declarative way of defining operators and models, which is essentially
not meant to run things in an interactive way. Read the following carefully
to make sure that you understand the caveats.
(1) You need to make sure that the sequences of operators you create are
actually runnable sequentially. For example, if you create an op that takes
an input X, somewhere earlier you should have already created X.
(2) Caffe2 immediate uses one single workspace, so if the set of operators
you run are intended to be under different workspaces, they will not run.
To create boundaries between such use cases, you can call StopImmediate()
and StartImmediate() manually to flush out everything no longer needed.
(3) Underlying objects held by the immediate mode may interfere with your
normal run. For example, if there is a leveldb that you opened in immediate
mode and did not close, your main run will fail because leveldb does not
support double opening. Immediate mode may also occupy a lot of memory esp.
on GPUs. Call StopImmediate() as soon as possible when you no longer
need it.
(4) Immediate is designed to be slow. Every immediate call implicitly
creates a temp operator object, runs it, and destroys the operator. This
slow-speed run is by design to discourage abuse. For most use cases other
than debugging, do NOT turn on immediate mode.
(5) If there is anything FATAL happening in the underlying C++ code, the
immediate mode will immediately (pun intended) cause the runtime to crash.
Thus you should use immediate mode with extra care. If you still would
like to, have fun [https://xkcd.com/149/].
""")
def StopImmediate():
"""Stops an immediate mode run."""
# Phew, that was a dangerous ride.
global _immediate_mode
global _immediate_root_folder
if not IsImmediate():
return
with WorkspaceGuard(_immediate_workspace_name):
ResetWorkspace()
shutil.rmtree(_immediate_root_folder)
_immediate_root_folder = ''
_immediate_mode = False
def ImmediateBlobs():
with WorkspaceGuard(_immediate_workspace_name):
return Blobs()
def RunOperatorImmediate(op):
with WorkspaceGuard(_immediate_workspace_name):
RunOperatorOnce(op)
def FetchImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FetchBlob(*args, **kwargs)
def FeedImmediate(*args, **kwargs):
with WorkspaceGuard(_immediate_workspace_name):
return FeedBlob(*args, **kwargs)
# CWorkspace utilities
def _Workspace_create_net_with_exception_intercept(ws, net, overwrite=False):
return CallWithExceptionIntercept(
ws._create_net,
ws._last_failed_op_net_position,
GetNetName(net),
StringifyProto(net), overwrite,
)
C.Workspace.create_net = _Workspace_create_net_with_exception_intercept
def _Workspace_run(ws, obj):
if hasattr(obj, 'Proto'):
obj = obj.Proto()
if isinstance(obj, caffe2_pb2.PlanDef):
return ws._run_plan(obj.SerializeToString())
if isinstance(obj, caffe2_pb2.NetDef):
return CallWithExceptionIntercept(
ws._run_net,
ws._last_failed_op_net_position,
GetNetName(obj),
obj.SerializeToString(),
)
if isinstance(obj, caffe2_pb2.OperatorDef):
return ws._run_operator(obj.SerializeToString())
raise ValueError(
"Don't know how to do Workspace.run() on {}".format(type(obj)))
C.Workspace.run = _Workspace_run
def _Blob_feed(blob, arg, device_option=None):
if device_option is not None:
device_option = StringifyProto(device_option)
return blob._feed(arg, device_option)
C.Blob.feed = _Blob_feed
# ---------------------------------------------------------------------------
## @package net_drawer
# Module caffe2.python.net_drawer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import json
import logging
from collections import defaultdict
from caffe2.python import utils
from future.utils import viewitems
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
try:
import pydot
except ImportError:
logger.info(
'Cannot import pydot, which is required for drawing a network. This '
'can usually be installed in python with "pip install pydot". Also, '
'pydot requires graphviz to convert dot files to pdf: in ubuntu, this '
'can usually be installed with "sudo apt-get install graphviz".'
)
print(
'net_drawer will not run correctly. Please install the correct '
'dependencies.'
)
pydot = None
from caffe2.proto import caffe2_pb2
OP_STYLE = {
'shape': 'box',
'color': '#0F9D58',
'style': 'filled',
'fontcolor': '#FFFFFF'
}
BLOB_STYLE = {'shape': 'octagon'}
def _rectify_operator_and_name(operators_or_net, name):
"""Gets the operators and name for the pydot graph."""
if isinstance(operators_or_net, caffe2_pb2.NetDef):
operators = operators_or_net.op
if name is None:
name = operators_or_net.name
elif hasattr(operators_or_net, 'Proto'):
net = operators_or_net.Proto()
if not isinstance(net, caffe2_pb2.NetDef):
raise RuntimeError(
"Expecting NetDef, but got {}".format(type(net)))
operators = net.op
if name is None:
name = net.name
else:
operators = operators_or_net
if name is None:
name = "unnamed"
return operators, name
def _escape_label(name):
    # json.dumps is a poor man's escaping
return json.dumps(name)
def GetOpNodeProducer(append_output, **kwargs):
def ReallyGetOpNode(op, op_id):
if op.name:
node_name = '%s/%s (op#%d)' % (op.name, op.type, op_id)
else:
node_name = '%s (op#%d)' % (op.type, op_id)
if append_output:
for output_name in op.output:
node_name += '\n' + output_name
return pydot.Node(node_name, **kwargs)
return ReallyGetOpNode
def GetPydotGraph(
operators_or_net,
name=None,
rankdir='LR',
node_producer=None
):
if node_producer is None:
node_producer = GetOpNodeProducer(False, **OP_STYLE)
operators, name = _rectify_operator_and_name(operators_or_net, name)
graph = pydot.Dot(name, rankdir=rankdir)
pydot_nodes = {}
pydot_node_counts = defaultdict(int)
for op_id, op in enumerate(operators):
op_node = node_producer(op, op_id)
graph.add_node(op_node)
for input_name in op.input:
if input_name not in pydot_nodes:
input_node = pydot.Node(
_escape_label(
input_name + str(pydot_node_counts[input_name])),
label=_escape_label(input_name),
**BLOB_STYLE
)
pydot_nodes[input_name] = input_node
else:
input_node = pydot_nodes[input_name]
graph.add_node(input_node)
graph.add_edge(pydot.Edge(input_node, op_node))
for output_name in op.output:
if output_name in pydot_nodes:
                # we are overwriting an existing blob; need to update the count.
pydot_node_counts[output_name] += 1
output_node = pydot.Node(
_escape_label(
output_name + str(pydot_node_counts[output_name])),
label=_escape_label(output_name),
**BLOB_STYLE
)
pydot_nodes[output_name] = output_node
graph.add_node(output_node)
graph.add_edge(pydot.Edge(op_node, output_node))
return graph
def GetPydotGraphMinimal(
operators_or_net,
name=None,
rankdir='LR',
minimal_dependency=False,
node_producer=None,
):
"""Different from GetPydotGraph, hide all blob nodes and only show op nodes.
If minimal_dependency is set as well, for each op, we will only draw the
edges to the minimal necessary ancestors. For example, if op c depends on
op a and b, and op b depends on a, then only the edge b->c will be drawn
because a->c will be implied.
"""
if node_producer is None:
node_producer = GetOpNodeProducer(False, **OP_STYLE)
operators, name = _rectify_operator_and_name(operators_or_net, name)
graph = pydot.Dot(name, rankdir=rankdir)
# blob_parents maps each blob name to its generating op.
blob_parents = {}
# op_ancestry records the ancestors of each op.
op_ancestry = defaultdict(set)
for op_id, op in enumerate(operators):
op_node = node_producer(op, op_id)
graph.add_node(op_node)
# Get parents, and set up op ancestry.
parents = [
blob_parents[input_name] for input_name in op.input
if input_name in blob_parents
]
op_ancestry[op_node].update(parents)
for node in parents:
op_ancestry[op_node].update(op_ancestry[node])
if minimal_dependency:
# only add nodes that do not have transitive ancestry
for node in parents:
if all(
[node not in op_ancestry[other_node]
for other_node in parents]
):
graph.add_edge(pydot.Edge(node, op_node))
else:
# Add all parents to the graph.
for node in parents:
graph.add_edge(pydot.Edge(node, op_node))
# Update blob_parents to reflect that this op created the blobs.
for output_name in op.output:
blob_parents[output_name] = op_node
return graph
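def _example_minimal_graph():
    """
    Illustrative sketch (a hypothetical helper, not part of the original
    module): op c reads the outputs of both a and b, and b already depends
    on a, so with minimal_dependency=True only the b -> c edge is drawn;
    a -> c is implied transitively.
    """
    from caffe2.python import core
    net = core.Net("demo")
    net.ConstantFill([], "x", shape=[1], value=1.0)  # op a
    net.Copy("x", "y")                               # op b, depends on a
    net.Add(["x", "y"], "z")                         # op c, depends on a and b
    return GetPydotGraphMinimal(net, minimal_dependency=True)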
def GetOperatorMapForPlan(plan_def):
operator_map = {}
for net_id, net in enumerate(plan_def.network):
if net.HasField('name'):
operator_map[plan_def.name + "_" + net.name] = net.op
else:
operator_map[plan_def.name + "_network_%d" % net_id] = net.op
return operator_map
def _draw_nets(nets, g):
nodes = []
for i, net in enumerate(nets):
nodes.append(pydot.Node(_escape_label(net)))
g.add_node(nodes[-1])
if i > 0:
g.add_edge(pydot.Edge(nodes[-2], nodes[-1]))
return nodes
def _draw_steps(steps, g, skip_step_edges=False): # noqa
kMaxParallelSteps = 3
def get_label():
label = [step.name + '\n']
if step.report_net:
label.append('Reporter: {}'.format(step.report_net))
if step.should_stop_blob:
label.append('Stopper: {}'.format(step.should_stop_blob))
if step.concurrent_substeps:
label.append('Concurrent')
if step.only_once:
label.append('Once')
return '\n'.join(label)
def substep_edge(start, end):
return pydot.Edge(start, end, arrowhead='dot', style='dashed')
nodes = []
for i, step in enumerate(steps):
parallel = step.concurrent_substeps
nodes.append(pydot.Node(_escape_label(get_label()), **OP_STYLE))
g.add_node(nodes[-1])
if i > 0 and not skip_step_edges:
g.add_edge(pydot.Edge(nodes[-2], nodes[-1]))
if step.network:
sub_nodes = _draw_nets(step.network, g)
elif step.substep:
if parallel:
sub_nodes = _draw_steps(
step.substep[:kMaxParallelSteps], g, skip_step_edges=True)
else:
sub_nodes = _draw_steps(step.substep, g)
else:
raise ValueError('invalid step')
if parallel:
for sn in sub_nodes:
g.add_edge(substep_edge(nodes[-1], sn))
if len(step.substep) > kMaxParallelSteps:
ellipsis = pydot.Node('{} more steps'.format(
len(step.substep) - kMaxParallelSteps), **OP_STYLE)
g.add_node(ellipsis)
g.add_edge(substep_edge(nodes[-1], ellipsis))
else:
g.add_edge(substep_edge(nodes[-1], sub_nodes[0]))
return nodes
def GetPlanGraph(plan_def, name=None, rankdir='TB'):
graph = pydot.Dot(name, rankdir=rankdir)
_draw_steps(plan_def.execution_step, graph)
return graph
def GetGraphInJson(operators_or_net, output_filepath):
operators, _ = _rectify_operator_and_name(operators_or_net, None)
blob_strid_to_node_id = {}
node_name_counts = defaultdict(int)
nodes = []
edges = []
for op_id, op in enumerate(operators):
op_label = op.name + '/' + op.type if op.name else op.type
op_node_id = len(nodes)
nodes.append({
'id': op_node_id,
'label': op_label,
'op_id': op_id,
'type': 'op'
})
for input_name in op.input:
strid = _escape_label(
input_name + str(node_name_counts[input_name]))
if strid not in blob_strid_to_node_id:
input_node = {
'id': len(nodes),
'label': input_name,
'type': 'blob'
}
blob_strid_to_node_id[strid] = len(nodes)
nodes.append(input_node)
else:
input_node = nodes[blob_strid_to_node_id[strid]]
edges.append({
'source': blob_strid_to_node_id[strid],
'target': op_node_id
})
for output_name in op.output:
strid = _escape_label(
output_name + str(node_name_counts[output_name]))
if strid in blob_strid_to_node_id:
# we are overwriting an existing blob. need to update the count.
node_name_counts[output_name] += 1
strid = _escape_label(
output_name + str(node_name_counts[output_name]))
if strid not in blob_strid_to_node_id:
output_node = {
'id': len(nodes),
'label': output_name,
'type': 'blob'
}
blob_strid_to_node_id[strid] = len(nodes)
nodes.append(output_node)
edges.append({
'source': op_node_id,
'target': blob_strid_to_node_id[strid]
})
with open(output_filepath, 'w') as f:
json.dump({'nodes': nodes, 'edges': edges}, f)
# A dummy minimal PNG image used by GetGraphPngSafe as a
# placeholder when rendering fails.
_DummyPngImage = (
b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00\x00'
b'\x01\x01\x00\x00\x00\x007n\xf9$\x00\x00\x00\nIDATx\x9cc`\x00\x00'
b'\x00\x02\x00\x01H\xaf\xa4q\x00\x00\x00\x00IEND\xaeB`\x82')
def GetGraphPngSafe(func, *args, **kwargs):
"""
    Invokes `func` (e.g. GetPydotGraph) with the given args. If anything
    fails, returns an empty placeholder image instead of raising an exception.
"""
try:
graph = func(*args, **kwargs)
if not isinstance(graph, pydot.Dot):
raise ValueError("func is expected to return pydot.Dot")
return graph.create_png()
except Exception as e:
logger.error("Failed to draw graph: {}".format(e))
return _DummyPngImage
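# Defensive rendering sketch (`net` is illustrative): on any pydot/graphviz
# failure the placeholder PNG above is returned instead of an exception:
#
#   png_bytes = GetGraphPngSafe(GetPydotGraph, net, rankdir='LR')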
def main():
parser = argparse.ArgumentParser(description="Caffe2 net drawer.")
parser.add_argument(
"--input",
type=str, required=True,
help="The input protobuf file."
)
parser.add_argument(
"--output_prefix",
type=str, default="",
help="The prefix to be added to the output filename."
)
parser.add_argument(
"--minimal", action="store_true",
help="If set, produce a minimal visualization."
)
parser.add_argument(
"--minimal_dependency", action="store_true",
help="If set, only draw minimal dependency."
)
parser.add_argument(
"--append_output", action="store_true",
help="If set, append the output blobs to the operator names.")
parser.add_argument(
"--rankdir", type=str, default="LR",
help="The rank direction of the pydot graph."
)
args = parser.parse_args()
with open(args.input, 'r') as fid:
content = fid.read()
graphs = utils.GetContentFromProtoString(
content, {
caffe2_pb2.PlanDef: lambda x: GetOperatorMapForPlan(x),
caffe2_pb2.NetDef: lambda x: {x.name: x.op},
}
)
for key, operators in viewitems(graphs):
if args.minimal:
graph = GetPydotGraphMinimal(
operators,
name=key,
rankdir=args.rankdir,
node_producer=GetOpNodeProducer(args.append_output, **OP_STYLE),
minimal_dependency=args.minimal_dependency)
else:
graph = GetPydotGraph(
operators,
name=key,
rankdir=args.rankdir,
node_producer=GetOpNodeProducer(args.append_output, **OP_STYLE))
filename = args.output_prefix + graph.get_name() + '.dot'
graph.write(filename, format='raw')
pdf_filename = filename[:-3] + 'pdf'
try:
graph.write_pdf(pdf_filename)
except Exception:
print(
'Error when writing out the pdf file. Pydot requires graphviz '
'to convert dot files to pdf, and you may not have installed '
'graphviz. On ubuntu this can usually be installed with "sudo '
'apt-get install graphviz". We have generated the .dot file '
'but will not be able to generate pdf file for now.'
)
if __name__ == '__main__':
main()
# ---------------------------------------------------------------------------
## @package control_ops_grad
# Module caffe2.python.control_ops_grad
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
def gen_do_gradient(op, g_output):
"""
    Generates a gradient Do operator, given a forward Do op and a list
    of gradient blobs corresponding to the forward op's outputs.
    Returns a gradient op and a list of blobs corresponding to input gradients.
"""
from caffe2.python.core import BlobReference
subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name = \
_do_op_sanity_check_and_process(op)
assert len(g_output) == len(op.output), \
"Different number of gradient blobs and Do op outputs"
grad_ops, deduped_g_output = dedupe_g_output(op, g_output)
g_output = deduped_g_output
# From the outer net point of view:
# Do is an operator that has some number of inputs and outputs;
# we have to generate a gradient operator that writes into
# corresponding input gradient blobs and has access to inputs, outputs
# and gradient output blobs
# From the inner net point of view:
# Do is an operator with a subnet and blob bindings,
# we need to forward Do's output blob gradients into inner workspace,
# use them to run backward pass generation and forward Do's input blob
# gradients back into outer workspace
op_output = [str(o) for o in op.output]
op_output = op_output[:-1] # remove workspace pointer blob
op_input = [str(i) for i in op.input]
op_input = op_input[:-1] # remove workspace pointer blob
ordered_inner_output_blob_names = [outer_to_inner_map[o] for o in op_output]
backward_pass_initial_grad_map = {}
initial_grad_map = {}
for inner_output_name, outer_grad_output_name in \
zip(ordered_inner_output_blob_names, g_output):
# link inner_output_name to corresponding inner_grad_output_name for
# backward pass generation;
if outer_grad_output_name:
inner_grad_output_name = inner_output_name + "/_DO_OPERATOR_INNER_GRAD_"
backward_pass_initial_grad_map[BlobReference(inner_output_name)] = \
BlobReference(inner_grad_output_name)
initial_grad_map[inner_grad_output_name] = str(outer_grad_output_name)
assert len(initial_grad_map) > 0, "Empty initial gradient map for Do op"
inner_grad_ops, inner_grad_names_map = _gen_subgradient_pass(
subnet, backward_pass_initial_grad_map)
if len(inner_grad_ops) == 0:
return [], []
grad_copy_ops = []
g_input = []
new_op_outputs = []
new_blob_bindings = {}
for outer_input_name in op_input:
inner_input_name = outer_to_inner_map[outer_input_name]
if inner_input_name in inner_grad_names_map:
inner_grad_input_name = inner_grad_names_map[inner_input_name]
outer_grad_input_name = outer_input_name + "_grad"
# It is possible that inner_grad_input_name will need to be
# linked to another outer blob. For example:
#
# // y - param initialized in init_net
# x = ...
# z = ...
# with ops.IfNet(...):
# ops.Add([z, x], y) # inner Do block
# loss = f(..., y, ...)
#
# In this case x, y and z are external for the inner Do block,
# the inputs of the Do block are z and x and the output is y.
# When computing the gradient of input x given the gradient
# of output y it's easy to see that they are equal.
# During the generation of gradient Do operator, we link
# external gradient y (y_grad) to the internal name
# (y/_DO_OPERATOR_INNER_GRAD_) and generate the backward pass
# for the internal Do net. As a result we get gradient operators
# for the gradient Do and gradient map that maps internal Do
# blobs to their computed gradients.
# In this example, gradient map may have blob x linked to
# gradient blob y/_DO_OPERATOR_INNER_GRAD_.
# We should export gradient for x outside of Do, so
# we add a blob mapping from inner gradient blob
# (y/_DO_OPERATOR_INNER_GRAD_) to a new outer name (x_grad).
#
# (Note: since we use transparent blob mapping between outer and
# inner (Do's) workspace, these operations do not involve copying
# but are merely using blobs in outer workspace in the Do's operator
# workspace under (possibly) different names)
#
# At the same time, we need to add a blob mapping from inner name
# y/_DO_OPERATOR_INNER_GRAD_ to the outer blob y_grad
# Hence in this case, we cannot use existing blob mapping scheme
# that requires a bijection between subset of inner blob names and
# a set of all (Do's input and output) outer blob names
# TODO(iliacher): Remove unnecessary blob copying
new_inner_grad_input_name = \
inner_input_name + "/_DO_OPERATOR_INNER_GRAD_COPY_"
grad_copy_ops.append(_prepare_blob_copy_op(
inner_grad_input_name, new_inner_grad_input_name))
new_blob_bindings[new_inner_grad_input_name] = outer_grad_input_name
new_op_outputs.append(outer_grad_input_name)
g_input.append(outer_grad_input_name)
else:
g_input.append(None)
new_op_inputs = []
overwritten_names = set()
saved_local_blob_names = set()
for grad_op in inner_grad_ops:
grad_op_input = [str(i) for i in grad_op.input]
grad_op_output = [str(o) for o in grad_op.output]
for grad_op_input_name in grad_op_input:
if grad_op_input_name in overwritten_names:
continue
# check if this is an external blob
outer_name = inner_to_outer_map.get(grad_op_input_name, None)
if not outer_name:
# check if this is an external gradient blob
outer_name = initial_grad_map.get(grad_op_input_name, None)
if outer_name:
outer_name = str(outer_name)
if outer_name not in new_op_inputs:
new_op_inputs.append(outer_name)
new_blob_bindings[grad_op_input_name] = outer_name
else:
                # this is a local blob, we'll get its value from
                # a saved forward op workspace
saved_local_blob_names.add(grad_op_input_name)
overwritten_names.update(grad_op_output)
# add inner gradient copy ops
inner_grad_ops += grad_copy_ops
gradient_do_def = _prepare_gradient_do_op(
fwd_op=op,
fwd_net=subnet,
grad_ops=inner_grad_ops,
inputs=new_op_inputs,
outputs=new_op_outputs,
blob_bindings=new_blob_bindings,
saved_fwd_blobs=saved_local_blob_names,
workspace_blob_name=workspace_blob_name)
grad_ops.append(gradient_do_def)
_do_op_sanity_check_and_process(gradient_do_def)
return grad_ops, g_input
def dedupe_g_output(op, g_output):
    # When generating a gradient op it's possible to receive the same gradient
    # blob corresponding to different forward op output blobs. The Do operator
    # requires a bijection between inner and outer names, so make sure we
    # deduplicate.
grad_ops = []
deduped_g_output = []
init_grad_map = {}
for output_name, grad_name in zip(op.output, g_output):
if not grad_name:
deduped_g_output.append(grad_name)
continue
if output_name in init_grad_map:
deduped_g_output.append(init_grad_map[output_name])
else:
if grad_name not in init_grad_map.values():
init_grad_map[output_name] = grad_name
deduped_g_output.append(grad_name)
else:
deduped_grad_name = output_name + "_" + grad_name + "_DEDUP"
assert deduped_grad_name not in init_grad_map.values()
grad_copy_op = caffe2_pb2.OperatorDef()
grad_copy_op.type = "Copy"
grad_copy_op.input.extend([grad_name])
grad_copy_op.output.extend([deduped_grad_name])
grad_ops.append(grad_copy_op)
deduped_g_output.append(deduped_grad_name)
init_grad_map[output_name] = deduped_grad_name
return grad_ops, deduped_g_output
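# Example (illustrative): if a forward op has outputs ["y1", "y2"] that both
# receive the same gradient blob "g", dedupe_g_output emits a Copy op
# "g" -> "y2_g_DEDUP" and returns the deduplicated list ["g", "y2_g_DEDUP"],
# restoring a bijection between outputs and their gradients.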
def gen_while_gradient(op, g_output):
"""
Generates gradient While operator
"""
from caffe2.python.core import BlobReference
assert op.type == "While", "Expected While op"
assert len(op.input) > 0, "Expected at least one input in While op"
assert len(op.output) == len(g_output), \
"Different number of gradient blobs and While op outputs"
grad_ops, deduped_g_output = dedupe_g_output(op, g_output)
g_output = deduped_g_output
init_grad_map = {}
op_output = [str(o) for o in op.output]
for output_name, grad_output_name in zip(op_output, g_output):
if grad_output_name:
init_grad_map[BlobReference(output_name)] = \
BlobReference(grad_output_name)
assert len(init_grad_map) > 0, "Empty initial gradient map for While op"
loop_net = _get_net_argument(op, "loop_net")
assert loop_net, "Expected loop subnet in While op"
assert len(loop_net.op) == 1 and loop_net.op[0].type == "Do", \
"Gradient While op requires single Do op as a loop body"
do_op = loop_net.op[0]
do_args = _get_do_arguments(do_op)
assert "reuse_workspace" not in do_args or not do_args["reuse_workspace"], \
"Gradient While op requires Do loop body op without reuse_workspace set"
assert len(do_op.output) > 0, "Expected Do op with at least one output"
workspace_blob = do_op.output[-1]
loop_grad_net, loop_grad_map, loop_input_names, loop_output_names = \
_gen_subnet_gradient(loop_net, init_grad_map)
assert loop_grad_net, "Failed to get gradient net for loop body in While op"
grad_ops += _prepare_gradient_while_ops(
fwd_op=op,
input_names=loop_input_names,
output_names=loop_output_names,
loop_grad_net=loop_grad_net,
workspace_blob=workspace_blob,
init_grad_map=init_grad_map,
loop_grad_map=loop_grad_map)
op_input = [str(i) for i in op.input]
g_input = [loop_grad_map.get(i, None) for i in op_input]
return grad_ops, g_input
# Constructs gradient While op, arguments:
# fwd_op - forward While op
# input_names - input blob names for a gradient op
# output_names - output blob names for a gradient op
# loop_grad_net - gradient loop body net
# workspace_blob - blob that holds forward workspaces stack
# init_grad_map - initial gradient to forward blob map
# loop_grad_map - gradient blob map for loop's body
def _prepare_gradient_while_ops(
fwd_op, input_names, output_names, loop_grad_net, workspace_blob,
init_grad_map, loop_grad_map):
gradient_while_def = caffe2_pb2.OperatorDef()
gradient_while_def.CopyFrom(fwd_op)
if gradient_while_def.name:
gradient_while_def.name += "_grad"
loop_net_arg = caffe2_pb2.Argument()
loop_net_arg.name = "loop_net"
loop_net_arg.n.CopyFrom(loop_grad_net)
cond_net_arg = caffe2_pb2.Argument()
cond_net_arg.name = "cond_net"
from caffe2.python.core import Net, BlobReference
# Construct condition net - check that there're still forward workspaces
# left using HasScope op
cond_net = Net('gradient_loop_cond_net')
cond_init_net = Net('gradient_loop_cond_net_init')
cond_blob = cond_net.NextScopedBlob(cond_net.Name() + '/cond')
cond_init_net.HasScope(workspace_blob, cond_blob)
cond_net.HasScope(workspace_blob, cond_blob)
for blob, init_grad_blob in init_grad_map.items():
blob_name = str(blob)
init_grad_blob_name = str(init_grad_blob)
if blob_name in loop_grad_map and \
loop_grad_map[blob_name] != init_grad_blob_name:
cond_net.Copy(
BlobReference(loop_grad_map[blob_name]), init_grad_blob)
cond_init_net.Copy(
init_grad_blob, BlobReference(loop_grad_map[blob_name]))
cond_net_arg.n.CopyFrom(cond_net.Proto())
del gradient_while_def.arg[:]
gradient_while_def.arg.extend([loop_net_arg, cond_net_arg])
del gradient_while_def.control_input[:]
del gradient_while_def.input[:]
gradient_while_def.input.extend(
[str(cond_blob).encode('utf-8')] + list(input_names))
del gradient_while_def.output[:]
gradient_while_def.output.extend(output_names)
gradient_while_def.is_gradient_op = True
return [o for o in cond_init_net.Proto().op] + [gradient_while_def]
def _get_do_arguments(do_op):
assert do_op.type == "Do", "Expected Do op"
args = {}
for arg in do_op.arg:
if not arg.name:
continue
if arg.name == "net":
assert arg.n, "Expected non empty net argument"
args["net"] = arg.n
elif arg.name == "reuse_workspace":
assert arg.i, "Expected non empty reuse_workspace argument"
args["reuse_workspace"] = bool(arg.i)
elif arg.name == "inner_blobs":
assert arg.strings, "Expected non empty inner_blobs argument"
args["inner_blobs"] = arg.strings
elif arg.name == "outer_blobs_idx":
assert arg.ints, "Expected non empty outer_blobs_idx argument"
args["outer_blobs_idx"] = arg.ints
return args
def gen_if_gradient(op, g_output):
"""
Generates gradient If operator, given forward If op and a list
of gradient blobs corresponding to forward op's outputs
Returns a gradient op and a list of blobs corresponding to input gradients
"""
from caffe2.python.core import BlobReference
assert op.type == "If", "Expected If op"
# first input is the condition blob
assert len(op.input) > 0, "Expected at least one input in If op"
assert len(op.output) == len(g_output), \
"Different number of gradient blobs and If op outputs"
grad_ops, deduped_g_output = dedupe_g_output(op, g_output)
g_output = deduped_g_output
init_grad_map = {} # map from if's output blob to output gradient blob
op_input = [str(i) for i in op.input]
op_output = [str(o) for o in op.output]
for output_name, grad_output_name in zip(op_output, g_output):
if grad_output_name:
init_grad_map[BlobReference(output_name)] = \
BlobReference(grad_output_name)
# shouldn't call without at least one output gradient available
assert len(init_grad_map) > 0, "Empty initial gradient map for If op"
grad_map = {} # map from blob to gradient blob
then_net = _get_net_argument(op, "then_net")
assert then_net, "Expected then subnet in If op"
then_grad_net, then_grad_map, then_input_names, then_output_names = \
_gen_subnet_gradient(then_net, init_grad_map)
assert then_grad_net, "Failed to get gradient net for then in If op"
grad_map.update(then_grad_map)
else_input_names = set()
else_output_names = set()
else_grad_map = {}
else_grad_net = None
else_net = _get_net_argument(op, "else_net")
if else_net:
else_grad_net, else_grad_map, else_input_names, else_output_names = \
_gen_subnet_gradient(else_net, init_grad_map)
assert else_grad_net, "Failed to get gradient net for else in If op"
        # consider the case where the else branch doesn't update a blob's
        # gradient and keeps the original from init_grad_map, while the then
        # branch updates it
for else_blob, else_grad_blob in else_grad_map.items():
if else_blob in then_grad_map:
then_grad_blob = then_grad_map[else_blob]
            # if both the then and else branches have a grad blob name for the
            # same blob and the grad names differ, then one of the branches
            # doesn't use the blob and keeps the original grad blob name in its
            # grad map, while the other branch uses the blob and has a
            # <blob_name>_grad name in its grad map (which might differ from
            # the original grad blob)
if then_grad_blob != else_grad_blob:
init_grad_name = init_grad_map[else_blob] \
if else_blob in init_grad_map else None
if then_grad_blob == init_grad_name:
grad_map[else_blob] = else_grad_blob
elif else_grad_blob == init_grad_name:
grad_map[else_blob] = then_grad_blob
else:
raise "Unexpected grad blob name " + else_blob + ", " + \
else_grad_blob + ", " + then_grad_blob
else:
grad_map[else_blob] = else_grad_blob
# make sure gradients of blobs that were not computed
# by the selected if's branch are initialized with zeros
then_other_output_names = \
then_output_names - (then_output_names & else_output_names)
then_other_grad_output_names = set(
[o for o in then_other_output_names if o in then_grad_map.values()])
zero_then = _gen_grad_zero_init_ops(
init_grad_map, then_grad_map, then_other_grad_output_names)
if else_grad_net:
else_grad_net.op.extend(zero_then)
elif len(zero_then) > 0:
else_grad_net = caffe2_pb2.NetDef()
else_grad_net.CopyFrom(then_grad_net)
if else_grad_net.name:
else_grad_net.name += "_auto_else_zero_blobs_"
del else_grad_net.op[:]
else_grad_net.op.extend(zero_then)
del else_grad_net.external_input[:]
del else_grad_net.external_output[:]
else_other_output_names = \
else_output_names - (then_output_names & else_output_names)
else_other_grad_output_names = set(
[o for o in else_other_output_names if o in else_grad_map.values()])
zero_else = _gen_grad_zero_init_ops(
init_grad_map, else_grad_map, else_other_grad_output_names)
then_grad_net.op.extend(zero_else)
output_names = list(then_output_names | else_output_names)
input_names = then_input_names | else_input_names
# make sure condition blob is the first in the list
    input_names = [op_input[0]] + list(input_names - {op_input[0]})
gradient_if_def = _prepare_gradient_if_op(
fwd_op=op,
input_names=input_names,
output_names=output_names,
then_grad_net=then_grad_net,
else_grad_net=else_grad_net)
g_input = [grad_map.get(i, None) for i in op_input]
return grad_ops + [gradient_if_def], g_input
def _gen_subnet_gradient(subnet, init_grad):
grad_ops, grad_names_map = _gen_subgradient_pass(
subnet, init_grad)
output_names = set()
input_names = set()
for grad_op in grad_ops:
for grad_op_input in grad_op.input:
if str(grad_op_input) not in output_names:
input_names.add(str(grad_op_input))
for grad_op_output in grad_op.output:
output_names.add(str(grad_op_output))
gradient_net_def = caffe2_pb2.NetDef()
gradient_net_def.CopyFrom(subnet)
if gradient_net_def.name:
gradient_net_def.name += "_grad"
del gradient_net_def.op[:]
gradient_net_def.op.extend(grad_ops)
del gradient_net_def.external_input[:]
del gradient_net_def.external_output[:]
return gradient_net_def, grad_names_map, input_names, output_names
def _get_net_argument(op, net_name):
for arg in op.arg:
if arg.name and arg.name == net_name:
assert arg.n, "Expected non empty net argument " + net_name
return arg.n
return None
def _gen_subgradient_pass(subnet, init_grad):
from caffe2.python.core import IR
subnet_ir = IR(subnet.op)
grad_ops, grad_blob_map = \
subnet_ir.GetBackwardPass(init_grad)
grad_names_map = {}
for b, g in grad_blob_map.items():
grad_names_map[str(b)] = str(g)
return grad_ops, grad_names_map
def _do_op_sanity_check_and_process(op):
assert op.type == "Do", "Expected Do op"
subnet = _get_net_argument(op, "net")
assert subnet, "No net argument found in Do op"
inner_blobs = None
outer_blobs_idx = None
for arg in op.arg:
if arg.name and arg.name == "inner_blobs":
assert not inner_blobs, "inner_blobs redefinition"
assert arg.strings and len(arg.strings) > 0, \
"Empty inner_blobs argument in Do op"
inner_blobs = [s.decode('utf-8') for s in arg.strings]
if arg.name and arg.name == "outer_blobs_idx":
assert not outer_blobs_idx, "outer_blobs_idx redefinition"
assert arg.ints and len(arg.ints) > 0, \
"Empty outer_blobs_idx argument in Do op"
outer_blobs_idx = arg.ints
if inner_blobs and outer_blobs_idx:
break
assert inner_blobs, "No inner_blobs argument found in Do op"
assert outer_blobs_idx, "No outer_blobs_idx argument found in Do op"
assert len(inner_blobs) == len(outer_blobs_idx), \
"Arguments inner_blobs and outer_blobs_idx of different length in Do op"
all_inner_blobs = set(inner_blobs)
assert len(all_inner_blobs) == len(inner_blobs), \
"Found duplicates in inner_blobs in Do op"
op_input = [str(i) for i in op.input]
assert len(op_input) > 0, "Expected at least one input blob"
# remove last input blob that holds pointer to workspace
input_workspace_blob_name = op_input[-1]
op_input = op_input[:-1]
op_output = [str(o) for o in op.output]
assert len(op_output) > 0, "Expected at least one output blob"
# remove last output blob that holds pointer to workspace
workspace_blob_name = op_output[-1]
assert input_workspace_blob_name == workspace_blob_name, \
"Expected same input/output workspace blob"
op_output = op_output[:-1]
all_op_input_blob_names = set(op_input)
assert len(all_op_input_blob_names) == len(op_input), \
"Found duplicates in Do op inputs"
all_op_output_blob_names = set(op_output)
assert len(all_op_output_blob_names) == len(op_output), \
"Found duplicates in Do op outputs"
ordered_outer_blob_names = op_input + op_output
all_outer_blob_names = set(ordered_outer_blob_names)
used_outer_blob_names = set()
outer_to_inner_map = {}
inner_to_outer_map = {}
for inner_name, outer_blob_idx in zip(inner_blobs, outer_blobs_idx):
assert outer_blob_idx >= 0 and \
outer_blob_idx < len(ordered_outer_blob_names), \
"Outer blob index is out of bounds in Do op"
outer_name = ordered_outer_blob_names[outer_blob_idx]
assert outer_name not in used_outer_blob_names, \
"Reusage of outer blob name " + outer_name + " in Do op"
used_outer_blob_names.add(outer_name)
outer_to_inner_map[outer_name] = inner_name
inner_to_outer_map[inner_name] = outer_name
assert len(used_outer_blob_names) == len(all_outer_blob_names), \
"Not all outer blob names are used in blob bindings in Do op"
return subnet, outer_to_inner_map, inner_to_outer_map, workspace_blob_name
def _prepare_blob_copy_op(from_name, to_name):
copy_op_def = caffe2_pb2.OperatorDef()
copy_op_def.type = "Copy"
copy_op_def.input.extend([from_name])
copy_op_def.output.extend([to_name])
return copy_op_def
def _prepare_gradient_do_op(
fwd_op, fwd_net, grad_ops, inputs, outputs, blob_bindings, saved_fwd_blobs,
workspace_blob_name):
gradient_net_def = caffe2_pb2.NetDef()
gradient_net_def.CopyFrom(fwd_net)
if gradient_net_def.name:
gradient_net_def.name += "_grad"
del gradient_net_def.op[:]
gradient_net_def.op.extend(grad_ops)
del gradient_net_def.external_input[:]
del gradient_net_def.external_output[:]
gradient_do_def = caffe2_pb2.OperatorDef()
gradient_do_def.CopyFrom(fwd_op)
if gradient_do_def.name and len(gradient_do_def.name) > 0:
gradient_do_def.name += "_grad"
del gradient_do_def.input[:]
gradient_do_def.input.extend(inputs)
# workspace pointer blob
gradient_do_def.input.append(workspace_blob_name)
del gradient_do_def.output[:]
gradient_do_def.output.extend(outputs)
# workspace pointer blob
gradient_do_def.output.append(workspace_blob_name)
net_arg = caffe2_pb2.Argument()
net_arg.name = "net"
net_arg.n.CopyFrom(gradient_net_def)
ordered_new_outer_names = inputs + outputs
inner_blobs = blob_bindings.keys()
new_outer_blobs_idx = [ordered_new_outer_names.index(blob_bindings[b])
for b in inner_blobs]
inner_blobs_arg = caffe2_pb2.Argument()
inner_blobs_arg.name = "inner_blobs"
inner_blobs_arg.strings.extend([b.encode('utf-8') for b in inner_blobs])
outer_blobs_idx_arg = caffe2_pb2.Argument()
outer_blobs_idx_arg.name = "outer_blobs_idx"
outer_blobs_idx_arg.ints.extend(new_outer_blobs_idx)
saved_blobs_arg = caffe2_pb2.Argument()
saved_blobs_arg.name = "saved_fwd_blobs"
saved_blobs_arg.strings.extend(
[b.encode('utf-8') for b in saved_fwd_blobs])
del gradient_do_def.arg[:]
gradient_do_def.arg.extend([
net_arg, inner_blobs_arg, outer_blobs_idx_arg, saved_blobs_arg])
del gradient_do_def.control_input[:]
gradient_do_def.is_gradient_op = True
return gradient_do_def
def _gen_grad_zero_init_ops(init_grad_map, grad_map, grad_output_names):
grad_init_ops = []
for grad_output in grad_output_names:
# get the corresponding output name blob and use it in ConstantFill
# so that grad_output has the same shape
output_name = None
for o, g in grad_map.items():
if g == grad_output:
output_name = o
break
assert output_name, "Unknown gradient output " + grad_output
grad_init_op = None
# make sure that we do not overwrite existing gradients with zeros
if output_name in init_grad_map:
init_grad_name = init_grad_map[output_name]
# in case we use a different gradient blob name, copy gradient
if init_grad_name != grad_output:
grad_init_op = caffe2_pb2.OperatorDef()
grad_init_op.type = "Copy"
grad_init_op.input.extend([str(init_grad_name)])
grad_init_op.output.extend([str(grad_output)])
else:
grad_init_op = caffe2_pb2.OperatorDef()
grad_init_op.type = "ConstantFill"
grad_init_op.input.extend([output_name])
grad_init_op.output.extend([grad_output])
value_arg = caffe2_pb2.Argument()
value_arg.name = "value"
value_arg.f = 0.0
grad_init_op.arg.extend([value_arg])
if grad_init_op:
grad_init_ops.append(grad_init_op)
return grad_init_ops
def _prepare_gradient_if_op(
fwd_op, input_names, output_names, then_grad_net, else_grad_net):
gradient_if_def = caffe2_pb2.OperatorDef()
gradient_if_def.CopyFrom(fwd_op)
del gradient_if_def.input[:]
gradient_if_def.input.extend(input_names)
del gradient_if_def.output[:]
gradient_if_def.output.extend(output_names)
then_net_arg = caffe2_pb2.Argument()
then_net_arg.name = "then_net"
then_net_arg.n.CopyFrom(then_grad_net)
gradient_args = [then_net_arg]
if else_grad_net:
else_net_arg = caffe2_pb2.Argument()
else_net_arg.name = "else_net"
else_net_arg.n.CopyFrom(else_grad_net)
gradient_args.append(else_net_arg)
del gradient_if_def.arg[:]
gradient_if_def.arg.extend(gradient_args)
if gradient_if_def.name:
gradient_if_def.name += "_grad"
del gradient_if_def.control_input[:]
gradient_if_def.is_gradient_op = True
return gradient_if_def
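# Usage sketch (the op proto and gradient list are assumed to come from an
# existing net's backward-pass construction):
#   grad_ops, g_input = gen_if_gradient(if_op, g_output)
#   # grad_ops: OperatorDef protos implementing the backward pass;
#   # g_input[i]: name of the gradient blob for if_op.input[i], or None.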
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import numpy as np
def FakeQuantization8BitsRowwise(data):
min_el = np.min(data, axis=1)
max_el = np.max(data, axis=1)
scale = (max_el - min_el) / 255.
bias = min_el
inv_scale = 1. / scale
data = data.T
data = np.round((data - bias) * inv_scale) * scale + bias
return data.T
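# Round-trip sketch (illustrative): with 8 bits, each row is mapped onto 256
# levels between its min and max, so fake quantization perturbs each element
# by at most half a step, i.e. (max_el - min_el) / (2 * 255):
#   x = np.random.rand(4, 16).astype(np.float32)
#   err = np.abs(FakeQuantization8BitsRowwise(x) - x)
#   bound = (x.max(axis=1) - x.min(axis=1)) / (2. * 255.)
#   assert np.all(err <= bound[:, None] + 1e-6)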
class TestQuantize8bits(hu.HypothesisTestCase):
def test_quantize_op(self):
op = core.CreateOperator(
'FloatToRowwiseQuantized8Bits',
['input_data'],
['quantized_input', 'scale_bias'])
input_data = np.float32(np.asarray([[801., 786, 235.2, 2353.3434],
[5., 11., 9., -2.]]))
workspace.FeedBlob('input_data', input_data)
workspace.RunOperatorOnce(op)
op1 = core.CreateOperator(
'Rowwise8BitQuantizedToFloat',
['quantized_input', 'scale_bias'],
['dequantized_input'])
workspace.RunOperatorOnce(op1)
result = workspace.FetchBlob('dequantized_input')
ground_truth = FakeQuantization8BitsRowwise(input_data)
np.testing.assert_array_almost_equal(
result, ground_truth)
def test_quantize_tensor_with_const_row_op(self):
op = core.CreateOperator(
'FloatToRowwiseQuantized8Bits',
['input_data'],
['quantized_input', 'scale_bias'])
input_data = np.float32(np.asarray([[801., 786, 235.2, 2353.3434],
[9., 9., 9., 9.]]))
workspace.FeedBlob('input_data', input_data)
workspace.RunOperatorOnce(op)
op1 = core.CreateOperator(
'Rowwise8BitQuantizedToFloat',
['quantized_input', 'scale_bias'],
['dequantized_input'])
workspace.RunOperatorOnce(op1)
result = workspace.FetchBlob('dequantized_input')
ground_truth = FakeQuantization8BitsRowwise(input_data)
ground_truth[1, :] = 9.
np.testing.assert_array_almost_equal(
result, ground_truth)
def test_SparseSegmentUint8(self):
init_net = core.Net("init")
net = core.Net("bench")
size = 10**3
isize = 10**2
# input preparation
d = init_net.UniformFill([], shape=[size, 32])
w = init_net.UniformFill([], shape=[isize, ])
i = init_net.UniformIntFill([], shape=[isize], max=size - 1)
i = init_net.Cast([i], to=core.DataType.INT64)
l = init_net.ConstantFill(
[],
['l'],
shape=[isize // 10],
value=10,
dtype=core.DataType.INT32,
)
net.FloatToRowwiseQuantized8Bits([d],
['quantized_data', 'scale_bias'])
net.Rowwise8BitQuantizedToFloat(['quantized_data', 'scale_bias'],
['dequantized_data'])
# SparseLengthsWeightedSum
net.SparseLengthsWeightedSum(['dequantized_data', w, i, l],
['PositionWeighted_0'], engine='fp16')
net.SparseLengthsWeightedSum8BitsRowwise(
['quantized_data', w, i, l, 'scale_bias'],
['PositionWeighted_1'])
# SparseLengthsSum
net.SparseLengthsSum(['dequantized_data', i, l],
['Sum_0'], engine='fp16')
net.SparseLengthsSum8BitsRowwise(
['quantized_data', i, l, 'scale_bias'],
['Sum_1'])
# SparseLengthsWeightedMean
# net.SparseLengthsWeightedMean(['dequantized_data', w, i, l],
# ['WeightedMean_0'])
# net.SparseLengthsWeightedMean8BitsRowwise(
# ['quantized_data', w, i, l, 'scale_bias'],
# ['WeightedMean_1'])
# SparseLengthsMean
net.SparseLengthsMean(['dequantized_data', i, l],
['Mean_0'], engine='fp16')
net.SparseLengthsMean8BitsRowwise(
['quantized_data', i, l, 'scale_bias'],
['Mean_1'])
gathered_w = net.Gather(['quantized_data', i],
engine='fp16')
gathered_scale_bias = net.Gather(['scale_bias', i],
engine='fp16')
net.Rowwise8BitQuantizedToFloat(
[gathered_w, gathered_scale_bias],
'Gathered_1')
net.Gather(['dequantized_data', i], 'Gathered_0')
workspace.GlobalInit(['caffe2', '--caffe2_log_level=0'])
workspace.RunNetOnce(init_net)
workspace.CreateNet(net)
workspace.RunNetOnce(net)
PositionWeighted_1 = workspace.FetchBlob('PositionWeighted_1')
ground_truth_posw = workspace.FetchBlob('PositionWeighted_0')
np.testing.assert_array_almost_equal(PositionWeighted_1,
ground_truth_posw, decimal=5)
Sum_1 = workspace.FetchBlob('Sum_1')
ground_truth_sum = workspace.FetchBlob('Sum_0')
np.testing.assert_array_almost_equal(Sum_1,
ground_truth_sum, decimal=5)
Mean_1 = workspace.FetchBlob('Mean_1')
ground_truth_mean = workspace.FetchBlob('Mean_0')
np.testing.assert_array_almost_equal(Mean_1,
ground_truth_mean, decimal=5)
Gathered_1 = workspace.FetchBlob('Gathered_1')
ground_truth_gathered = workspace.FetchBlob('Gathered_0')
np.testing.assert_array_almost_equal(Gathered_1,
ground_truth_gathered, decimal=5)
|
## @package extension_loader
# Module caffe2.python.extension_loader
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import ctypes
import sys
_set_global_flags = (
hasattr(sys, 'getdlopenflags') and hasattr(sys, 'setdlopenflags'))
@contextlib.contextmanager
def DlopenGuard():
    if _set_global_flags:
        old_flags = sys.getdlopenflags()
        sys.setdlopenflags(old_flags | ctypes.RTLD_GLOBAL)
    try:
        yield
    finally:
        # Restore the original flags even if the guarded block raises.
        if _set_global_flags:
            sys.setdlopenflags(old_flags)
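# Usage sketch (the library name below is hypothetical): temporarily switch to
# RTLD_GLOBAL so that symbols registered by the loaded library are visible to
# other extensions:
#   with DlopenGuard():
#       ctypes.CDLL("libmy_custom_ops.so")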
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.dataio import Reader, ReaderWithLimit, ReaderWithTimeLimit
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.schema import Struct, NewRecord, FeedRecord
from caffe2.python.session import LocalSession
from caffe2.python.task import TaskGroup, final_output, WorkspaceType
from caffe2.python.test_util import TestCase
from caffe2.python.cached_reader import CachedReader
from caffe2.python import core, workspace
from caffe2.python.net_builder import ops
import numpy as np
import os
import shutil
import tempfile
import time
def init_dataset(ws, size=100):
src_init = core.Net('src_init')
with core.NameScope('src'):
src_values = Struct(('label', np.array(range(size))))
src_blobs = NewRecord(src_init, src_values)
src_ds = Dataset(src_blobs)
FeedRecord(src_blobs, src_values, ws)
ws.run(src_init)
return src_ds
def read_all_data(ws, reader, session):
dst_init = core.Net('dst_init')
with core.NameScope('dst'):
dst_ds = Dataset(reader.schema().clone_schema())
dst_ds.init_empty(dst_init)
session.run(dst_init)
with TaskGroup(workspace_type=WorkspaceType.GLOBAL) as tg:
pipe(reader, dst_ds.writer(), num_runtime_threads=8)
session.run(tg)
return ws.blobs[str(dst_ds.content().label())].fetch()
class ReaderWithDelay(Reader):
"""Test reader class that inserts a delay between reading batches."""
def __init__(self, reader, delay):
Reader.__init__(self, schema=reader._schema)
self.reader = reader
self.delay = delay
def setup_ex(self, global_init_net, global_finish_net):
self.reader.setup_ex(global_init_net, global_finish_net)
def read_ex(self, local_init_net, local_finish_net):
read_net = core.Net('reader_body')
def sleep_op(*args, **argd):
time.sleep(self.delay)
read_net.Python(sleep_op)([], [])
return ([read_net], ) + self.reader.read(read_net)
class TestReaderWithLimit(TestCase):
def test_runtime_threads(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
src_ds = init_dataset(ws)
totals = [None] * 3
def proc(rec):
# executed once
with ops.task_init():
counter1 = ops.CreateCounter([], ['global_counter'])
counter2 = ops.CreateCounter([], ['global_counter2'])
counter3 = ops.CreateCounter([], ['global_counter3'])
# executed once per thread
with ops.task_instance_init():
task_counter = ops.CreateCounter([], ['task_counter'])
# executed on each iteration
ops.CountUp(counter1)
ops.CountUp(task_counter)
# executed once per thread
with ops.task_instance_exit():
with ops.loop(ops.RetrieveCount(task_counter)):
ops.CountUp(counter2)
ops.CountUp(counter3)
# executed once
with ops.task_exit():
totals[0] = final_output(ops.RetrieveCount(counter1))
totals[1] = final_output(ops.RetrieveCount(counter2))
totals[2] = final_output(ops.RetrieveCount(counter3))
return rec
# Read full data set from original reader
with TaskGroup() as tg:
pipe(src_ds.reader(), num_runtime_threads=8, processor=proc)
session.run(tg)
self.assertEqual(totals[0].fetch(), 100)
self.assertEqual(totals[1].fetch(), 100)
self.assertEqual(totals[2].fetch(), 8)
# Read with a count-limited reader
with TaskGroup() as tg:
q1 = pipe(src_ds.reader(), num_runtime_threads=2)
q2 = pipe(
ReaderWithLimit(q1.reader(), num_iter=25),
num_runtime_threads=3)
pipe(q2, processor=proc, num_runtime_threads=6)
session.run(tg)
self.assertEqual(totals[0].fetch(), 25)
self.assertEqual(totals[1].fetch(), 25)
self.assertEqual(totals[2].fetch(), 6)
def _test_limit_reader_init_shared(self, size):
ws = workspace.C.Workspace()
session = LocalSession(ws)
# Build test dataset
src_ds = init_dataset(ws, size=size)
        # Create an identically sized empty destination dataset
dst_init = core.Net('dst_init')
with core.NameScope('dst'):
dst_ds = Dataset(src_ds.content().clone_schema())
dst_ds.init_empty(dst_init)
ws.run(dst_init)
return ws, session, src_ds, dst_init, dst_ds
def _test_limit_reader_shared(self, reader_class, size, expected_read_len,
expected_finish, num_threads, read_delay,
**limiter_args):
ws, session, src_ds, dst_init, dst_ds = \
self._test_limit_reader_init_shared(size)
        # Read from the source through the limiter reader under test.
# WorkspaceType.GLOBAL is required because we are fetching
# reader.data_finished() after the TaskGroup finishes.
with TaskGroup(workspace_type=WorkspaceType.GLOBAL) as tg:
if read_delay > 0:
reader = reader_class(ReaderWithDelay(src_ds.reader(),
read_delay),
**limiter_args)
else:
reader = reader_class(src_ds.reader(), **limiter_args)
pipe(reader, dst_ds.writer(), num_runtime_threads=num_threads)
session.run(tg)
read_len = len(sorted(ws.blobs[str(dst_ds.content().label())].fetch()))
self.assertEqual(read_len, expected_read_len)
self.assertEqual(
sorted(ws.blobs[str(dst_ds.content().label())].fetch()),
list(range(expected_read_len))
)
self.assertEqual(ws.blobs[str(reader.data_finished())].fetch(),
expected_finish)
def test_count_limit_reader_without_limit(self):
# No iter count specified, should read all records.
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=100,
expected_finish=True,
num_threads=8,
read_delay=0,
num_iter=None)
def test_count_limit_reader_with_zero_limit(self):
# Zero iter count specified, should read 0 records.
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=0,
expected_finish=False,
num_threads=8,
read_delay=0,
num_iter=0)
def test_count_limit_reader_with_low_limit(self):
# Read with limit smaller than size of dataset
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=10,
expected_finish=False,
num_threads=8,
read_delay=0,
num_iter=10)
def test_count_limit_reader_with_high_limit(self):
# Read with limit larger than size of dataset
self._test_limit_reader_shared(ReaderWithLimit,
size=100,
expected_read_len=100,
expected_finish=True,
num_threads=8,
read_delay=0,
num_iter=110)
def test_time_limit_reader_without_limit(self):
# No duration specified, should read all records.
self._test_limit_reader_shared(ReaderWithTimeLimit,
size=100,
expected_read_len=100,
expected_finish=True,
num_threads=8,
read_delay=0.1,
duration=0)
def test_time_limit_reader_with_short_limit(self):
# Read with insufficient time limit
size = 50
num_threads = 4
sleep_duration = 0.25
duration = 1
expected_read_len = int(round(num_threads * duration / sleep_duration))
# Because the time limit check happens before the delay + read op,
# subtract a little bit of time to ensure we don't get in an extra read
duration = duration - 0.25 * sleep_duration
self._test_limit_reader_shared(ReaderWithTimeLimit,
size=size,
expected_read_len=expected_read_len,
expected_finish=False,
num_threads=num_threads,
read_delay=sleep_duration,
duration=duration)
def test_time_limit_reader_with_long_limit(self):
# Read with ample time limit
self._test_limit_reader_shared(ReaderWithTimeLimit,
size=50,
expected_read_len=50,
expected_finish=True,
num_threads=4,
read_delay=0.25,
duration=6)
def test_cached_reader(self):
ws = workspace.C.Workspace()
session = LocalSession(ws)
def build_source_reader(size):
src_ds = init_dataset(ws, size)
return src_ds.reader()
with tempfile.NamedTemporaryFile(delete=False) as f:
path = f.name
f.close()
os.remove(path)
# Read data for the first time.
cached_reader1 = CachedReader(build_source_reader(100))
init_step = cached_reader1.build_cache(path)
session.run(init_step)
data = read_all_data(ws, cached_reader1, session)
self.assertEqual(sorted(data), list(range(100)))
# Read data from cache.
workspace.ResetWorkspace()
cached_reader2 = CachedReader(build_source_reader(200))
init_step = cached_reader2.build_cache(path)
session.run(init_step)
data = read_all_data(ws, cached_reader2, session)
self.assertEqual(sorted(data), list(range(100)))
shutil.rmtree(path)
# We removed cache so we expect to receive data from original reader
workspace.ResetWorkspace()
cached_reader3 = CachedReader(build_source_reader(300))
init_step = cached_reader3.build_cache(path)
session.run(init_step)
data = read_all_data(ws, cached_reader3, session)
self.assertEqual(sorted(data), list(range(300)))
shutil.rmtree(path)
|
## @package dyndep
# Module caffe2.python.dyndep
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import ctypes
import os
from caffe2.python import core, extension_loader
def InitOpsLibrary(name):
"""Loads a dynamic library that contains custom operators into Caffe2.
Since Caffe2 uses static variable registration, you can optionally load a
separate .so file that contains custom operators and registers that into
the caffe2 core binary. In C++, this is usually done by either declaring
dependency during compilation time, or via dynload. This allows us to do
registration similarly on the Python side.
Args:
name: a name that ends in .so, such as "my_custom_op.so". Otherwise,
the command will simply be ignored.
Returns:
None
"""
if not os.path.exists(name):
# Note(jiayq): if the name does not exist, instead of immediately
# failing we will simply print a warning, deferring failure to the
# time when an actual call is made.
print('Ignoring {} as it is not a valid file.'.format(name))
return
_init_impl(name)
_IMPORTED_DYNDEPS = set()
def GetImportedOpsLibraries():
return _IMPORTED_DYNDEPS
def _init_impl(path):
_IMPORTED_DYNDEPS.add(path)
with extension_loader.DlopenGuard():
ctypes.CDLL(path)
# reinitialize available ops
core.RefreshRegisteredOperators()
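# Usage sketch (the .so path is hypothetical):
#   from caffe2.python import dyndep
#   dyndep.InitOpsLibrary("/path/to/my_custom_ops.so")
#   assert "/path/to/my_custom_ops.so" in dyndep.GetImportedOpsLibraries()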
|
import numpy as np
import unittest
from caffe2.python import core, workspace, test_util
class TestToyRegression(test_util.TestCase):
def testToyRegression(self):
"""Tests a toy regression end to end.
        The test code carries out a simple toy regression of the form
y = 2.0 x1 + 1.5 x2 + 0.5
by randomly generating gaussian inputs and calculating the ground
truth outputs in the net as well. It uses a standard SGD to then
train the parameters.
"""
workspace.ResetWorkspace()
init_net = core.Net("init")
W = init_net.UniformFill([], "W", shape=[1, 2], min=-1., max=1.)
B = init_net.ConstantFill([], "B", shape=[1], value=0.0)
W_gt = init_net.GivenTensorFill(
[], "W_gt", shape=[1, 2], values=[2.0, 1.5])
B_gt = init_net.GivenTensorFill([], "B_gt", shape=[1], values=[0.5])
LR = init_net.ConstantFill([], "LR", shape=[1], value=-0.1)
ONE = init_net.ConstantFill([], "ONE", shape=[1], value=1.)
ITER = init_net.ConstantFill([], "ITER", shape=[1], value=0,
dtype=core.DataType.INT32)
train_net = core.Net("train")
X = train_net.GaussianFill([], "X", shape=[64, 2], mean=0.0, std=1.0)
Y_gt = X.FC([W_gt, B_gt], "Y_gt")
Y_pred = X.FC([W, B], "Y_pred")
dist = train_net.SquaredL2Distance([Y_gt, Y_pred], "dist")
loss = dist.AveragedLoss([], ["loss"])
        # Get gradients for all the computations above. Note that in fact we
        # don't need the gradient of the Y_gt computation, but we'll just
        # leave it there. In many cases, one is expected to load X and Y
        # from disk, so there is really no operator that will calculate the
        # Y_gt input.
input_to_grad = train_net.AddGradientOperators([loss], skip=2)
# updates
train_net.Iter(ITER, ITER)
train_net.LearningRate(ITER, "LR", base_lr=-0.1,
policy="step", stepsize=20, gamma=0.9)
train_net.WeightedSum([W, ONE, input_to_grad[str(W)], LR], W)
train_net.WeightedSum([B, ONE, input_to_grad[str(B)], LR], B)
for blob in [loss, W, B]:
train_net.Print(blob, [])
# the CPU part.
plan = core.Plan("toy_regression")
plan.AddStep(core.ExecutionStep("init", init_net))
plan.AddStep(core.ExecutionStep("train", train_net, 200))
workspace.RunPlan(plan)
W_result = workspace.FetchBlob("W")
B_result = workspace.FetchBlob("B")
np.testing.assert_array_almost_equal(W_result, [[2.0, 1.5]], decimal=2)
np.testing.assert_array_almost_equal(B_result, [0.5], decimal=2)
workspace.ResetWorkspace()
if __name__ == '__main__':
unittest.main()
|
## @package data_workers
# Module caffe2.python.data_workers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
'''
This module provides a python-land multithreaded data input mechanism
for Caffe2 nets.
Basic usage is as follows:
coordinator = data_workers.init_data_input_workers(
net,
["data", "label"],
my_fetch_fun,
batch_size=32,
input_source_name="train",
dont_rebatch=False
)
...
coordinator.start()
First argument is the Caffe2 net (or model helper), and second argument
is list of input blobs that are to be fed.
Argument 'input_source_name' is used to distinguish different sources of data,
such as train or test data. This ensures the data does not get mixed up,
even if two nets share blobs.
To do the actual data loading, one defines a "fetcher function"
that has call signature
my_fetch_fun(worker_id, batch_size)
Optionally, one can define an "init function" that is called once before
threads start, and has call signature:
my_init_fun(data_coordinator, global_coordinator)
If dont_rebatch is set to True, the data input is not batched into equal-sized
chunks; instead, the data provided by the fetchers is used directly.
'batch_columns' can be used to specify which dimension is the batch dimension
for each of the inputs. Default is 0 for all inputs.
'timeout' is the timeout in seconds after which if no data is available, the
net will fail (default 600s = 10 mins).
The fetcher function returns a list of numpy arrays corresponding to the
different input blobs. In the example above, it would return two arrays, one
for the data blob and another for the labels. These arrays can have an
arbitrary number of elements (i.e. they do not need to match the batch size).
The batch size is provided to the function as a hint only.
For example, fetcher function could download images from a remote service or
load random images from a directory on a file system.
For a dummy example, see the data_workers_test unit test.
Note that for data_parallel_models, init_data_input_workers will be called
for each GPU. Note that the 'coordinator' returned by the function is the
same each time.
'''
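# Example fetcher (illustrative only): returns random data and label arrays of
# roughly the hinted batch size; a real fetcher would instead load from disk
# or a remote service.
#   def my_fetch_fun(worker_id, batch_size):
#       data = np.random.rand(batch_size, 3, 32, 32).astype(np.float32)
#       labels = np.random.randint(0, 10, size=batch_size).astype(np.int32)
#       return [data, labels]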
try:
import Queue
except ImportError:
# Py3
import queue as Queue
from itertools import chain
import logging
import threading
import numpy as np
import time
from caffe2.python import workspace, core, scope, utils
from caffe2.proto import caffe2_pb2
from caffe2.python.parallel_workers import Metrics, State, \
WorkerCoordinator, GlobalWorkerCoordinator, Worker, run_worker
log = logging.getLogger("data_workers")
log.setLevel(logging.INFO)
LOG_INT_SECS = 60
def get_worker_ids(num_workers):
return list(range(0, num_workers))
def init_data_input_workers(
net,
input_blob_names,
fetch_fun,
batch_size,
num_worker_threads=2,
input_source_name="train",
max_buffered_batches=800,
init_fun=None,
external_loggers=None,
dont_rebatch=False,
batch_columns=None,
timeout=600
):
global global_coordinator
device_option = scope.CurrentDeviceScope()
if (device_option is None):
device_option = caffe2_pb2.DeviceOption(device_type=caffe2_pb2.CPU)
metrics = Metrics(external_loggers)
batch_feeder = BatchFeeder(
net,
input_blob_names,
batch_size,
device_option,
scope.CurrentNameScope(),
input_source_name,
global_coordinator.get_queue(input_source_name, max_buffered_batches),
metrics,
dont_rebatch,
batch_columns,
timeout=timeout
)
# Create coordinator object
coordinator = WorkerCoordinator(
input_source_name, init_fun, batch_feeder)
# Launch fetch worker threads
worker_ids = [
global_coordinator.get_new_worker_id()
for i in range(num_worker_threads)
]
workers = [
threading.Thread(
target=run_worker,
name="data_workers fetcher id {}".format(worker_id),
args=[coordinator,
DataWorker(coordinator, worker_id, fetch_fun, metrics,
batch_size, batch_feeder)],
) for worker_id in worker_ids
]
workers.append(threading.Thread(
target=enqueuer,
name="Enqueuer {} {}".format(input_source_name, scope.CurrentNameScope()),
args=[coordinator, batch_feeder]))
coordinator._workers = workers
global_coordinator.add(coordinator)
return global_coordinator
class BatchFeeder(State):
def __init__(self, net, input_blob_names, batch_size,
device_option, namescope, input_source_name, queue,
metrics, dont_rebatch, batch_columns, timeout=600):
self._counter = 0
self._input_blob_names = input_blob_names
self._batch_size = batch_size
self._internal_queue = queue
self._queues = []
self._device_option = device_option
self._namescope = namescope
self._timeout = timeout
self._input_source_name = input_source_name
self._c2_queue_capacity = 4
self._create_caffe2_queues(net)
self._create_caffe2_ops(net)
self._inputs = 0
self._prev_seconds = 0
self._last_warning = time.time()
self._dont_rebatch = dont_rebatch
self._init_scratch()
self._metrics = metrics
if batch_columns is None:
batch_columns = [0 for _ in input_blob_names]
self._batch_columns = batch_columns
def start(self):
self._inputs = 0
self._prev_seconds = time.time()
def stop(self):
try:
for q in self._queues:
workspace.RunOperatorOnce(
core.CreateOperator("CloseBlobsQueue", [q], [])
)
finally:
self._log_inputs_per_interval(0, force=True)
def cleanup(self):
utils.ResetBlobs(self._scratch_blob.values())
utils.ResetBlobs(self._scratch_status.values())
def _get(self, data_input_coordinator):
start_time = time.time()
last_warning = time.time()
while data_input_coordinator.is_active():
try:
return self._internal_queue.get(block=True, timeout=0.5)
except Queue.Empty:
if time.time() - last_warning > 10.0:
log.warning("** Data input is slow: (still) no data in {} secs.".format(
time.time() - start_time))
last_warning = time.time()
continue
return None
def _validate_chunk(self, chunk):
if chunk is None:
log.warning("Fetcher function returned None")
return False
assert len(chunk) == len(self._input_blob_names), \
"Expecting data blob for each input"
for d in chunk:
assert isinstance(d, np.ndarray), \
"Fetcher function must return a numpy array"
if not self._dont_rebatch:
j = 1
for d in chunk[1:]:
assert d.shape[self._batch_columns[j]] == \
chunk[0].shape[self._batch_columns[0]], \
"Each returned input must have equal number of samples"
j += 1
if len(chunk) == 0:
log.warning("Worker provided zero length input")
return False
return True
def put(self, chunk, data_input_coordinator):
if not self._validate_chunk(chunk):
return
while data_input_coordinator.is_active():
try:
qsize = self._internal_queue.qsize()
if qsize < 2 and (time.time() - self._last_warning) > LOG_INT_SECS:
log.warning("Warning, data loading lagging behind: " +
"name={}".format(qsize, self._input_source_name))
self._last_warning = time.time()
self._counter += 1
self._internal_queue.put(chunk, block=True, timeout=0.5)
self._log_inputs_per_interval(chunk[0].shape[0])
return
except Queue.Full:
log.debug("Queue full: stalling fetchers...")
continue
def _enqueue_batch_direct(self, data_input_coordinator):
data = self._get(data_input_coordinator)
if data is None:
return
if data_input_coordinator.is_active():
for b, q, c in zip(self._input_blob_names, self._queues, data):
self._enqueue(b, q, c)
def _enqueue_batch(self, data_input_coordinator):
'''
This pulls data from the python-side queue and collects them
into batch-sized pieces, unless dont_rebatch is set to true.
'''
if self._dont_rebatch:
self._enqueue_batch_direct(data_input_coordinator)
return
cur_batch = [np.array([]) for d in self._input_blob_names]
first_batch_col = self._batch_columns[0]
# Collect data until we have a full batch size
while (
cur_batch[0].shape[0] == 0 or
cur_batch[0].shape[first_batch_col] < self._batch_size
) and data_input_coordinator.is_active():
chunk = self._get(data_input_coordinator)
if chunk is None:
continue
for j, chunk_elem in enumerate(chunk):
if cur_batch[j].shape[0] == 0:
cur_batch[j] = chunk_elem.copy()
else:
cur_batch[j] = np.append(
cur_batch[j], chunk_elem, axis=self._batch_columns[j]
)
start_time = time.time()
try:
# Return data over the batch size back to queue
if cur_batch[0].shape[0] > 0 and cur_batch[0].shape[
first_batch_col
] > self._batch_size:
leftover = []
trimmed_batch = []
for j, b in enumerate(cur_batch):
[c, l] = np.split(
b, [self._batch_size], axis=self._batch_columns[j]
)
leftover.append(l)
trimmed_batch.append(c)
cur_batch = trimmed_batch
try:
self._internal_queue.put(leftover, block=False)
except Queue.Full:
pass
assert cur_batch[0].shape[first_batch_col] == self._batch_size
if data_input_coordinator.is_active():
for b, q, c in zip(
self._input_blob_names, self._queues, cur_batch
):
self._enqueue(b, q, c)
finally:
self._metrics.put_metric('enqueue_time', time.time() - start_time)
def _init_scratch(self):
self._scratch_blob = {}
self._scratch_status = {}
for blob_name in self._input_blob_names:
scratch_name = self._namescope + blob_name + \
"_scratch_" + self._input_source_name
self._scratch_blob[blob_name] = core.BlobReference(scratch_name)
self._scratch_status[blob_name] = core.BlobReference(
scratch_name + "_status"
)
        # Feed empty arrays to the scratch blobs here, so that there won't be
        # race conditions when calling FeedBlob (which calls
        # workspace.CreateBlob()) from enqueue threads
for b in chain(
self._scratch_blob.values(), self._scratch_status.values()
):
workspace.FeedBlob(
b,
np.array([]).astype(np.float32),
device_option=self._device_option,
)
def _enqueue(self, blob_name, queue, data_arr):
'''
Enqueue the correctly sized batch arrays to Caffe2's queue.
'''
workspace.FeedBlob(
self._scratch_blob[blob_name],
data_arr,
device_option=self._device_option
)
op = core.CreateOperator(
"SafeEnqueueBlobs",
[queue, self._scratch_blob[blob_name]],
[self._scratch_blob[blob_name], self._scratch_status[blob_name]],
device_option=self._device_option
)
workspace.RunOperatorOnce(op)
def _create_caffe2_queues(self, net):
'''
Creates queues on caffe2 side
'''
def create_queue(queue_name, num_blobs, capacity):
workspace.RunOperatorOnce(
core.CreateOperator(
"CreateBlobsQueue",
[], [queue_name],
num_blobs=1,
capacity=capacity))
return core.ScopedBlobReference(queue_name)
for blob_name in self._input_blob_names:
qname = blob_name + "_c2queue" + "_" + self._input_source_name
q = create_queue(
qname, num_blobs=1, capacity=self._c2_queue_capacity
)
self._queues.append(q)
def _create_caffe2_ops(self, net):
'''
Creates dequeue-ops on caffe2 side
'''
for q, blob_name in zip(self._queues, self._input_blob_names):
# Add operator to the Caffe2 network to dequeue
net.DequeueBlobs(q, blob_name, timeout_secs=float(self._timeout))
def _log_inputs_per_interval(self, inputs, force=False):
self._inputs += inputs
current_seconds = time.time()
delta_seconds = current_seconds - self._prev_seconds
if delta_seconds >= LOG_INT_SECS or force:
inputs_per_sec = int(self._inputs / delta_seconds)
qsize = self._internal_queue.qsize()
log.info("{}/{}: {} inputs/sec".format(
self._input_source_name,
self._namescope,
inputs_per_sec,
))
log.info("-- queue: {} batches".format(qsize))
# log and reset perf metrics
self._metrics.put_metric(
'inputs_per_sec', inputs_per_sec, False)
self._metrics.put_metric('queue_size', qsize, False)
self._metrics.put_metric(
'time_elapsed', delta_seconds, False)
self._metrics.log_metrics()
self._metrics.reset_metrics()
self._inputs = 0
self._prev_seconds = current_seconds
class GlobalCoordinator(GlobalWorkerCoordinator):
def __init__(self):
GlobalWorkerCoordinator.__init__(self)
self._queues = {}
def get_queue(self, queue_name, max_buffered_batches):
assert isinstance(max_buffered_batches, int)
if queue_name not in self._queues:
self._queues[queue_name] = Queue.Queue(maxsize=max_buffered_batches)
return self._queues[queue_name]
def reset_data_input(self, namescope, name, net, batch_size):
log.info("Reset data input {}, batch size {}: ".format(name, batch_size))
for c in self._coordinators:
if c._worker_name == name and c._state._namescope == namescope:
c._state._batch_size = batch_size
c._state._create_caffe2_ops(net)
class DataWorker(Worker):
def __init__(
self,
coordinator,
worker_id,
worker_fun,
metrics,
batch_size,
batch_feeder
):
Worker.__init__(self, coordinator, worker_id, worker_fun=worker_fun,
metrics=metrics)
self._batch_size = batch_size
self._batch_feeder = batch_feeder
def run(self):
input_data = self._worker_fun(self._worker_id, self._batch_size)
self._batch_feeder.put(input_data, self._coordinator)
def finish(self):
self._metrics.put_metric(
'fetcher_time', time.time() - self._start_time)
global_coordinator = GlobalCoordinator()
def enqueuer(coordinator, batch_feeder):
while coordinator.is_active():
batch_feeder._enqueue_batch(coordinator)
|
## @package data_parallel_model
# Module caffe2.python.data_parallel_model
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
from future.utils import viewitems, viewkeys, viewvalues
import logging
import copy
from caffe2.python import \
model_helper, dyndep, scope, workspace, core, memonger, utils
from caffe2.proto import caffe2_pb2
import numpy as np
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/nccl:nccl_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops")
dyndep.InitOpsLibrary("@/caffe2/caffe2/contrib/gloo:gloo_ops_gpu")
log = logging.getLogger("data_parallel_model")
log.setLevel(logging.INFO)
_DEFAULT_TIMEOUT_SEC = 30
def Parallelize_GPU(*args, **kwargs):
kwargs['cpu_device'] = False
Parallelize(*args, **kwargs)
def Parallelize_CPU(*args, **kwargs):
kwargs['cpu_device'] = True
Parallelize(*args, **kwargs)
def Parallelize(
model_helper_obj,
input_builder_fun,
forward_pass_builder_fun,
param_update_builder_fun=None,
optimizer_builder_fun=None,
post_sync_builder_fun=None,
devices=None,
rendezvous=None,
net_type='dag',
broadcast_computed_params=True,
optimize_gradient_memory=False,
dynamic_memory_management=False,
blobs_to_keep=None,
use_nccl=False,
max_concurrent_distributed_ops=16,
cpu_device=False,
num_threads_per_device=4,
shared_model=False,
combine_spatial_bn=False,
):
'''
Function to create a model that can run on many GPUs or CPUs.
model_helper_obj: an object of ModelHelper
input_builder_fun:
Function that adds the input operators
Note: Remember to instantiate reader outside of this
function so all devices share same reader object.
Signature: input_builder_fun(model)
forward_pass_builder_fun:
Function to add the operators to the model.
Must return list of loss-blob references that
are used to build the gradient. Loss scale parameter
is passed, as you should scale the loss of your model
by 1.0 / the total number of devices.
Signature: forward_pass_builder_fun(model, loss_scale)
param_update_builder_fun:
Function that adds operators that are run after
gradient update, such as updating the weights and
weight decaying. This is called for each GPU separately.
Signature: param_update_builder_fun(model)
optimizer_builder_fun:
Alternative to param_update_builder_fun, allows one
to add an optimizer for the whole model. Called only
once, without name or devicescope.
post_sync_builder_fun:
Function applied after initial parameter sync has been
completed, such as keeping multi-precision parameters
in sync.
Signature: post_sync_builder_fun(model)
devices: List of GPU ids, such as [0, 1, 2, 3],
rendezvous: used for rendezvous in distributed computation, if None
then only one node is used. To create rendezvous,
use <TBD>.
net_type: Network type
optimize_gradient_memory: whether to apply 'memonger' to share blobs
      shared_model (only for CPU): use the same parameters on each device
                        in gradient computation to reduce memory footprint.
dynamic_memory_management: Whether to apply dynamic memory optimization
by freeing unused blobs. The underlying (de)allocation
uses cached allocator. For GPU training PLEASE MAKE SURE
caffe2_cuda_memory_pool is set.
      blobs_to_keep: A list of blob names to keep and not free during
                        dynamic memory optimization (for example, the loss blob).
      cpu_device: Use CPU instead of GPU.
combine_spatial_bn:
When set to True, applies batch normalization across
all devices within the node. If False, batch
normalization will be done separately for each device.
This option is currently only supported on the CPU.
'''
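    # Usage sketch (builder functions and device ids are illustrative):
    #   model = model_helper.ModelHelper(name="my_model")
    #   Parallelize_GPU(
    #       model,
    #       input_builder_fun=add_inputs,          # adds per-device readers
    #       forward_pass_builder_fun=add_forward,  # returns list of loss blobs
    #       optimizer_builder_fun=add_optimizer,
    #       devices=[0, 1, 2, 3],
    #   )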
assert scope.CurrentDeviceScope() is None \
or scope.CurrentDeviceScope().device_type == caffe2_pb2.CPU, \
"Parallelize must be called without device-scope, \
device scope was: {}".format(scope.CurrentDeviceScope())
if devices is None:
        devices = list(range(0, workspace.NumCudaDevices()))
if not cpu_device:
for gpu in devices:
if gpu >= workspace.NumCudaDevices():
log.warning("** Only {} GPUs available, GPUs {} requested".format(
workspace.NumCudaDevices(), devices))
break
model_helper_obj._device_type = caffe2_pb2.CUDA
model_helper_obj._device_prefix = "gpu"
model_helper_obj._shared_model = False
device_name = "GPU"
assert shared_model is False, "Shared model only supported on CPU"
else:
model_helper_obj._device_type = caffe2_pb2.CPU
model_helper_obj._device_prefix = "cpu"
device_name = "CPU"
model_helper_obj._shared_model = shared_model
        if shared_model and rendezvous is not None:
            raise AssertionError(
                "Shared model is only supported on a single node currently")
log.info("Parallelizing model for devices: {}".format(devices))
extra_workers = 8 if rendezvous is not None else 0 # best-guess
num_workers = len(devices) * num_threads_per_device + extra_workers
max_concurrent_distributed_ops =\
min(max_concurrent_distributed_ops, num_workers - 1)
model_helper_obj.net.Proto().num_workers = num_workers
model_helper_obj.net.Proto().type = net_type
# Store some information in the model -- a bit ugly
model_helper_obj._devices = devices
model_helper_obj._rendezvous = rendezvous
model_helper_obj._broadcast_context = None
model_helper_obj._grad_names = []
assert isinstance(model_helper_obj, model_helper.ModelHelper)
# Keep track of params that were in the model before: they are not
# data parallel, so we need to handle them separately
non_datapar_params = copy.copy(model_helper_obj.params)
# Add input and model
log.info("Create input and model training operators")
losses_by_gpu = {}
num_shards = 1 if rendezvous is None else rendezvous['num_shards']
loss_scale = 1.0 / (len(devices) * num_shards)
has_parameter_updates = param_update_builder_fun is not None or \
optimizer_builder_fun is not None
assert not (
param_update_builder_fun is not None and
optimizer_builder_fun is not None
), 'Can only specify one of param_update_builder_fun, optimizer_builder_fun'
# Check that a model that is used for validation/testing has
# init_params False; otherwise, running the param init net will overwrite
# values that were synchronized by the training net.
if not has_parameter_updates and model_helper_obj.init_params:
log.warning('')
log.warning("############# WARNING #############")
log.warning("Model {}/{} is used for testing/validation but".format(
model_helper_obj.name, model_helper_obj))
log.warning("has init_params=True!")
log.warning("This can conflict with model training.")
log.warning("Please ensure model = ModelHelper(init_params=False)")
log.warning('####################################')
log.warning('')
# TODO: make into assert
for device in devices:
device_opt = core.DeviceOption(model_helper_obj._device_type, device)
with core.DeviceScope(device_opt):
with core.NameScope("{}_{}".format(model_helper_obj._device_prefix,
device)):
log.info("Model for {} : {}".format(device_name, device))
input_builder_fun(model_helper_obj)
losses = forward_pass_builder_fun(model_helper_obj, loss_scale)
# Losses are not needed for test net
if has_parameter_updates:
assert isinstance(losses, list), \
'Model builder function must return list of loss blobs'
for loss in losses:
assert isinstance(loss, core.BlobReference), \
'Model builder func must return list of loss blobs'
losses_by_gpu[device] = losses
_ValidateParams(model_helper_obj.params)
# Create parameter map
model_helper_obj._device_grouped_blobs =\
_GroupByDevice(model_helper_obj, devices,
model_helper_obj.params, non_datapar_params)
# computed params
computed_params_grouped =\
_GroupByDevice(model_helper_obj, devices,
model_helper_obj.GetComputedParams(''), [])
model_helper_obj._device_grouped_blobs.update(computed_params_grouped)
model_helper_obj._param_names =\
list(viewkeys(model_helper_obj._device_grouped_blobs))
model_helper_obj._computed_param_names =\
list(viewkeys(computed_params_grouped))
if not has_parameter_updates:
log.info("Parameter update function not defined --> only forward")
_InferBlobDevice(model_helper_obj)
return
log.info("Adding gradient operators")
_AddGradientOperators(devices, model_helper_obj, losses_by_gpu)
if combine_spatial_bn:
assert(cpu_device), \
'combine_spatial_bn is currently only supported on the CPU'
assert(has_parameter_updates), \
'combine_spatial_bn should only be used for train model'
_InterleaveOps(model_helper_obj)
_InterDeviceBatchNormalization(model_helper_obj)
_ValidateParams(model_helper_obj.params)
# Group gradients by device and register to blob lookup
param_to_grad = model_helper_obj.param_to_grad
grads_ordered = [param_to_grad[p] for p in
model_helper_obj.params if p in param_to_grad]
non_datapar_grads = [param_to_grad[p] for p in non_datapar_params]
gradients_grouped = _GroupByDevice(
model_helper_obj,
devices,
grads_ordered,
non_datapar_grads
)
model_helper_obj._device_grouped_blobs.update(gradients_grouped)
model_helper_obj._grad_names = list(viewkeys(gradients_grouped))
model_helper_obj._losses_by_gpu = losses_by_gpu
_InferBlobDevice(model_helper_obj)
log.info("Add gradient all-reduces for SyncSGD")
if broadcast_computed_params:
_BroadcastComputedParams(devices, model_helper_obj, rendezvous, use_nccl)
if len(model_helper_obj._grad_names) > 0:
# Gradients in reverse order
reverse_ordered_grads = _GetReverseOrderedGrads(model_helper_obj)
assert(len(reverse_ordered_grads) > 0)
_AllReduceBlobs(
reverse_ordered_grads,
devices,
model_helper_obj,
model_helper_obj.net,
rendezvous,
use_nccl,
max_concurrent_distributed_ops,
)
else:
log.info("NOTE: Param builder function did not create any parameters.")
log.info("Post-iteration operators for updating params")
num_shards = 1 if rendezvous is None else rendezvous['num_shards']
all_params = set(model_helper_obj.GetParams(''))
if shared_model:
_PruneParametersForSharing(model_helper_obj)
if param_update_builder_fun is not None:
for device in devices:
device_opt = core.DeviceOption(model_helper_obj._device_type, device)
with core.DeviceScope(device_opt):
with core.NameScope(
"{}_{}".format(model_helper_obj._device_prefix, device)
):
param_update_builder_fun(model_helper_obj)
else:
log.info("Calling optimizer builder function")
optimizer = optimizer_builder_fun(model_helper_obj)
model_helper_obj._optimizer = optimizer
(sync_blobs, sync_names) = _ComputeBlobsToSync(model_helper_obj)
sync_blobs_grouped = _GroupByDevice(
model_helper_obj,
devices,
sync_blobs,
[],
)
model_helper_obj._device_grouped_blobs.update(sync_blobs_grouped)
_InferBlobDevice(model_helper_obj)
_AnalyzeOperators(model_helper_obj)
# Configure dagnet to run with only one worker on the first iteration,
# to prevent concurrency problems with allocs and nccl.
arg = model_helper_obj.Proto().arg.add()
arg.name = "first_iter_only_one_worker"
arg.i = 1
# Add initial parameter syncs
log.info("Add initial parameter sync")
_SyncAllParams(
devices,
model_helper_obj,
model_helper_obj.param_init_net,
model_helper_obj.param_init_net,
rendezvous,
sync_names,
max_concurrent_distributed_ops=1
)
# Handle any operations that need to be done after parameter sync
# i.e. making sure multi-precision copies of parameters are up-to-date
if post_sync_builder_fun is not None:
for device in devices:
device_opt = core.DeviceOption(model_helper_obj._device_type, device)
with core.DeviceScope(device_opt):
with core.NameScope(
"{}_{}".format(model_helper_obj._device_prefix, device)
):
post_sync_builder_fun(model_helper_obj)
assert not (optimize_gradient_memory and dynamic_memory_management), \
"""It is not advised to use gradient optimization ('memonger')
with dynamic memory management."""
if optimize_gradient_memory:
_OptimizeGradientMemorySimple(model_helper_obj, losses_by_gpu, devices)
if dynamic_memory_management:
_AddDynamicMemoryOptimization(model_helper_obj, blobs_to_keep, devices)
model_helper_obj._data_parallel_model_init_nets = [
model_helper_obj.param_init_net,
]
model_helper_obj._data_parallel_model_nets = [model_helper_obj.net]
if shared_model:
_RemapParameterBlobsForSharedModel(model_helper_obj, all_params)
def Parallelize_GPU_BMUF(*args, **kwargs):
kwargs['cpu_device'] = False
Parallelize_BMUF(*args, **kwargs)
def Parallelize_CPU_BMUF(*args, **kwargs):
kwargs['cpu_device'] = True
Parallelize_BMUF(*args, **kwargs)
def Parallelize_BMUF(
model_helper_obj,
input_builder_fun,
forward_pass_builder_fun,
param_update_builder_fun,
block_learning_rate=1.0,
block_momentum=None,
devices=None,
rendezvous=None,
net_type='dag',
master_device=None,
use_nccl=False,
nesterov=False,
optimize_gradient_memory=False,
reset_momentum_sgd=False,
warmup_iterations=None,
max_concurrent_distributed_ops=4,
add_blobs_to_sync=None,
num_threads_per_device=4,
cpu_device=False
):
'''
Function to create a model that runs on many GPUs. It creates one net
for parameter updates that can be run independently for a number of
iterations, and a second net that runs once afterwards to compute the
final parameter updates according to the block-wise model-update
filtering rule described in: Scalable Training of Deep Learning Machines
by Incremental Block Training with Intra-block Parallel Optimization and
Blockwise Model-Update Filtering (ICASSP 2016).
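
Example: a minimal sketch. The builder functions are hypothetical user
code; RunInitNet, RunWarmup and RunNet are defined later in this module:

    Parallelize_GPU_BMUF(
        model,
        input_builder_fun=add_input,
        forward_pass_builder_fun=add_forward,
        param_update_builder_fun=add_param_update,
        devices=[0, 1],
        warmup_iterations=100,
    )
    RunInitNet(model)
    RunWarmup(model)
    for _ in range(num_blocks):  # num_blocks is chosen by the caller
        RunNet(model, num_local_iterations)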
'''
assert scope.CurrentDeviceScope() is None \
    or scope.CurrentDeviceScope().device_type == caffe2_pb2.CPU, \
    "Parallelize_BMUF must be called without device-scope, \
    device scope was: {}".format(scope.CurrentDeviceScope())
assert isinstance(model_helper_obj, model_helper.ModelHelper)
if devices is None:
devices = list(range(0, workspace.NumCudaDevices()))
if master_device is None:
master_device = devices[0]
if not cpu_device:
for gpu in devices:
if gpu >= workspace.NumCudaDevices():
log.warning("** Only {} GPUs available, GPUs {} requested".format(
workspace.NumCudaDevices(), devices))
break
model_helper_obj._device_type = caffe2_pb2.CUDA
model_helper_obj._device_prefix = "gpu"
else:
model_helper_obj._device_type = caffe2_pb2.CPU
model_helper_obj._device_prefix = "cpu"
model_helper_obj._devices = devices
model_helper_obj._rendezvous = rendezvous
model_helper_obj._broadcast_context = None
model_helper_obj._shared_model = False
master_dev_opt = core.DeviceOption(model_helper_obj._device_type, master_device)
# TODO: document the rendezvous structure
num_shards = rendezvous['num_shards'] if rendezvous else 1
# num_devices is #devices across all machines
num_devices = len(devices) * num_shards
# num_workers is #threads to execute the DAG per shard
num_workers = num_threads_per_device * len(devices)
if rendezvous:
num_workers += 8
loss_scale = 1.0 / num_devices
if block_momentum is None:
block_momentum = 1.0 - 1.0 / num_devices
max_concurrent_distributed_ops = min(
max_concurrent_distributed_ops,
num_workers - 1
)
model_helper_obj.net.Proto().num_workers = num_workers
model_helper_obj.net.Proto().type = net_type
# A net for initializing global model parameters. It is called once in
# the same step as net parameter initialization.
model_helper_obj._global_model_init_net = core.Net('global_model_init')
model_helper_obj._global_model_init_net.Proto().type = net_type
model_helper_obj._global_model_init_net.Proto().num_workers = \
num_workers
# A net for computing the final parameter updates. It will run once,
# after the main net (local model updates) has run `num_local_iterations`
# times.
model_helper_obj._global_model_param_updates_net = core.Net('global_model')
model_helper_obj._global_model_param_updates_net.Proto().type = net_type
model_helper_obj._global_model_param_updates_net.Proto().num_workers = \
num_workers
def _v(param):
return "{}_v".format(param)
def _g(param):
return "{}_g".format(param)
def _v_prev(param):
return "{}_prev".format(param)
# Keep track of params that were in the model before: they are not
# data parallel, so we need to handle them separately
non_datapar_params = copy.copy(model_helper_obj.params)
model_helper_obj._losses_by_gpu = {}
def _InitializeModels(gpu_id):
input_builder_fun(model_helper_obj)
loss = forward_pass_builder_fun(model_helper_obj, loss_scale)
model_helper_obj._losses_by_gpu[gpu_id] = loss
_ForEachDevice(
devices,
_InitializeModels,
device_type=model_helper_obj._device_type,
device_prefix=model_helper_obj._device_prefix,
scoped=True
)
_ValidateParams(model_helper_obj.params)
model_helper_obj._device_grouped_blobs =\
_GroupByDevice(model_helper_obj, devices,
model_helper_obj.params, non_datapar_params)
model_helper_obj._param_names =\
list(viewkeys(model_helper_obj._device_grouped_blobs))
_AddGradientOperators(
devices, model_helper_obj, model_helper_obj._losses_by_gpu
)
_ValidateParams(model_helper_obj.params)
_InferBlobDevice(model_helper_obj)
def _InitializeParamUpdate(gpu_id):
param_update_builder_fun(model_helper_obj)
_ForEachDevice(
devices,
_InitializeParamUpdate,
device_type=model_helper_obj._device_type,
device_prefix=model_helper_obj._device_prefix,
scoped=True
)
model_parameter_names = list(
viewkeys(model_helper_obj._device_grouped_blobs)
)
if warmup_iterations is not None:
model_helper_obj._warmup_iterations = warmup_iterations
# A net for broadcasting gpu-0 (master shard) parameters after
# running the net for `warmup_iterations`.
model_helper_obj._warmup_broadcast = core.Net('warmup-broadcast')
model_helper_obj._warmup_broadcast.Proto().type = net_type
model_helper_obj._warmup_broadcast.Proto().num_workers = \
num_workers
_SyncAllParams(
devices,
model_helper_obj,
model_helper_obj.param_init_net,
model_helper_obj._warmup_broadcast,
rendezvous,
model_parameter_names,
max_concurrent_distributed_ops
)
for param_name in viewkeys(model_helper_obj._device_grouped_blobs):
param = model_helper_obj._device_grouped_blobs[param_name][master_device]
with core.DeviceScope(master_dev_opt):
model_helper_obj._warmup_broadcast.Copy(param, _g(param))
# (Step-0) Initialize momentum parameters on master device.
for param_name in viewkeys(model_helper_obj._device_grouped_blobs):
param = model_helper_obj._device_grouped_blobs[param_name][master_device]
with core.DeviceScope(master_dev_opt):
model_helper_obj._global_model_init_net.ConstantFill(
param, _v(param), value=0.0
)
model_helper_obj._global_model_init_net.Copy(param, _g(param))
if nesterov:
model_helper_obj._global_model_init_net.ConstantFill(
param, _v_prev(param), value=0.0
)
# (Step-1) Update models for num_local_iterations.
# (Step-2) Compute post-local-updates average of the params.
# Sum model params across GPUs and store the results in the param_avg blob.
_AllReduceBlobs(
model_parameter_names,
devices,
model_helper_obj,
model_helper_obj._global_model_param_updates_net,
rendezvous,
use_nccl,
max_concurrent_distributed_ops
)
# (Step-3) Update momentum params:
#   param_v = block_momentum * param_v
#             + block_learning_rate * (param_avg - param)
#   if nesterov momentum:
#       param = param + param_v
#               - block_momentum * (param_v - param_v_prev)
#       param_v_prev = param_v
#   else:
#       param = param + param_v
for param_name in model_parameter_names:
param = model_helper_obj._device_grouped_blobs[param_name][master_device]
with core.DeviceScope(master_dev_opt):
# TODO(ataei) : Stop building the graph here to get model average ?
model_helper_obj._global_model_param_updates_net.Scale(
param, param, scale=1.0 / num_devices
)
model_helper_obj._global_model_param_updates_net.Sub(
[param, _g(param)], param
)
model_helper_obj._global_model_param_updates_net.Scale(
param, param, scale=block_learning_rate
)
model_helper_obj._global_model_param_updates_net.Scale(
_v(param), _v(param), scale=block_momentum
)
model_helper_obj._global_model_param_updates_net.Add(
[_v(param), param], _v(param)
)
model_helper_obj._global_model_param_updates_net.Add(
[_g(param), _v(param)], _g(param)
)
if nesterov:
model_helper_obj._global_model_param_updates_net.Sub(
[_v(param), _v_prev(param)], _v_prev(param)
)
model_helper_obj._global_model_param_updates_net.Scale(
_v_prev(param), _v_prev(param), scale=block_momentum
)
model_helper_obj._global_model_param_updates_net.Sub(
[_g(param), _v_prev(param)], _g(param)
)
model_helper_obj._global_model_param_updates_net.Copy(
_v(param), _v_prev(param)
)
model_helper_obj._global_model_param_updates_net.Copy(
_g(param), param
)
_SyncAllParams(
devices,
model_helper_obj,
model_helper_obj.param_init_net,
model_helper_obj._global_model_param_updates_net,
rendezvous,
model_parameter_names,
max_concurrent_distributed_ops
)
# Add additional syncs
if add_blobs_to_sync is not None:
AddBlobSync(
model_helper_obj,
add_blobs_to_sync,
net=model_helper_obj._global_model_param_updates_net)
# Reset momentum-SGD parameters
if reset_momentum_sgd:
momentum_ops = [op for op in model_helper_obj.net.Proto().op
if op.type == 'MomentumSGDUpdate']
for op in momentum_ops:
momentum_blob = op.input[1]
with core.DeviceScope(op.device_option):
model_helper_obj._global_model_param_updates_net.ConstantFill(
[momentum_blob], momentum_blob, value=0.0
)
if optimize_gradient_memory:
_OptimizeGradientMemorySimple(
model_helper_obj, model_helper_obj._losses_by_gpu, devices
)
model_helper_obj._data_parallel_model_init_nets = [
model_helper_obj.param_init_net,
model_helper_obj._global_model_init_net
]
model_helper_obj._data_parallel_model_nets = [
model_helper_obj.net,
(model_helper_obj._global_model_param_updates_net, 1)
]
def RunInitNet(model):
for init_net in model._data_parallel_model_init_nets:
workspace.RunNetOnce(init_net)
for net_iters in model._data_parallel_model_nets:
if isinstance(net_iters, tuple):
workspace.CreateNet(net_iters[0])
else:
workspace.CreateNet(net_iters)
def RunWarmup(model):
workspace.RunNet(model.net, model._warmup_iterations)
workspace.RunNetOnce(model._warmup_broadcast)
def RunNet(model, num_iterations):
for net_iter in model._data_parallel_model_nets:
if isinstance(net_iter, tuple):
workspace.RunNet(net_iter[0].Proto().name, net_iter[1])
else:
workspace.RunNet(net_iter, num_iterations)
barrier_instance = 0
def Synchronize(model, timeout_sec=_DEFAULT_TIMEOUT_SEC):
if model._rendezvous is None or model._rendezvous['num_shards'] <= 1:
# Single host case
return
log.info("Creating synchronization barrier net")
assert model._rendezvous['engine'] == 'GLOO', "Engine does not support barrier"
global barrier_instance
instance = barrier_instance
barrier_instance += 1
barrier_net = core.Net("sync_barrier_net_" + str(instance))
comm_world = _CreateOrCloneCommonWorld(
barrier_net,
"sync_barrier_cw_" + str(instance),
rendezvous=model._rendezvous,
status_blob="sync_barrier_cw_status_" + str(instance),
timeout_sec=timeout_sec,
)
barrier_net.Barrier(
inputs=[comm_world],
outputs=[],
engine=model._rendezvous['engine'],
status_blob="sync_barrier_status_" + str(instance),
)
workspace.RunNetOnce(barrier_net)
def ConvertNetForDevice(net, device=None):
'''
Converts all blobs in the net to have namescope gpu_X, and correct
device scope. You can use this to enable AppendNet with a
forward_pass_builder_fun:
def builder_fun(model):
...
model.net.AppendNet(
data_parallel_model.ConvertNetForDevice(othermodel.net))
model.param_init_net.AppendNet(
data_parallel_model.ConvertNetForDevice(othermodel.param_init_net))
'''
mnet = copy.deepcopy(net)
if device is None:
device = scope.CurrentDeviceScope()
device_prefix = "gpu" if device.device_type == caffe2_pb2.CUDA else "cpu"
namescope = "{}_{}/".format(device_prefix, device.cuda_gpu_id)
for op in mnet.Proto().op:
if "RecurrentNetwork" in op.type:
raise NotImplementedError("RecurrentNetwork conversion not yet supported")
for i, inputb in enumerate(op.input):
op.input[i] = namescope + inputb
for i, outputb in enumerate(op.output):
op.output[i] = namescope + outputb
for i, blob in enumerate(op.control_input):
op.control_input[i] = namescope + blob
op.device_option.CopyFrom(device)
for i, einp in enumerate(mnet.Proto().external_input):
mnet.Proto().external_input[i] = namescope + einp
for i, eoutp in enumerate(mnet.Proto().external_output):
mnet.Proto().external_output[i] = namescope + eoutp
return mnet
def _ForEachDevice(devices, f, device_type, device_prefix, scoped=False,
*args, **kwargs):
for device in devices:
device_opt = core.DeviceOption(device_type, device)
with core.DeviceScope(device_opt):
if scoped:
with core.NameScope("{}_{}".format(device_prefix, device)):
f(device, *args, **kwargs)
else:
f(device, *args, **kwargs)
def _AddGradientOperators(devices, model, losses_by_gpu):
def create_grad(lossp):
return model.ConstantFill(lossp, str(lossp) + "_grad", value=1.0)
loss_grad = {}
# Explicitly need to create gradients on each GPU
for gpu_id in devices:
device = core.DeviceOption(model._device_type, gpu_id)
with core.DeviceScope(device):
for l in losses_by_gpu[gpu_id]:
lg = create_grad(l)
loss_grad[str(l)] = str(lg)
model.AddGradientOperators(loss_grad)
def ExtractPredictorNet(model, inputs, outputs, device):
'''
Returns (net, params) that can be exported to be used as a prediction
net.
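
Example (a sketch; the blob names are assumptions):

    predictor_net, export_blobs = ExtractPredictorNet(
        model, inputs=["data"], outputs=["softmax"],
        device=core.DeviceOption(caffe2_pb2.CPU),
    )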
'''
master_device = model._devices[0]
prefix = "{}_{}/".format(model._device_prefix, master_device)
prefix_inputs = [prefix + str(b) for b in inputs]
prefix_outputs = [prefix + str(b) for b in outputs]
(predictor_net, export_blobs) = model_helper.ExtractPredictorNet(
net_proto=model.net.Proto(),
input_blobs=prefix_inputs,
output_blobs=prefix_outputs,
device=device,
renames={
a: b
for (a, b) in zip(prefix_inputs + prefix_outputs, inputs + outputs)
},
)
return (predictor_net, export_blobs)
def GetCheckpointParams(model):
'''
Returns a set of blobs that are needed for a complete check point.
They are blobs for the first gpu and iteration blobs.
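
Example (sketch): fetch these blobs to build a checkpoint:

    for blob in GetCheckpointParams(model):
        value = workspace.FetchBlob(blob)  # serialize as desired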
'''
(all_blobs, _) = _ComputeBlobsToSync(model)
first_gpu_blobs = {
b
for b in all_blobs
if str(b)
.startswith("{}_{}/".format(model._device_prefix, model._devices[0]))
}
# Add iteration blobs that do not have a namescope separately, since
# it is important to checkpoint the iteration counter.
iteration_blobs = set()
for op in model.net.Proto().op:
if op.type == 'Iter' or op.type == 'AtomicIter':
if not op.output[0].startswith("{}_".format(model._device_prefix)):
iteration_blobs.add(op.output[0])
return first_gpu_blobs.union(iteration_blobs)
def FinalizeAfterCheckpoint(model, blobs=None):
'''
This function should be called after loading parameters from a
checkpoint / initial parameters file.
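
Example (a sketch; the loading step is hypothetical user code):

    load_blobs_from_checkpoint(path)
    FinalizeAfterCheckpoint(model)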
'''
if not hasattr(model, "_checkpoint_net"):
if blobs is None:
(_, uniq_blob_names) = _ComputeBlobsToSync(model)
else:
uniq_blob_names = [stripBlobName(p) for p in blobs]
# Synchronize to the blob lookup map, as the provided
# blobs might include non-parameters, such as momentum blobs.
log.info("Creating checkpoint synchronization net")
devices = model.GetDevices()
for name in uniq_blob_names:
if name not in model._device_grouped_blobs:
grouped = {
d:
core.BlobReference("{}_{}{}{}".format(
model._device_prefix,
d,
scope._NAMESCOPE_SEPARATOR,
name)
) for d in devices}
model._device_grouped_blobs[name] = grouped
model._checkpoint_net = core.Net("checkpoint_sync_net")
model._checkpoint_net.RunAllOnGPU()
checkpoint_init_net = None
if (model._rendezvous is not None and model._rendezvous['num_shards'] > 1):
checkpoint_init_net = core.Net("checkpoint_init_net")
checkpoint_init_net.RunAllOnGPU()
_SyncAllParams(
devices,
model,
checkpoint_init_net,
model._checkpoint_net,
model._rendezvous,
uniq_blob_names,
max_concurrent_distributed_ops=1
)
if (checkpoint_init_net):
workspace.RunNetOnce(checkpoint_init_net)
workspace.CreateNet(model._checkpoint_net)
# Run the sync
log.info("Run checkpoint net")
workspace.RunNet(model._checkpoint_net.Proto().name)
def GetLearningRateBlobNames(model):
'''
Returns a list of learning rates blob names used in the optimizer.
'''
if model._optimizer is not None:
if model._device_type == caffe2_pb2.CPU:
return [model._optimizer.get_cpu_blob_name('lr')]
elif model._device_type == caffe2_pb2.CUDA:
return [model._optimizer.get_gpu_blob_name('lr', gpu, '')
for gpu in model._devices]
else:
raise Exception(
"Unsupported device type : {}".format(model._device_type)
)
else:
lr_blob_names = []
for op in model.net.Proto().op:
if op.type == "LearningRate":
lr_blob_names.append(op.output(0))
return lr_blob_names
def _Broadcast(devices, model, net, param, use_nccl=False):
# Copy params from gpu_0 to other
master_dev = devices[0]
if use_nccl:
if _IsGPUBlob(model, param):
master_device_opt = core.DeviceOption(model._device_type, master_dev)
with core.DeviceScope(master_device_opt):
# Note that the root is the root _rank_ and not the root
# _device_. Thus we always use root=0, regardless of the
# devices used.
net.NCCLBroadcast(
list(viewvalues(model._device_grouped_blobs[param])),
list(viewvalues(model._device_grouped_blobs[param])),
root=0,
)
return
for dev_idx in devices[1:]:
if _IsGPUBlob(model, param):
device_opt = core.DeviceOption(caffe2_pb2.CUDA, dev_idx)
else:
device_opt = core.DeviceOption(caffe2_pb2.CPU, 0)
with core.DeviceScope(device_opt):
net.Copy(
model._device_grouped_blobs[param][master_dev],
model._device_grouped_blobs[param][dev_idx]
)
def _AllReduce(devices, model, net, param, use_nccl=False, control_input=None):
blobs_group = list(viewvalues(model._device_grouped_blobs[param]))
if model._device_type == caffe2_pb2.CUDA and use_nccl:
# TODO: for _shared_model, do only NCCLReduce
model.NCCLAllreduce(
blobs_group, blobs_group, control_input=control_input
)
return
if model._device_type == caffe2_pb2.CUDA:
p2p_access_pattern = workspace.GetCudaPeerAccessPattern()
else:
p2p_access_pattern = None
def sumN(*dev_indices):
"""Create a Sum op for 2 or more blobs on different devices.
Saves the result on the first device.
Arguments:
dev_indices -- a list of device indices, which can be translated into
CUDA identifiers with model._devices
"""
devices = [model._devices[idx] for idx in dev_indices]
blobs = [blobs_group[idx] for idx in dev_indices]
for i, peer in enumerate(devices):
if i == 0:
continue # Skip the first device
if p2p_access_pattern is not None and not p2p_access_pattern[
devices[0], peer
]:
# Copy from peer to d0
blobs[i] = model.Copy(
blobs[i],
'gpu_{}/{}_gpu{}_copy'.format(devices[0], param, peer)
)
device_opt = core.DeviceOption(model._device_type, devices[0])
with core.DeviceScope(device_opt):
net.Sum(blobs, [blobs[0]], name='dpm')
if len(devices) == 16:
# Special tree reduction for 16 gpus, TODO generalize like in muji.py
for j in range(8):
sumN(j * 2, j * 2 + 1)
for j in range(4):
sumN(j * 4, j * 4 + 2)
for j in range(2):
sumN(j * 8, j * 8 + 4)
sumN(0, 8)
elif len(devices) == 8:
for j in range(4):
sumN(j * 2, j * 2 + 1)
for j in range(2):
sumN(j * 4, j * 4 + 2)
sumN(0, 4)
elif len(devices) == 4:
sumN(0, 1)
sumN(2, 3)
sumN(0, 2)
else:
sumN(*range(len(devices)))
# TODO: for _shared_model, no need to broadcast
_Broadcast(devices, model, net, param)
def _SyncAllParams(
devices,
model,
init_net,
net,
rendezvous,
unique_param_names,
max_concurrent_distributed_ops=4
):
if rendezvous is None or rendezvous['num_shards'] <= 1:
_SyncAllParamsSingleHost(devices, model, net, unique_param_names)
else:
_SyncAllParamsDistributed(
devices,
model,
init_net,
net,
rendezvous,
unique_param_names,
max_concurrent_distributed_ops
)
def AddBlobSync(model, blobs, net=None):
'''
Sync a blob across devices and hosts
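
Example (a sketch; the blob name is an assumption):

    AddBlobSync(model, ["num_examples"])

This syncs the unprefixed blob "num_examples" across all devices
and shards.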
'''
if len(blobs) == 0:
return
net = model.net if net is None else net
for b in blobs:
assert not b.startswith(model._device_prefix), \
"Provide unprefixed blob name: {}".format(b)
model._device_grouped_blobs[b] = {
d: core.BlobReference("{}_{}/{}".format(model._device_prefix, d, b))
for d in model._devices
}
_SyncAllParams(
model._devices,
model,
model.param_init_net,
net,
model._rendezvous,
set(blobs))
def AddDistributedBlobSync(model, blobs):
'''
Sync blobs across machines (but not across devices)
'''
if model._rendezvous is None:
return
synth_name = "_".join([str(b) for b in blobs])
comm_world = _CreateOrCloneCommonWorld(
model.param_init_net,
"blob_sync_cw_" + synth_name,
rendezvous=model._rendezvous,
status_blob="create_blob_sync_cw_{}_cw_status".format(
synth_name,
),
)
model.net.Allreduce(
inputs=[comm_world] + blobs,
outputs=blobs,
engine=model._rendezvous['engine'],
status_blob="blob_sync_allred_{}_status".format(synth_name),
)
def _SyncAllParamsDistributed(
devices,
model,
init_net,
net,
rendezvous,
unique_param_names,
max_concurrent_distributed_ops
):
assert rendezvous['num_shards'] > 1
gpu_device_opt = core.DeviceOption(model._device_type, devices[0])
cpu_device_opt = core.DeviceOption(caffe2_pb2.CPU)
if model._broadcast_context is None:
model._broadcast_context = CollectivesConcurrencyControl(
"broadcast",
max_concurrent_distributed_ops,
init_net,
rendezvous
)
context = model._broadcast_context
for param_name in sorted(unique_param_names):
master_param = model._device_grouped_blobs[param_name][devices[0]]
params_group = list(viewvalues(model._device_grouped_blobs[param_name]))
def broadcast(params):
comm_world, control_input = context.get_control_and_context(params)
net.Broadcast(
inputs=[comm_world] + params,
outputs=params,
name=param_name,
engine=rendezvous['engine'],
status_blob="broadcast_{}_status".format(str(param_name)),
control_input=control_input
)
device_opt = gpu_device_opt if _IsGPUBlob(
model, param_name
) else cpu_device_opt
if rendezvous['engine'] == 'GLOO':
with core.DeviceScope(device_opt):
broadcast(params_group)
else:
# Copy between GPU and CPU
with core.DeviceScope(device_opt):
param_cpu = net.CopyGPUToCPU(
master_param,
str(master_param) + "cpu"
)
with core.DeviceScope(cpu_device_opt):
broadcast([param_cpu])
with core.DeviceScope(device_opt):
net.CopyCPUToGPU(param_cpu, master_param)
# Broadcast locally
_Broadcast(devices, model, net, param_name)
def _SyncAllParamsSingleHost(devices, model, net, unique_param_names):
for param in unique_param_names:
_Broadcast(devices, model, net, param)
def _AllReduceBlobs(blob_names, devices, model, net, rendezvous, use_nccl,
max_concurrent_distributed_ops):
if rendezvous is None or rendezvous['num_shards'] <= 1:
_AllReduceBlobsSingleHost(
blob_names,
devices,
model,
net,
use_nccl
)
else:
_AllReduceBlobsDistributed(
blob_names,
devices,
model,
net,
rendezvous,
max_concurrent_distributed_ops,
)
def _PruneParametersForSharing(model):
assert model._shared_model
master_prefix = "{}_{}/".format(model._device_prefix, model._devices[0])
# Remove non-master parameters so that they will not receive parameter
# update operators.
model.params = model.GetParams(master_prefix)
paramset = set(model.params)
model.param_to_grad = {
p: model.param_to_grad[p]
for p in model.param_to_grad if p in paramset
}
model.weights = [w for w in model.weights if w in paramset]
model.biases = [w for w in model.biases if w in paramset]
def _RemapParameterBlobsForSharedModel(model, all_params):
assert model._shared_model
master_prefix = "{}_{}/".format(
model._device_prefix, model._devices[0])
log.info("Remapping param blobs to master -> {}".format(master_prefix))
master_params = set(model.GetParams())
# Remove all but master params
def modify_ops(net):
ops = []
for op in net.Proto().op:
delete_op = False
# Delete ops that output non-master version of parameter
for outp in op.output:
if outp in all_params and outp not in master_params:
delete_op = True
log.debug("Delete b/c {}: {}".format(outp, str(op)))
break
if delete_op:
continue
# Remap inputs to point to the master param
for j, inp in enumerate(op.input):
if inp in all_params and inp not in master_params:
op.input[j] = master_prefix + stripBlobName(inp)
ops.append(op)
del net.Proto().op[:]
net.Proto().op.extend(ops)
modify_ops(model.param_init_net)
modify_ops(model.net)
class CollectivesConcurrencyControl(object):
"""
Creates common worlds (up to max_concurrent_context) and manages the
sequential execution of collectives that share the same context, using
cyclic control inputs.
"""
def __init__(
self,
name,
max_concurrent_context,
param_init_net,
rendezvous
):
self.name = name
self.param_init_net = param_init_net
self.max_concurrent_context = max_concurrent_context
self.counter = 0
self.common_worlds = []
self.control_inputs = []
self.rendezvous = rendezvous
def get_control_and_context(self, control_output_blob):
common_world, control_input = [None, None]
current_slot = self.counter % self.max_concurrent_context
if len(self.common_worlds) < self.max_concurrent_context:
common_world = _CreateOrCloneCommonWorld(
self.param_init_net,
"{}_{}_cw".format(self.name, current_slot),
rendezvous=self.rendezvous,
status_blob="create_{}_cw_{}_status".format(
self.name,
current_slot
),
)
self.common_worlds.append(common_world)
self.control_inputs.append(control_output_blob)
else:
common_world = self.common_worlds[current_slot]
control_input = self.control_inputs[current_slot]
self.control_inputs[current_slot] = control_output_blob
self.counter += 1
return common_world, control_input
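# Usage sketch for CollectivesConcurrencyControl (mirrors its use in
# _AllReduceBlobsDistributed below): ops that share a common world are
# serialized via cyclic control inputs, while up to max_concurrent_context
# common worlds can run concurrently:
#
#     context = CollectivesConcurrencyControl(
#         "allreduce", max_concurrent_distributed_ops,
#         model.param_init_net, rendezvous)
#     comm_world, control_input = context.get_control_and_context(blob)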
def _AllReduceBlobsDistributed(
blob_names,
devices,
model,
net,
rendezvous,
max_concurrent_distributed_ops,
):
num_workers = model.net.Proto().num_workers
assert num_workers > 1, "Please specify more than 1 worker"
all_reduce_engine = rendezvous['engine']
master_device_opt = core.DeviceOption(model._device_type, devices[0])
reducing_device_opt = master_device_opt
context = CollectivesConcurrencyControl(
"allreduce",
max_concurrent_distributed_ops,
model.param_init_net,
rendezvous
)
nccl_control_blob = None
for blob_name in blob_names:
master_blob = model._device_grouped_blobs[blob_name][devices[0]]
blobs_group = list(viewvalues(model._device_grouped_blobs[blob_name]))
assert master_blob in blobs_group
# Remark: NCCLReduce does not support in-place modifications
# so we need a temporary blob
reduced_blob = str(master_blob) + "_red"
def allreduce(blobs, **kwargs):
with core.DeviceScope(reducing_device_opt):
comm_world, control_input = \
context.get_control_and_context(blobs[0])
net.Allreduce(
inputs=[comm_world] + blobs,
outputs=blobs,
name=blob_name,
engine=all_reduce_engine,
control_input=control_input,
status_blob="allreduce_{}_status".format(blob_name),
**kwargs
)
if rendezvous['engine'] == 'GLOO':
# With Gloo, cross-GPU and cross-machine allreduce
# can be executed in a single operation.
# Try to use GPUDirect if transport == ibverbs.
allreduce(
blobs_group,
gpu_direct=(rendezvous.get("transport", None) == "ibverbs"),
)
else:
# Step 1: sum blobs from local GPUs to master GPU
with core.DeviceScope(master_device_opt):
model.ConstantFill(master_blob, reduced_blob, value=0.0)
# Temp fix since NCCLReduce does not work
net.NCCLAllreduce(
blobs_group,
blobs_group,
control_input=nccl_control_blob,
)
nccl_control_blob = blobs_group[0]
net.Copy(master_blob, reduced_blob)
# Step 2: allreduce between all hosts, between master GPUs
allreduce([reduced_blob])
with core.DeviceScope(master_device_opt):
net.Copy(reduced_blob, master_blob)
# Step 3: broadcast locally
_Broadcast(devices, model, net, blob_name)
def _AllReduceBlobsSingleHost(blob_names, devices, model, net, use_nccl):
"""Performs NCCL AllReduce to distribute blobs to all the GPUs."""
if len(devices) == 1:
return
# Now we need to Allreduce blobs on all the GPUs.
# Pick GPU #0 as a master GPU.
master_device_opt = core.DeviceOption(model._device_type, devices[0])
last_out = None
concatenated_idx = set()
for blob_name in blob_names:
# Group by blob_name for reduce.
blobs_group = list(viewvalues(model._device_grouped_blobs[blob_name]))
if len(blobs_group) == 1:
# Non-reducible
continue
assert len(blobs_group) == len(devices), \
"Each GPU from {}, should have a copy of {}.".format(
devices, blob_name)
if _IsGPUBlob(model, blob_name):
with core.DeviceScope(master_device_opt):
if not isinstance(blobs_group[0], core.GradientSlice):
_AllReduce(
devices, model, net, blob_name, use_nccl, last_out
)
# last_out is used to serialize the execution of nccls
last_out = blobs_group[0]
else:
# Sparse gradients: all-gather for indices and values
master_ns = "{}_{}".format(model._device_prefix, devices[0])
# Skip if we have already copied the concatenated indices
# to the indices of the GradientSlice. This happens when two
# or more grad blobs are gathered with the same indices blob.
skip_idx_concat = False
for g in blobs_group:
if g.indices in concatenated_idx:
skip_idx_concat = True
if not skip_idx_concat:
grad_idx_concat, _ = net.Concat(
[g.indices for g in blobs_group],
["{}/{}_index_concat".format(master_ns, blob_name),
"{}/{}_index_splitinfo".format(master_ns, blob_name)],
axis=0,
name="note:data_parallel_model")
for gpu, g in viewitems(model._device_grouped_blobs[blob_name]):
device_opt = core.DeviceOption(model._device_type, gpu)
with core.DeviceScope(device_opt):
model.Copy(grad_idx_concat, g.indices)
concatenated_idx.add(g.indices)
grad_val_concat, _ = net.Concat(
[g.values for g in blobs_group],
["{}/{}_val_concat".format(master_ns, blob_name),
"{}/{}_val_splitinfo".format(master_ns, blob_name)],
axis=0, name="note:data_parallel_model")
for gpu, g in viewitems(model._device_grouped_blobs[blob_name]):
device_opt = core.DeviceOption(model._device_type, gpu)
with core.DeviceScope(device_opt):
model.Copy(grad_val_concat, g.values)
else:
assert not isinstance(blobs_group[0], core.GradientSlice), \
"Synchronizing gradient slices not supported"
with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
# Poor man's allreduce
net.Sum(blobs_group, [blobs_group[0]])
if not model._shared_model:
_Broadcast(devices, model, net, blob_name)
def _BroadcastComputedParams(devices, model, rendezvous, use_nccl=False):
if rendezvous is None:
_BroadcastComputedParamsSingleHost(devices, model, use_nccl)
else:
_BroadcastComputedParamsDistributed(devices, model, rendezvous, use_nccl)
def _BroadcastComputedParamsDistributed(
devices,
model,
rendezvous,
use_nccl=False
):
_BroadcastComputedParamsSingleHost(devices, model, use_nccl)
log.warning("Distributed broadcast of computed params is not implemented yet")
def _BroadcastComputedParamsSingleHost(devices, model, use_nccl=False):
'''
Broadcast computed params from the master device to all other devices
'''
if len(devices) == 1:
return
for param_name in model._computed_param_names:
# Copy from master to others -- averaging would be perhaps better,
# but currently NCCLAllReduce is too prone to deadlock
_Broadcast(devices, model, model.net, param_name, use_nccl)
def _GetReverseOrderedGrads(model):
'''
Returns the gradients in reverse order (namespace stripped),
for the optimal synchronization order.
'''
return list(reversed(model._grad_names))
# A helper function to extract a parameter's name
def stripBlobName(param):
# Format is "a/b/c/d" -> "b/c/d"
if isinstance(param, core.GradientSlice):
return stripBlobName(param.indices) + ":" + stripBlobName(param.values)
else:
name = str(param)
return name[name.index(scope._NAMESCOPE_SEPARATOR) + 1:]
def _AnalyzeOperators(model):
'''
Look at all the operators and check that they do not cross device scopes
'''
for op in model.Proto().op:
if "NCCL" in op.type or "Copy" in op.type or "Concat" in op.type:
continue
if "Sum" == op.type and op.name == "dpm":
continue
if "Allreduce" in op.type and "GLOO" in op.engine:
continue
op_dev = op.device_option
op_gpu = op_dev.cuda_gpu_id
# This avoids failing on operators that are only for CPU
if op_dev.device_type != caffe2_pb2.CUDA:
continue
namescope = "{}_{}/".format(model._device_prefix, op_gpu)
for inp in list(op.input) + list(op.output):
if inp.startswith("{}_".format(model._device_prefix)
) and not inp.startswith(namescope):
raise Exception(
"Blob {} of op {}, should have namescope {}. Op: {}".format(
inp,
op.type,
"{}_{}/".format(model._device_prefix, op_gpu),
str(op),
)
)
def _InferBlobDevice(model):
'''
Assign each blob a device option, based on the operator outputting it
'''
mapping = {}
def map_ops(proto):
for op in proto.op:
device_option = op.device_option
if op.type == "Iter":
# Hack for Iters which have blob in CPU context
device_option = caffe2_pb2.DeviceOption()
device_option.device_type = caffe2_pb2.CPU
for b in list(op.input) + list(op.output):
if b not in mapping:
mapping[b] = device_option
if op.type.startswith('RecurrentNetwork'):
step_args = [a for a in op.arg if a.name.endswith("step_net")]
for step_arg in step_args:
map_ops(step_arg.n)
map_ops(model.param_init_net.Proto())
map_ops(model.net.Proto())
model._blob_to_device = mapping
def _IsGPUBlob(model, blob_name):
if blob_name in model._blob_to_device:
return model._blob_to_device[blob_name].device_type == caffe2_pb2.CUDA
else:
blob_name = "{}_{}/{}".format(
model._device_prefix, model._devices[0], blob_name
)
if blob_name not in model._blob_to_device:
return model._device_type == caffe2_pb2.CUDA
return model._blob_to_device[blob_name].device_type == caffe2_pb2.CUDA
def _GroupByDevice(model, devices, params, non_data_params):
'''
Groups blobs by device, returning a map of [blobname] = {0: BlobRef, 1: ..}.
Returns ordered dictionary, ensuring the original order.
'''
grouped = OrderedDict()
# Only consider params that were created to be "data parallel"
params = params[len(non_data_params):]
for _i, p in enumerate(params):
assert isinstance(p, core.BlobReference) or \
isinstance(p, core.GradientSlice), \
"Param {} is not BlobReference or GradientSlice".format(p)
name = stripBlobName(p)
gpuid = None
if isinstance(p, core.BlobReference):
gpuid = int(p.GetNameScope().split("_")[1].split("/")[0])
assert "{}_{}/".format(model._device_prefix, gpuid) in p.GetNameScope(),\
"Param {} expected to have namescope '{}_{}'".format(str(p), model._device_prefix, gpuid)
else:
gpuid = int(p.indices.GetNameScope().split("_")[1].split("/")[0])
assert "{}_{}/".format(model._device_prefix, gpuid) in p.indices.GetNameScope(),\
"Indices {} expected to have namescope '{}_{}'".format(str(p), model._device_prefix, gpuid)
assert "{}_{}/".format(model._device_prefix, gpuid) in p.values.GetNameScope(),\
"Values {} expected to have namescope '{}_{}'".format(str(p), model._device_prefix, gpuid)
if name not in grouped:
grouped[name] = {}
grouped[name][gpuid] = p
return grouped
def _ValidateParams(params):
set_params = set(params)
if len(params) > len(set_params):
dupes = []
sp = sorted(params)
for j, p in enumerate(sp):
if j > 0 and sp[j - 1] == p:
dupes.append(p)
assert len(params) == len(set_params), \
"Duplicate entries in params: {}".format(dupes)
def _ComputeBlobsToSync(model):
'''
We sync all blobs that are generated by param init net and
are 'data parallel', i.e. assigned to a device.
'''
sync_names = set()
# We don't sync params if the model is shared
if model._shared_model:
blobs_to_sync = [str(p) for p in model.GetComputedParams('')]
sync_names = [stripBlobName(p) for p in blobs_to_sync]
else:
blobs_to_sync = []
for op in model.param_init_net.Proto().op:
dp_outputs = [
o for o in op.output
if o.startswith("{}_".format(model._device_prefix))
]
sync_names.update([stripBlobName(o) for o in dp_outputs])
blobs_to_sync.extend(dp_outputs)
# Sanity check
diff = set(model._param_names) - sync_names
assert diff == set(), \
"Some params not instantiated in param init net: {}".format(diff)
# Remove duplicates and sort
prefixlen = len(model._device_prefix) + 1
def extract_sort_key(b):
# Sort first based on device id, and then by whole string
deviceid = int(b[prefixlen:b.index(scope._NAMESCOPE_SEPARATOR)])
return (deviceid, b)
blobs_to_sync = sorted(
list(set(blobs_to_sync)),
key=extract_sort_key)
blobs_to_sync = [core.BlobReference(b) for b in blobs_to_sync]
return (blobs_to_sync, sync_names)
def _OptimizeGradientMemorySimple(model, losses_by_gpu, devices):
log.warning("------- DEPRECATED API, please use " +
"data_parallel_model.OptimizeGradientMemory() ----- ")
for device in devices:
namescope = "{}_{}/".format(model._device_prefix, device)
model.net._net = memonger.share_grad_blobs(
model.net,
losses_by_gpu[device],
set(viewvalues(model.param_to_grad)),
namescope,
share_activations=False,
)
def _AddDynamicMemoryOptimization(model, blobs_to_keep, devices):
blobs_to_keep_all_devices = set()
if blobs_to_keep is not None:
for device in devices:
for blob_name in blobs_to_keep:
blobs_to_keep_all_devices.add(
"{}_{}/{}".format(model._device_prefix, device, blob_name)
)
if model._rendezvous is not None:
# GLOO operators expect the tensor addresses to remain same over
# iterations so we need to remove param grads from the dynamic memory
# management.
blobs_to_keep_all_devices.update(
[str(b) for b in viewvalues(model.param_to_grad)]
)
model.net._net = memonger.release_blobs_when_used(
model.net.Proto(),
blobs_to_keep_all_devices
)
def OptimizeGradientMemory(model,
input_shapes,
excluded_blobs,
recycle_activations):
"""
Optimize memory usage of the backward pass by recycling blobs for gradient
inputs that have been 'used'.
input_shapes: dict of blob name to shape for the inputs of the model.
Pass empty dictionary if not known.
excluded_blobs: list of blobs that cannot be recycled. These are blobs
that you will access externally.
recycle_activations: whether to also recycle forward pass activations
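
Example (a sketch; the blob names and input shape are assumptions):

    OptimizeGradientMemory(
        model,
        input_shapes={"data": (32, 3, 224, 224)},
        excluded_blobs=["loss"],
        recycle_activations=False,
    )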
"""
if input_shapes is not None:
input_shapes_all_devices = {}
for b, shp in viewitems(input_shapes):
for d in model._devices:
input_shapes_all_devices["{}_{}/{}".
format(model._device_prefix, d, b)] = shp
(shapes, types) = workspace.InferShapesAndTypes(
[model.param_init_net, model.net],
input_shapes_all_devices,
)
else:
shapes = None
for device in model._devices:
namescope = "{}_{}/".format(model._device_prefix, device)
excluded_blobs_by_device = set(namescope + b for b in excluded_blobs)
model.net._net = memonger.share_grad_blobs(
model.net,
model._losses_by_gpu[device],
set(viewvalues(model.param_to_grad)),
namescope,
dont_share_blobs=excluded_blobs_by_device,
share_activations=recycle_activations,
blob_shapes=shapes,
)
def _CreateOrCloneCommonWorld(
net,
common_world_blob,
rendezvous,
name=None,
status_blob=None,
timeout_sec=None):
if timeout_sec is None:
timeout_sec = _DEFAULT_TIMEOUT_SEC
timeout_ms = timeout_sec * 1000
# Check if there is an existing CreateCommonWorld
# with the same timeout we're looking for. If so,
# we can clone it instead of creating a new one.
existing = None
for op in net.Proto().op:
if op.type != "CreateCommonWorld":
continue
# Find common world timeout
op_timeout_ms = -1
for arg in op.arg:
if arg.name == 'timeout_ms':
op_timeout_ms = arg.i
break
if op_timeout_ms != timeout_ms:
continue
# This common world was created with the same timeout we're
# looking for, so we can clone it
existing = op.output[0]
break
if name is None:
name = "{}_op".format(common_world_blob)
if existing is not None:
comm_world = net.CloneCommonWorld(
[existing],
common_world_blob,
name=name,
engine=rendezvous['engine'],
status_blob=status_blob,
)
else:
kwargs=dict()
if 'transport' in rendezvous:
kwargs['transport'] = rendezvous['transport']
if 'interface' in rendezvous:
kwargs['interface'] = rendezvous['interface']
if 'mpi_rendezvous' in rendezvous:
kwargs['mpi_rendezvous'] = rendezvous['mpi_rendezvous']
comm_world = net.CreateCommonWorld(
rendezvous['kv_handler'] or [],
common_world_blob,
name=name,
size=rendezvous['num_shards'],
rank=rendezvous['shard_id'],
engine=rendezvous['engine'],
status_blob=status_blob,
timeout_ms=timeout_ms,
**kwargs
)
return comm_world
def _RunComparison(model, blob_name, device=None):
if device is None:
device = model._blob_to_device[blob_name]
with core.DeviceScope(device):
rendezvous = model._rendezvous
if rendezvous is None or rendezvous['num_shards'] == 1:
return True
test_data_arr = np.zeros(rendezvous['num_shards']).astype(np.float32)
test_data_arr[rendezvous['shard_id']] = 1
workspace.FeedBlob("compare_arr", test_data_arr)
comparison_net = core.Net("allcompare_net")
kwargs=dict()
if 'mpi_rendezvous' in rendezvous:
kwargs['mpi_rendezvous'] = rendezvous['mpi_rendezvous']
comm_world = comparison_net.CreateCommonWorld(
rendezvous['kv_handler'] or [],
"initial_sync",
name=model.net.Proto().name + ".cw_master_select",
size=rendezvous['num_shards'],
rank=rendezvous['shard_id'],
engine=rendezvous['engine'],
status_blob="cw_master_select",
**kwargs
)
blob_name_checksum = blob_name + "_checksum"
comparison_net.SumSqrElements(
[blob_name], [blob_name_checksum], average=False
)
blob_name_gather = blob_name + "_gather"
comparison_net.Mul(
inputs=["compare_arr", blob_name_checksum],
outputs=blob_name_gather,
broadcast=1
)
comparison_net.Allreduce(
inputs=[comm_world, blob_name_gather],
outputs=[blob_name_gather],
engine=rendezvous['engine'],
status_blob="all_reduce_master_select_status",
)
workspace.RunNetOnce(comparison_net)
gather_arr = workspace.FetchBlob(blob_name_gather)
baseline = gather_arr[0]
for i in range(rendezvous['num_shards']):
assert gather_arr[i] == baseline, \
"allcompare failed on shard {}.".format(rendezvous['shard_id'])
return True
def _InterleaveOps(model):
'''
Data Parallel Model creates a net with ops in one device grouped together.
This will interleave the ops so that each op for each device is next
to each other in the net. Kind of like combining decks of cards. This
ensures that progress is made along the critical path roughly concurrently
for each device, which is important due to the extra intra-node
synchronization required for multi-device batch normalization.
'''
orig_ops = list(model.net.Proto().op)
num_devices = len(model._devices)
num_ops_per_dev = len(orig_ops) // num_devices
assert num_devices * num_ops_per_dev == len(orig_ops), \
'Number of ops per device in original net is not uniform'
new_ops = []
ops = {d: [] for d in range(num_devices)}
for op in orig_ops:
ops[op.device_option.cuda_gpu_id].append(op)
for j in range(num_ops_per_dev):
tp = None
for d in model._devices:
if tp is None:
tp = ops[d][j].type
new_ops.append(ops[d][j])
# Sanity
assert ops[d][j].type == tp, \
"Type mismatch {} / {}".format(tp, ops[d][j].type)
del model.net.Proto().op[:]
model.net.Proto().op.extend(new_ops)
def _InterDeviceBatchNormalization(model):
orig_ops = list(model.net.Proto().op)
new_ops = []
num_devices = len(model._devices)
batch_norm_ops = []
injected_ops = []
spatial_bn_phase = False
sums_blobs = []
sumsq_blobs = []
name = []
input_blob_name = None
spatial_bn_gradient_phase = False
scale_grad_blobs = []
bias_grad_blobs = []
for op in orig_ops:
if op.type != 'SpatialBN' and op.type != 'SpatialBNGradient':
if spatial_bn_phase:
new_ops.extend(injected_ops)
new_ops.append(
core.CreateOperator("Sum",
sums_blobs,
input_blob_name + "_sums_combined"))
new_ops.append(
core.CreateOperator("Sum",
sumsq_blobs,
input_blob_name + "_sumsq_combined"))
new_ops.extend(batch_norm_ops)
injected_ops = []
batch_norm_ops = []
sums_blobs = []
sumsq_blobs = []
spatial_bn_phase = False
input_blob_name = None
elif spatial_bn_gradient_phase:
new_ops.extend(injected_ops)
scale_blob = \
"cpu_0/" + stripBlobName(scale_grad_blobs[0]) + "_combined"
bias_blob = \
"cpu_0/" + stripBlobName(bias_grad_blobs[0]) + "_combined"
new_ops.append(
core.CreateOperator("Sum", scale_grad_blobs, scale_blob))
new_ops.append(
core.CreateOperator("Sum", bias_grad_blobs, bias_blob))
for blob in scale_grad_blobs:
new_ops.append(
core.CreateOperator("Copy", scale_blob, blob))
for blob in bias_grad_blobs:
new_ops.append(core.CreateOperator("Copy", bias_blob, blob))
new_ops.extend(batch_norm_ops)
injected_ops = []
batch_norm_ops = []
scale_grad_blobs = []
bias_grad_blobs = []
spatial_bn_gradient_phase = False
new_ops.append(op)
elif op.type == 'SpatialBN':
spatial_bn_phase = True
if input_blob_name is None:
input_blob_name = op.input[0]
name = op.input[0]
injected_ops.append(
core.CreateOperator(
"ChannelStats",
name,
[name + "_sums", name + "_sumsq"]))
sums_blobs.append(name + "_sums")
sumsq_blobs.append(name + "_sumsq")
op.input.append(input_blob_name + "_sums_combined")
op.input.append(input_blob_name + "_sumsq_combined")
op.arg.extend([utils.MakeArgument("num_batches", num_devices)])
batch_norm_ops.append(op)
elif op.type == 'SpatialBNGradient':
spatial_bn_gradient_phase = True
injected_ops.append(
core.CreateOperator("ChannelBackpropStats",
[op.input[0], op.input[3], op.input[4],
op.input[2]],
[op.output[1], op.output[2]]))
scale_grad_blobs.append(op.output[1])
bias_grad_blobs.append(op.output[2])
op.arg.extend([utils.MakeArgument("num_batches", num_devices)])
op.input.extend([op.output[1], op.output[2]])
batch_norm_ops.append(op)
assert not spatial_bn_phase, \
"Net modification for inter-device batch normalization failed"
del model.net.Proto().op[:]
model.net.Proto().op.extend(new_ops)
|
## @package scope
# Module caffe2.python.scope
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import threading
from past.builtins import basestring
from caffe2.proto import caffe2_pb2
# The name scope and device scope when creating a new operator.
_NAMESCOPE_SEPARATOR = '/'
_threadlocal_scope = threading.local()
def CurrentNameScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "namescope"):
_threadlocal_scope.namescope = ''
return _threadlocal_scope.namescope
def CurrentDeviceScope():
global _threadlocal_scope
if not hasattr(_threadlocal_scope, "devicescope"):
_threadlocal_scope.devicescope = None
return _threadlocal_scope.devicescope
@contextlib.contextmanager
def NameScope(prefix, reset=False):
global _threadlocal_scope
assert isinstance(prefix, basestring) or prefix is None, \
"NameScope takes in a string as its argument."
old_scope = CurrentNameScope()
prefix = prefix + _NAMESCOPE_SEPARATOR if prefix else ''
if reset:
_threadlocal_scope.namescope = prefix
else:
_threadlocal_scope.namescope = _threadlocal_scope.namescope + prefix
try:
yield
finally:
assert _threadlocal_scope.namescope.endswith(prefix), \
"The namescope variable is changed from outside NameScope() calls."
_threadlocal_scope.namescope = old_scope
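# Example (sketch): name scopes nest using the '/' separator:
#     with NameScope("gpu_0"):
#         with NameScope("fc"):
#             assert CurrentNameScope() == "gpu_0/fc/"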
@contextlib.contextmanager
def DeviceScope(scope, node_name=None):
new_scope = caffe2_pb2.DeviceOption()
if scope:
assert isinstance(scope, caffe2_pb2.DeviceOption), \
"DeviceScope takes in a caffe2_pb2.DeviceOption as its argument."
new_scope.CopyFrom(scope)
else:
assert node_name, "At least one argument should be non-null in DeviceScope"
# rewrite node_name if it is explicitly given
if node_name:
new_scope.node_name = node_name
global _threadlocal_scope
old_scope = CurrentDeviceScope()
# nested scope should inherit the node_name if it is not explicitly set
if old_scope and old_scope.HasField('node_name') and \
not new_scope.HasField('node_name'):
new_scope.node_name = old_scope.node_name
_threadlocal_scope.devicescope = new_scope
try:
yield
finally:
assert _threadlocal_scope.devicescope == new_scope, \
"The device scope is changed from outside DeviceScope() calls."
_threadlocal_scope.devicescope = old_scope
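# Example (sketch):
#     gpu0 = caffe2_pb2.DeviceOption()
#     gpu0.device_type = caffe2_pb2.CUDA
#     gpu0.cuda_gpu_id = 0
#     with DeviceScope(gpu0):
#         pass  # operators created here inherit gpu0 as device option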
@contextlib.contextmanager
def EmptyDeviceScope():
"""
Allow users to 'disable' the device scope behaviour (so it can be
controlled at a NetDef::DeviceOption level, not overridden at
OperatorDef::DeviceOption level).
This sets the CurrentDeviceScope() to None, so that the field is
not set in CreateOperator(...), etc.
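
Usage sketch:

    with EmptyDeviceScope():
        pass  # CurrentDeviceScope() is None inside this block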
"""
old_scope = CurrentDeviceScope()
try:
_threadlocal_scope.devicescope = None
yield
finally:
_threadlocal_scope.devicescope = old_scope
|
## @package model_helper_api
# Module caffe2.python.model_helper_api
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import copy
import inspect
from past.builtins import basestring
from caffe2.python.model_helper import ModelHelper
# flake8: noqa
from caffe2.python.helpers.algebra import *
from caffe2.python.helpers.arg_scope import *
from caffe2.python.helpers.array_helpers import *
from caffe2.python.helpers.control_ops import *
from caffe2.python.helpers.conv import *
from caffe2.python.helpers.db_input import *
from caffe2.python.helpers.dropout import *
from caffe2.python.helpers.elementwise_linear import *
from caffe2.python.helpers.fc import *
from caffe2.python.helpers.nonlinearity import *
from caffe2.python.helpers.normalization import *
from caffe2.python.helpers.pooling import *
from caffe2.python.helpers.tools import *
from caffe2.python.helpers.train import *
class HelperWrapper(object):
_registry = {
'arg_scope': arg_scope,
'fc': fc,
'packed_fc': packed_fc,
'fc_decomp': fc_decomp,
'fc_sparse': fc_sparse,
'fc_prune': fc_prune,
'dropout': dropout,
'max_pool': max_pool,
'average_pool': average_pool,
'max_pool_with_index' : max_pool_with_index,
'lrn': lrn,
'softmax': softmax,
'instance_norm': instance_norm,
'spatial_bn': spatial_bn,
'relu': relu,
'prelu': prelu,
'tanh': tanh,
'concat': concat,
'depth_concat': depth_concat,
'sum': sum,
'transpose': transpose,
'iter': iter,
'accuracy': accuracy,
'conv': conv,
'conv_nd': conv_nd,
'conv_transpose': conv_transpose,
'group_conv': group_conv,
'group_conv_deprecated': group_conv_deprecated,
'image_input': image_input,
'video_input': video_input,
'add_weight_decay': add_weight_decay,
'elementwise_linear': elementwise_linear,
'layer_norm': layer_norm,
'batch_mat_mul' : batch_mat_mul,
'cond' : cond,
'loop' : loop,
'db_input' : db_input,
}
def __init__(self, wrapped):
self.wrapped = wrapped
def __getattr__(self, helper_name):
if helper_name not in self._registry:
raise AttributeError(
"Helper function {} not "
"registered.".format(helper_name)
)
def scope_wrapper(*args, **kwargs):
new_kwargs = {}
if helper_name != 'arg_scope':
if len(args) > 0 and isinstance(args[0], ModelHelper):
model = args[0]
elif 'model' in kwargs:
model = kwargs['model']
else:
raise RuntimeError(
    "The first input of a helper function should be a model, "
    "or you can provide it in kwargs as model=<your_model>.")
new_kwargs = copy.deepcopy(model.arg_scope)
func = self._registry[helper_name]
var_names, _, varkw, _= inspect.getargspec(func)
if varkw is None:
# this helper function does not take in random **kwargs
new_kwargs = {
var_name: new_kwargs[var_name]
for var_name in var_names if var_name in new_kwargs
}
cur_scope = get_current_scope()
new_kwargs.update(cur_scope.get(helper_name, {}))
new_kwargs.update(kwargs)
return func(*args, **new_kwargs)
scope_wrapper.__name__ = helper_name
return scope_wrapper
def Register(self, helper):
name = helper.__name__
if name in self._registry:
raise AttributeError(
"Helper {} already exists. Please change your "
"helper name.".format(name)
)
self._registry[name] = helper
def has_helper(self, helper_or_helper_name):
helper_name = (
helper_or_helper_name
if isinstance(helper_or_helper_name, basestring) else
helper_or_helper_name.__name__
)
return helper_name in self._registry
sys.modules[__name__] = HelperWrapper(sys.modules[__name__])
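# A minimal usage sketch (editor's addition, not part of the original
# module): once the module object is replaced by HelperWrapper above, every
# attribute access goes through scope_wrapper, which injects the model's
# arg_scope into the call. The helper name `my_copy` below is hypothetical,
# used only to illustrate Register().
if __name__ == '__main__':
    api = sys.modules[__name__]
    model = ModelHelper(name="wrapper_demo")

    def my_copy(model, blob_in, blob_out, **kwargs):
        # Trivial custom helper: copy one blob to another.
        return model.net.Copy(blob_in, blob_out)

    api.Register(my_copy)
    api.my_copy(model, "x", "y")  # dispatched through scope_wrapper
    api.fc(model, "data", "fc1", dim_in=4, dim_out=2)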
|
## @package net_printer
# Module caffe2.python.net_printer
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto.caffe2_pb2 import OperatorDef, NetDef
from caffe2.python.checkpoint import Job
from caffe2.python.core import Net, ExecutionStep, Plan
from caffe2.python.task import Task, TaskGroup, WorkspaceType, TaskOutput
from collections import defaultdict
from contextlib import contextmanager
from copy import copy
from future.utils import viewkeys
from itertools import chain
from six import binary_type, text_type
class Visitor(object):
@classmethod
def register(cls, Type):
        if not hasattr(cls, 'visitors'):
cls.visitors = []
def _register(func):
cls.visitors.append((Type, func))
return func
return _register
def __call__(self, obj, *args, **kwargs):
if obj is None:
return
for Type, func in self.__class__.visitors:
if isinstance(obj, Type):
return func(self, obj, *args, **kwargs)
raise TypeError('%s: unsupported object type: %s' % (
self.__class__.__name__, type(obj)))
class Analyzer(Visitor):
PREFIXES_TO_IGNORE = {'distributed_ctx_init'}
def __init__(self):
self.workspaces = defaultdict(lambda: defaultdict(lambda: 0))
self.workspace_ctx = []
@property
def workspace(self):
return self.workspace_ctx[-1]
@contextmanager
def set_workspace(self, node=None, ws=None, do_copy=False):
        if ws is None:
            if node is not None:
                ws = self.workspaces[str(node)]
            else:
                ws = self.workspace
if do_copy:
ws = copy(ws)
self.workspace_ctx.append(ws)
yield ws
del self.workspace_ctx[-1]
def define_blob(self, blob):
self.workspace[blob] += 1
def need_blob(self, blob):
if any(blob.startswith(p) for p in Analyzer.PREFIXES_TO_IGNORE):
return
assert blob in self.workspace, 'Blob undefined: %s' % blob
@Analyzer.register(OperatorDef)
def analyze_op(analyzer, op):
for x in op.input:
analyzer.need_blob(x)
for x in op.output:
analyzer.define_blob(x)
@Analyzer.register(Net)
def analyze_net(analyzer, net):
for x in net.Proto().op:
analyzer(x)
@Analyzer.register(ExecutionStep)
def analyze_step(analyzer, step):
proto = step.Proto()
with analyzer.set_workspace(do_copy=proto.create_workspace):
if proto.report_net:
with analyzer.set_workspace(do_copy=True):
analyzer(step.get_net(proto.report_net))
all_new_blobs = set()
substeps = step.Substeps() + [step.get_net(n) for n in proto.network]
for substep in substeps:
with analyzer.set_workspace(
do_copy=proto.concurrent_substeps) as ws_in:
analyzer(substep)
if proto.should_stop_blob:
analyzer.need_blob(proto.should_stop_blob)
if proto.concurrent_substeps:
new_blobs = set(viewkeys(ws_in)) - set(viewkeys(analyzer.workspace))
assert len(all_new_blobs & new_blobs) == 0, (
'Error: Blobs created by multiple parallel steps: %s' % (
', '.join(all_new_blobs & new_blobs)))
all_new_blobs |= new_blobs
for x in all_new_blobs:
analyzer.define_blob(x)
@Analyzer.register(Task)
def analyze_task(analyzer, task):
# check that our plan protobuf is not too large (limit of 64Mb)
step = task.get_step()
plan = Plan(task.node)
plan.AddStep(step)
proto_len = len(plan.Proto().SerializeToString())
    assert proto_len < 2 ** 26, (
        'Due to a protobuf limitation, serialized tasks must be smaller '
        'than 64Mb, but this task has {} bytes.'.format(proto_len))
is_private = task.workspace_type() != WorkspaceType.GLOBAL
with analyzer.set_workspace(do_copy=is_private):
analyzer(step)
@Analyzer.register(TaskGroup)
def analyze_task_group(analyzer, tg):
for task in tg.tasks_by_node().tasks():
with analyzer.set_workspace(node=task.node):
analyzer(task)
@Analyzer.register(Job)
def analyze_job(analyzer, job):
analyzer(job.init_group)
analyzer(job.epoch_group)
def analyze(obj):
"""
Given a Job, visits all the execution steps making sure that:
    - no undefined blobs will be found during execution
- no blob with same name is defined in concurrent steps
"""
Analyzer()(obj)
class Text(object):
def __init__(self):
self._indent = 0
self._lines_in_context = [0]
self.lines = []
@contextmanager
def context(self, text):
if text is not None:
self.add('with %s:' % text)
self._indent += 4
self._lines_in_context.append(0)
yield
if text is not None:
if self._lines_in_context[-1] == 0:
self.add('pass')
self._indent -= 4
del self._lines_in_context[-1]
def add(self, text):
self._lines_in_context[-1] += 1
self.lines.append((' ' * self._indent) + text)
def __str__(self):
return '\n'.join(self.lines)
class Printer(Visitor, Text):
def __init__(self, factor_prefixes=False, c2_syntax=True):
super(Visitor, self).__init__()
super(Text, self).__init__()
self.factor_prefixes = factor_prefixes
self.c2_syntax = c2_syntax
self.c2_net_name = None
def _sanitize_str(s):
if isinstance(s, text_type):
sanitized = s
elif isinstance(s, binary_type):
sanitized = s.decode('ascii', errors='ignore')
else:
sanitized = str(s)
if len(sanitized) < 64:
return "'%s'" % sanitized
else:
return "'%s'" % sanitized[:64] + '...<+len=%d>' % (len(sanitized) - 64)
def _arg_val(arg):
if arg.HasField('f'):
return str(arg.f)
if arg.HasField('i'):
return str(arg.i)
if arg.HasField('s'):
return _sanitize_str(arg.s)
if arg.floats:
return str(list(arg.floats))
if arg.ints:
return str(list(arg.ints))
if arg.strings:
return str([_sanitize_str(s) for s in arg.strings])
return '[]'
def commonprefix(m):
"Given a list of strings, returns the longest common prefix"
if not m:
return ''
s1 = min(m)
s2 = max(m)
for i, c in enumerate(s1):
if c != s2[i]:
return s1[:i]
return s1
def format_value(val):
if isinstance(val, list):
return '[%s]' % ', '.join("'%s'" % str(v) for v in val)
else:
return str(val)
def factor_prefix(vals, do_it):
vals = [format_value(v) for v in vals]
prefix = commonprefix(vals) if len(vals) > 1 and do_it else ''
joined = ', '.join(v[len(prefix):] for v in vals)
return '%s[%s]' % (prefix, joined) if prefix else joined
def call(op, inputs=None, outputs=None, factor_prefixes=False):
if not inputs:
inputs = ''
else:
inputs_v = [a for a in inputs if not isinstance(a, tuple)]
inputs_kv = [a for a in inputs if isinstance(a, tuple)]
inputs = ', '.join(
x
for x in chain(
[factor_prefix(inputs_v, factor_prefixes)],
('%s=%s' % kv for kv in inputs_kv),
)
if x
)
call = '%s(%s)' % (op, inputs)
return call if not outputs else '%s = %s' % (
factor_prefix(outputs, factor_prefixes), call)
def format_device_option(dev_opt):
if not dev_opt or not (
dev_opt.device_type or dev_opt.cuda_gpu_id or dev_opt.node_name):
return None
return call(
'DeviceOption',
[dev_opt.device_type, dev_opt.cuda_gpu_id, "'%s'" % dev_opt.node_name])
@Printer.register(OperatorDef)
def print_op(text, op):
args = [(a.name, _arg_val(a)) for a in op.arg]
dev_opt_txt = format_device_option(op.device_option)
if dev_opt_txt:
args.append(('device_option', dev_opt_txt))
if text.c2_net_name:
text.add(call(
text.c2_net_name + '.' + op.type,
[list(op.input), list(op.output)] + args))
else:
text.add(call(
op.type,
list(op.input) + args,
op.output,
factor_prefixes=text.factor_prefixes))
for arg in op.arg:
if arg.HasField('n'):
with text.context('arg: %s' % arg.name):
text(arg.n)
@Printer.register(NetDef)
def print_net_def(text, net_def):
if text.c2_syntax:
text.add(call('core.Net', ["'%s'" % net_def.name], [net_def.name]))
text.c2_net_name = net_def.name
else:
text.add('# net: %s' % net_def.name)
for op in net_def.op:
text(op)
if text.c2_syntax:
text.c2_net_name = None
@Printer.register(Net)
def print_net(text, net):
text(net.Proto())
def _get_step_context(step):
proto = step.Proto()
if proto.should_stop_blob:
return call('loop'), False
if proto.num_iter and proto.num_iter != 1:
return call('loop', [proto.num_iter]), False
if proto.num_concurrent_instances > 1:
return (
call('parallel',
[('num_instances', proto.num_concurrent_instances)]),
len(step.Substeps()) > 1)
concurrent = proto.concurrent_substeps and len(step.Substeps()) > 1
if concurrent:
return call('parallel'), True
if proto.report_net:
return call('run_once'), False
return None, False
@Printer.register(ExecutionStep)
def print_step(text, step):
proto = step.Proto()
step_ctx, do_substep = _get_step_context(step)
with text.context(step_ctx):
if proto.report_net:
with text.context(call('report_net', [proto.report_interval])):
text(step.get_net(proto.report_net))
substeps = step.Substeps() + [step.get_net(n) for n in proto.network]
for substep in substeps:
sub_proto = (
substep.Proto() if isinstance(substep, ExecutionStep) else None)
if sub_proto is not None and sub_proto.run_every_ms:
substep_ctx = call(
'reporter',
[str(substep), ('interval_ms', sub_proto.run_every_ms)])
elif do_substep:
title = (
'workspace'
if sub_proto is not None and sub_proto.create_workspace else
'step')
substep_ctx = call(title, [str(substep)])
else:
substep_ctx = None
with text.context(substep_ctx):
text(substep)
if proto.should_stop_blob:
text.add(call('yield stop_if', [proto.should_stop_blob]))
def _print_task_output(x):
assert isinstance(x, TaskOutput)
return 'Output[' + ', '.join(str(x) for x in x.names) + ']'
@Printer.register(Task)
def print_task(text, task):
outs = ', '.join(_print_task_output(o) for o in task.outputs())
context = [('node', task.node), ('name', task.name), ('outputs', outs)]
with text.context(call('Task', context)):
text(task.get_step())
@Printer.register(TaskGroup)
def print_task_group(text, tg, header=None):
with text.context(header or call('TaskGroup')):
for task in tg.tasks_by_node().tasks():
text(task)
@Printer.register(Job)
def print_job(text, job):
text(job.init_group, 'Job.current().init_group')
text(job.epoch_group, 'Job.current().epoch_group')
with text.context('Job.current().stop_signals'):
for out in job.stop_signals:
text.add(_print_task_output(out))
text(job.download_group, 'Job.current().download_group')
text(job.exit_group, 'Job.current().exit_group')
def to_string(obj, **kwargs):
"""
Given a Net, ExecutionStep, Task, TaskGroup or Job, produces a string
with detailed description of the execution steps.
"""
printer = Printer(**kwargs)
printer(obj)
return str(printer)
def debug_net(net):
"""
Given a Net, produce another net that logs info about the operator call
before each operator execution. Use for debugging purposes.
"""
    assert isinstance(net, Net)
    debug_net = Net(str(net))
    for op in net.Proto().op:
        text = Printer()
        print_op(text, op)
debug_net.LogInfo(str(text))
debug_net.Proto().op.extend([op])
return debug_net
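# Usage sketch (editor's addition): render a small net both in caffe2-style
# syntax and in the factored functional syntax. Uses only names already
# imported above.
if __name__ == '__main__':
    demo_net = Net('printer_demo')
    demo_net.ConstantFill([], 'ones', shape=[2, 2], value=1.0)
    demo_net.Add(['ones', 'ones'], 'twos')
    print(to_string(demo_net))                       # c2_syntax (default)
    print(to_string(demo_net, c2_syntax=False, factor_prefixes=True))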
|
## @package schema
# Module caffe2.python.schema
"""
Defines a minimal set of data types that allow representing datasets with
arbitrary nested structure, including objects of variable length, such as
maps and lists.
This defines a columnar storage format for such datasets on top of caffe2
tensors. In terms of representational capacity, it can represent most of
the data types supported by the Parquet, ORC, and DWRF file formats.
See comments in operator_test/dataset_ops_test.py for an example and
walkthrough on how to use schema to store and iterate through a structured
in-memory dataset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from caffe2.python import core
from caffe2.python import workspace
from caffe2.python.core import BlobReference
from collections import OrderedDict, namedtuple
from past.builtins import basestring
from future.utils import viewitems, viewkeys, viewvalues
from itertools import islice
from six import StringIO
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
FIELD_SEPARATOR = ':'
def _join_field_name(prefix, suffix):
if prefix and suffix:
return '{}{}{}'.format(prefix, FIELD_SEPARATOR, suffix)
elif prefix:
return prefix
elif suffix:
return suffix
else:
return ''
def _normalize_field(field_or_type_or_blob, keep_blobs=True):
"""Clones/normalizes a field before adding it to a container."""
if isinstance(field_or_type_or_blob, Field):
return field_or_type_or_blob.clone(keep_blobs=keep_blobs)
elif type(field_or_type_or_blob) in (type, np.dtype):
return Scalar(dtype=field_or_type_or_blob)
else:
return Scalar(blob=field_or_type_or_blob)
FeatureSpec = namedtuple(
'FeatureSpec',
[
'feature_type',
'feature_names',
'feature_ids',
'feature_is_request_only',
'desired_hash_size',
]
)
FeatureSpec.__new__.__defaults__ = (None, None, None, None, None)
class Metadata(
namedtuple(
'Metadata', ['categorical_limit', 'expected_value', 'feature_specs']
)
):
"""Represents additional information associated with a scalar in schema.
    `categorical_limit` - for fields of integral type that are guaranteed to
    be non-negative, specifies the maximum possible value plus one. It's
    often used as the size of an embedding table.
    `expected_value` - anticipated average value of elements in the field.
    Usually makes sense for the length fields of lists.
    `feature_specs` - information about the features contained in this field.
    For example, if a field holds more than one feature, it can carry the
    list of feature names contained in it."""
__slots__ = ()
Metadata.__new__.__defaults__ = (None, None, None)
class Field(object):
"""Represents an abstract field type in a dataset.
"""
def __init__(self, children):
"""Derived classes must call this after their initialization."""
self._parent = (None, 0)
offset = 0
self._field_offsets = []
for child in children:
self._field_offsets.append(offset)
offset += len(child.field_names())
self._field_offsets.append(offset)
def clone_schema(self):
return self.clone(keep_blobs=False)
def field_names(self):
"""Return the children field names for this field."""
raise NotImplementedError('Field is an abstract class.')
def field_types(self):
"""Return the numpy.dtype for each of the children fields."""
raise NotImplementedError('Field is an abstract class.')
def field_metadata(self):
"""Return the Metadata for each of the children fields."""
raise NotImplementedError('Field is an abstract class.')
def field_blobs(self):
"""Return the list of blobs with contents for this Field.
Values can either be all numpy.ndarray or BlobReference.
    If any of the fields doesn't have a blob, this throws.
"""
raise NotImplementedError('Field is an abstract class.')
def all_scalars(self):
"""Return the list of all Scalar instances in the Field.
The order is the same as for field_names() or field_blobs()"""
raise NotImplementedError('Field is an abstract class.')
def has_blobs(self):
"""Return True if every scalar of this field has blobs."""
raise NotImplementedError('Field is an abstract class.')
def clone(self, keep_blobs=True):
"""Clone this Field along with its children."""
raise NotImplementedError('Field is an abstract class.')
def _set_parent(self, parent, relative_id):
self._parent = (parent, relative_id)
def slice(self):
"""
Returns a slice representing the range of field ids that belong to
this field. This slice can be used to index a list of fields.
E.g.:
>>> s = Struct(
>>> ('a', Scalar()),
>>> ('b', Struct(
>>> ('b1', Scalar()),
>>> ('b2', Scalar()),
>>> )),
>>> ('c', Scalar()),
>>> )
>>> field_data = ['da', 'db1', 'db2', 'dc']
        >>> field_data[s.b.slice()]
['db1', 'db2']
"""
base_id = self._child_base_id()
return slice(base_id, base_id + len(self.field_names()))
def _child_base_id(self, child_index=None):
"""Get the base id of the given child"""
p, i = self._parent
pos = 0 if child_index is None else self._field_offsets[child_index]
if p:
pos += p._child_base_id(i)
return pos
def __eq__(self, other):
"""Equivalance of two schemas"""
return (
(self.field_names() == other.field_names()) and
(self.field_types() == other.field_types()) and
(self.field_metadata() == other.field_metadata())
)
def _pprint_impl(self, indent, str_buffer):
        raise NotImplementedError('Field is an abstract class.')
def __repr__(self):
str_buffer = StringIO()
self._pprint_impl(0, str_buffer)
contents = str_buffer.getvalue()
str_buffer.close()
return contents
class List(Field):
"""Represents a variable-length list.
Values of a list can also be complex fields such as Lists and Structs.
In addition to the fields exposed by its `values` field, a List exposes an
additional `lengths` field, which will contain the size of each list under
the parent domain.
"""
def __init__(self, values, lengths_blob=None):
if isinstance(lengths_blob, Field):
assert isinstance(lengths_blob, Scalar)
self.lengths = _normalize_field(lengths_blob)
else:
self.lengths = Scalar(np.int32, lengths_blob)
self._items = _normalize_field(values)
self.lengths._set_parent(self, 0)
self._items._set_parent(self, 1)
Field.__init__(self, [self.lengths, self._items])
def field_names(self):
value_fields = self._items.field_names()
return (
['lengths'] + [_join_field_name('values', v) for v in value_fields]
)
def field_types(self):
return self.lengths.field_types() + self._items.field_types()
def field_metadata(self):
return self.lengths.field_metadata() + self._items.field_metadata()
def field_blobs(self):
return self.lengths.field_blobs() + self._items.field_blobs()
def all_scalars(self):
return self.lengths.all_scalars() + self._items.all_scalars()
def has_blobs(self):
return self.lengths.has_blobs() and self._items.has_blobs()
def clone(self, keep_blobs=True):
return List(
_normalize_field(self._items, keep_blobs=keep_blobs),
_normalize_field(self.lengths, keep_blobs=keep_blobs)
)
def _pprint_impl(self, indent, str_buffer):
str_buffer.write(' ' * indent + "List(\n")
str_buffer.write(' ' * (indent + 1) + "lengths=\n")
self.lengths._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * (indent + 1) + "_items=\n")
self._items._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * indent + ")\n")
def __getattr__(self, item):
"""If the value of this list is a struct,
allow to introspect directly into its fields."""
if item.startswith('__'):
raise AttributeError(item)
if isinstance(self._items, Struct):
return getattr(self._items, item)
elif item == 'value' or item == 'items':
return self._items
else:
raise AttributeError('Field not found in list: %s.' % item)
def __getitem__(self, item):
names = item.split(FIELD_SEPARATOR, 1)
if len(names) == 1:
if item == 'lengths':
return self.lengths
elif item == 'values':
return self._items
else:
if names[0] == 'values':
return self._items[names[1]]
raise KeyError('Field not found in list: %s.' % item)
class Struct(Field):
"""Represents a named list of fields sharing the same domain.
"""
def __init__(self, *fields):
""" fields is a list of tuples in format of (name, field). The name is
a string of nested name, e.g., `a`, `a:b`, `a:b:c`. For example
Struct(
('a', Scalar()),
('b:c', Scalar()),
('b:d:e', Scalar()),
('b', Struct(
('f', Scalar()),
)),
)
is equal to
Struct(
('a', Scalar()),
('b', Struct(
('c', Scalar()),
('d', Struct(('e', Scalar()))),
('f', Scalar()),
)),
)
"""
for field in fields:
assert len(field) == 2
assert field[0], 'Field names cannot be empty'
assert field[0] != 'lengths', (
'Struct cannot contain a field named `lengths`.'
)
fields = [(name, _normalize_field(field)) for name, field in fields]
self.fields = OrderedDict()
for name, field in fields:
if FIELD_SEPARATOR in name:
name, field = self._struct_from_nested_name(name, field)
if name not in self.fields:
self.fields[name] = field
continue
if (
not isinstance(field, Struct) or
not isinstance(self.fields[name], Struct)
):
raise ValueError('Duplicate field name: %s' % name)
self.fields[name] = self.fields[name] + field
for id, (_, field) in enumerate(viewitems(self.fields)):
field._set_parent(self, id)
Field.__init__(self, viewvalues(self.fields))
self._frozen = True
def _struct_from_nested_name(self, nested_name, field):
def create_internal(nested_name, field):
names = nested_name.split(FIELD_SEPARATOR, 1)
if len(names) == 1:
added_field = field
else:
added_field = create_internal(names[1], field)
return Struct((names[0], added_field))
names = nested_name.split(FIELD_SEPARATOR, 1)
assert len(names) >= 2
return names[0], create_internal(names[1], field)
def get_children(self):
return list(viewitems(self.fields))
def field_names(self):
names = []
for name, field in viewitems(self.fields):
names += [_join_field_name(name, f) for f in field.field_names()]
return names
def field_types(self):
types = []
for _, field in viewitems(self.fields):
types += field.field_types()
return types
def field_metadata(self):
metadata = []
for _, field in viewitems(self.fields):
metadata += field.field_metadata()
return metadata
def field_blobs(self):
blobs = []
for _, field in viewitems(self.fields):
blobs += field.field_blobs()
return blobs
def all_scalars(self):
scalars = []
for _, field in viewitems(self.fields):
scalars += field.all_scalars()
return scalars
def has_blobs(self):
return all(field.has_blobs() for field in viewvalues(self.fields))
def clone(self, keep_blobs=True):
normalized_fields = [
(k, _normalize_field(v, keep_blobs=keep_blobs))
for k, v in viewitems(self.fields)
]
return Struct(*normalized_fields)
def _get_field_by_nested_name(self, nested_name):
names = nested_name.split(FIELD_SEPARATOR, 1)
field = self.fields.get(names[0], None)
if field is None:
return None
if len(names) == 1:
return field
try:
return field[names[1]]
except (KeyError, TypeError):
return None
def _pprint_impl(self, indent, str_buffer):
str_buffer.write(' ' * indent + "Struct( \n")
for name, field in viewitems(self.fields):
str_buffer.write(' ' * (indent + 1) + "{}=".format(name) + "\n")
field._pprint_impl(indent=indent + 2, str_buffer=str_buffer)
str_buffer.write(' ' * indent + ") \n")
def __contains__(self, item):
field = self._get_field_by_nested_name(item)
return field is not None
def __len__(self):
return len(self.fields)
def __getitem__(self, item):
"""
item can be a tuple or list of ints or strings, or a single
int or string. String item is a nested field name, e.g., "a", "a:b",
"a:b:c". Int item is the index of a field at the first level of the
Struct.
"""
if isinstance(item, list) or isinstance(item, tuple):
keys = list(viewkeys(self.fields))
return Struct(
* [
(
keys[k]
if isinstance(k, int) else k, self[k]
) for k in item
]
)
elif isinstance(item, int):
return next(islice(viewvalues(self.fields), item, None))
else:
field = self._get_field_by_nested_name(item)
if field is None:
raise KeyError('field "%s" not found' % (item))
return field
def get(self, item, default_value):
"""
        Similar to python's dictionary get method: returns the field named
        `item` if found (i.e. self.item is valid), otherwise returns
        `default_value`. It's syntactic sugar for python's builtin getattr.
"""
return getattr(self, item, default_value)
def __getattr__(self, item):
if item.startswith('__'):
raise AttributeError(item)
try:
return self.__dict__['fields'][item]
except KeyError:
raise AttributeError(item)
def __setattr__(self, key, value):
# Disable setting attributes after initialization to prevent false
# impression of being able to overwrite a field.
# Allowing setting internal states mainly so that _parent can be set
# post initialization.
if getattr(self, '_frozen', None) and not key.startswith('_'):
raise TypeError('Struct.__setattr__() is disabled after __init__()')
super(Struct, self).__setattr__(key, value)
def __add__(self, other):
"""
        Allows merging the fields of two schema.Struct instances with the
        '+' operator. If the two Structs have common field names, the merge
        is conducted recursively. Examples:
Example 1
s1 = Struct(('a', Scalar()))
s2 = Struct(('b', Scalar()))
s1 + s2 == Struct(
('a', Scalar()),
('b', Scalar()),
)
Example 2
s1 = Struct(
('a', Scalar()),
('b', Struct(('c', Scalar()))),
)
s2 = Struct(('b', Struct(('d', Scalar()))))
s1 + s2 == Struct(
('a', Scalar()),
('b', Struct(
('c', Scalar()),
('d', Scalar()),
)),
)
"""
if not isinstance(other, Struct):
return NotImplemented
children = OrderedDict(self.get_children())
for name, right_field in other.get_children():
if name not in children:
children[name] = right_field
continue
left_field = children[name]
children[name] = left_field + right_field
return Struct(*(viewitems(children)))
def __sub__(self, other):
"""
        Allows removing the common fields of two schema.Struct instances
        from self with the '-' operator. If the two Structs have common
        field names, the removal is conducted recursively. If a child struct
        ends up with no fields, it is removed from its parent. Examples:
Example 1
s1 = Struct(
('a', Scalar()),
('b', Scalar()),
)
s2 = Struct(('a', Scalar()))
s1 - s2 == Struct(('b', Scalar()))
Example 2
s1 = Struct(
('b', Struct(
('c', Scalar()),
('d', Scalar()),
))
)
s2 = Struct(
('b', Struct(('c', Scalar()))),
)
s1 - s2 == Struct(
('b', Struct(
('d', Scalar()),
)),
)
Example 3
s1 = Struct(
('a', Scalar()),
('b', Struct(
('d', Scalar()),
))
)
s2 = Struct(
('b', Struct(
            ('c', Scalar()),
            ('d', Scalar()),
)),
)
s1 - s2 == Struct(
('a', Scalar()),
)
"""
if not isinstance(other, Struct):
return NotImplemented
children = OrderedDict(self.get_children())
for name, right_field in other.get_children():
if name in children:
left_field = children[name]
if type(left_field) == type(right_field):
if isinstance(left_field, Struct):
child = left_field - right_field
if child.get_children():
children[name] = child
continue
children.pop(name)
else:
raise TypeError(
"Type of left_field, " + str(type(left_field)) +
", is not the same as that of right_field, " +
str(type(right_field)) +
", yet they have the same field name, " + name)
return Struct(*(children.items()))
class Scalar(Field):
"""Represents a typed scalar or tensor of fixed shape.
A Scalar is a leaf in a schema tree, translating to exactly one tensor in
the dataset's underlying storage.
Usually, the tensor storing the actual values of this field is a 1D tensor,
representing a series of values in its domain. It is possible however to
have higher rank values stored as a Scalar, as long as all entries have
the same shape.
E.g.:
Scalar(np.float64)
Scalar field of type float64. Caffe2 will expect readers and
datasets to expose it as a 1D tensor of doubles (vector), where
        the size of the vector is determined by this field's domain.
Scalar((np.int32, 5))
Tensor field of type int32. Caffe2 will expect readers and
datasets to implement it as a 2D tensor (matrix) of shape (L, 5),
        where L is determined by this field's domain.
Scalar((str, (10, 20)))
Tensor field of type str. Caffe2 will expect readers and
datasets to implement it as a 3D tensor of shape (L, 10, 20),
        where L is determined by this field's domain.
If the field type is unknown at construction time, call Scalar(), that will
default to np.void as its dtype.
It is an error to pass a structured dtype to Scalar, since it would contain
more than one field. Instead, use from_dtype, which will construct
a nested `Struct` field reflecting the given dtype's structure.
A Scalar can also contain a blob, which represents the value of this
    Scalar. A blob can be either a numpy.ndarray, in which case it contains
    the actual contents of the Scalar, or a BlobReference, which represents a
    blob living in a caffe2 Workspace. If a blob of another type is passed,
    a conversion to numpy.ndarray is attempted.
"""
def __init__(self, dtype=None, blob=None, metadata=None):
self._metadata = None
self.set(dtype, blob, metadata, unsafe=True)
Field.__init__(self, [])
def field_names(self):
return ['']
def field_type(self):
return self.dtype
def field_types(self):
return [self.dtype]
def field_metadata(self):
return [self._metadata]
def has_blobs(self):
return self._blob is not None
def field_blobs(self):
assert self._blob is not None, 'Value is not set for this field.'
return [self._blob]
def all_scalars(self):
return [self]
def clone(self, keep_blobs=True):
return Scalar(
dtype=self._original_dtype,
blob=self._blob if keep_blobs else None,
metadata=self._metadata
)
def get(self):
"""Gets the current blob of this Scalar field."""
assert self._blob is not None, 'Value is not set for this field.'
return self._blob
def __call__(self):
"""Shortcut for self.get()"""
return self.get()
@property
def metadata(self):
return self._metadata
def set_metadata(self, value):
assert isinstance(value, Metadata), \
'metadata must be Metadata, got {}'.format(type(value))
self._metadata = value
self._validate_metadata()
def _validate_metadata(self):
if self._metadata is None:
return
if (self._metadata.categorical_limit is not None and
self.dtype is not None):
assert np.issubdtype(self.dtype, np.integer), \
"`categorical_limit` can be specified only in integral " + \
"fields but got {}".format(self.dtype)
def set_value(self, blob, throw_on_type_mismatch=False, unsafe=False):
"""Sets only the blob field still validating the existing dtype"""
if self.dtype.base != np.void and throw_on_type_mismatch:
assert isinstance(blob, np.ndarray), "Got {!r}".format(blob)
assert blob.dtype.base == self.dtype.base, (
"Expected {}, got {}".format(self.dtype.base, blob.dtype.base))
self.set(dtype=self._original_dtype, blob=blob, unsafe=unsafe)
def set(self, dtype=None, blob=None, metadata=None, unsafe=False):
"""Set the type and/or blob of this scalar. See __init__ for details.
Args:
dtype: can be any numpy type. If not provided and `blob` is
provided, it will be inferred. If no argument is provided,
this Scalar will be of type np.void.
blob: if provided, can be either a BlobReference or a
numpy.ndarray. If a value of different type is passed,
a conversion to numpy.ndarray is attempted. Strings aren't
accepted, since they can be ambiguous. If you want to pass
                a string, use either BlobReference(blob) or np.array(blob).
metadata: optional instance of Metadata, if provided overrides
the metadata information of the scalar
"""
if not unsafe:
logger.warning(
"Scalar should be considered immutable. Only call Scalar.set() "
"on newly created Scalar with unsafe=True. This will become an "
"error soon."
)
if blob is not None and isinstance(blob, basestring):
raise ValueError(
'Passing str blob to Scalar.set() is ambiguous. '
'Do either set(blob=np.array(blob)) or '
'set(blob=BlobReference(blob))'
)
self._original_dtype = dtype
if dtype is not None:
dtype = np.dtype(dtype)
# If blob is not None and it is not a BlobReference, we assume that
# it is actual tensor data, so we will try to cast it to a numpy array.
if blob is not None and not isinstance(blob, BlobReference):
preserve_shape = isinstance(blob, np.ndarray)
if dtype is not None and dtype != np.void:
blob = np.array(blob, dtype=dtype.base)
# if array is empty we may need to reshape a little
if blob.size == 0 and not preserve_shape:
blob = blob.reshape((0, ) + dtype.shape)
else:
assert isinstance(blob, np.ndarray), (
'Invalid blob type: %s' % str(type(blob)))
# reshape scalars into 1D arrays
# TODO(azzolini): figure out better way of representing this
if len(blob.shape) == 0 and not preserve_shape:
blob = blob.reshape((1, ))
# infer inner shape from the blob given
# TODO(dzhulgakov): tweak this to make it work with PackedStruct
if (len(blob.shape) > 1 and dtype is not None and
dtype.base != np.void):
dtype = np.dtype((dtype.base, blob.shape[1:]))
# if we were still unable to infer the dtype
if dtype is None:
dtype = np.dtype(np.void)
assert not dtype.fields, (
'Cannot create Scalar with a structured dtype. ' +
'Use from_dtype instead.'
)
self.dtype = dtype
self._blob = blob
if metadata is not None:
self.set_metadata(metadata)
self._validate_metadata()
def set_type(self, dtype):
self._original_dtype = dtype
if dtype is not None:
self.dtype = np.dtype(dtype)
else:
self.dtype = np.dtype(np.void)
self._validate_metadata()
def _pprint_impl(self, indent, str_buffer):
str_buffer.write(' ' * (indent) +
'Scalar({!r}, {!r}, {!r})'.format(
self.dtype, self._blob, self._metadata) + "\n")
def id(self):
"""
Return the zero-indexed position of this scalar field in its schema.
Used in order to index into the field_blob list returned by readers or
accepted by writers.
"""
return self._child_base_id()
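# Merge/removal sketch (editor's addition): '+' merges Structs recursively
# and '-' removes common fields, as described in the docstrings above.
if __name__ == '__main__':
    _s1 = Struct(('a', Scalar()), ('b', Struct(('c', Scalar()))))
    _s2 = Struct(('b', Struct(('d', Scalar()))))
    _merged = _s1 + _s2
    assert _merged.field_names() == ['a', 'b:c', 'b:d']
    assert (_merged - _s2).field_names() == ['a', 'b:c']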
def Map(
keys,
values,
keys_name='keys',
values_name='values',
lengths_blob=None
):
"""A map is a List of Struct containing keys and values fields.
    Optionally, you can provide custom names for the key and value fields.
"""
return List(
Struct((keys_name, keys), (values_name, values)),
lengths_blob=lengths_blob
)
def NamedTuple(name_prefix, *fields):
return Struct(* [('%s_%d' % (name_prefix, i), field)
for i, field in enumerate(fields)])
def Tuple(*fields):
"""
Creates a Struct with default, sequential, field names of given types.
"""
return NamedTuple('field', *fields)
def RawTuple(num_fields, name_prefix='field'):
"""
    Creates a tuple of `num_fields` untyped scalars.
"""
assert isinstance(num_fields, int)
assert num_fields >= 0
return NamedTuple(name_prefix, *([np.void] * num_fields))
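# Construction sketch (editor's addition): nested fields flatten into
# colon-separated column names; Map is just a List of a keys/values Struct.
if __name__ == '__main__':
    _schema = Struct(
        ('uid', Scalar(np.int64)),
        ('prefs', Map(Scalar(np.int64), Scalar(np.float32))),
    )
    assert _schema.field_names() == [
        'uid', 'prefs:lengths', 'prefs:values:keys', 'prefs:values:values']
    assert _schema.prefs.lengths.id() == 1  # zero-based flattened position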
def from_dtype(dtype, _outer_shape=()):
"""Constructs a Caffe2 schema from the given numpy's dtype.
Numpy supports scalar, array-like and structured datatypes, as long as
all the shapes are fixed. This function breaks down the given dtype into
a Caffe2 schema containing `Struct` and `Scalar` types.
Fields containing byte offsets are not currently supported.
"""
if not isinstance(dtype, np.dtype):
        # wrap into an np.dtype
shape = _outer_shape
dtype = np.dtype((dtype, _outer_shape))
else:
# concatenate shapes if necessary
shape = _outer_shape + dtype.shape
if shape != dtype.shape:
dtype = np.dtype((dtype.base, shape))
if not dtype.fields:
return Scalar(dtype)
struct_fields = []
    for name, (fdtype, offset) in viewitems(dtype.fields):
        assert offset == 0, ('Fields with byte offsets are not supported.')
        struct_fields.append((name, from_dtype(fdtype, _outer_shape=shape)))
return Struct(*struct_fields)
class _SchemaNode(object):
"""This is a private class used to represent a Schema Node"""
def __init__(self, name, type_str=''):
self.name = name
self.children = []
self.type_str = type_str
self.field = None
def add_child(self, name, type_str=''):
for child in self.children:
if child.name == name and child.type_str == type_str:
return child
child = _SchemaNode(name, type_str)
self.children.append(child)
return child
def get_field(self):
list_names = ['lengths', 'values']
map_names = ['lengths', 'keys', 'values']
if len(self.children) == 0 or self.field is not None:
if self.field is None:
return Struct()
else:
return self.field
child_names = []
for child in self.children:
child_names.append(child.name)
if (set(child_names) == set(list_names)):
for child in self.children:
if child.name == 'values':
values_field = child.get_field()
else:
lengths_field = child.get_field()
self.field = List(
values_field,
lengths_blob=lengths_field
)
self.type_str = "List"
return self.field
elif (set(child_names) == set(map_names)):
for child in self.children:
if child.name == 'keys':
key_field = child.get_field()
elif child.name == 'values':
values_field = child.get_field()
else:
lengths_field = child.get_field()
self.field = Map(
key_field,
values_field,
lengths_blob=lengths_field
)
self.type_str = "Map"
return self.field
else:
struct_fields = []
for child in self.children:
struct_fields.append((child.name, child.get_field()))
self.field = Struct(*struct_fields)
self.type_str = "Struct"
return self.field
def print_recursively(self):
for child in self.children:
child.print_recursively()
logger.info("Printing node: Name and type")
logger.info(self.name)
logger.info(self.type_str)
def from_column_list(
col_names, col_types=None,
col_blobs=None, col_metadata=None
):
"""
Given a list of names, types, and optionally values, construct a Schema.
"""
if col_types is None:
col_types = [None] * len(col_names)
if col_metadata is None:
col_metadata = [None] * len(col_names)
if col_blobs is None:
col_blobs = [None] * len(col_names)
assert len(col_names) == len(col_types), (
'col_names and col_types must have the same length.'
)
assert len(col_names) == len(col_metadata), (
'col_names and col_metadata must have the same length.'
)
assert len(col_names) == len(col_blobs), (
'col_names and col_blobs must have the same length.'
)
root = _SchemaNode('root', 'Struct')
for col_name, col_type, col_blob, col_metadata in zip(
col_names, col_types, col_blobs, col_metadata
):
columns = col_name.split(FIELD_SEPARATOR)
current = root
for i in range(len(columns)):
name = columns[i]
type_str = ''
field = None
if i == len(columns) - 1:
type_str = col_type
field = Scalar(
dtype=col_type,
blob=col_blob,
metadata=col_metadata
)
next = current.add_child(name, type_str)
if field is not None:
next.field = field
current = next
return root.get_field()
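# Sketch (editor's addition): from_column_list() is the inverse of
# field_names(); colon-separated columns rebuild the nested schema,
# recognizing the lengths/values pattern as a List.
if __name__ == '__main__':
    _cols = ['doc:lengths', 'doc:values:keys', 'doc:values:values']
    assert from_column_list(_cols).field_names() == _cols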
def from_blob_list(schema, values, throw_on_type_mismatch=False):
"""
Create a schema that clones the given schema, but containing the given
list of values.
"""
assert isinstance(schema, Field), 'Argument `schema` must be a Field.'
if isinstance(values, BlobReference):
values = [values]
record = schema.clone_schema()
scalars = record.all_scalars()
assert len(scalars) == len(values), (
'Values must have %d elements, got %d.' % (len(scalars), len(values))
)
for scalar, value in zip(scalars, values):
scalar.set_value(value, throw_on_type_mismatch, unsafe=True)
return record
def as_record(value):
if isinstance(value, Field):
return value
elif isinstance(value, list) or isinstance(value, tuple):
is_field_list = all(
            isinstance(f, tuple) and len(f) == 2 and isinstance(f[0], basestring)
for f in value
)
if is_field_list:
return Struct(* [(k, as_record(v)) for k, v in value])
else:
return Tuple(* [as_record(f) for f in value])
elif isinstance(value, dict):
return Struct(* [(k, as_record(v)) for k, v in viewitems(value)])
else:
return _normalize_field(value)
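# Sketch (editor's addition): as_record() promotes plain python containers;
# a list of (name, value) pairs becomes a Struct, other lists become Tuples.
if __name__ == '__main__':
    _rec = as_record([('x', np.float32), ('y', np.int64)])
    assert _rec.field_names() == ['x', 'y']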
def FetchRecord(blob_record, ws=None, throw_on_type_mismatch=False):
"""
    Given a record containing BlobReferences, return a new record with the
    same schema, containing numpy arrays fetched from the current active
    workspace.
"""
def fetch(v):
if ws is None:
return workspace.FetchBlob(str(v))
else:
return ws.blobs[str(v)].fetch()
assert isinstance(blob_record, Field)
field_blobs = blob_record.field_blobs()
assert all(isinstance(v, BlobReference) for v in field_blobs)
field_arrays = [fetch(value) for value in field_blobs]
return from_blob_list(blob_record, field_arrays, throw_on_type_mismatch)
def FeedRecord(blob_record, arrays, ws=None):
"""
    Given a record containing BlobReferences, and `arrays`, which is either
a list of numpy arrays or a Record containing numpy arrays, feeds the
record to the current workspace.
"""
def feed(b, v):
if ws is None:
workspace.FeedBlob(str(b), v)
else:
ws.create_blob(str(b))
ws.blobs[str(b)].feed(v)
assert isinstance(blob_record, Field)
field_blobs = blob_record.field_blobs()
assert all(isinstance(v, BlobReference) for v in field_blobs)
if isinstance(arrays, Field):
# TODO: check schema
arrays = arrays.field_blobs()
assert len(arrays) == len(field_blobs), (
'Values must contain exactly %d ndarrays.' % len(field_blobs)
)
for blob, array in zip(field_blobs, arrays):
feed(blob, array)
def NewRecord(net, schema):
"""
Given a record of np.arrays, create a BlobReference for each one of them,
returning a record containing BlobReferences. The name of each returned blob
    is NextScopedBlob(field_name), which guarantees a unique name in the
    current net. Use NameScope explicitly to avoid name conflicts between
    different nets.
"""
if isinstance(schema, Scalar):
result = schema.clone()
result.set_value(
blob=net.NextScopedBlob('unnamed_scalar'),
unsafe=True,
)
return result
assert isinstance(schema, Field), 'Record must be a schema.Field instance.'
blob_refs = [
net.NextScopedBlob(prefix=name)
for name in schema.field_names()
]
return from_blob_list(schema, blob_refs)
def ConstRecord(net, array_record):
"""
Given a record of arrays, returns a record of blobs,
initialized with net.Const.
"""
blob_record = NewRecord(net, array_record)
for blob, array in zip(
blob_record.field_blobs(), array_record.field_blobs()
):
net.Const(array, blob)
return blob_record
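# Round-trip sketch (editor's addition): create workspace blobs for a
# schema, feed numpy arrays into them, and fetch them back as a record.
if __name__ == '__main__':
    _net = core.Net('record_demo')
    _schema = Struct(('x', Scalar(np.float32)), ('y', Scalar(np.int32)))
    _blobs = NewRecord(_net, _schema)
    FeedRecord(_blobs, [np.array([1.5], dtype=np.float32),
                        np.array([2], dtype=np.int32)])
    _fetched = FetchRecord(_blobs)
    print(_fetched.x.get(), _fetched.y.get())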
def InitEmptyRecord(net, schema_or_record, enforce_types=False):
if not schema_or_record.has_blobs():
record = NewRecord(net, schema_or_record)
else:
record = schema_or_record
for blob_type, blob in zip(record.field_types(), record.field_blobs()):
try:
data_type = data_type_for_dtype(blob_type)
shape = [0] + list(blob_type.shape)
net.ConstantFill([], blob, shape=shape, dtype=data_type)
except TypeError:
logger.warning("Blob {} has type error".format(blob))
# If data_type_for_dtype doesn't know how to resolve given numpy
# type to core.DataType, that function can throw type error (for
# example that would happen for cases of unknown types such as
            # np.void). This is not a problem for cases when the record is going
# to be overwritten by some operator later, though it might be an
# issue for type/shape inference.
if enforce_types:
raise
# If we don't enforce types for all items we'll create a blob with
# the default ConstantFill (FLOAT, no shape)
net.ConstantFill([], blob, shape=[0])
return record
_DATA_TYPE_FOR_DTYPE = [
(np.str, core.DataType.STRING),
(np.float16, core.DataType.FLOAT16),
(np.float32, core.DataType.FLOAT),
(np.float64, core.DataType.DOUBLE),
(np.bool, core.DataType.BOOL),
(np.int8, core.DataType.INT8),
(np.int16, core.DataType.INT16),
(np.int32, core.DataType.INT32),
(np.int64, core.DataType.INT64),
(np.uint8, core.DataType.UINT8),
(np.uint16, core.DataType.UINT16),
]
def is_schema_subset(schema, original_schema):
# TODO add more checks
return set(schema.field_names()).issubset(
set(original_schema.field_names()))
def equal_schemas(schema,
original_schema,
check_field_names=True,
check_field_types=True,
check_field_metas=False):
assert isinstance(schema, Field)
assert isinstance(original_schema, Field)
if check_field_names and (
schema.field_names() != original_schema.field_names()):
return False
if check_field_types and (
schema.field_types() != original_schema.field_types()):
return False
if check_field_metas and (
schema.field_metadata() != original_schema.field_metadata()):
return False
return True
def schema_check(schema, previous=None):
record = as_record(schema)
if previous is not None:
assert equal_schemas(schema, previous)
return record
def data_type_for_dtype(dtype):
for np_type, dt in _DATA_TYPE_FOR_DTYPE:
if dtype.base == np_type:
return dt
raise TypeError('Unknown dtype: ' + str(dtype.base))
def attach_metadata_to_scalars(field, metadata):
for f in field.all_scalars():
f.set_metadata(metadata)
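# Sketch (editor's addition): data_type_for_dtype maps numpy dtypes onto
# the core.DataType enum via the table above.
if __name__ == '__main__':
    assert data_type_for_dtype(np.dtype(np.float32)) == core.DataType.FLOAT
    assert data_type_for_dtype(np.dtype(np.int64)) == core.DataType.INT64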
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.schema import (
Struct, FetchRecord, NewRecord, FeedRecord, InitEmptyRecord)
from caffe2.python import core, workspace
from caffe2.python.session import LocalSession
from caffe2.python.dataset import Dataset
from caffe2.python.pipeline import pipe
from caffe2.python.task import TaskGroup
from caffe2.python.test_util import TestCase
import numpy as np
class TestLocalSession(TestCase):
def test_local_session(self):
init_net = core.Net('init')
src_values = Struct(
('uid', np.array([1, 2, 6])),
('value', np.array([1.4, 1.6, 1.7])))
expected_dst = Struct(
('uid', np.array([2, 4, 12])),
('value', np.array([0.0, 0.0, 0.0])))
with core.NameScope('init'):
src_blobs = NewRecord(init_net, src_values)
dst_blobs = InitEmptyRecord(init_net, src_values.clone_schema())
def proc1(rec):
net = core.Net('proc1')
with core.NameScope('proc1'):
out = NewRecord(net, rec)
net.Add([rec.uid(), rec.uid()], [out.uid()])
out.value.set(blob=rec.value(), unsafe=True)
return [net], out
def proc2(rec):
net = core.Net('proc2')
with core.NameScope('proc2'):
out = NewRecord(net, rec)
out.uid.set(blob=rec.uid(), unsafe=True)
net.Sub([rec.value(), rec.value()], [out.value()])
return [net], out
src_ds = Dataset(src_blobs)
dst_ds = Dataset(dst_blobs)
with TaskGroup() as tg:
out1 = pipe(src_ds.reader(), processor=proc1)
out2 = pipe(out1, processor=proc2)
pipe(out2, dst_ds.writer())
ws = workspace.C.Workspace()
FeedRecord(src_blobs, src_values, ws)
session = LocalSession(ws)
session.run(init_net)
session.run(tg)
output = FetchRecord(dst_blobs, ws=ws)
for a, b in zip(output.field_blobs(), expected_dst.field_blobs()):
np.testing.assert_array_equal(a, b)
|
## @package app
# Module caffe2.python.mint.app
import argparse
import flask
import glob
import numpy as np
import nvd3
import os
import sys
import tornado.httpserver
import tornado.ioloop
import tornado.wsgi
__folder__ = os.path.abspath(os.path.dirname(__file__))
app = flask.Flask(
__name__,
template_folder=os.path.join(__folder__, "templates"),
static_folder=os.path.join(__folder__, "static")
)
args = None
def jsonify_nvd3(chart):
chart.buildcontent()
    # Note(Yangqing): python-nvd3 does not seem to separate the built HTML
    # part and the script part. Luckily, the HTML part is only a <div>, which
    # can be accessed via chart.container; the script part occupies the rest
    # of the html content, which we can find via
    # chart.htmlcontent.find('<script>').
script_start = chart.htmlcontent.find('<script>') + 8
script_end = chart.htmlcontent.find('</script>')
return flask.jsonify(
result=chart.container,
script=chart.htmlcontent[script_start:script_end].strip()
)
def visualize_summary(filename):
try:
data = np.loadtxt(filename)
except Exception as e:
return 'Cannot load file {}: {}'.format(filename, str(e))
chart_name = os.path.splitext(os.path.basename(filename))[0]
chart = nvd3.lineChart(
name=chart_name + '_summary_chart',
height=args.chart_height,
y_axis_format='.03g'
)
if args.sample < 0:
        step = max(data.shape[0] // -args.sample, 1)
else:
step = args.sample
xdata = np.arange(0, data.shape[0], step)
    # data should have 4 columns: min, max, mean, std.
chart.add_serie(x=xdata, y=data[xdata, 0], name='min')
chart.add_serie(x=xdata, y=data[xdata, 1], name='max')
chart.add_serie(x=xdata, y=data[xdata, 2], name='mean')
chart.add_serie(x=xdata, y=data[xdata, 2] + data[xdata, 3], name='m+std')
chart.add_serie(x=xdata, y=data[xdata, 2] - data[xdata, 3], name='m-std')
return jsonify_nvd3(chart)
def visualize_print_log(filename):
try:
data = np.loadtxt(filename)
if data.ndim == 1:
data = data[:, np.newaxis]
except Exception as e:
return 'Cannot load file {}: {}'.format(filename, str(e))
chart_name = os.path.splitext(os.path.basename(filename))[0]
chart = nvd3.lineChart(
name=chart_name + '_log_chart',
height=args.chart_height,
y_axis_format='.03g'
)
if args.sample < 0:
        step = max(data.shape[0] // -args.sample, 1)
else:
step = args.sample
xdata = np.arange(0, data.shape[0], step)
# if there is only one curve, we also show the running min and max
if data.shape[1] == 1:
        # Also compute the running min and max over windows of `step` points.
        trunc_size = data.shape[0] // step
running_mat = data[:trunc_size * step].reshape((trunc_size, step))
chart.add_serie(
x=xdata[:trunc_size],
y=running_mat.min(axis=1),
name='running_min'
)
chart.add_serie(
x=xdata[:trunc_size],
y=running_mat.max(axis=1),
name='running_max'
)
chart.add_serie(x=xdata, y=data[xdata, 0], name=chart_name)
else:
for i in range(0, min(data.shape[1], args.max_curves)):
            # plot each column as its own curve, up to max_curves.
chart.add_serie(
x=xdata,
y=data[xdata, i],
name='{}[{}]'.format(chart_name, i)
)
return jsonify_nvd3(chart)
def visualize_file(filename):
fullname = os.path.join(args.root, filename)
if filename.endswith('summary'):
return visualize_summary(fullname)
elif filename.endswith('log'):
return visualize_print_log(fullname)
else:
return flask.jsonify(
            result='Unsupported file: {}'.format(filename),
script=''
)
@app.route('/')
def index():
files = glob.glob(os.path.join(args.root, "*.*"))
files.sort()
names = [os.path.basename(f) for f in files]
return flask.render_template(
'index.html',
root=args.root,
names=names,
debug_messages=names
)
@app.route('/visualization/<string:name>')
def visualization(name):
ret = visualize_file(name)
return ret
def main(argv):
parser = argparse.ArgumentParser("The mint visualizer.")
parser.add_argument(
'-p',
'--port',
type=int,
default=5000,
help="The flask port to use."
)
parser.add_argument(
'-r',
'--root',
type=str,
default='.',
help="The root folder to read files for visualization."
)
parser.add_argument(
'--max_curves',
type=int,
default=5,
help="The max number of curves to show in a dump tensor."
)
parser.add_argument(
'--chart_height',
type=int,
default=300,
help="The chart height for nvd3."
)
parser.add_argument(
'-s',
'--sample',
type=int,
default=-200,
help="Sample every given number of data points. A negative "
"number means the total points we will sample on the "
"whole curve. Default 100 points."
)
global args
args = parser.parse_args(argv)
server = tornado.httpserver.HTTPServer(tornado.wsgi.WSGIContainer(app))
server.listen(args.port)
print("Tornado server starting on port {}.".format(args.port))
tornado.ioloop.IOLoop.instance().start()
if __name__ == '__main__':
main(sys.argv[1:])
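# Example invocation (editor's note; flags as defined in main() above):
#   python app.py --root /path/to/log/dir --port 5000 --sample -200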
|
# @package adaptive_weight
# Module caffe2.fb.python.layers.adaptive_weight
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
'''
Implementation of adaptive weighting: https://arxiv.org/pdf/1705.07115.pdf
'''
class AdaptiveWeight(ModelLayer):
def __init__(
self,
model,
input_record,
name='adaptive_weight',
optimizer=None,
weights=None,
**kwargs
):
super(AdaptiveWeight,
self).__init__(model, name, input_record, **kwargs)
self.output_schema = schema.Scalar(
np.float32, self.get_next_blob_reference('adaptive_weight')
)
self.data = self.input_record.field_blobs()
self.num = len(self.data)
# mu_i = log(sigma_i^2)
if weights is None:
# mu_i is set such that all initial weights are 1. / num
initializer = ('ConstantFill', {'value': np.log(self.num / 2.)})
else:
assert len(weights) == self.num
weights = np.array(weights).astype(np.float32)
values = np.log(1. / 2. / weights)
initializer = (
'GivenTensorFill', {
'values': values,
'dtype': core.DataType.FLOAT
}
)
self.mu = self.create_param(
param_name='mu',
shape=[self.num],
initializer=initializer,
optimizer=optimizer,
)
def concat_data(self, net):
reshaped = [
net.NextScopedBlob('reshaped_data_%d' % i) for i in range(self.num)
]
# coerce shape for single real values
for i in range(self.num):
net.Reshape(
[self.data[i]],
[reshaped[i], net.NextScopedBlob('new_shape_%d' % i)],
shape=[1]
)
concated = net.NextScopedBlob('concated_data')
net.Concat(
reshaped, [concated, net.NextScopedBlob('concated_new_shape')],
axis=0
)
return concated
def compute_adaptive_sum(self, x, net):
mu_exp = net.NextScopedBlob('mu_exp')
net.Exp(self.mu, mu_exp)
mu_exp_double = net.NextScopedBlob('mu_exp_double')
net.Scale(mu_exp, mu_exp_double, scale=2.0)
weighted_x = net.NextScopedBlob('weighted_x')
net.Div([x, mu_exp_double], weighted_x)
weighted_elements = net.NextScopedBlob('weighted_elements')
net.Add([weighted_x, self.mu], weighted_elements)
net.SumElements(weighted_elements, self.output_schema())
def add_ops(self, net):
data = self.concat_data(net)
self.compute_adaptive_sum(data, net)
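# Numerical sketch (editor's addition): the op sequence above computes
#   sum_i( x_i / (2 * exp(mu_i)) + mu_i ),
# a variant of the Kendall et al. multi-task loss with mu_i = log(sigma_i^2)
# (note the log term here is mu_i rather than mu_i / 2). A plain-numpy
# check, using the layer's default init (all weights equal to 1/num):
if __name__ == '__main__':
    _x = np.array([0.5, 2.0], dtype=np.float32)  # per-task losses
    _mu = np.full(2, np.log(2 / 2.))             # ConstantFill value for num=2
    print(np.sum(_x / (2. * np.exp(_mu)) + _mu))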
|
## @package sampling_train
# Module caffe2.python.layers.sampling_train
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer, get_layer_class
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
class SamplingTrain(ModelLayer):
def __init__(
self,
model,
input_record,
prediction_layer,
output_dims,
subtract_log_odd=True,
name='sampling_train',
**kwargs
):
super(SamplingTrain, self).__init__(
model, name, input_record, **kwargs
)
layer_class = get_layer_class(prediction_layer)
assert issubclass(layer_class, SamplingTrainableMixin)
assert 'indices' in input_record
assert isinstance(input_record.indices, schema.Scalar),\
"input_record.indices is expected to be a schema.Scalar"
assert 'input' in input_record
self.subtract_log_odd = subtract_log_odd
if self.subtract_log_odd:
assert 'sampling_prob' in input_record
self._prediction_layer = layer_class(
model,
input_record.input,
output_dims=output_dims,
**kwargs
)
self._prediction_layer.train_param_blobs = [
model.net.NextBlob(str(blob) + '_sampled')
for blob in self._prediction_layer.param_blobs
]
self.params = self._prediction_layer.params
self.output_schema = self._prediction_layer.output_schema
def add_ops(self, net):
self._prediction_layer.add_ops(net)
def add_train_ops(self, net):
for full_blob, sampled_blob in zip(
self._prediction_layer.param_blobs,
self._prediction_layer.train_param_blobs
):
net.Gather([full_blob, self.input_record.indices()], sampled_blob)
self._prediction_layer.add_train_ops(net)
if not self.subtract_log_odd:
return
log_q = net.Log(self.input_record.sampling_prob(),
net.NextScopedBlob("log_q"))
net.Sub([self.output_schema(), log_q], self.output_schema(),
broadcast=1, use_grad_hack=1)
|
## @package tags
# Module caffe2.python.layers.tags
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import six
from caffe2.python import context
@context.define_context(allow_default=True)
class TagContext(object):
"""
Scope driven way to provide tags to the layers.
"""
def __init__(self, tags=None):
        # Tags are expected to be a list, to keep the order of adding/removing.
self.tags = tags or []
def add_tags(self, tags):
self.tags.extend(tags)
def remove_tags(self, tags):
assert self.tags[-len(tags):] == tags
self.tags = self.tags[:-len(tags)]
class Tags(object):
# TODO(amalevich): Tags might need to live in their own contexts, add this
# split later
EXCLUDE_FROM_TRAIN = 'exclude_from_train'
EXCLUDE_FROM_EVAL = 'exclude_from_eval'
EXCLUDE_FROM_PREDICTION = 'exclude_from_prediction'
EXCLUDE_FROM_ACCUMULATE_PRED = 'exclude_from_accumulate_pred'
PREPROCESSING = 'preprocessing'
HANDLE_AS_SPARSE_LAYER = 'handle_as_sparse_layer'
GRADIENT_FROM_PS = 'gradient_from_ps'
PREFER_GPU = 'prefer_gpu'
CPU_ONLY = 'cpu_only'
    # The following three tags are hints to the distributed training framework.
"""
Indicates a layer contains a sparse shardable parameter. The parameter
    should be sharded, and operators on those parameters should be run on
distributed parameter servers.
"""
SPARSE_SHARDED = 'sparse_sharded'
"""
    Indicates a layer contains sparse parameters among others, and that the
parameters should not be sharded (i.e. should be placed together on a node).
"""
SPARSE_DONT_SHARD = 'sparse_dont_shard'
"""
Used to manually indicate a component for an operator. Parameters for
all operators with the same component should be colocated on the same
parameter server.
"""
COMPONENT = 'component:'
"""
    Valid tag prefixes for the distributed training framework.
"""
DT_TAGS = (SPARSE_SHARDED, SPARSE_DONT_SHARD, COMPONENT)
    # In certain cases we want to have a different schema for training and
    # prediction, e.g. in prediction we might need only a subset of the ids
    # present in the original schema. This tag is one of the ways to mark
    # operators that will be removed from prediction and should override the
    # schema for predictors.
PREDICTION_SCHEMA = 'prediction_schema'
def __init__(self, tags):
if not isinstance(tags, list):
tags = [tags]
self.tags = tags
def __enter__(self):
TagContext.current().add_tags(self.tags)
return self
def __exit__(self, type, value, traceback):
TagContext.current().remove_tags(self.tags)
def __call__(self, func):
@six.wraps(func)
def wrapper(*args, **kwargs):
with self:
return func(*args, **kwargs)
return wrapper
Tags.TRAIN_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.EVAL_ONLY = [Tags.EXCLUDE_FROM_PREDICTION, Tags.EXCLUDE_FROM_TRAIN,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
Tags.PREDICTION_ONLY = [Tags.EXCLUDE_FROM_TRAIN, Tags.EXCLUDE_FROM_EVAL,
Tags.EXCLUDE_FROM_ACCUMULATE_PRED]
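# Usage sketch (editor's addition): Tags works both as a context manager
# and as a decorator; each route pushes onto TagContext and pops on exit.
if __name__ == '__main__':
    with Tags(Tags.EXCLUDE_FROM_PREDICTION):
        assert Tags.EXCLUDE_FROM_PREDICTION in TagContext.current().tags

    @Tags(Tags.PREPROCESSING)
    def build_preproc():
        return list(TagContext.current().tags)

    assert Tags.PREPROCESSING in build_preproc()
    assert Tags.PREPROCESSING not in TagContext.current().tags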
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class MapToRange(ModelLayer):
"""
This layer aims to build a mapping from raw keys to indices within [0, max_index).
The mapping is continuously built during training. The mapping will be frozen during
evaluation and prediction. Unseen keys will be assigned to index 0.
"""
def __init__(
self, model,
input_record,
max_index,
name='map_to_range',
**kwargs
):
super(MapToRange, self).__init__(model, name, input_record, **kwargs)
assert max_index > 0
assert isinstance(input_record, schema.Scalar)
self.max_index = max_index
self.handler = self.create_param(
param_name='handler',
shape=None,
initializer=('LongIndexCreate', {'max_elements': self.max_index}),
optimizer=model.NoOptim
)
self.output_schema = schema.Struct(
('indices', schema.Scalar(
np.int64, self.get_next_blob_reference("indices")
)),
('handler', schema.Scalar(
np.void, self.handler
)),
)
def add_train_ops(self, net):
if self.input_record.field_type().base != np.int64:
keys = net.Cast(
self.input_record(),
net.NextScopedBlob("indices_before_mapping"),
to=core.DataType.INT64
)
else:
keys = self.input_record()
# Load keys into indices
indices = net.IndexGet([self.handler, keys],
self.output_schema.indices())
net.StopGradient(indices, indices)
def add_eval_ops(self, net):
net.IndexFreeze(self.handler, self.handler)
self.add_train_ops(net)
def add_ops(self, net):
self.add_eval_ops(net)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class BatchNormalization(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_normalization',
scale_optim=None,
bias_optim=None,
momentum=0.9,
order='NCHW',
**kwargs
):
super(BatchNormalization, self).__init__(
model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.input_shape = input_record.field_type().shape
if len(self.input_shape) == 3:
if order == "NCHW":
input_dims = self.input_shape[0]
elif order == "NHWC":
input_dims = self.input_shape[2]
else:
raise ValueError("Please specify a correct order")
else:
            assert len(self.input_shape) == 1, (
                "This layer supports only 4D or 2D tensors")
input_dims = self.input_shape[0]
self.output_schema = schema.Scalar(
(np.float32, self.input_shape),
self.get_next_blob_reference('output')
)
self.momentum = momentum
self.order = order
self.scale = self.create_param(param_name='scale',
shape=[input_dims],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=scale_optim)
self.bias = self.create_param(param_name='bias',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=bias_optim)
self.rm = self.create_param(param_name='running_mean',
shape=[input_dims],
initializer=('ConstantFill', {'value': 0.0}),
optimizer=model.NoOptim)
self.riv = self.create_param(param_name='running_inv_var',
shape=[input_dims],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=model.NoOptim)
def _add_ops(self, net, is_test, out_blob=None):
original_input_blob = self.input_record.field_blobs()
input_blob = net.NextScopedBlob('expand_input')
if len(self.input_shape) == 1:
input_blob = net.ExpandDims(original_input_blob,
dims=[2, 3])
else:
input_blob = original_input_blob[0]
if out_blob is None:
bn_output = self.output_schema.field_blobs()
else:
bn_output = out_blob
if is_test:
output_blobs = bn_output
else:
output_blobs = bn_output + [self.rm, self.riv,
net.NextScopedBlob('bn_saved_mean'),
net.NextScopedBlob('bn_saved_iv')]
net.SpatialBN([input_blob, self.scale,
self.bias, self.rm, self.riv],
output_blobs,
momentum=self.momentum,
is_test=is_test,
order=self.order)
if len(self.input_shape) == 1:
net.Squeeze(bn_output,
bn_output,
dims=[2, 3])
def add_train_ops(self, net):
self._add_ops(net, is_test=False)
def add_eval_ops(self, net):
self._add_ops(net, is_test=True)
def add_ops(self, net):
self.add_eval_ops(net)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
IdList
)
import numpy as np
class MergeIdLists(ModelLayer):
"""Merge multiple ID_LISTs into a single ID_LIST
Arguments:
model: A layer model instance
input_record: Tuple (Struct) of ID_LIST features to be
merged
Returns:
the merged ID_LIST feature
"""
def __init__(self, model, input_record, name='merged'):
super(MergeIdLists, self).__init__(model, name, input_record)
assert all(schema.equal_schemas(x, IdList) for x in input_record), \
"Inputs to MergeIdLists should all be IdLists."
assert all(record.items.metadata is not None
for record in self.input_record), \
"Features without metadata are not supported"
merge_dim = max(get_categorical_limit(record)
for record in self.input_record)
assert merge_dim is not None, "Unbounded features are not supported"
self.output_schema = schema.NewRecord(
model.net, schema.List(
schema.Scalar(
np.int64,
blob=model.net.NextBlob(name),
metadata=schema.Metadata(categorical_limit=merge_dim)
)))
def add_ops(self, net):
return net.MergeIdLists(self.input_record.field_blobs(),
self.output_schema.field_blobs())
|
# @package homotopy_weight
# Module caffe2.fb.python.layers.homotopy_weight
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
import logging
logger = logging.getLogger(__name__)
'''
Homotopy weighting between two inputs x, y computes alpha * x + (1 - alpha) * y,
where alpha is a decreasing scalar parameter ranging over [min, max] (default
[0, 1]).
Homotopy methods first solve an "easy" problem (one whose solution is well
known), which is gradually transformed into the target problem.
'''
class HomotopyWeight(ModelLayer):
def __init__(
self,
model,
input_record,
name='homotopy_weight',
min_weight=0.,
max_weight=1.,
half_life=1e6,
quad_life=3e6,
atomic_iter=None,
**kwargs
):
super(HomotopyWeight,
self).__init__(model, name, input_record, **kwargs)
self.output_schema = schema.Scalar(
np.float32, self.get_next_blob_reference('homotopy_weight')
)
data = self.input_record.field_blobs()
assert len(data) == 2
self.x = data[0]
self.y = data[1]
# TODO: currently model building does not have access to iter counter or
# learning rate; it's added at optimization time;
self.use_external_iter = (atomic_iter is not None)
self.atomic_iter = (
atomic_iter if self.use_external_iter else self.create_atomic_iter()
)
# to map lr to [min, max]; alpha = scale * lr + offset
assert max_weight > min_weight
self.scale = float(max_weight - min_weight)
self.offset = self.model.add_global_constant(
'%s_offset_1dfloat' % self.name, float(min_weight)
)
self.gamma, self.power = self.solve_inv_lr_params(half_life, quad_life)
def solve_inv_lr_params(self, half_life, quad_life):
# ensure that the gamma, power is solvable
assert half_life > 0
# convex monotonically decreasing
assert quad_life > 2 * half_life
t = float(quad_life) / float(half_life)
x = t * (1.0 + np.sqrt(2.0)) / 2.0 - np.sqrt(2.0)
gamma = (x - 1.0) / float(half_life)
power = np.log(2.0) / np.log(x)
logger.info(
'homotopy_weighting: found lr param: gamma=%g, power=%g' %
(gamma, power)
)
return gamma, power
def create_atomic_iter(self):
self.mutex = self.create_param(
param_name=('%s_mutex' % self.name),
shape=None,
initializer=('CreateMutex', ),
optimizer=self.model.NoOptim,
)
self.atomic_iter = self.create_param(
param_name=('%s_atomic_iter' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64
}
),
optimizer=self.model.NoOptim,
)
return self.atomic_iter
def update_weight(self, net):
alpha = net.NextScopedBlob('alpha')
beta = net.NextScopedBlob('beta')
lr = net.NextScopedBlob('lr')
comp_lr = net.NextScopedBlob('complementary_lr')
scaled_lr = net.NextScopedBlob('scaled_lr')
scaled_comp_lr = net.NextScopedBlob('scaled_complementary_lr')
if not self.use_external_iter:
net.AtomicIter([self.mutex, self.atomic_iter], [self.atomic_iter])
net.LearningRate(
[self.atomic_iter],
[lr],
policy='inv',
gamma=self.gamma,
power=self.power,
base_lr=1.0,
)
net.Sub([self.model.global_constants['ONE'], lr], [comp_lr])
net.Scale([lr], [scaled_lr], scale=self.scale)
net.Scale([comp_lr], [scaled_comp_lr], scale=self.scale)
        # per the mapping above, alpha = scale * lr + offset; use the scaled
        # blobs (the originals left them computed but unused)
        net.Add([scaled_lr, self.offset], [alpha])
        net.Add([scaled_comp_lr, self.offset], [beta])
return alpha, beta
def add_ops(self, net):
alpha, beta = self.update_weight(net)
# alpha x + beta y
net.WeightedSum([self.x, alpha, self.y, beta], self.output_schema())
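if __name__ == '__main__':
    # Numeric sketch (illustrative, not part of the original layer): the
    # 'inv' LearningRate policy is assumed to compute
    #     lr(t) = base_lr * (1 + gamma * t) ** (-power),
    # so with base_lr=1 the blend decays over iterations and
    # alpha = scale * lr + offset maps it into [min_weight, max_weight].
    gamma, power = 1e-6, 0.5      # assumed example values
    scale, offset = 1.0, 0.0      # min_weight=0.0, max_weight=1.0
    for t in (0, 1e6, 1e8):
        lr = (1.0 + gamma * t) ** (-power)
        alpha = scale * lr + offset
        beta = scale * (1.0 - lr) + offset
        print('iter=%g alpha=%.3f beta=%.3f' % (t, alpha, beta))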
|
## @package sampling_trainable_mixin
# Module caffe2.python.layers.sampling_trainable_mixin
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import abc
import six
class SamplingTrainableMixin(six.with_metaclass(abc.ABCMeta, object)):
def __init__(self, *args, **kwargs):
super(SamplingTrainableMixin, self).__init__(*args, **kwargs)
self._train_param_blobs = None
self._train_param_blobs_frozen = False
@property
@abc.abstractmethod
def param_blobs(self):
"""
List of parameter blobs for prediction net
"""
pass
@property
def train_param_blobs(self):
"""
        If train_param_blobs has not been set before use, default to param_blobs
"""
if self._train_param_blobs is None:
self.train_param_blobs = self.param_blobs
return self._train_param_blobs
@train_param_blobs.setter
def train_param_blobs(self, blobs):
assert not self._train_param_blobs_frozen
assert blobs is not None
self._train_param_blobs_frozen = True
self._train_param_blobs = blobs
@abc.abstractmethod
def _add_ops(self, net, param_blobs):
"""
Add ops to the given net, using the given param_blobs
"""
pass
def add_ops(self, net):
self._add_ops(net, self.param_blobs)
def add_train_ops(self, net):
self._add_ops(net, self.train_param_blobs)
|
## @package last_n_window_collector
# Module caffe2.python.layers.last_n_window_collector
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class LastNWindowCollector(ModelLayer):
"""
Collect last-N samples from input record. If you have complex data,
use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
name='last_n_window_collector', **kwargs):
super(LastNWindowCollector, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
assert isinstance(input_record, schema.Scalar), \
"Got {!r}".format(input_record)
self.last_n = self.create_param(param_name='last_n',
shape=[0],
initializer=('ConstantFill', {}),
optimizer=model.NoOptim)
self.next_blob = self.create_param(
param_name='next',
shape=[],
initializer=('ConstantFill',
{'value': 0, 'dtype': core.DataType.INT32}),
optimizer=model.NoOptim
)
self.mutex = self.create_param(
param_name='mutex',
shape=None,
initializer=('CreateMutex',),
optimizer=model.NoOptim,
)
self.num_visited_blob = self.create_param(
param_name='num_visited',
shape=[],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.output_schema = schema.Struct(
(
'last_n',
schema.from_blob_list(input_record, [self.last_n])
),
('num_visited', schema.Scalar(blob=self.num_visited_blob)),
('mutex', schema.Scalar(blob=self.mutex)),
)
def add_ops(self, net):
net.LastNWindowCollector(
[self.last_n, self.next_blob, self.input_record(), self.mutex,
self.num_visited_blob],
[self.last_n, self.next_blob, self.num_visited_blob],
num_to_collect=self.num_to_collect,
)
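if __name__ == '__main__':
    # Semantics sketch (illustrative, not part of the original layer): a
    # pure-Python model of the circular buffer the LastNWindowCollector op
    # is assumed to maintain -- only the last `num_to_collect` rows survive,
    # stored at positions determined by the write cursor (the `next` blob).
    num_to_collect, last_n, cursor = 3, [], 0
    for row in [1, 2, 3, 4, 5, 6]:
        if len(last_n) < num_to_collect:
            last_n.append(row)
        else:
            last_n[cursor % num_to_collect] = row
        cursor += 1
    print(last_n)  # [4, 5, 6]: only the last 3 rows remain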
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class RandomFourierFeatures(ModelLayer):
"""
Implementation of random fourier feature map for feature processing.
Applies sqrt(2 / output_dims) * cos(wx+b), where:
output_dims is the output feature dimensions, and
wx + b applies FC using randomized, fixed weight and bias parameters
For more information, see the original paper:
https://people.eecs.berkeley.edu/~brecht/papers/07.rah.rec.nips.pdf
Inputs:
output_dims -- output feature dimensions
sigma -- bandwidth for the Gaussian kernel estimator
        w_init -- initialization options for weight parameter
        b_init -- initialization options for bias parameter
"""
def __init__(
self,
model,
input_record,
output_dims,
sigma, # bandwidth
w_init=None,
b_init=None,
name='random_fourier_features',
**kwargs):
super(RandomFourierFeatures, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
input_dims = input_record.field_type().shape[0]
assert input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% input_dims
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.output_schema = schema.Scalar(
(np.float32, (self.output_dims, )),
self.get_next_blob_reference('output')
)
assert sigma > 0.0, "Expected bandwidth > 0, got %s" % sigma
# Initialize train_init_net parameters
w_init = w_init if w_init else (
'GaussianFill', {'mean': 0.0, 'std': 1.0 / sigma}
)
b_init = b_init if b_init else (
'UniformFill', {'min': 0.0, 'max': 2 * np.pi}
)
self.w = self.create_param(param_name='w',
shape=[self.output_dims, input_dims],
initializer=w_init,
optimizer=model.NoOptim)
self.b = self.create_param(param_name='b',
shape=[self.output_dims],
initializer=b_init,
optimizer=model.NoOptim)
def add_ops(self, net):
# Random features: wx + b
cosine_arg = net.FC(self.input_record.field_blobs() + [self.w, self.b],
net.NextScopedBlob("cosine_arg"))
# Apply cosine to new vectors
new_feature_vec = net.Cos([cosine_arg],
net.NextScopedBlob('new_feature_vec'))
# Multiply each element in vector by sqrt(2/D)
scale = np.sqrt(2.0 / self.output_dims)
net.Scale([new_feature_vec],
self.output_schema.field_blobs(),
scale=scale)
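if __name__ == '__main__':
    # Numeric sketch (illustrative, not part of the original layer): with
    # w ~ N(0, 1/sigma^2) and b ~ U[0, 2*pi], the feature map
    # z(x) = sqrt(2/D) * cos(w x + b) satisfies
    #     E[z(x) . z(y)] ~= exp(-||x - y||^2 / (2 * sigma^2)),
    # the Gaussian kernel (Rahimi & Recht, 2007).
    np.random.seed(0)
    D, d, sigma = 4096, 8, 2.0
    w = np.random.normal(0.0, 1.0 / sigma, size=(D, d))
    b = np.random.uniform(0.0, 2.0 * np.pi, size=D)
    x, y = np.random.randn(d), np.random.randn(d)

    def z(v):
        return np.sqrt(2.0 / D) * np.cos(w.dot(v) + b)

    print(z(x).dot(z(y)))                                          # estimate
    print(np.exp(-np.linalg.norm(x - y) ** 2 / (2 * sigma ** 2)))  # exact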
|
## @package fc
# Module caffe2.python.layers.fc
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FC(SamplingTrainableMixin, ModelLayer):
def __init__(self, model, input_record, output_dims, weight_init=None,
bias_init=None, weight_optim=None, bias_optim=None, name='fc',
weight_reg=None, bias_reg=None, **kwargs):
super(FC, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), (
"Incorrect input type {}".format(input_record))
assert len(input_record.field_types()[0].shape) > 0, (
"FC expects limited dimensions of the input tensor")
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FC expects input dimensions > 0, got {}".format(input_dims))
scale = math.sqrt(1.0 / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.w = self.create_param(param_name='w',
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim,
regularizer=weight_reg)
self.b = self.create_param(param_name='b',
shape=[output_dims, ],
initializer=bias_init,
optimizer=bias_optim,
regularizer=bias_reg)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
self.get_next_blob_reference('output')
)
def _add_ops(self, net, params):
net.FC(self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), **self.kwargs)
@property
def param_blobs(self):
return [self.w, self.b]
|
## @package concat
# Module caffe2.python.layers.concat
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from future.utils import viewitems
import numpy as np
import logging
logger = logging.getLogger(__name__)
class Concat(ModelLayer):
"""
Construct Concat layer
Assume that first dimension is batch,
Example:
embedding_dim = 64
input_record = self.new_record(schema.Struct(
('input1', schema.Scalar((np.float32, (embedding_dim, )))),
('input2', schema.Scalar((np.float32, (embedding_dim, )))),
('input3', schema.Scalar((np.float32, (embedding_dim, )))),
))
output = self.model.Concat(input_record)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields) * embedding_dim, )))),
output
)
# Note that in Concat layer we assume first dimension is batch.
# so input is B * embedding_dim
# add_axis=1 make it B * 1 * embedding_dim
# Concat on axis=1 make it B * N * embedding_dim
output = self.model.Concat(input_record, axis=1, add_axis=1)
self.assertEqual(
schema.Scalar((np.float32, ((len(input_record.fields), embedding_dim)))),
output
)
"""
def __init__(self, model, input_record, axis=1, add_axis=0,
name='concat', **kwargs):
super(Concat, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
self.add_axis = add_axis
assert not (axis == 0 and add_axis == 1), \
"It's not allowed to add axis=0"
        assert isinstance(input_record, schema.Struct),\
            "Incorrect input type. Expected Struct, but received: {0}".\
            format(input_record)
shapes = []
for field_name, field_type in viewitems(input_record.fields):
            assert isinstance(field_type, schema.Scalar),\
                "Incorrect input type for {}. Expected Scalar, but got: {}".\
                format(field_name, field_type)
# Assume that first dimension is batch, so actual axis in shape is
# axis - 1
shape = list(field_type.field_type().shape)
if add_axis:
shape.insert(axis - 1, 1)
            assert len(shape) >= axis,\
                "Concat expects limited dimensions of the input tensor"
shapes.append(shape)
logger.info('Concat Layer input shapes: ' + str(shapes))
if axis == 0:
self.output_schema = schema.from_blob_list(
input_record[0],
[self.get_next_blob_reference('output')]
)
return
concat_dim = 0
for shape in shapes:
concat_dim += shape[axis - 1]
shape[axis - 1] = 0
assert shape == shapes[0],\
"Shapes {0} and {1} are not compatible for Concat".\
format(shape, shapes[0])
output_dims = shapes[0]
output_dims[axis - 1] = concat_dim
logger.info('Concat Layer output_dims: ' + str(output_dims))
self.output_schema = schema.Scalar(
(np.float32, output_dims),
self.get_next_blob_reference('output'))
def add_ops(self, net):
net.Concat(
self.input_record.field_blobs(),
[
self.output_schema.field_blobs()[0],
self.output_schema.field_blobs()[0] + "_concat_dims"
],
axis=self.axis,
add_axis=self.add_axis,
)
|
## @package batch_distill_lr_loss
# Module caffe2.python.layers.batch_distill_lr_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchDistillLRLoss(ModelLayer):
def __init__(
self, model, input_record,
name='batch_distill_lr_loss', teacherWeight=0.0, **kwargs):
super(BatchDistillLRLoss, self).__init__(model, name, input_record, **kwargs)
assert teacherWeight >= 0 and teacherWeight <= 1, (
'teacherWeight=%0.2f should be in [0, 1]' % teacherWeight
)
self._teacherWeight = teacherWeight
assert schema.is_schema_subset(
schema.Struct(
('teacher_label', schema.Scalar()),
('label', schema.Scalar()),
('logit', schema.Scalar()),
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output')
)
def add_ops(self, net):
label = self.input_record.label()
if self.input_record.label.field_type() != np.float32:
label = net.Cast(
label,
net.NextScopedBlob('float_label'),
to=core.DataType.FLOAT,
)
# Assuming 1-D input
label = net.ExpandDims(label, net.NextScopedBlob('expanded_label'),
dims=[1])
teacher_label = self.input_record.teacher_label()
if self.input_record.teacher_label.field_type() != np.float32:
teacher_label = net.Cast(
teacher_label,
net.NextScopedBlob('float_teacher_label'),
to=core.DataType.FLOAT,
)
teacher_label = net.ExpandDims(
teacher_label, net.NextScopedBlob('expanded_teacher_label'),
dims=[1])
true_xent = net.SigmoidCrossEntropyWithLogits(
[self.input_record.logit(), label],
net.NextScopedBlob('cross_entropy')
)
teacher_xent = net.SigmoidCrossEntropyWithLogits(
[self.input_record.logit(), teacher_label],
net.NextScopedBlob('teacher_cross_entropy')
)
scaled_true_xent = net.Scale(
true_xent,
net.NextScopedBlob('scaled_cross_entropy'),
scale=1.0 - self._teacherWeight,
)
scaled_teacher_xent = net.Scale(
teacher_xent,
net.NextScopedBlob('scaled_teacher_cross_entropy'),
scale=self._teacherWeight,
)
true_loss = net.AveragedLoss(
scaled_true_xent,
net.NextScopedBlob('true_loss')
)
teacher_loss = net.AveragedLoss(
scaled_teacher_xent,
net.NextScopedBlob('teacher_loss')
)
net.Add(
[true_loss, teacher_loss],
self.output_schema.field_blobs()
)
|
## @package position_weighted
# Module caffe2.python.layers.position_weighted
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import numpy as np
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
ModelLayer,
)
from caffe2.python.layers.tags import Tags
logger = logging.getLogger(__name__)
class PositionWeighted(ModelLayer):
def __init__(self, model, input_record, weight_optim=None,
name="position_weights"):
super(PositionWeighted, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.List), "Incorrect input type"
length_metadata = input_record.lengths.metadata
max_length = (length_metadata.categorical_limit if length_metadata is
not None else None)
if max_length is not None:
self.shape = max_length
else:
self.shape = get_categorical_limit(input_record)
logger.warning(
'{}: categorical_limit of lengths is not available, using '
'categorical_limit of the keys: {}'.format(
str(input_record.lengths()), self.shape))
self.pos_w = self.create_param(param_name='pos_w',
shape=[self.shape, ],
initializer=('ConstantFill', {'value': 1.0}),
optimizer=weight_optim)
self.output_schema = schema.Struct(
('position_weights',
schema.Scalar((np.float32, self.shape),
self.get_next_blob_reference("pos_w_gather")))
)
self.tags.update({Tags.HANDLE_AS_SPARSE_LAYER})
self.tags.update({Tags.GRADIENT_FROM_PS})
def get_memory_usage(self):
return self.shape
def add_ops(self, net):
inc_seq = net.LengthsRangeFill(
[self.input_record.lengths()],
self.input_record.lengths() + '_pos_w_seq'
)
net.Gather(
[self.pos_w, inc_seq],
self.output_schema.position_weights.field_blobs())
|
## @package random_neg_rank_loss
# Module caffe2.python.layers.random_neg_rank_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema, core
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class MarginRankLoss(ModelLayer):
def __init__(self, model, input_record, name='margin_rank_loss',
margin=0.1, average_loss=False, **kwargs):
super(MarginRankLoss, self).__init__(model, name, input_record, **kwargs)
assert margin >= 0, ('For hinge loss, margin should be no less than 0')
self._margin = margin
self._average_loss = average_loss
assert schema.is_schema_subset(
schema.Struct(
('pos_prediction', schema.Scalar()),
('neg_prediction', schema.List(np.float32)),
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
def add_ops(self, net):
neg_score = self.input_record.neg_prediction['values']()
pos_score = net.LengthsTile(
[
self.input_record.pos_prediction(),
self.input_record.neg_prediction['lengths']()
],
net.NextScopedBlob('pos_score_repeated')
)
const_1 = net.ConstantFill(
neg_score,
net.NextScopedBlob('const_1'),
value=1,
dtype=core.DataType.INT32
)
rank_loss = net.MarginRankingCriterion(
[pos_score, neg_score, const_1],
net.NextScopedBlob('rank_loss'),
margin=self._margin,
)
if self._average_loss:
net.AveragedLoss(rank_loss, self.output_schema.field_blobs())
else:
net.ReduceFrontSum(rank_loss, self.output_schema.field_blobs())
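if __name__ == '__main__':
    # Semantics sketch (illustrative, not part of the original layer):
    # MarginRankingCriterion with label y=1 is assumed to compute, per
    # element, loss_i = max(0, -(pos_i - neg_i) + margin), penalizing any
    # negative scored within `margin` of the (tiled) positive score.
    margin = 0.1
    pos = np.array([0.9, 0.9, 0.9], dtype=np.float32)
    neg = np.array([0.2, 0.85, 1.0], dtype=np.float32)
    print(np.maximum(0.0, neg - pos + margin))  # ~[0., 0.05, 0.2]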
|
## @package batch_softmax_loss
# Module caffe2.python.layers.batch_softmax_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class BatchSoftmaxLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_softmax_loss',
label_smoothing_matrix=None,
label_prob=False,
**kwargs
):
super(BatchSoftmaxLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar()),
),
input_record
)
self.label_prob = label_prob
        # label smoothing matrix: a K x K matrix where K is the label
        # cardinality; the (i, j) element is the weight for label i being
        # treated/smoothed as label j
self.label_smoothing_matrix = label_smoothing_matrix
if self.label_smoothing_matrix is not None:
self.initialize_label_smoothing_constants()
self.output_schema = schema.Struct(
(
'softmax', schema.Scalar(
input_record.prediction.field_type(),
self.get_next_blob_reference('softmax')
)
),
(
'loss', schema.Scalar(
np.float32, self.get_next_blob_reference('loss')
)
),
)
def initialize_label_smoothing_constants(self):
assert self.label_smoothing_matrix is not None
self.label_smoothing_matrix = np.array(
self.label_smoothing_matrix).astype(np.float32)
assert len(self.label_smoothing_matrix.shape) == 2
label_dim = self.label_smoothing_matrix.shape[0]
assert label_dim == self.label_smoothing_matrix.shape[1]
self.label_smoothing_matrix = self.model.add_global_constant(
'%s_label_smoothing_matrix' % self.name,
array=self.label_smoothing_matrix,
dtype=np.dtype(np.float32),
)
self.label_dim = self.model.add_global_constant(
'%s_label_dim' % self.name,
array=label_dim,
dtype=np.dtype(np.int64),
)
# default case: label is given NOT as target distribution
# but when used in label smoothing, the label must be in probabilities
self.label_prob = True
def compute_smoothed_label(self, net):
assert self.label_smoothing_matrix is not None
label = self.input_record.label()
original_label_type = self.input_record.label.field_type()
if original_label_type.base != np.int64:
int64_label = net.NextScopedBlob('int64_label')
net.Cast([label], [int64_label], to=core.DataType.INT64)
else:
int64_label = label
one_hot_label = net.NextScopedBlob('one_hot_label')
smoothed_label = net.NextScopedBlob('smoothed_label')
net.OneHot([int64_label, self.label_dim], [one_hot_label])
net.MatMul([one_hot_label, self.label_smoothing_matrix], smoothed_label)
return smoothed_label
def add_ops(self, net):
label = self.input_record.label.field_blobs()
if self.label_smoothing_matrix is not None:
label = [self.compute_smoothed_label(net)]
elif not self.label_prob:
if self.input_record.label.field_types()[0].base != np.int32:
label = [
net.Cast(label,
net.NextScopedBlob('int32_label'),
to=core.DataType.INT32)
]
softmax_input = self.input_record.prediction.field_blobs() + label
if 'weight' in self.input_record:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
softmax_input += [weight_blob]
net.SoftmaxWithLoss(
softmax_input,
self.output_schema.field_blobs(),
label_prob=self.label_prob,
)
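if __name__ == '__main__':
    # Numeric sketch (illustrative, not part of the original layer): label
    # smoothing above is a row lookup in the K x K smoothing matrix, since
    # one_hot(label) @ M selects row `label` of M.
    M = np.array([[0.9, 0.1],
                  [0.1, 0.9]], dtype=np.float32)  # assumed 2-class matrix
    label = np.array([1, 0, 1])
    one_hot = np.eye(2, dtype=np.float32)[label]
    print(one_hot.dot(M))  # rows of M indexed by label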
|
## @package add_bias
# Module caffe2.python.layers.add_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import math
class AddBias(ModelLayer):
def __init__(self, model, input_record, bias_init=None,
bias_optim=None, name='add_bias'):
super(AddBias, self).__init__(model, name, input_record)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_type().shape) > 0, (
"AddBias expects limited dimensions of the input tensor")
input_dims = input_record.field_type().shape[0]
assert input_dims > 0, (
"AddBias expects input dimensions > 0, got {}".format(input_dims))
scale = math.sqrt(1.0 / input_dims)
bias_init = bias_init if bias_init else (
'UniformFill', {'min': -scale, 'max': scale})
self.b = self.create_param(
param_name='b',
shape=[input_dims, ],
initializer=bias_init,
optimizer=bias_optim,
)
self.output_schema = schema.Scalar(
(input_record.field_type().base, (input_dims, )),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
net.Add(self.input_record.field_blobs() + [self.b],
self.output_schema.field_blobs(), broadcast=1)
|
## @package fc_without_bias
# Module caffe2.python.layers.fc_without_bias
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.sampling_trainable_mixin import SamplingTrainableMixin
import math
import numpy as np
class FCWithoutBias(SamplingTrainableMixin, ModelLayer):
def __init__(
self,
model,
input_record,
output_dims,
weight_init=None,
weight_optim=None,
name='fc_without_bias',
**kwargs
):
super(FCWithoutBias, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert len(input_record.field_types()[0].shape) > 0, (
"FCWithoutBias expects limited dimensions of the input tensor"
)
input_dims = input_record.field_types()[0].shape[0]
assert input_dims > 0, (
"FCWithoutBias expects input dimensions > 0, got {}".format(input_dims)
)
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
self.get_next_blob_reference('output')
)
scale = math.sqrt(1.0 / input_dims)
weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale,
'max': scale}
)
self.w = self.create_param(param_name='w',
shape=[output_dims, input_dims],
initializer=weight_init,
optimizer=weight_optim)
def _add_ops(self, net, params):
net.MatMul(
self.input_record.field_blobs() + params,
self.output_schema.field_blobs(), trans_b=1, **self.kwargs
)
@property
def param_blobs(self):
return [self.w]
|
## @package dot_product
# Module caffe2.python.layers.dot_product
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class PairwiseDotProduct(ModelLayer):
def __init__(self, model, input_record, output_dim,
name='pairwise_dot_product', **kwargs):
super(PairwiseDotProduct, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(input_record, schema.Struct), (
            "Incorrect input type. Expected Struct, but received: {0}".
            format(input_record))
assert (
('all_embeddings' in input_record) ^
('x_embeddings' in input_record and 'y_embeddings' in input_record)
), (
"either (all_embeddings) xor (x_embeddings and y_embeddings) " +
"should be given."
)
if 'all_embeddings' in input_record:
x_embeddings = input_record['all_embeddings']
y_embeddings = input_record['all_embeddings']
else:
x_embeddings = input_record['x_embeddings']
y_embeddings = input_record['y_embeddings']
assert isinstance(x_embeddings, schema.Scalar), (
"Incorrect input type for x. Expected Scalar, " +
"but received: {0}".format(x_embeddings))
assert isinstance(y_embeddings, schema.Scalar), (
"Incorrect input type for y. Expected Scalar, " +
"but received: {0}".format(y_embeddings)
)
if 'indices_to_gather' in input_record:
indices_to_gather = input_record['indices_to_gather']
assert isinstance(indices_to_gather, schema.Scalar), (
"Incorrect type of indices_to_gather. "
"Expected Scalar, but received: {0}".format(indices_to_gather)
)
self.indices_to_gather = indices_to_gather
else:
self.indices_to_gather = None
self.x_embeddings = x_embeddings
self.y_embeddings = y_embeddings
dtype = x_embeddings.field_types()[0].base
self.output_schema = schema.Scalar(
(dtype, (output_dim,)),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
Y = net.BatchMatMul(
[self.x_embeddings(), self.y_embeddings()],
[self.x_embeddings() + '_matmul'],
trans_b=1,
)
if self.indices_to_gather:
flattened = net.Flatten(
Y, Y + '_flatten',
)
net.BatchGather(
[flattened, self.indices_to_gather()],
self.output_schema(),
)
else:
net.Flatten(Y, self.output_schema())
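if __name__ == '__main__':
    # Semantics sketch (illustrative, not part of the original layer):
    # BatchMatMul with trans_b=1 followed by Flatten yields, per batch
    # element, all pairwise dot products between x rows and y rows.
    import numpy as np
    x = np.random.randn(2, 3, 4)  # batch of 3 x-embeddings of dim 4
    y = np.random.randn(2, 3, 4)  # batch of 3 y-embeddings of dim 4
    pairwise = np.matmul(x, np.transpose(y, (0, 2, 1)))  # shape (2, 3, 3)
    flat = pairwise.reshape(2, -1)                       # shape (2, 9)
    # flat[b, i * 3 + j] == x[b, i] . y[b, j]
    assert np.allclose(flat[0, 1], x[0, 0].dot(y[0, 1]))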
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class ArcCosineFeatureMap(ModelLayer):
"""
A general version of the arc-cosine kernel feature map (s = 1 restores
the original arc-cosine kernel feature map).
Applies H(x) * x^s, where H is the Heaviside step function and x is the
input after applying FC (such that x = w * x_orig + b).
For more information, see the original paper:
http://cseweb.ucsd.edu/~saul/papers/nips09_kernel.pdf
Inputs :
output_dims -- dimensions of the output vector
s -- degree to raise transformed features
scale -- amount to scale the standard deviation
weight_init -- initialization distribution for weight parameter
        bias_init -- initialization distribution for bias parameter
weight_optim -- optimizer for weight params; None for random features
bias_optim -- optimizer for bias param; None for random features
set_weight_as_global_constant -- if True, initialized random parameters
will be constant across all distributed
instances of the layer
initialize_output_schema -- if True, initialize output schema as Scalar
from Arc Cosine; else output schema is None
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
scale=1.0,
weight_init=None,
bias_init=None,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=False,
initialize_output_schema=True,
name='arc_cosine_feature_map',
**kwargs):
super(ArcCosineFeatureMap, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
self.params = []
self.model = model
self.set_weight_as_global_constant = set_weight_as_global_constant
self.input_dims = input_record.field_type().shape[0]
assert self.input_dims >= 1, "Expected input dimensions >= 1, got %s" \
% self.input_dims
if initialize_output_schema:
self.output_schema = schema.Scalar(
(np.float32, (output_dims, )),
model.net.NextScopedBlob(name + '_output')
)
self.output_dims = output_dims
assert self.output_dims >= 1, "Expected output dimensions >= 1, got %s" \
% self.output_dims
self.s = s
assert (self.s >= 0), "Expected s >= 0, got %s" % self.s
assert isinstance(self.s, int), "Expected s to be type int, got type %s" \
% type(self.s)
assert (scale > 0.0), "Expected scale > 0, got %s" % scale
self.stddev = scale * np.sqrt(1.0 / self.input_dims)
# Initialize train_init_net parameters
# Random Parameters
if set_weight_as_global_constant:
w_init = np.random.normal(scale=self.stddev,
size=(self.output_dims, self.input_dims))
b_init = np.random.uniform(low=-0.5 * self.stddev,
high=0.5 * self.stddev,
size=self.output_dims)
self.random_w = self.model.add_global_constant(
name=self.name + "_fixed_rand_W",
array=w_init
)
self.random_b = self.model.add_global_constant(
name=self.name + "_fixed_rand_b",
array=b_init
)
else:
(self.random_w, self.random_b) = self._initialize_params(
'random_w',
'random_b',
w_init=weight_init,
b_init=bias_init,
w_optim=weight_optim,
b_optim=bias_optim
)
def _initialize_params(self, w_name, b_name, w_init=None, b_init=None,
w_optim=None, b_optim=None):
"""
Initializes the Layer Parameters for weight and bias terms for features
Inputs :
w_blob -- blob to contain w values
b_blob -- blob to contain b values
w_init -- initialization distribution for weight parameter
b_init -- initialization distribution for bias parameter
w_optim -- optimizer to use for w; if None, then will use no optimizer
b_optim -- optimizer to user for b; if None, then will use no optimizer
"""
w_init = w_init if w_init else (
'GaussianFill', {'mean': 0.0, 'std': self.stddev}
)
w_optim = w_optim if w_optim else self.model.NoOptim
b_init = b_init if b_init else (
'UniformFill', {'min': -0.5 * self.stddev, 'max': 0.5 * self.stddev}
)
b_optim = b_optim if b_optim else self.model.NoOptim
w_param = self.create_param(param_name=w_name,
shape=(self.output_dims, self.input_dims),
initializer=w_init,
optimizer=w_optim)
b_param = self.create_param(param_name=b_name,
shape=[self.output_dims],
initializer=b_init,
optimizer=b_optim)
return [w_param, b_param]
def _heaviside_with_power(self, net, input_features, output_blob, s):
"""
Applies Heaviside step function and Relu / exponentiation to features
depending on the value of s.
Inputs:
net -- net with operators
        input_features -- features to process
output_blob -- output blob reference
s -- degree to raise the transformed features
"""
if s == 0:
softsign_features = net.Softsign([input_features],
net.NextScopedBlob('softsign'))
return net.Relu(softsign_features, output_blob)
elif s == 1:
return net.Relu([input_features],
output_blob)
else:
relu_features = net.Relu([input_features],
net.NextScopedBlob('relu_rand'))
pow_features = net.Pow([input_features],
net.NextScopedBlob('pow_rand'),
exponent=float(s - 1))
return net.Mul([relu_features, pow_features],
output_blob)
def add_ops(self, net):
input_blob = self.input_record.field_blobs()
# Random features: wx + b
random_features = net.FC(input_blob + [self.random_w, self.random_b],
net.NextScopedBlob('random_features'))
# Process random features
self._heaviside_with_power(net,
random_features,
self.output_schema.field_blobs(),
self.s)
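if __name__ == '__main__':
    # Numeric sketch (illustrative, not part of the original layer): for
    # s >= 1 the map above equals H(x) * x^s elementwise, because
    # Relu(x) * x^(s-1) = H(x) * x * x^(s-1); s == 1 reduces to Relu, and
    # s == 0 uses Relu(Softsign(x)) as a smooth stand-in for the step H(x).
    x = np.array([-2.0, -0.5, 0.5, 2.0], dtype=np.float32)
    s = 2
    print(np.maximum(x, 0.0) * x ** (s - 1))  # [0., 0., 0.25, 4.]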
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from importlib import import_module
import pkgutil
import sys
from . import layers
def import_recursive(package):
"""
Takes a package and imports all modules underneath it
"""
pkg_dir = package.__path__
module_location = package.__name__
for (_module_loader, name, ispkg) in pkgutil.iter_modules(pkg_dir):
module_name = "{}.{}".format(module_location, name) # Module/package
module = import_module(module_name)
if ispkg:
import_recursive(module)
def find_subclasses_recursively(base_cls, sub_cls):
cur_sub_cls = base_cls.__subclasses__()
sub_cls.update(cur_sub_cls)
for cls in cur_sub_cls:
find_subclasses_recursively(cls, sub_cls)
import_recursive(sys.modules[__name__])
model_layer_subcls = set()
find_subclasses_recursively(layers.ModelLayer, model_layer_subcls)
for cls in list(model_layer_subcls):
layers.register_layer(cls.__name__, cls)
|
## @package sparse_lookup
# Module caffe2.python.layers.sparse_lookup
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python.helpers.arg_scope import get_current_scope
from caffe2.python import schema
from caffe2.python.layers.layers import (
get_categorical_limit,
get_key,
IdList,
IdScoreList,
LayerPsParam,
ModelLayer,
)
import collections
import functools
import math
import numpy as np
import operator
def get_sparse_lookup_predictor_version(version):
assert version in {'fp32', 'fp16', 'uint8rowwise', 'fused_uint8rowwise'},\
"Unexpected version of sparse_lookup layer {0}".format(version)
return version
def _is_id_list(input_record):
return schema.equal_schemas(input_record, IdList)
def _is_id_score_list(input_record):
return schema.equal_schemas(input_record,
IdScoreList,
check_field_types=False)
class SparseLookup(ModelLayer):
_id_list_supported_reducers = [
'LogMeanExp', 'LogSumExp', 'Max', 'Mean', 'Sum',
'WeightedSum', 'WeightedMean', 'Sqrt', 'None']
_id_score_list_supported_reducers = [
'PositionWeighted', 'Mean', 'Sum', 'WeightedSum', 'WeightedMean', 'None']
def __init__(self, model, input_record, inner_shape, reducer,
weight_init=None, weight_optim=None,
name='sparse_lookup', regularizer=None, **kwargs):
super(SparseLookup, self).__init__(model, name, input_record, **kwargs)
# TODO Add some asserts about input type
if isinstance(inner_shape, int):
inner_shape = [inner_shape]
assert isinstance(inner_shape, list) or isinstance(inner_shape, tuple),\
"Unexpected type for inner_shape, expected list or tuple, got {0}".\
format(type(inner_shape))
if reducer == "PositionWeighted":
            assert _is_id_score_list(self.input_record), (
                "PositionWeighted only supports IdScoreList, but got {}; " +
                "please use the PositionWeighted layer to convert IdList " +
                "to IdScoreList").format(repr(self.input_record))
self.external_weights = input_record.values()
self.reducer = reducer
input_dim = get_categorical_limit(input_record)
assert input_dim > 0, (
"{} should have categorical limit > 0, but got {}".format(
get_key(input_record)(), input_dim))
scale = math.sqrt(1.0 / input_dim)
self.shape = [input_dim] + inner_shape
self.weight_init = weight_init if weight_init else (
'UniformFill', {'min': -scale, 'max': scale})
if _is_id_list(self.input_record):
sparse_key = self.input_record.items()
elif _is_id_score_list(self.input_record):
sparse_key = self.input_record.keys()
else:
raise NotImplementedError()
if self.input_record.lengths.metadata:
avg_length = self.input_record.lengths.metadata.expected_value
else:
avg_length = None
self.w = self.create_param(
param_name='w',
shape=self.shape,
initializer=self.weight_init,
optimizer=weight_optim,
ps_param=LayerPsParam(
sparse_key=sparse_key,
average_length=avg_length),
regularizer=regularizer
)
self.scale_bias_init = ('ConstantFill', {'value': 0.0})
self.scale_bias = self.create_param(
param_name='scale_bias',
shape=[],
initializer=self.scale_bias_init,
optimizer=model.NoOptim,
)
self.output_schema = schema.Scalar(
(np.float32, inner_shape),
self.get_next_blob_reference('output'),
)
def get_memory_usage(self):
return functools.reduce(operator.mul, self.shape) * 4
def get_fp16_compatible_parameters(self):
return [self.w]
def support_8bit(self):
        # Rowwise quantization makes sense only if the shape is a 2D
        # matrix with second dimension >= 8
if len(self.shape) != 2 or self.shape[1] < 8:
return False
return True
def get_8bits_compatible_parameters(self, fused=True):
if not self.support_8bit():
return []
if fused:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w'
)
return [RowwiseQuantized8BitsWeight(self.w)]
else:
RowwiseQuantized8BitsWeight = collections.namedtuple(
'RowwiseQuantized8BitsWeight', 'w, scale_bias'
)
return [RowwiseQuantized8BitsWeight(self.w, self.scale_bias)]
def _gather_wrapper(self, net, version, in_indices, out):
# Gather can work on all kinds of input data types, and output
# data with the same type. Convert the output of Gather to float,
# because the follow-up Ops expect fp32.
if version == 'fp32':
return net.Gather([self.w, in_indices], out)
elif version == 'fp16':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.HalfToFloat(gathered_w, out)
elif version == 'uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
gathered_scale_bias = net.Gather(
[self.scale_bias, in_indices],
'gathered_scale_bias'
)
return net.Rowwise8BitQuantizedToFloat(
[gathered_w, gathered_scale_bias], out)
elif version == 'fused_uint8rowwise':
gathered_w = net.Gather([self.w, in_indices], 'gathered_w')
return net.Fused8BitRowwiseQuantizedToFloat(gathered_w, out)
else:
raise "Unsupported version of operators in SparseLookup " +\
"layer: {0}".format(version)
def _sparse_lengths_weighted_reducer(
self, in_indices, weights, reducer,
net, version, grad_on_weights=0):
op_input = [
self.w,
weights,
in_indices,
self.input_record.lengths()
]
layer_name = 'SparseLengths' + reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
grad_on_weights=grad_on_weights,
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
# deal with sparse features of id_list type
def _add_ops_id_list(self, net, version):
assert self.reducer in self._id_list_supported_reducers, (
"Unsupported reducer: {} for ID_LIST".format(self.reducer)
)
if self.reducer in ['Sum', 'Mean', 'WeightedSum', 'WeightedMean']:
op_input = [self.w,
self.input_record.items(),
self.input_record.lengths()]
# For id list features, the behaviors of 'Sum' and
# 'WeightedSum' are identical, since we can regard the weight on each
# id as 1. Similarly, for 'Mean' and 'WeightedMean'.
if self.reducer == 'WeightedSum':
self.reducer = 'Sum'
elif self.reducer == 'WeightedMean':
self.reducer = 'Mean'
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
# SparseLengths* Ops will accept either fp16 or fp32 embedding
# matrix and output fp32 pooled embedding
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
elif version == 'uint8rowwise':
op_input.insert(len(op_input), self.scale_bias)
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
elif self.reducer == 'Sqrt':
sqrt_weight = net.LengthsToWeights(
[self.input_record.lengths()],
[net.NextScopedBlob('lengths_sqrt')],
power=0.5,
)
self._sparse_lengths_weighted_reducer(
self.input_record.items(),
sqrt_weight,
'WeightedSum', net, version)
elif self.reducer == 'None':
# Gather operator will gather the embedding for each id of
# each IdList.
self._gather_wrapper(net, version, self.input_record.items(),
self.output_schema.field_blobs())
else:
table_rows = self._gather_wrapper(
net, version, self.input_record.items(), 'table_rows')
segment_ids = net.LengthsToSegmentIds(
self.input_record.lengths(),
self.input_record.lengths() + '_sid')
net.__getattr__('SortedSegmentRange' + self.reducer)(
[table_rows, segment_ids],
self.output_schema.field_blobs(),
)
# deal with sparse features of id_score_list type
def _add_ops_id_score_list(self, net, version):
assert self.reducer in self._id_score_list_supported_reducers, (
"Unsupported reducer: {} for ID_SCORE_LIST".format(self.reducer)
)
if self.reducer in ['WeightedSum', 'WeightedMean']:
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.input_record.values(),
self.reducer, net, version)
elif self.reducer in ['Sum', 'Mean']:
op_input = [self.w,
self.input_record.keys(),
self.input_record.lengths()]
layer_name = 'SparseLengths' + self.reducer
if version in ['fp32', 'fp16']:
net.__getattr__(layer_name)(
op_input,
self.output_schema.field_blobs(),
)
elif version == 'uint8rowwise':
net.__getattr__(layer_name + '8BitsRowwise')(
op_input, self.output_schema.field_blobs())
elif version == 'fused_uint8rowwise':
net.__getattr__(layer_name + 'Fused8BitRowwise')(
op_input, self.output_schema.field_blobs())
else:
raise "Unsupported version of operator in SparseLookUp " +\
"layer: {0}".format(version)
elif self.reducer == 'PositionWeighted':
self._sparse_lengths_weighted_reducer(
self.input_record.keys(),
self.external_weights,
'WeightedSum', net, version, grad_on_weights=1)
elif self.reducer == 'None':
            # Gather operator will gather the embedding for each id of
            # each IdScoreList.
self._gather_wrapper(net, version, self.input_record.keys(),
self.output_schema.field_blobs())
else:
raise "Only Sum, Mean, None are supported for IdScoreList input." +\
"Trying to create with {}".format(self.reducer)
def add_ops(self, net):
cur_scope = get_current_scope()
version = get_sparse_lookup_predictor_version(
**cur_scope.get(get_sparse_lookup_predictor_version.__name__,
{'version': 'fp32'}))
# TODO(amalevich): Layer should not be responsible for decision about
# quantization.
if not self.support_8bit() and version in {'uint8rowwise',
'fused_uint8rowwise'}:
version = 'fp32'
if _is_id_list(self.input_record):
self._add_ops_id_list(net, version=version)
elif _is_id_score_list(self.input_record):
self._add_ops_id_score_list(net, version=version)
else:
raise "Unsupported input type {0}".format(self.input_record)
|
## @package batch_sigmoid_cross_entropy_loss
# Module caffe2.python.layers.batch_sigmoid_cross_entropy_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
from caffe2.python.layers.tags import Tags
import numpy as np
class BatchSigmoidCrossEntropyLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_sigmoid_cross_entropy_loss',
**kwargs
):
super(BatchSigmoidCrossEntropyLoss, self).__init__(
model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar(np.float32)),
('prediction', schema.Scalar(np.float32)),
),
input_record
)
assert input_record.prediction.field_type().shape == \
input_record.label.field_type().shape, \
"prediction and label must have the same shape"
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
(np.float32, tuple()), self.get_next_blob_reference('loss')
)
def add_ops(self, net):
sigmoid_cross_entropy = net.SigmoidCrossEntropyWithLogits(
[self.input_record.prediction(), self.input_record.label()],
net.NextScopedBlob('sigmoid_cross_entropy')
)
net.AveragedLoss(
sigmoid_cross_entropy, self.output_schema.field_blobs())
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.arc_cosine_feature_map import ArcCosineFeatureMap
import numpy as np
class SemiRandomFeatures(ArcCosineFeatureMap):
"""
Implementation of the semi-random kernel feature map.
Applies H(x_rand) * x_rand^s * x_learned, where
H is the Heaviside step function,
x_rand is the input after applying FC with randomized parameters,
and x_learned is the input after applying FC with learnable parameters.
    If using a multilayer model with semi-random layers, then input and output records
should have a 'full' and 'random' Scalar. The random Scalar will be passed as
input to process the random features.
For more information, see the original paper:
https://arxiv.org/pdf/1702.08882.pdf
Inputs :
output_dims -- dimensions of the output vector
s -- if s == 0, will obtain linear semi-random features;
else if s == 1, will obtain squared semi-random features;
else s >= 2, will obtain higher order semi-random features
scale_random -- amount to scale the standard deviation
(for random parameter initialization when weight_init or
bias_init hasn't been specified)
scale_learned -- amount to scale the standard deviation
(for learned parameter initialization when weight_init or
bias_init hasn't been specified)
weight_init_random -- initialization distribution for random weight parameter
(if None, will use Gaussian distribution)
        bias_init_random -- initialization distribution for random bias parameter
(if None, will use Uniform distribution)
weight_init_learned -- initialization distribution for learned weight parameter
(if None, will use Gaussian distribution)
        bias_init_learned -- initialization distribution for learned bias parameter
(if None, will use Uniform distribution)
weight_optim -- optimizer for weight params for learned features
bias_optim -- optimizer for bias param for learned features
set_weight_as_global_constant -- if True, initialized random parameters
will be constant across all distributed
instances of the layer
"""
def __init__(
self,
model,
input_record,
output_dims,
s=1,
scale_random=1.0,
scale_learned=1.0,
weight_init_random=None,
bias_init_random=None,
weight_init_learned=None,
bias_init_learned=None,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=False,
name='semi_random_features',
**kwargs):
if isinstance(input_record, schema.Struct):
schema.is_schema_subset(
schema.Struct(
('full', schema.Scalar()),
('random', schema.Scalar()),
),
input_record
)
self.input_record_full = input_record.full
self.input_record_random = input_record.random
elif isinstance(input_record, schema.Scalar):
self.input_record_full = input_record
self.input_record_random = input_record
super(SemiRandomFeatures, self).__init__(
model,
self.input_record_full,
output_dims,
s=s,
scale=scale_random, # To initialize the random parameters
weight_init=weight_init_random,
bias_init=bias_init_random,
weight_optim=None,
bias_optim=None,
set_weight_as_global_constant=set_weight_as_global_constant,
initialize_output_schema=False,
name=name,
**kwargs)
self.output_schema = schema.Struct(
('full', schema.Scalar(
(np.float32, output_dims),
model.net.NextScopedBlob(name + '_full_output')
),),
('random', schema.Scalar(
(np.float32, output_dims),
model.net.NextScopedBlob(name + '_random_output')
),),
)
# To initialize the learnable parameters
assert (scale_learned > 0.0), \
"Expected scale (learned) > 0, got %s" % scale_learned
self.stddev = scale_learned * np.sqrt(1.0 / self.input_dims)
# Learned Parameters
(self.learned_w, self.learned_b) = self._initialize_params(
'learned_w',
'learned_b',
w_init=weight_init_learned,
b_init=bias_init_learned,
w_optim=weight_optim,
b_optim=bias_optim
)
def add_ops(self, net):
# Learned features: wx + b
learned_features = net.FC(self.input_record_full.field_blobs() +
[self.learned_w, self.learned_b],
net.NextScopedBlob('learned_features'))
# Random features: wx + b
random_features = net.FC(self.input_record_random.field_blobs() +
[self.random_w, self.random_b],
net.NextScopedBlob('random_features'))
processed_random_features = self._heaviside_with_power(
net,
random_features,
self.output_schema.random.field_blobs(),
self.s
)
net.Mul([processed_random_features, learned_features],
self.output_schema.full.field_blobs())
|
## @package batch_lr_loss
# Module caffe2.python.layers.batch_lr_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchLRLoss(ModelLayer):
def __init__(
self,
model,
input_record,
name='batch_lr_loss',
average_loss=True,
jsd_weight=0.0,
pos_label_target=1.0,
neg_label_target=0.0,
homotopy_weighting=False,
**kwargs
):
super(BatchLRLoss, self).__init__(model, name, input_record, **kwargs)
self.average_loss = average_loss
assert (schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('logit', schema.Scalar())
),
input_record
))
self.jsd_fuse = False
assert jsd_weight >= 0 and jsd_weight <= 1
if jsd_weight > 0 or homotopy_weighting:
assert 'prediction' in input_record
self.init_weight(jsd_weight, homotopy_weighting)
self.jsd_fuse = True
self.homotopy_weighting = homotopy_weighting
assert pos_label_target <= 1 and pos_label_target >= 0
assert neg_label_target <= 1 and neg_label_target >= 0
assert pos_label_target >= neg_label_target
self.pos_label_target = pos_label_target
self.neg_label_target = neg_label_target
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output')
)
def init_weight(self, jsd_weight, homotopy_weighting):
if homotopy_weighting:
self.mutex = self.create_param(
param_name=('%s_mutex' % self.name),
shape=None,
initializer=('CreateMutex', ),
optimizer=self.model.NoOptim,
)
self.counter = self.create_param(
param_name=('%s_counter' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64
}
),
optimizer=self.model.NoOptim,
)
self.xent_weight = self.create_param(
param_name=('%s_xent_weight' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 1.,
'dtype': core.DataType.FLOAT
}
),
optimizer=self.model.NoOptim,
)
self.jsd_weight = self.create_param(
param_name=('%s_jsd_weight' % self.name),
shape=[1],
initializer=(
'ConstantFill', {
'value': 0.,
'dtype': core.DataType.FLOAT
}
),
optimizer=self.model.NoOptim,
)
else:
self.jsd_weight = self.model.add_global_constant(
'%s_jsd_weight' % self.name, jsd_weight
)
self.xent_weight = self.model.add_global_constant(
'%s_xent_weight' % self.name, 1. - jsd_weight
)
def update_weight(self, net):
net.AtomicIter([self.mutex, self.counter], [self.counter])
        # With policy='inv', lr = base_lr * (1 + gamma * iter) ^ (-power), so:
        # iter = 0:   lr = 1
        # iter = 1e6: lr = 2 ^ -0.1 ~= 0.93
        # iter = 1e9: lr ~= 1001 ^ -0.1 ~= 0.50
net.LearningRate([self.counter], [self.xent_weight], base_lr=1.0,
policy='inv', gamma=1e-6, power=0.1,)
net.Sub(
[self.model.global_constants['ONE'], self.xent_weight],
[self.jsd_weight]
)
return self.xent_weight, self.jsd_weight
def add_ops(self, net):
# numerically stable log-softmax with crossentropy
label = self.input_record.label()
        # mandatory cast to float32:
        # self.input_record.label.field_type().base is np.float32, but the
        # label blob's actual type may be int
label = net.Cast(
label,
net.NextScopedBlob('label_float32'),
to=core.DataType.FLOAT)
label = net.ExpandDims(label, net.NextScopedBlob('expanded_label'),
dims=[1])
if self.pos_label_target != 1.0 or self.neg_label_target != 0.0:
label = net.StumpFunc(
label,
net.NextScopedBlob('smoothed_label'),
threshold=0.5,
low_value=self.neg_label_target,
high_value=self.pos_label_target,
)
xent = net.SigmoidCrossEntropyWithLogits(
[self.input_record.logit(), label],
net.NextScopedBlob('cross_entropy'),
)
# fuse with JSD
if self.jsd_fuse:
jsd = net.BernoulliJSD(
[self.input_record.prediction(), label],
net.NextScopedBlob('jsd'),
)
if self.homotopy_weighting:
self.update_weight(net)
loss = net.WeightedSum(
[xent, self.xent_weight, jsd, self.jsd_weight],
net.NextScopedBlob('loss'),
)
else:
loss = xent
if 'weight' in self.input_record.fields:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
weight_blob = net.StopGradient(
[weight_blob],
[net.NextScopedBlob('weight_stop_gradient')],
)
loss = net.Mul(
[loss, weight_blob],
net.NextScopedBlob('weighted_cross_entropy'),
)
if self.average_loss:
net.AveragedLoss(loss, self.output_schema.field_blobs())
else:
net.ReduceFrontSum(loss, self.output_schema.field_blobs())
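# A minimal standalone sketch of the homotopy weighting schedule computed by
# update_weight above; the iteration values and blob names are illustrative.
def _demo_homotopy_schedule():
    from caffe2.python import workspace
    net = core.Net("homotopy_demo")
    for it in [0, 10**6, 10**9]:
        iter_blob = "iter_{}".format(it)
        workspace.FeedBlob(iter_blob, np.array([it], dtype=np.int64))
        # lr = base_lr * (1 + gamma * iter) ^ (-power)
        net.LearningRate([iter_blob], ["xent_weight_{}".format(it)],
                         base_lr=1.0, policy='inv', gamma=1e-6, power=0.1)
    workspace.RunNetOnce(net)
    for it in [0, 10**6, 10**9]:
        # prints ~1.0, ~0.93 and ~0.50; jsd_weight would be 1 minus these
        print(workspace.FetchBlob("xent_weight_{}".format(it)))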
|
## @package gather_record
# Module caffe2.python.layers.gather_record
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class GatherRecord(ModelLayer):
"""
    Given a 1-D `indices` tensor, gather the element at each `i` in `indices`
    from all the blobs in `record`. If a blob is the values blob of a list,
    all the elements covered by the list's lengths blob are gathered. For
    example,
    Input:
        indices = [0, 2]
        record:a = [[0, 1], [2, 3], [4, 5], [6, 7]]
        record:b:lengths = [0, 1, 2, 3]
        record:b:items = [0, 1, 2, 3, 4, 5]
    Output:
        a = [[0, 1], [4, 5]]
        b:lengths = [0, 2]
        b:items = [1, 2]
    Nested lists are supported.
"""
def __init__(self, model, input_record, name='gather_record', **kwargs):
super(GatherRecord, self).__init__(model, name, input_record, **kwargs)
assert 'indices' in input_record
assert 'record' in input_record
self.output_schema = schema.NewRecord(
model.net, input_record.record.clone_schema())
self._indices = self.input_record.indices()
def _gather_scalar(self, net, record, lengths_blob, output_record):
if lengths_blob is None:
net.Gather([record(), self._indices], output_record())
else:
net.LengthsGather([record(), lengths_blob, self._indices],
output_record())
def _gather_struct(self, net, record, lengths_blob, output_record):
for name, field in record.get_children():
self._dispatch(net, field, lengths_blob, output_record[name])
def _gather_list(self, net, record, lengths_blob, output_record):
self._gather_scalar(
net, record.lengths, lengths_blob, output_record.lengths)
if lengths_blob is None:
lengths_blob = record.lengths()
else:
# TODO(kittipat): This is a hacky solution until LengthsSum for int
# is implemented
lengths_float = net.Cast(
record.lengths(),
net.NextScopedBlob(str(record.lengths()) + '_float'),
to=core.DataType.FLOAT,
)
lengths_blob_float = net.LengthsSum(
[lengths_float, lengths_blob],
net.NextScopedBlob(str(record.lengths()) + "_nested_float")
)
lengths_blob = net.Cast(
lengths_blob_float,
net.NextScopedBlob(str(record.lengths()) + "_nested"),
to=core.DataType.INT32,
)
self._dispatch(net, record._items, lengths_blob, output_record._items)
def _dispatch(self, net, record, lengths_blob, output_record):
if isinstance(record, schema.Scalar):
self._gather_scalar(net, record, lengths_blob, output_record)
elif isinstance(record, schema.Struct):
self._gather_struct(net, record, lengths_blob, output_record)
elif isinstance(record, schema.List):
self._gather_list(net, record, lengths_blob, output_record)
else:
raise NotImplementedError
def add_ops(self, net):
self._dispatch(net, self.input_record.record, None, self.output_schema)
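# A minimal standalone sketch of the two gather ops this layer dispatches to,
# using the toy record from the docstring above; blob names are illustrative.
def _demo_gather_ops():
    import numpy as np
    from caffe2.python import workspace
    workspace.FeedBlob("indices", np.array([0, 2], dtype=np.int32))
    workspace.FeedBlob("a", np.array([[0, 1], [2, 3], [4, 5], [6, 7]],
                                     dtype=np.float32))
    workspace.FeedBlob("b_lengths", np.array([0, 1, 2, 3], dtype=np.int32))
    workspace.FeedBlob("b_items", np.array([0, 1, 2, 3, 4, 5], dtype=np.int64))
    net = core.Net("gather_demo")
    net.Gather(["a", "indices"], "a_out")                  # [[0, 1], [4, 5]]
    net.Gather(["b_lengths", "indices"], "b_lengths_out")  # [0, 2]
    net.LengthsGather(["b_items", "b_lengths", "indices"],
                      "b_items_out")                       # [1, 2]
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("a_out"),
          workspace.FetchBlob("b_lengths_out"),
          workspace.FetchBlob("b_items_out"))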
|
# @package sparse_to_dense
# Module caffe2.python.layers.sparse_to_dense
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
import numpy as np
class FeatureSparseToDense(ModelLayer):
def __init__(self, model, input_record, input_specs,
name='feature_sparse_to_dense', **kwargs):
"""
`input_specs` follows the format of FeatureSpec from schema. To be more
        precise, it's a namedtuple that should have:
'feature_type', 'feature_names', 'feature_ids'
"""
super(FeatureSparseToDense, self).__init__(model, name,
input_record, **kwargs)
self.input_specs = input_specs
outputs = []
for field, feature_specs in self.input_specs:
assert len(feature_specs.feature_names) ==\
len(feature_specs.feature_ids)
if feature_specs.feature_type == 'FLOAT':
outputs.append((
field,
schema.Scalar(
(np.float32, (len(feature_specs.feature_ids), )),
self.get_next_blob_reference(field + '_output')
)
))
elif feature_specs.feature_type == 'ID_LIST':
outputs.append((
field,
schema.Struct(
('ranges',
schema.Scalar(
(
np.int32,
(len(feature_specs.feature_ids), 2)
),
self.get_next_blob_reference(
field + '_ranges')
),
),
('values',
schema.Scalar(np.int64,
self.get_next_blob_reference(
field + '_values')
),
)
)
))
elif feature_specs.feature_type == 'ID_SCORE_LIST':
outputs.append((
field,
schema.Struct(
('ranges',
schema.Scalar(
(
np.int32,
(len(feature_specs.feature_ids), 2)
),
self.get_next_blob_reference(
field + '_ranges')
),
),
('ids',
schema.Scalar(np.int64,
self.get_next_blob_reference(
field + '_ids')
),
),
('scores',
schema.Scalar(np.float32,
self.get_next_blob_reference(
field + '_scores')
),
)
)
))
elif feature_specs.feature_type == 'EMBEDDING':
# We don't know dimensions of embeddings in input data.
# Even though they should match dimensions from feature config,
# we keep ranges blob to check input data later.
outputs.append((
field,
schema.Struct(
('ranges',
schema.Scalar(
(
np.int32,
(len(feature_specs.feature_ids), 2)
),
self.get_next_blob_reference(
field + '_ranges')
),
),
('values',
schema.Scalar(np.float32,
self.get_next_blob_reference(
field + '_values')
),
)
)
))
else:
raise TypeError(
"Unsupported input type: {0}".
format(feature_specs.feature_type))
        # TODO(amalevich): This schema is producing ranges, and thus anything
        # consuming it should support ranges as well. It might be confusing
        # if we don't add better support for ranges / have it as a
        # first-class layer
self.output_schema = schema.Struct(
*outputs
)
        # TODO(amalevich): Consider moving this data into the schema instead.
        # Structs don't support attaching metadata to them, and cloning will
        # break things badly, but this is the most elegant way to pass this
        # info around. Should we change it, or would that be too much work
        # and not worth it?
for field, feature_specs in input_specs:
schema.attach_metadata_to_scalars(
self.output_schema[field],
schema.Metadata(
feature_specs=feature_specs)
)
self.zero = model.global_constants['ZERO']
self.zero_range = model.global_constants['ZERO_RANGE']
# Add operators to all types that need to be densified
def add_ops(self, net):
record = self.input_record
for field, feature_specs in self.input_specs:
if feature_specs.feature_type == 'FLOAT':
net.SparseToDenseMask(
[
record[field].keys(),
record[field].values(),
self.zero,
record[field].lengths(),
],
[
self.output_schema[field](),
],
mask=feature_specs.feature_ids,
)
elif feature_specs.feature_type == 'ID_LIST':
id_list_ranges = net.LengthsToRanges(
record[field].values.lengths(),
net.NextScopedBlob('id_list_ranges')
)
net.SparseToDenseMask(
[
record[field].keys(), id_list_ranges, self.zero_range,
record[field].lengths()
],
self.output_schema[field].ranges(),
mask=feature_specs.feature_ids,
)
            # Alias helps to enforce the fact that all SparseToDense calls
            # produce new blobs. Reusing blob names might have weird
            # consequences at delivery time, when the content of the blobs
            # is generated based on the input_specs.
net.Alias(record[field].values.items(),
self.output_schema[field].values())
elif feature_specs.feature_type == 'ID_SCORE_LIST':
# TODO: merge this to the case above?
id_list_ranges = net.LengthsToRanges(
record[field].values.lengths(),
net.NextScopedBlob('id_score_list_ranges')
)
net.SparseToDenseMask(
[
record[field].keys(), id_list_ranges, self.zero_range,
record[field].lengths()
],
self.output_schema[field].ranges(),
mask=feature_specs.feature_ids,
)
            # Alias helps to enforce the fact that all SparseToDense calls
            # produce new blobs. Reusing blob names might have weird
            # consequences at delivery time, when the content of the blobs
            # is generated based on the input_specs.
net.Alias(record[field].values.keys(),
self.output_schema[field].ids())
net.Alias(record[field].values.values(),
self.output_schema[field].scores())
elif feature_specs.feature_type == 'EMBEDDING':
ranges = net.LengthsToRanges(
record[field].values.lengths(),
net.NextScopedBlob('embeddings_ranges')
)
net.SparseToDenseMask(
[
record[field].keys(),
ranges,
self.zero_range,
record[field].lengths()
],
self.output_schema[field].ranges(),
mask=feature_specs.feature_ids,
)
            # Alias helps to enforce the fact that all SparseToDense calls
            # produce new blobs. Reusing blob names might have weird
            # consequences at delivery time, when the content of the blobs
            # is generated based on the input_specs.
net.Alias(record[field].values.items(),
self.output_schema[field].values())
def get_metadata(self):
metadata = []
for field, feature_specs in self.input_specs:
metadata.append(
(
{
'type': feature_specs.feature_type,
'names': feature_specs.feature_names,
'ids': feature_specs.feature_ids,
},
self.output_schema[field].field_blobs(),
self.output_schema[field].field_types()
)
)
if feature_specs.feature_type == 'FLOAT':
metadata[-1][0]['cardinality'] = 1
return metadata
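# A minimal standalone sketch of SparseToDenseMask as used for FLOAT features
# in add_ops above. Feature ids [11, 12, 13] and all values are illustrative.
def _demo_sparse_to_dense_mask():
    from caffe2.python import core, workspace
    workspace.FeedBlob("keys", np.array([11, 13, 12], dtype=np.int64))
    workspace.FeedBlob("values", np.array([1.0, 3.0, 2.0], dtype=np.float32))
    workspace.FeedBlob("default", np.array([0.0], dtype=np.float32))
    # first example carries {11: 1.0, 13: 3.0}, second carries {12: 2.0}
    workspace.FeedBlob("lengths", np.array([2, 1], dtype=np.int32))
    net = core.Net("sparse_to_dense_demo")
    net.SparseToDenseMask(["keys", "values", "default", "lengths"],
                          ["dense"], mask=[11, 12, 13])
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("dense"))  # [[1., 0., 3.], [0., 2., 0.]]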
|
# @package functional
# Module caffe2.python.layers.functional
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema, scope, workspace
from caffe2.python.layers.layers import (
ModelLayer,
)
import caffe2.proto.caffe2_pb2 as caffe2_pb2
import numpy as np
import six
import logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Functional(ModelLayer):
def __init__(self, model, input_record, output_names_or_num, function,
name='functional', output_dtypes=None, **kwargs):
# allow coercion
input_record = schema.as_record(input_record)
super(Functional, self).__init__(model, name, input_record, **kwargs)
self._function = function
self._kwargs = kwargs
return_struct = (
isinstance(output_names_or_num, list) or
(isinstance(output_names_or_num, six.integer_types) and
output_names_or_num != 1)
)
with scope.NameScope(self.name, reset=True):
if isinstance(output_names_or_num, int):
struct_output_schema = schema.NewRecord(
model.net, schema.RawTuple(output_names_or_num))
elif isinstance(output_names_or_num, schema.Field):
self.output_schema = output_names_or_num.clone(keep_blobs=True)
return
else:
if not isinstance(output_names_or_num, list):
output_names_or_num = [output_names_or_num]
out_tuple = [(out, np.void) for out in output_names_or_num]
struct_output_schema = schema.NewRecord(
model.net, schema.Struct(*out_tuple))
num_outputs = len(struct_output_schema.field_blobs())
        # A functional layer returns a Struct if there is more than one output
        # or the outputs were given as a list; otherwise it returns a Scalar.
if return_struct:
self.output_schema = struct_output_schema
else:
self.output_schema = struct_output_schema[0]
# If output_dtypes is provided, use it for output schema. Otherwise
# the shape and type will be inferred.
if output_dtypes is not None:
if not isinstance(output_dtypes, list):
output_dtypes = [output_dtypes] * num_outputs
assert len(output_dtypes) == num_outputs
for dtype, scalar in zip(output_dtypes,
self.output_schema.all_scalars()):
scalar.set_type(dtype)
return
# Fake execution of the function to infer shapes and types automatically
had_issues = False
try:
type_net = core.Net('_temp_type_and_shape_inference_net')
schema.InitEmptyRecord(type_net, input_record, enforce_types=True)
function(type_net, self.input_record, self.output_schema, **kwargs)
(shapes, types) = workspace.InferShapesAndTypes([type_net], {})
for i in range(num_outputs):
scalar_schema = (self.output_schema[i] if return_struct
else self.output_schema)
blob = scalar_schema()
if blob not in types or blob not in shapes:
had_issues = True
continue
if shapes[blob] == []:
# Scalar type
shape = tuple()
elif shapes[blob][0] == 0:
shape = tuple(shapes[blob][1:])
else:
logger.warning("unexpeced shape: {}".format(shapes[blob]))
# If batch dimension is not first - give up on shape
# inference for that blob
had_issues = True
continue
# TODO(amalevich): Move it to some shared library
dtype = None
if types[blob] == caffe2_pb2.TensorProto.DOUBLE:
dtype = (np.float64, shape)
elif types[blob] == caffe2_pb2.TensorProto.FLOAT:
dtype = (np.float32, shape)
elif types[blob] == caffe2_pb2.TensorProto.INT32:
dtype = (np.int32, shape)
elif types[blob] == caffe2_pb2.TensorProto.INT64:
dtype = (np.int64, shape)
elif types[blob] == caffe2_pb2.TensorProto.FLOAT16:
dtype = (np.float16, shape)
if dtype is not None:
scalar_schema.set_type(dtype)
except TypeError as ex:
had_issues = True
logger.warning(str(ex))
if had_issues:
logger.warning(
"Type inference had problems for layer: {}".format(self.name))
def add_ops(self, net):
self._function(
net, self.input_record, self.output_schema, **(self._kwargs))
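# A minimal standalone sketch of the shape/type inference this layer relies
# on; the net and blob names are illustrative.
def _demo_infer_shapes_and_types():
    net = core.Net("type_inference_demo")
    net.ConstantFill([], "x", shape=[0, 4], value=0.0)  # 0 marks the batch dim
    net.Relu("x", "y")
    shapes, types = workspace.InferShapesAndTypes([net], {})
    # "y" keeps the [0, 4] shape and the FLOAT type of its input
    print(shapes["y"], types["y"])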
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from caffe2.python import schema
from caffe2.python.layers.layers import (
InstantiationContext,
ModelLayer,
)
logger = logging.getLogger(__name__)
class SelectRecordByContext(ModelLayer):
"""
    Allows the model to follow different paths for each instantiation context
    and join later at some point. The implementation uses `Alias` because
    schema sometimes clones fields internally, so we need a static blob name
    for the output.
"""
def __init__(
self,
model,
input_record,
name='select_record_by_context',
check_field_metas=True,
use_copy=False,
default_output_record_field=None,
**kwargs
):
super(SelectRecordByContext, self).__init__(model, name, input_record,
**kwargs)
assert isinstance(input_record, schema.Struct)
assert len(input_record) > 1
self.use_copy = use_copy
self.default_output_record = (
input_record[default_output_record_field]
if (default_output_record_field is not None) else None
)
ref_record = input_record[0]
for record in input_record:
assert schema.equal_schemas(record, ref_record,
check_field_metas=check_field_metas)
self.output_schema = schema.NewRecord(model.net, ref_record)
def _set_output_blobs(self, net, context):
record = self.input_record.get(context, self.default_output_record)
assert record is not None, (
"{} context is not in input record without providing default"
" output".format(context)
)
for in_blob, out_blob in zip(
record.field_blobs(), self.output_schema.field_blobs()
):
if self.use_copy:
net.Copy(in_blob, out_blob)
else:
net.Alias(in_blob, out_blob)
def add_ops(self, net):
self._set_output_blobs(net, InstantiationContext.PREDICTION)
def add_eval_ops(self, net):
self._set_output_blobs(net, InstantiationContext.EVAL)
def add_train_ops(self, net):
self._set_output_blobs(net, InstantiationContext.TRAINING)
def add_ops_to_accumulate_pred(self, net):
self._set_output_blobs(net, InstantiationContext.ACCUMULATE_PRED)
|
## @package split
# Module caffe2.python.layers.split
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
class Split(ModelLayer):
def __init__(self, model, input_record, num_splits, axis=1,
name='split', **kwargs):
super(Split, self).__init__(model, name, input_record, **kwargs)
self.axis = axis
# Assume that first dimension is batch, so actual axis in shape is
# axis - 1
axis -= 1
assert axis >= 0
assert isinstance(input_record, schema.Scalar),\
"Incorrect input type. Excpected Scalar, but received: {0}".\
format(input_record)
input_shape = input_record.field_type().shape
assert len(input_shape) >= axis
assert input_shape[axis] % num_splits == 0
output_shape = list(input_shape)
output_shape[axis] = int(output_shape[axis] / num_splits)
data_type = input_record.field_type().base
output_scalars = [
schema.Scalar(
(data_type, output_shape),
self.get_next_blob_reference('output_{}'.format(i)),
)
for i in range(num_splits)
]
self.output_schema = schema.Tuple(*output_scalars)
def add_ops(self, net):
net.Split(
self.input_record.field_blobs(),
self.output_schema.field_blobs(),
axis=self.axis,
)
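# A minimal standalone sketch of the underlying Split op with axis=1 and two
# splits, matching the layer's batch-first convention; values are illustrative.
def _demo_split():
    import numpy as np
    from caffe2.python import core, workspace
    workspace.FeedBlob("x", np.arange(8, dtype=np.float32).reshape(2, 4))
    net = core.Net("split_demo")
    net.Split("x", ["out_0", "out_1"], axis=1)
    workspace.RunNetOnce(net)
    # each output has shape (2, 2)
    print(workspace.FetchBlob("out_0"), workspace.FetchBlob("out_1"))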
|
# Copyright (c) 2016-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
# @package label_smooth
# Module caffe2.python.layers.label_smooth
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class LabelSmooth(ModelLayer):
def __init__(
self, model, label, smooth_matrix, name='label_smooth', **kwargs
):
super(LabelSmooth, self).__init__(model, name, label, **kwargs)
self.label = label
# shape as a list
smooth_matrix = np.array(smooth_matrix).astype(np.float32).flatten()
self.set_dim(smooth_matrix)
self.set_smooth_matrix(smooth_matrix)
self.output_schema = schema.Scalar(
(np.float32, (self.dim, )),
self.get_next_blob_reference('smoothed_label')
)
def set_dim(self, smooth_matrix):
num_elements = smooth_matrix.size
self.binary_prob_label = (num_elements == 2)
if self.binary_prob_label:
self.dim = 1
else:
            assert int(np.sqrt(num_elements)) ** 2 == num_elements
self.dim = int(np.sqrt(num_elements))
def set_smooth_matrix(self, smooth_matrix):
if not self.binary_prob_label:
self.smooth_matrix = self.model.add_global_constant(
'%s_label_smooth_matrix' % self.name,
array=smooth_matrix.reshape((self.dim, self.dim)),
dtype=np.dtype(np.float32),
)
self.len = self.model.add_global_constant(
'%s_label_dim' % self.name,
array=self.dim,
dtype=np.dtype(np.int64),
)
else:
self.smooth_matrix = smooth_matrix
def add_ops_for_binary_prob_label(self, net):
if self.label.field_type().base != np.float32:
float32_label = net.NextScopedBlob('float32_label')
net.Cast([self.label()], [float32_label], to=core.DataType.FLOAT)
else:
float32_label = self.label()
net.StumpFunc(
float32_label,
self.output_schema(),
threshold=0.5,
low_value=self.smooth_matrix[0],
high_value=self.smooth_matrix[1],
)
def add_ops_for_categorical_label(self, net):
if self.label.field_type().base != np.int64:
int64_label = net.NextScopedBlob('int64_label')
net.Cast([self.label()], [int64_label], to=core.DataType.INT64)
else:
int64_label = self.label()
one_hot_label = net.NextScopedBlob('one_hot_label')
net.OneHot([int64_label, self.len], [one_hot_label])
net.MatMul([one_hot_label, self.smooth_matrix], self.output_schema())
def add_ops(self, net):
if self.binary_prob_label:
self.add_ops_for_binary_prob_label(net)
else:
self.add_ops_for_categorical_label(net)
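# A minimal standalone sketch of the categorical path above: one-hot the
# label, then multiply by the smoothing matrix. The 3x3 matrix with 0.8 on
# the diagonal is illustrative.
def _demo_label_smooth():
    from caffe2.python import workspace
    smooth = np.full((3, 3), 0.1, dtype=np.float32)
    np.fill_diagonal(smooth, 0.8)
    workspace.FeedBlob("raw_label", np.array([0, 2], dtype=np.int64))
    workspace.FeedBlob("smooth_matrix", smooth)
    workspace.FeedBlob("label_dim", np.array([3], dtype=np.int64))
    net = core.Net("label_smooth_demo")
    net.OneHot(["raw_label", "label_dim"], ["one_hot"])
    net.MatMul(["one_hot", "smooth_matrix"], ["smoothed"])
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("smoothed"))  # [[0.8, 0.1, 0.1], [0.1, 0.1, 0.8]]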
|
## @package reservoir_sampling
# Module caffe2.python.layers.reservoir_sampling
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class ReservoirSampling(ModelLayer):
"""
    Collect samples from the input record with reservoir sampling. If you
    have complex data, use PackRecords to pack it before using this layer.
This layer is not thread safe.
"""
def __init__(self, model, input_record, num_to_collect,
name='reservoir_sampling', **kwargs):
super(ReservoirSampling, self).__init__(
model, name, input_record, **kwargs)
assert num_to_collect > 0
self.num_to_collect = num_to_collect
self.reservoir = self.create_param(
param_name='reservoir',
shape=[0],
initializer=('ConstantFill',),
optimizer=model.NoOptim,
)
self.num_visited_blob = self.create_param(
param_name='num_visited',
shape=[],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.mutex = self.create_param(
param_name='mutex',
shape=None,
initializer=('CreateMutex',),
optimizer=model.NoOptim,
)
self.extra_input_blobs = []
self.extra_output_blobs = []
if 'object_id' in input_record:
object_to_pos = self.create_param(
param_name='object_to_pos',
initializer=('CreateMap', {
'key_dtype': core.DataType.INT64,
'valued_dtype': core.DataType.INT32,
}),
optimizer=model.NoOptim,
)
pos_to_object = self.create_param(
param_name='pos_to_object',
shape=[0],
initializer=('ConstantFill', {
'value': 0,
'dtype': core.DataType.INT64,
}),
optimizer=model.NoOptim,
)
self.extra_input_blobs.append(input_record.object_id())
self.extra_input_blobs.extend([object_to_pos, pos_to_object])
self.extra_output_blobs.extend([object_to_pos, pos_to_object])
self.output_schema = schema.Struct(
(
'reservoir',
schema.from_blob_list(input_record.data, [self.reservoir])
),
('num_visited', schema.Scalar(blob=self.num_visited_blob)),
('mutex', schema.Scalar(blob=self.mutex)),
)
def add_ops(self, net):
net.ReservoirSampling(
[self.reservoir, self.num_visited_blob, self.input_record.data(),
self.mutex] + self.extra_input_blobs,
[self.reservoir, self.num_visited_blob] + self.extra_output_blobs,
num_to_collect=self.num_to_collect,
)
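# A minimal standalone sketch of the ReservoirSampling op wired up above,
# collecting at most 3 rows across two batches; blob names are illustrative.
def _demo_reservoir_sampling():
    import numpy as np
    from caffe2.python import workspace
    init_net = core.Net("reservoir_init")
    init_net.ConstantFill([], "reservoir", shape=[0], value=0.0)
    init_net.ConstantFill([], "num_visited", shape=[], value=0,
                          dtype=core.DataType.INT64)
    init_net.CreateMutex([], "mutex")
    workspace.RunNetOnce(init_net)
    net = core.Net("reservoir_demo")
    net.ReservoirSampling(["reservoir", "num_visited", "data", "mutex"],
                          ["reservoir", "num_visited"], num_to_collect=3)
    for _ in range(2):
        workspace.FeedBlob("data", np.random.rand(4, 2).astype(np.float32))
        workspace.RunNetOnce(net)
    print(workspace.FetchBlob("reservoir").shape)  # (3, 2)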
|
## @package sparse_feature_hash
# Module caffe2.python.layers.sparse_feature_hash
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema, core
from caffe2.python.layers.layers import (
ModelLayer,
IdList,
IdScoreList,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class SparseFeatureHash(ModelLayer):
def __init__(self, model, input_record, seed=0, modulo=None,
use_hashing=True, name='sparse_feature_hash', **kwargs):
super(SparseFeatureHash, self).__init__(model, name, input_record, **kwargs)
self.seed = seed
self.use_hashing = use_hashing
if schema.equal_schemas(input_record, IdList):
self.modulo = modulo or self.extract_hash_size(input_record.items.metadata)
metadata = schema.Metadata(
categorical_limit=self.modulo,
feature_specs=input_record.items.metadata.feature_specs,
expected_value=input_record.items.metadata.expected_value
)
with core.NameScope(name):
self.output_schema = schema.NewRecord(model.net, IdList)
self.output_schema.items.set_metadata(metadata)
elif schema.equal_schemas(input_record, IdScoreList):
self.modulo = modulo or self.extract_hash_size(input_record.keys.metadata)
metadata = schema.Metadata(
categorical_limit=self.modulo,
feature_specs=input_record.keys.metadata.feature_specs,
expected_value=input_record.keys.metadata.expected_value
)
with core.NameScope(name):
self.output_schema = schema.NewRecord(model.net, IdScoreList)
self.output_schema.keys.set_metadata(metadata)
else:
assert False, "Input type must be one of (IdList, IdScoreList)"
assert self.modulo >= 1, 'Unexpected modulo: {}'.format(self.modulo)
# operators in this layer do not have CUDA implementation yet.
# In addition, since the sparse feature keys that we are hashing are
# typically on CPU originally, it makes sense to have this layer on CPU.
self.tags.update([Tags.CPU_ONLY])
def extract_hash_size(self, metadata):
if metadata.feature_specs and metadata.feature_specs.desired_hash_size:
return metadata.feature_specs.desired_hash_size
elif metadata.categorical_limit is not None:
return metadata.categorical_limit
else:
assert False, "desired_hash_size or categorical_limit must be set"
def add_ops(self, net):
net.Copy(
self.input_record.lengths(),
self.output_schema.lengths()
)
if schema.equal_schemas(self.output_schema, IdList):
input_blob = self.input_record.items()
output_blob = self.output_schema.items()
elif schema.equal_schemas(self.output_schema, IdScoreList):
input_blob = self.input_record.keys()
output_blob = self.output_schema.keys()
net.Copy(
self.input_record.values(),
self.output_schema.values()
)
else:
raise NotImplementedError()
if self.use_hashing:
net.IndexHash(
input_blob, output_blob, seed=self.seed, modulo=self.modulo
)
else:
net.Mod(
input_blob, output_blob, divisor=self.modulo, sign_follow_divisor=True
)
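# A minimal standalone sketch of the two hashing modes above: IndexHash
# scrambles ids into [0, modulo), while Mod only wraps them around. The ids
# and modulo are illustrative.
def _demo_hashing_modes():
    from caffe2.python import workspace
    workspace.FeedBlob("ids", np.array([3, 1000003, 42], dtype=np.int64))
    net = core.Net("hash_demo")
    net.IndexHash("ids", "hashed_ids", seed=0, modulo=1000)
    net.Mod("ids", "mod_ids", divisor=1000, sign_follow_divisor=True)
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("hashed_ids"))  # pseudo-random ids in [0, 1000)
    print(workspace.FetchBlob("mod_ids"))     # [3, 3, 42]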
|
# Module caffe2.python.layers.dropout
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
class Dropout(ModelLayer):
def __init__(
self,
model,
input_record,
name='dropout',
ratio=0.5,
**kwargs):
super(Dropout, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
assert (ratio >= 0 and ratio < 1.0), \
"Expected 0 <= ratio < 1, but got ratio of %s" % ratio
self.output_schema = input_record.clone_schema()
self.output_schema.set_value(self.get_next_blob_reference('output'))
self.ratio = ratio
def _add_ops(self, net, is_test):
input_blob = self.input_record.field_blobs()
output_blobs = self.output_schema.field_blobs() \
+ [net.NextScopedBlob('d_mask')]
net.Dropout(input_blob,
output_blobs,
ratio=self.ratio,
is_test=is_test)
def add_train_ops(self, net):
self._add_ops(net, is_test=False)
def add_eval_ops(self, net):
self._add_ops(net, is_test=True)
def add_ops(self, net):
self.add_eval_ops(net)
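# A minimal standalone sketch of the underlying Dropout op in the two modes
# wired up above; blob names and the ratio are illustrative.
def _demo_dropout_modes():
    import numpy as np
    from caffe2.python import core, workspace
    workspace.FeedBlob("x", np.ones((2, 4), dtype=np.float32))
    net = core.Net("dropout_demo")
    net.Dropout("x", ["y_train", "d_mask"], ratio=0.5, is_test=0)
    net.Dropout("x", ["y_test"], ratio=0.5, is_test=1)
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("y_train"))  # zeros and survivors scaled by 2
    print(workspace.FetchBlob("y_test"))   # identical to x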
|
## @package conv
# Module caffe2.python.layers.conv
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import (
ModelLayer,
)
import numpy as np
class Conv(ModelLayer):
"""
Convolutional layer
Input:
- input_record: at least has the shape info of C (num_channels)
- output_dim: number of convolutional filters
- kernel_h, kernel_w: kernel size for h and w
- stride_h, stride_w: stride for h and w
- pad_b, pad_l, pad_r, pad_t: padding sizes, if stride == 1,
'None' value will do auto padding
- order: either 'NHWC' or 'NCHW'
"""
def __init__(self, model, input_record, output_dim, kernel_h, kernel_w,
stride_h, stride_w, pad_b=None, pad_l=None, pad_r=None,
pad_t=None, order='NHWC', kernel_init=None, bias_init=None,
kernel_optim=None, bias_optim=None,
name='conv', **kwargs):
super(Conv, self).__init__(model, name, input_record, **kwargs)
assert isinstance(input_record, schema.Scalar), "Incorrect input type"
# input num_channels (C) is needed
input_dims = input_record.field_type().shape
assert (kernel_h > 0 and isinstance(kernel_h, int)), (
"kernel_h should be positive integer")
assert (kernel_w > 0 and isinstance(kernel_w, int)), (
"kernel_w should be positive integer")
self.kernel_h = kernel_h
self.kernel_w = kernel_w
assert (stride_h > 0 and isinstance(stride_h, int)), (
"stride_h should be positive integer")
assert (stride_w > 0 and isinstance(stride_w, int)), (
"stride_w should be positive integer")
self.stride_h = stride_h
self.stride_w = stride_w
        # output_dim calculation (http://cs231n.github.io/convolutional-networks/)
        # output_dim_w = (input_dim_w - kernel_w + pad_r + pad_l) / stride_w + 1
        # so keeping output_dim_w == input_dim_w (auto padding) requires
        # pad_r + pad_l = (input_dim_w - 1) * stride_w - input_dim_w + kernel_w
        # (split evenly between pad_r and pad_l); similarly for pad_t and pad_b
        # with kernel_h. Here we only do auto padding for the stride = 1 case.
if stride_h == 1:
pad_t = int((kernel_h - 1) / 2) if pad_t is None else pad_t
pad_b = int((kernel_h - 1) / 2) if pad_b is None else pad_b
else:
pad_t = 0 if pad_t is None else pad_t
pad_b = 0 if pad_b is None else pad_b
if stride_w == 1:
pad_r = int((kernel_w - 1) / 2) if pad_r is None else pad_r
pad_l = int((kernel_w - 1) / 2) if pad_l is None else pad_l
else:
pad_r = 0 if pad_r is None else pad_r
pad_l = 0 if pad_l is None else pad_l
assert (pad_t >= 0 and isinstance(pad_t, int)), "pad_t should be int >= 0"
assert (pad_b >= 0 and isinstance(pad_b, int)), "pad_b should be int >= 0"
assert (pad_r >= 0 and isinstance(pad_r, int)), "pad_r should be int >= 0"
assert (pad_l >= 0 and isinstance(pad_l, int)), "pad_l should be int >= 0"
self.pad_t = pad_t
self.pad_b = pad_b
self.pad_r = pad_r
self.pad_l = pad_l
        assert order in ['NHWC', 'NCHW'], "order should be either 'NHWC' or 'NCHW'"
self.order = order
if order == 'NHWC':
input_c = input_dims[-1]
kernel_shape = [output_dim, kernel_h, kernel_w, input_c]
elif order == 'NCHW':
input_c = input_dims[0]
kernel_shape = [output_dim, input_c, kernel_h, kernel_w]
assert input_c > 0, (
"Number of input channels in conv parameters should be positive")
kernel_init = kernel_init if kernel_init else (
'XavierFill', {}
)
bias_init = bias_init if bias_init else (
'ConstantFill', {'value': 0.0}
)
self.kernel = self.create_param(
param_name='conv_kernel',
shape=kernel_shape,
initializer=kernel_init,
optimizer=kernel_optim,
)
self.bias = self.create_param(
param_name='conv_bias',
shape=[output_dim],
initializer=bias_init,
optimizer=bias_optim,
)
# the output_schema only has the num of output channels
# output_h and output_w would be inferred internally
self.output_schema = schema.Scalar(
(np.float32, (output_dim,)),
self.get_next_blob_reference('output')
)
def add_ops(self, net):
net.Conv(
self.input_record.field_blobs() + [self.kernel, self.bias],
self.output_schema.field_blobs(),
kernel_h=self.kernel_h,
kernel_w=self.kernel_w,
stride_h=self.stride_h,
stride_w=self.stride_w,
pad_t=self.pad_t,
pad_l=self.pad_l,
pad_b=self.pad_b,
pad_r=self.pad_r,
order=self.order
)
|
## @package layers
# Module caffe2.python.layers.layers
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
from caffe2.python import core, schema, scope, utils, workspace
from caffe2.python.layers.tags import TagContext
from caffe2.proto import caffe2_pb2
from collections import namedtuple
import numpy as np
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Some types to simplify descriptions of things traveling between ops
IdList = schema.List(np.int64)
IdScoreList = schema.Map(np.int64, np.float32)
def get_key(record):
if schema.equal_schemas(record, IdList):
key = 'values'
elif schema.equal_schemas(record, IdScoreList, check_field_types=False):
key = 'values:keys'
else:
raise NotImplementedError('Not implemented for {}'.format(record))
assert record[key].metadata is not None, (
"Blob {} doesn't have metadata".format(str(record[key]())))
return record[key]
def get_categorical_limit(record):
key = get_key(record)
return key.metadata.categorical_limit
def get_avg_length(record):
return record['lengths'].metadata.expected_value
def set_request_only(field):
for f in field.all_scalars():
categorical_limit, expected_value = None, None
if not f.metadata:
feature_specs = schema.FeatureSpec(
feature_is_request_only=True,
)
elif not f.metadata.feature_specs:
categorical_limit = f.metadata.categorical_limit
expected_value = f.metadata.expected_value
feature_specs = schema.FeatureSpec(
feature_is_request_only=True,
)
else:
categorical_limit = f.metadata.categorical_limit
expected_value = f.metadata.expected_value
feature_specs = schema.FeatureSpec(
feature_type=f.metadata.feature_specs.feature_type,
feature_names=f.metadata.feature_specs.feature_names,
feature_ids=f.metadata.feature_specs.feature_ids,
feature_is_request_only=True,
desired_hash_size=f.metadata.feature_specs.desired_hash_size,
)
# make sure not to set categorical_limit for a non-integer field
if not np.issubdtype(f.field_type(), np.integer):
assert categorical_limit is None, \
"categorical_limit shouldn't be set for no-integer field"
f.set_metadata(
schema.Metadata(
categorical_limit=categorical_limit,
expected_value=expected_value,
feature_specs=feature_specs,
)
)
class InstantiationContext(object):
"""
    List of contexts in which a layer can be instantiated
"""
    # Layers that support this context will accumulate predictions, labels,
    # and weights. The accumulated data can later be used to compute
    # calibration or for other purposes.
ACCUMULATE_PRED = 'accumulate_pred'
EVAL = 'eval'
PREDICTION = 'prediction'
TRAINING = 'training'
_LAYER_REGISTRY = {}
def register_layer(name, layer):
assert name not in _LAYER_REGISTRY, "{0} already exists".format(name)
_LAYER_REGISTRY[name] = layer
def layer_exists(name):
return name in _LAYER_REGISTRY
def get_layer_class(name):
return _LAYER_REGISTRY[name]
def create_layer(layer_name, *args, **kwargs):
return _LAYER_REGISTRY[layer_name](*args, **kwargs)
LayerPsParam = namedtuple('LayerPsParam', ['sparse_key', 'average_length'])
class LayerParameter(object):
def __init__(self, parameter=None, optimizer=None, initializer=None,
ps_param=None, regularizer=None):
assert isinstance(parameter, core.BlobReference), \
"expect {0} to be a blob reference".format(str(parameter))
        # need to put the following line (shape) before the initializer;
        # shape will be updated once the initializer is (re)set
self._shape = None
self.parameter = parameter
self.optimizer = optimizer
self.initializer = initializer
self.ps_param = ps_param
self.regularizer = regularizer
@property
def initializer(self):
return self._initializer
@initializer.setter
def initializer(self, op):
assert op is None or core.IsOperator(getattr(op, 'type', None)), \
"initializer expects an operator, got type: {}".format(type(op))
self._initializer = op
if op is not None:
self.shape = self._infer_shape_from_initializer()
@property
def shape(self):
return self._shape
@shape.setter
def shape(self, shape):
assert self.shape is None or self.shape == shape, \
"inconsistent shape for layer parameter:"\
" {}, expect: {}, but got {}".format(self, self.shape, shape)
self._shape = shape
def _infer_shape_from_initializer(self):
for arg in self.initializer.arg:
if arg.name == 'shape':
return list(arg.ints)
with workspace.WorkspaceGuard("model_init_by_loading_params"):
try:
net = core.Net("shape_checker")
net._net.op.extend([self.initializer])
shape_blob = net.NextScopedBlob(self.parameter + "_shape")
net.Shape([self.parameter], shape_blob)
workspace.RunNetOnce(net)
shape = workspace.FetchBlob(shape_blob).tolist()
# ResetWorkspace to save memory
workspace.ResetWorkspace()
return shape
except RuntimeError as exp:
logger.warning(
"Cannot infer the shape of blob {} from operator {}: {}".format(
self.parameter, self.initializer.type, exp)
)
workspace.ResetWorkspace()
return None
def __str__(self):
return str(self.parameter)
def is_request_only_scalar(scalar):
if len(scalar.field_metadata()) == 0:
return False
for metadata in scalar.field_metadata():
if not (metadata and metadata.feature_specs and getattr(
metadata.feature_specs, 'feature_is_request_only', False)):
return False
return True
class ModelLayer(object):
def __init__(self, model, prefix, input_record,
predict_input_record_fields=None, tags=None, **kwargs):
"""
        Base class for model layers. A layer is an abstraction that describes
        a model in terms of meta-operators, where each meta-operator can have
        different implementations for training, evaluation and prediction
        that are instantiated later. As an example, SampledSoftmax can do
        something related to sampling depending on supervision during
        training, and just apply softmax if it's used for
        prediction/evaluation.
        All inputs/outputs of a layer are represented as a record (an
        instance of schema bound to blobs) and are accessible through
        input_record and output_schema. If a layer needs only a subset of
        inputs / provides only a subset of outputs during inference, it
        should provide predict_input_record and predict_output_schema
        correspondingly (those records are expected to be subsets of
        input_record/output_schema).
        Each layer has a set of Tags associated with it, which depends on the
        current context and arguments. It's possible to use those tags at
        instantiation time.
"""
self.name = model.next_layer_name(prefix)
self.model = model
self.kwargs = kwargs
self._input_record = input_record
if predict_input_record_fields:
if not isinstance(predict_input_record_fields, list):
predict_input_record_fields = [predict_input_record_fields]
self._predict_input_record = self._input_record[
predict_input_record_fields]
else:
self._predict_input_record = None
self.request_only = True
if len(input_record.all_scalars()) == 0:
self.request_only = False
for scalar in input_record.all_scalars():
if not is_request_only_scalar(scalar):
self.request_only = False
break
self._output_schema = None
self._predict_output_schema = None
self.eval_output_schema = None
self.tags = set(tags or [])
self.tags.update(TagContext.current().tags)
self.params = []
self._export_output_for_metrics = False
self._export_params_for_metrics = False
def get_type(self):
return self.__class__.__name__
def _check_output_schema(self):
assert self._output_schema is not None, "Schema is not initialized"
assert (self._predict_output_schema is None or
schema.is_schema_subset(self._predict_output_schema,
self._output_schema)), (
"predict_output_schema is not a subset of the output_schema")
@property
def predict_input_record(self):
return self._predict_input_record or self._input_record
@property
def input_record(self):
return self._input_record
@property
def predict_output_schema(self):
self._check_output_schema()
return self._predict_output_schema or self._output_schema
@predict_output_schema.setter
def predict_output_schema(self, output_schema):
assert self._predict_output_schema is None
self._predict_output_schema = output_schema
@property
def output_schema(self):
if self.request_only:
set_request_only(self._output_schema)
self._check_output_schema()
return self._output_schema
@output_schema.setter
def output_schema(self, output_schema):
assert self._output_schema is None
self._output_schema = output_schema
def get_parameters(self):
return self.params
def get_fp16_compatible_parameters(self):
"""Return a subset of parameters which can be converted to fp16"""
return []
def get_memory_usage(self):
return 0
def add_init_params(self, init_net):
'''
Adds layer initialization operators to passed net.
'''
for param in self.params:
            # TODO(amalevich): Either go back to lambdas that add all params
            # (looks a bit safer and breaks fewer abstractions) or extend the
            # Net interface to support this type of operation better
# TODO(xlwang) init_net._net.op has type google.protobuf.\
# internal.containers.RepeatedCompositeFieldContainer, but
# the version of protobuf in fbcode does not support append
# so extend is used
init_op = param.initializer
current_device_scope = scope.CurrentDeviceScope()
if not init_op:
continue
if not init_op.HasField('device_option') and\
current_device_scope:
init_op = caffe2_pb2.OperatorDef()
init_op.CopyFrom(param.initializer)
init_op.device_option.CopyFrom(current_device_scope)
# do not add duplicated init ops
if any(utils.OpAlmostEqual(op, init_op, 'debug_info')
for op in init_net._net.op):
continue
init_net._net.op.extend([init_op])
def create_param(self, param_name, shape, initializer, optimizer,
ps_param=None, regularizer=None):
with scope.NameScope(self.name, reset=True):
param = self.model.create_param(param_name=param_name,
shape=shape,
initializer=initializer,
optimizer=optimizer,
ps_param=ps_param,
regularizer=regularizer)
# make sure we don't share parameters in the same layer
assert all(param.parameter != p.parameter for p in self.params)
self.params.append(param)
return param.parameter
def get_next_blob_reference(self, name):
with scope.NameScope(self.name, reset=True):
return self.model.net.NextScopedBlob(name)
def add_operators(self, net, init_net=None,
context=InstantiationContext.TRAINING):
'''
        Adds layer training or initialization operators to the passed in net.
        init_net can be None; add_init_params can be called independently.
        '''
        # The name scope below should guarantee that all intermediate blobs
        # will be associated with the layer that produces them
with scope.NameScope(self.name):
if context not in {InstantiationContext.PREDICTION,
InstantiationContext.EVAL,
InstantiationContext.ACCUMULATE_PRED}:
assert init_net, (
"Only prediction and eval context don't need init_net")
if init_net:
self.add_init_params(init_net)
if context == InstantiationContext.TRAINING:
self.add_train_ops(net)
elif context == InstantiationContext.EVAL:
self.add_eval_ops(net)
elif context == InstantiationContext.ACCUMULATE_PRED:
self.add_ops_to_accumulate_pred(net)
else:
self.add_ops(net)
if context in {InstantiationContext.TRAINING,
InstantiationContext.EVAL} \
and self._export_params_for_metrics:
self.add_param_copy_operators(net)
def add_ops(self, net):
# Predict layer implementation.
raise NotImplementedError
def add_eval_ops(self, net):
# Default eval layer implementation is completely matching
# predict layer implementation.
self.add_ops(net)
def add_train_ops(self, net):
# Default train layer implementation is completely matching
# eval layer implementation.
self.add_eval_ops(net)
def add_ops_to_accumulate_pred(self, net):
# This adds operators to accumulate predictions/labels/weights. The
# accumulated data can later be used to compute calibration or for other
# purpose. Default layer implementation is completely matching eval
# layer implementation.
self.add_eval_ops(net)
def add_param_copy_operators(self, net):
for param in self.params:
param_copy_ref = self.model.metrics_schema[str(param.parameter)]
net.Copy([param.parameter], param_copy_ref.field_blobs())
def export_output_for_metrics(self):
self._export_output_for_metrics = True
# Export output of the layer directly
export_name = self.name + "/output"
self.model.add_metric_field(export_name, self.output_schema)
def export_params_for_metrics(self):
self._export_params_for_metrics = True
# Export copies of parameters
for param in self.params:
param_copy_ref = self.get_next_blob_reference(
str(param).split("/")[-1] + "_copy")
self.model.add_metric_field(str(param.parameter), param_copy_ref)
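# A minimal sketch of a custom layer built on ModelLayer: it applies Relu to
# a Scalar input record. The class is illustrative and would normally be
# exposed through register_layer so that create_layer can find it.
class ReluLayer(ModelLayer):
    def __init__(self, model, input_record, name='relu_layer', **kwargs):
        super(ReluLayer, self).__init__(model, name, input_record, **kwargs)
        assert isinstance(input_record, schema.Scalar), "Incorrect input type"
        # same dtype/shape as the input, written to a fresh scoped blob
        self.output_schema = input_record.clone_schema()
        self.output_schema.set_value(self.get_next_blob_reference('output'))
    def add_ops(self, net):
        net.Relu(
            self.input_record.field_blobs(),
            self.output_schema.field_blobs(),
        )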
|
## @package batch_mse_loss
# Module caffe2.python.layers.batch_mse_loss
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, schema
from caffe2.python.layers.layers import (
ModelLayer,
)
from caffe2.python.layers.tags import (
Tags
)
import numpy as np
class BatchMSELoss(ModelLayer):
def __init__(self, model, input_record, name='batch_mse_loss', **kwargs):
super(BatchMSELoss, self).__init__(model, name, input_record, **kwargs)
assert schema.is_schema_subset(
schema.Struct(
('label', schema.Scalar()),
('prediction', schema.Scalar())
),
input_record
)
self.tags.update([Tags.EXCLUDE_FROM_PREDICTION])
self.output_schema = schema.Scalar(
np.float32,
self.get_next_blob_reference('output'))
def add_ops(self, net):
prediction = net.Squeeze(
self.input_record.prediction(),
net.NextScopedBlob('squeezed_prediction'),
dims=[1]
)
label = self.input_record.label.field_blobs()
if self.input_record.label.field_type().base != (
self.input_record.prediction.field_type().base):
label = net.Cast(
label,
net.NextScopedBlob('cast_label'),
to=schema.data_type_for_dtype(
self.input_record.prediction.field_type()
)
)
label = net.StopGradient(
label,
net.NextScopedBlob('stopped_label')
)
l2dist = net.SquaredL2Distance(
[label, prediction],
net.NextScopedBlob('l2')
)
if 'weight' in self.input_record.fields:
weight_blob = self.input_record.weight()
if self.input_record.weight.field_type().base != np.float32:
weight_blob = net.Cast(
weight_blob,
weight_blob + '_float32',
to=core.DataType.FLOAT
)
weight_blob = net.StopGradient(
[weight_blob],
[net.NextScopedBlob('weight_stop_gradient')],
)
l2dist = net.Mul(
[l2dist, weight_blob],
net.NextScopedBlob('weighted_l2_distance'),
)
net.AveragedLoss(l2dist, self.output_schema.field_blobs())
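# A minimal standalone sketch of the loss math above: SquaredL2Distance
# computes 0.5 * ||label - prediction||^2 per example, and AveragedLoss then
# reduces over the batch; the values are illustrative.
def _demo_mse_pieces():
    from caffe2.python import workspace
    workspace.FeedBlob("label", np.array([1.0, 0.0], dtype=np.float32))
    workspace.FeedBlob("prediction", np.array([0.8, 0.4], dtype=np.float32))
    net = core.Net("mse_demo")
    net.SquaredL2Distance(["label", "prediction"], "l2")
    net.AveragedLoss("l2", "loss")
    workspace.RunNetOnce(net)
    # l2 = [0.02, 0.08], so loss = 0.05
    print(workspace.FetchBlob("loss"))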
|
# @package constant_weight
# Module caffe2.fb.python.layers.constant_weight
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import schema
from caffe2.python.layers.layers import ModelLayer
import numpy as np
class ConstantWeight(ModelLayer):
def __init__(
self,
model,
input_record,
weights=None,
name='constant_weight',
**kwargs
):
super(ConstantWeight,
self).__init__(model, name, input_record, **kwargs)
self.output_schema = schema.Scalar(
np.float32, self.get_next_blob_reference('adaptive_weight')
)
self.data = self.input_record.field_blobs()
self.num = len(self.data)
weights = (
weights if weights is not None else
[1. / self.num for _ in range(self.num)]
)
assert len(weights) == self.num
self.weights = [
self.model.add_global_constant(
'%s_weight_%d' % (self.name, i), float(weights[i])
) for i in range(self.num)
]
def add_ops(self, net):
net.WeightedSum(
[b for x_w_pair in zip(self.data, self.weights) for b in x_w_pair],
self.output_schema()
)
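# A minimal standalone sketch of the interleaved WeightedSum call above, with
# two inputs weighted 0.3 and 0.7; blob names and values are illustrative.
def _demo_weighted_sum():
    from caffe2.python import core, workspace
    workspace.FeedBlob("x0", np.array([1.0, 2.0], dtype=np.float32))
    workspace.FeedBlob("x1", np.array([3.0, 4.0], dtype=np.float32))
    workspace.FeedBlob("w0", np.array([0.3], dtype=np.float32))
    workspace.FeedBlob("w1", np.array([0.7], dtype=np.float32))
    net = core.Net("weighted_sum_demo")
    net.WeightedSum(["x0", "w0", "x1", "w1"], "out")
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("out"))  # [2.4, 3.4]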
|
## @package uniform_sampling
# Module caffe2.python.layers.uniform_sampling
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from caffe2.python import core, schema
from caffe2.python.layers.layers import ModelLayer
class UniformSampling(ModelLayer):
"""
    Uniformly samples `num_samples - len(input_record)` unique elements from
    the range [0, num_elements). `samples` is the concatenation of
    input_record and the newly sampled elements; input_record is expected to
    contain unique values.
"""
def __init__(
self,
model,
input_record,
num_samples,
num_elements,
name='uniform_sampling',
**kwargs
):
super(UniformSampling, self).__init__(
model, name, input_record, **kwargs
)
assert num_elements > num_samples > 0
assert isinstance(input_record, schema.Scalar)
self.num_elements = num_elements
num_examples_init = ('GivenTensorInt64Fill',
{'values': [num_samples]})
self.num_samples = self.create_param(param_name='num_examples',
shape=(1,),
initializer=num_examples_init,
optimizer=model.NoOptim)
sampling_blob_init = ('ConstantFill',
{'value': float(num_samples) / num_elements,
'dtype': core.DataType.FLOAT})
self.sampling_prob = self.create_param(param_name='prob',
shape=(num_samples,),
initializer=sampling_blob_init,
optimizer=model.NoOptim)
self.output_schema = schema.Struct(
(
'samples', schema.Scalar(
np.int32, self.get_next_blob_reference("samples")
)
),
('sampling_prob', schema.Scalar(np.float32, self.sampling_prob)),
)
def add_ops(self, net):
net.StopGradient(self.sampling_prob, self.sampling_prob)
shape = net.Shape([self.input_record()], net.NextScopedBlob("shape"))
shape = net.Sub([self.num_samples, shape], shape)
samples = net.UniqueUniformFill(
[shape, self.input_record()],
net.NextScopedBlob("samples_before_concat"),
min=0,
max=self.num_elements - 1,
input_as_shape=True
)
net.Concat(
[self.input_record(), samples],
[self.output_schema.samples(), net.NextScopedBlob("split_info")],
axis=0
)
net.StopGradient(
self.output_schema.samples(), self.output_schema.samples()
)
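# A minimal standalone sketch of UniqueUniformFill as used above: draw unique
# ints in [min, max] while avoiding the ids given in the second input; the
# shapes and ranges are illustrative.
def _demo_unique_uniform_fill():
    from caffe2.python import workspace
    workspace.FeedBlob("fill_shape", np.array([5], dtype=np.int64))
    workspace.FeedBlob("avoid", np.array([0, 1, 2], dtype=np.int32))
    net = core.Net("sampling_demo")
    net.UniqueUniformFill(["fill_shape", "avoid"], "samples",
                          min=0, max=9, input_as_shape=True)
    workspace.RunNetOnce(net)
    print(workspace.FetchBlob("samples"))  # 5 unique ints from {3, ..., 9}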
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import unittest
class DoOpTest(TestCase):
def test_operator(self):
def make_net():
subnet = core.Net('subnet')
subnet.Add(["X", "Y"], "Z")
net = core.Net("net")
net.CreateScope([], "W")
net.Do(
["outer_X", "outer_Y", "W"],
["outer_Z", "W"],
net=subnet.Proto(),
inner_blobs=["X", "Y", "Z"],
outer_blobs_idx=[0, 1, 2],
)
return net
net = make_net()
workspace.ResetWorkspace()
workspace.FeedBlob("outer_X", np.asarray([1, 2]))
workspace.FeedBlob("outer_Y", np.asarray([3, 4]))
workspace.RunNetOnce(net)
outer_Z_val = workspace.FetchBlob("outer_Z")
self.assertTrue(np.all(outer_Z_val == np.asarray([4, 6])))
def test_reuse_workspace(self):
def make_net():
param_init_subnet = core.Net('param_init_subnet')
param_init_subnet.ConstantFill([], "X", shape=[1], value=1)
param_init_subnet.ConstantFill([], "Y", shape=[1], value=2)
subnet = core.Net("subnet")
subnet.Add(["X", "Y"], "Z")
net = core.Net("net")
net.CreateScope([], "W")
net.Do(
"W", "W",
net=param_init_subnet.Proto(),
inner_blobs=[],
outer_blobs_idx=[],
)
net.Do(
"W", ["outer_Z", "W"],
net=subnet.Proto(),
inner_blobs=["Z"],
outer_blobs_idx=[0],
reuse_workspace=True,
)
return net
net = make_net()
workspace.ResetWorkspace()
workspace.RunNetOnce(net)
outer_Z_val = workspace.FetchBlob("outer_Z")
self.assertTrue(np.all(outer_Z_val == np.asarray([3])))
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import (
brew, cnn, core, workspace, data_parallel_model,
timeout_guard, model_helper, optimizer)
from caffe2.python.test_util import TestCase
import caffe2.python.models.resnet as resnet
from caffe2.python.modeling.initializers import Initializer
from caffe2.python import convnet_benchmarks as cb
from caffe2.python import hypothesis_test_util as hu
import time
import numpy as np
from hypothesis import settings
CI_MAX_EXAMPLES = 2
CI_TIMEOUT = 600
def executor_test_settings(func):
if hu.is_sandcastle() or hu.is_travis():
return settings(
max_examples=CI_MAX_EXAMPLES,
timeout=CI_TIMEOUT
)(func)
else:
return func
def gen_test_resnet50(_order, _cudnn_ws):
model = cnn.CNNModelHelper(
order="NCHW",
name="resnet_50_test",
cudnn_exhaustive_search=True,
)
data = model.net.AddExternalInput("data")
label = model.net.AddExternalInput("label")
(_softmax, loss) = resnet.create_resnet50(
model,
data,
num_input_channels=3,
num_labels=1000,
label=label,
is_test=False,
)
return model, 227
def conv_model_generators():
return {
'AlexNet': cb.AlexNet,
'OverFeat': cb.OverFeat,
'VGGA': cb.VGGA,
'Inception': cb.Inception,
'MLP': cb.MLP,
'Resnet50': gen_test_resnet50,
}
def executor_test_model_names():
if hu.is_sandcastle() or hu.is_travis():
return ["MLP"]
else:
return conv_model_generators().keys()
def build_conv_model(model_name, batch_size):
model_gen_map = conv_model_generators()
assert model_name in model_gen_map, "Model " + model_name + " not found"
model, input_size = model_gen_map[model_name]("NCHW", None)
input_shape = [batch_size, 3, input_size, input_size]
if model_name == "MLP":
input_shape = [batch_size, input_size]
model.param_init_net.GaussianFill(
[],
"data",
shape=input_shape,
mean=0.0,
std=1.0
)
model.param_init_net.UniformIntFill(
[],
"label",
shape=[batch_size, ],
min=0,
max=999
)
model.AddGradientOperators(["loss"])
ITER = brew.iter(model, "iter")
LR = model.net.LearningRate(
ITER, "LR", base_lr=-1e-8, policy="step", stepsize=10000, gamma=0.999)
ONE = model.param_init_net.ConstantFill([], "ONE", shape=[1], value=1.0)
for param in model.params:
param_grad = model.param_to_grad[param]
model.net.WeightedSum([param, ONE, param_grad, LR], param)
return model
def build_resnet50_dataparallel_model(
num_gpus,
batch_size,
epoch_size,
cudnn_workspace_limit_mb=64,
num_channels=3,
num_labels=1000,
weight_decay=1e-4,
base_learning_rate=0.1,
image_size=227,
use_cpu=False):
batch_per_device = batch_size // num_gpus
train_arg_scope = {
'order': 'NCHW',
'use_cudnn': True,
'cudnn_exhaustive_search': False,
'ws_nbytes_limit': (cudnn_workspace_limit_mb * 1024 * 1024),
'deterministic': True,
}
train_model = model_helper.ModelHelper(
name="test_resnet50", arg_scope=train_arg_scope
)
def create_resnet50_model_ops(model, loss_scale):
with brew.arg_scope([brew.conv, brew.fc],
WeightInitializer=Initializer,
BiasInitializer=Initializer,
enable_tensor_core=0):
pred = resnet.create_resnet50(
model,
"data",
num_input_channels=num_channels,
num_labels=num_labels,
no_bias=True,
no_loss=True,
)
softmax, loss = model.SoftmaxWithLoss([pred, 'label'],
['softmax', 'loss'])
loss = model.Scale(loss, scale=loss_scale)
brew.accuracy(model, [softmax, "label"], "accuracy")
return [loss]
def add_optimizer(model):
stepsz = int(30 * epoch_size / batch_size)
optimizer.add_weight_decay(model, weight_decay)
opt = optimizer.build_multi_precision_sgd(
model,
base_learning_rate,
momentum=0.9,
nesterov=1,
policy="step",
stepsize=stepsz,
gamma=0.1
)
return opt
def add_image_input(model):
model.param_init_net.GaussianFill(
[],
["data"],
shape=[batch_per_device, 3, image_size, image_size],
dtype='float',
)
model.param_init_net.ConstantFill(
[],
["label"],
shape=[batch_per_device],
value=1,
dtype=core.DataType.INT32,
)
def add_post_sync_ops(model):
for param_info in model.GetOptimizationParamInfo(model.GetParams()):
if param_info.blob_copy is not None:
model.param_init_net.HalfToFloat(
param_info.blob,
param_info.blob_copy[core.DataType.FLOAT])
# Create parallelized model
data_parallel_model.Parallelize(
train_model,
input_builder_fun=add_image_input,
forward_pass_builder_fun=create_resnet50_model_ops,
optimizer_builder_fun=add_optimizer,
post_sync_builder_fun=add_post_sync_ops,
devices=list(range(num_gpus)),
rendezvous=None,
optimize_gradient_memory=True,
cpu_device=use_cpu,
shared_model=use_cpu,
)
return train_model
def run_resnet50_epoch(train_model, batch_size, epoch_size, skip_first_n_iter=0):
epoch_iters = int(epoch_size / batch_size)
prefix = "{}_{}".format(
train_model._device_prefix,
train_model._devices[0])
train_time = 0.0
train_examples = 0
for i in range(epoch_iters):
timeout = 600.0 if i == 0 else 60.0
with timeout_guard.CompleteInTimeOrDie(timeout):
t1 = time.time()
workspace.RunNet(train_model.net.Proto().name)
t2 = time.time()
dt = t2 - t1
if i >= skip_first_n_iter:
train_time += dt
train_examples += batch_size
fmt = "Finished iteration {}/{} ({:.2f} images/sec)"
print(fmt.format(i + 1, epoch_iters, batch_size / dt))
accuracy = workspace.FetchBlob(prefix + '/accuracy')
loss = workspace.FetchBlob(prefix + '/loss')
assert loss < 40, "Exploded gradients"
return (
train_examples,
train_time,
accuracy, loss)
class ExecutorTestBase(TestCase):
def compare_executors(self, model, ref_executor, test_executor, model_run_func):
model.Proto().type = ref_executor
model.param_init_net.set_rand_seed(seed=0xCAFFE2)
model.net.set_rand_seed(seed=0xCAFFE2)
workspace.ResetWorkspace()
workspace.RunNetOnce(model.param_init_net)
workspace.CreateNet(model.net)
model_run_func()
ref_ws = {str(k): workspace.FetchBlob(k) for k in workspace.Blobs()}
ref_ws = {k: v for k, v in ref_ws.items() if type(v) is np.ndarray}
workspace.ResetWorkspace()
workspace.RunNetOnce(model.param_init_net)
model.Proto().type = test_executor
workspace.CreateNet(model.net, overwrite=True)
model_run_func()
test_ws = {str(k): workspace.FetchBlob(k) for k in workspace.Blobs()}
test_ws = {k: v for k, v in test_ws.items() if type(v) is np.ndarray}
for blob_name, ref_val in ref_ws.items():
self.assertTrue(
blob_name in test_ws,
"Blob {} not found in {} run".format(blob_name, test_executor))
val = test_ws[blob_name]
np.testing.assert_array_equal(
val, ref_val,
"Blob {} differs in {} run".format(blob_name, test_executor))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import workspace
from caffe2.python.test.executor_test_util import (
build_conv_model,
build_resnet50_dataparallel_model,
run_resnet50_epoch,
ExecutorTestBase,
executor_test_settings,
executor_test_model_names)
from hypothesis import given
import hypothesis.strategies as st
import unittest
EXECUTORS = ["async_scheduling", "async_polling", "dag", "async_dag"]
ITERATIONS = 1
class ExecutorCPUConvNetTest(ExecutorTestBase):
@given(executor=st.sampled_from(EXECUTORS),
model_name=st.sampled_from(executor_test_model_names()),
batch_size=st.sampled_from([1]),
num_workers=st.sampled_from([8]))
@executor_test_settings
def test_executor(self, executor, model_name, batch_size, num_workers):
model = build_conv_model(model_name, batch_size)
model.Proto().num_workers = num_workers
def run_model():
iterations = ITERATIONS
if model_name == "MLP":
iterations = 1 # avoid numeric instability with MLP gradients
workspace.RunNet(model.net, iterations)
self.compare_executors(
model,
ref_executor="simple",
test_executor=executor,
model_run_func=run_model,
)
@unittest.skipIf(not workspace.has_gpu_support, "no gpu")
class ExecutorGPUResNetTest(ExecutorTestBase):
@given(executor=st.sampled_from(EXECUTORS),
num_workers=st.sampled_from([8]))
@executor_test_settings
def test_executor(self, executor, num_workers):
model = build_resnet50_dataparallel_model(
num_gpus=workspace.NumCudaDevices(), batch_size=8, epoch_size=8)
model.Proto().num_workers = num_workers
def run_model():
run_resnet50_epoch(model, batch_size=8, epoch_size=8)
self.compare_executors(
model,
ref_executor="simple",
test_executor=executor,
model_run_func=run_model,
)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from caffe2.python import core, workspace
import unittest
core.GlobalInit(['python'])
class BlobDeallocationTest(unittest.TestCase):
def test(self):
net = core.Net('net')
x = net.GivenTensorStringFill([], ['x'], shape=[3], values=['a', 'b', 'c'])
y = net.GivenTensorStringFill([], ['y'], shape=[3], values=['d', 'e', 'f'])
net.Concat([x, y], ['concated', '_'], axis=0)
workspace.ResetWorkspace()
workspace.RunNetOnce(net)
workspace.ResetWorkspace()
workspace.RunNetOnce(net)
        # Reaching this point twice without a crash means blob
        # deallocation between runs worked.
        self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
|
## @package onnx
# Module caffe2.python.onnx.backend_cpp_rep
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx.backend.base import BackendRep, namedtupledict
# This is a wrapper around the C++ Caffe2BackendRep, mainly to handle the
# different input and output types for the convenience of Python callers.
class Caffe2CppRep(BackendRep):
def __init__(self, cpp_rep):
super(Caffe2CppRep, self).__init__()
self.__core = cpp_rep
self.__external_outputs = cpp_rep.external_outputs()
self.__external_inputs = cpp_rep.external_inputs()
self.__uninitialized_inputs = cpp_rep.uninitialized_inputs()
def init_net(self):
return self.__core.init_net()
def pred_net(self):
return self.__core.pred_net()
def external_outputs(self):
return self.__core.external_outputs()
def external_inputs(self):
return self.__core.external_inputs()
def run(self, inputs):
output_values = None
if isinstance(inputs, dict):
output_values = self.__core.run(inputs)
elif isinstance(inputs, list) or isinstance(inputs, tuple):
if len(inputs) != len(self.__uninitialized_inputs):
raise RuntimeError('Expected {} values for uninitialized '
'graph inputs ({}), but got {}.'.format(
len(self.__uninitialized_inputs),
', '.join(self.__uninitialized_inputs),
len(inputs)))
input_map = {}
for k, v in zip(self.__uninitialized_inputs, inputs):
input_map[k] = v
output_values = self.__core.run(input_map)
else:
# single input
output_values = self.__core.run([inputs])
return namedtupledict('Outputs', self.__external_outputs)(*output_values)
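# Hedged usage sketch: a Caffe2CppRep is normally produced by
# Caffe2Backend.prepare() when the C++ backend path is enabled. Outputs are
# addressable by name or by position:
#
#   rep = Caffe2CppRep(cpp_rep)        # cpp_rep comes from the C++ backend
#   outputs = rep.run([input_array])   # one array per uninitialized input
#   first = outputs[0]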
|
## @package onnx
# Module caffe2.python.onnx.backend
"""Backend for running ONNX on Caffe2
To run this, you will need to have Caffe2 installed as well.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import collections
from subprocess import Popen, PIPE
import zipfile
import caffe2
from caffe2.python import core, workspace, rnn_cell, gru_cell
from caffe2.python.model_helper import ModelHelper
from caffe2.proto import caffe2_pb2
import caffe2.python.utils
import numpy as np
import onnx
from onnx import checker, GraphProto, TensorProto, AttributeProto, ModelProto
import onnx.numpy_helper
import onnx.defs
import onnx.optimizer
from onnx.backend.base import Backend, Device, DeviceType, namedtupledict
from caffe2.python.onnx.workspace import Workspace
from caffe2.python.onnx.backend_rep import Caffe2Rep
from caffe2.python.onnx.backend_cpp_rep import Caffe2CppRep
from caffe2.python.onnx.helper import dummy_name
import caffe2.python._import_c_extension as C
import warnings
def force_unicode(s):
try:
return s.decode('utf-8')
except AttributeError:
return s
def get_device_option(device):
m = {DeviceType.CPU: caffe2_pb2.CPU,
DeviceType.CUDA: caffe2_pb2.CUDA}
return core.DeviceOption(m[device.type], device.device_id)
class OnnxAttributes(dict):
"""
This is a more convenient way to work with ONNX/Caffe2 attributes
that is not the protobuf representation.
"""
@staticmethod
def from_onnx(args):
d = OnnxAttributes()
for arg in args:
d[arg.name] = convertAttributeProto(arg)
return d
def caffe2(self, kmap=lambda k: k):
for k, v in self.items():
if kmap(k) != '':
yield caffe2.python.utils.MakeArgument(kmap(k), v)
# TODO: Move this into ONNX main library
def convertAttributeProto(onnx_arg):
"""
Convert an ONNX AttributeProto into an appropriate Python object
for the type.
NB: Tensor attribute gets returned as the straight proto.
"""
if onnx_arg.HasField('f'):
return onnx_arg.f
elif onnx_arg.HasField('i'):
return onnx_arg.i
elif onnx_arg.HasField('s'):
return onnx_arg.s
elif onnx_arg.HasField('t'):
return onnx_arg.t # this is a proto!
elif len(onnx_arg.floats):
return list(onnx_arg.floats)
elif len(onnx_arg.ints):
return list(onnx_arg.ints)
elif len(onnx_arg.strings):
return list(onnx_arg.strings)
else:
raise ValueError("Unsupported ONNX attribute: {}".format(onnx_arg))
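# Hedged example (onnx.helper assumed available): convertAttributeProto
# unpacks whichever field of the AttributeProto is populated.
#
#   from onnx import helper
#   assert convertAttributeProto(helper.make_attribute('axis', 1)) == 1
#   assert convertAttributeProto(helper.make_attribute('perm', [0, 2, 1])) == [0, 2, 1]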
# TODO: Move this into ONNX main library
class OnnxNode(object):
"""
Reimplementation of NodeProto from ONNX, but in a form
more convenient to work with from Python.
We may temporarily edit these nodes to get them into Caffe2 form,
before actually translating into the Caffe2 protobuf, since this
is easier than decomposing everything, and putting it back together
when we're ready.
"""
def __init__(self, node):
self.name = str(node.name)
self.op_type = str(node.op_type)
self.attrs = OnnxAttributes.from_onnx(node.attribute)
self.inputs = list(node.input)
self.outputs = list(node.output)
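# Hedged example (onnx.helper assumed available): wrapping a NodeProto gives
# plain Python lists and a dict of decoded attributes.
#
#   from onnx import helper
#   n = OnnxNode(helper.make_node('Relu', inputs=['x'], outputs=['y']))
#   # n.op_type == 'Relu', n.inputs == ['x'], n.outputs == ['y'], n.attrs == {}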
Caffe2Ops = collections.namedtuple('Caffe2Ops', ['ops', 'init_ops', 'interface_blobs'])
class Caffe2Backend(Backend):
# The greatest version of the ONNX operator set which we are aware of.
# Models whose version is larger than this will cause us to emit a warning
# that we are attempting to translate on a "best effort" basis.
#
# If you increase this, make SURE you cross-reference all BC-breaking
# changes from one version to the next, and any that you did not
# implement, mark as broken in _broken_operators
_known_opset_version = 6
# This dictionary will record operators which are KNOWN to be
# broken, so we give a good error message rather than do something
# bogus and then fail.
_broken_operators = {
# 'BrokenOp': version_it_was_broken_in
}
# Operators that are different between Caffe2 and
# ONNX but only in their name.
# In most cases, this should be empty - as the effort of ONNX is
# to unify the operator definitions.
_renamed_operators = {
'Caffe2ConvTranspose': 'ConvTranspose',
'GlobalMaxPool': 'MaxPool',
'GlobalAveragePool': 'AveragePool',
'Pad': 'PadImage',
'Neg': 'Negative',
'BatchNormalization': 'SpatialBN',
'InstanceNormalization': 'InstanceNorm',
'MatMul': 'BatchMatMul',
'Upsample': 'ResizeNearest',
'Identity': 'Copy',
'Equal': 'EQ',
'Less': 'LT',
'Greater': 'GT',
'Unsqueeze': 'ExpandDims',
}
_global_renamed_attrs = {'kernel_shape': 'kernels'}
_per_op_renamed_attrs = {
'Squeeze': {'axes': 'dims'},
'Unsqueeze': {'axes': 'dims'},
'Transpose': {'perm': 'axes'},
'Upsample': {'mode': ''},
'ConvTranspose': {'output_padding': 'adjs'},
'Selu': {'gamma': 'scale'},
}
# operators whose behavior is different beyond renaming
# the value is an attribute of this class that is a
# function from ToffeIR node_def to caffe2 op_def
_special_operators = {
'LSTM': '_create_lstm',
'GRU': '_create_gru',
'RNN': '_create_rnn',
}
# NB: By default, you will use the LATEST definition of the operator,
# so this interface MAY make BC-breaking changes. Specify an
# opset_version if you don't want this to version.
@classmethod
def run_node(cls, node, inputs, device='CPU', opset_version=_known_opset_version, outputs_info=None):
super(Caffe2Backend, cls).run_node(node, inputs, device=device, outputs_info=outputs_info)
device_option = get_device_option(Device(device))
with Workspace(), core.DeviceScope(device_option): # temporary!
if isinstance(inputs, dict):
for key, value in inputs.items():
workspace.FeedBlob(key, value)
else:
assert len(node.input) == len(inputs), "{}: expected {} but got {}".format(
node.op_type, len(node.input), len(inputs))
for key, value in zip(node.input, inputs):
workspace.FeedBlob(key, value)
ops = []
cbackend = C.Caffe2Backend()
ops_str = cbackend.convert_node(node.SerializeToString(), opset_version)
for s in ops_str[0] + ops_str[1]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
op.device_option.CopyFrom(device_option)
ops.append(op)
# For testing
if "ONNX_CAFFE2_DEBUG" in os.environ:
init_ops, ops2, _ = cls._onnx_node_to_caffe2_op(
None, None, node, opset_version or cls._known_opset_version)
ops2 = init_ops + ops2
for op in ops2:
op.device_option.CopyFrom(device_option)
print("\nC++:\n{}\nPython:\n{}".format(ops, ops2))
workspace.RunOperatorsOnce(ops)
output_values = [workspace.FetchBlob(name) for name in node.output]
return namedtupledict('Outputs', node.output)(*output_values)
@classmethod
def _create_tensor_filling_op(cls, onnx_tensor, name=None):
"""
Given an Onnx TensorProto, translate it into a Caffe2 operator
which produces the given tensor filling op.
"""
assert name or onnx_tensor.name
name = name or onnx_tensor.name
c2_op = caffe2_pb2.OperatorDef()
c2_values = c2_op.arg.add()
c2_values.name = "values"
def tensor2list(onnx_tensor):
# Use the onnx.numpy_helper because the data may be raw
return onnx.numpy_helper.to_array(onnx_tensor).flatten().tolist()
if onnx_tensor.data_type in [TensorProto.FLOAT]:
c2_op.type = 'GivenTensorFill'
c2_values.floats.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.DOUBLE]:
c2_op.type = 'GivenTensorDoubleFill'
c2_values.floats.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.INT64,
TensorProto.UINT32]:
c2_op.type = 'GivenTensorInt64Fill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type in [TensorProto.UINT8,
TensorProto.INT8,
TensorProto.UINT16,
TensorProto.INT16,
TensorProto.INT32]:
c2_op.type = 'GivenTensorIntFill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type == TensorProto.BOOL:
c2_op.type = 'GivenTensorBoolFill'
c2_values.ints.extend(tensor2list(onnx_tensor))
elif onnx_tensor.data_type == TensorProto.STRING:
c2_op.type = 'GivenTensorStringFill'
c2_values.strings.extend(onnx_tensor.string_data)
else:
raise RuntimeError(
"unrecognized tensor type {}".format(onnx_tensor.data_type))
c2_shape = c2_op.arg.add()
c2_shape.name = "shape"
c2_shape.ints.extend(onnx_tensor.dims)
c2_op.output.append(name)
return c2_op
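    # Hedged example (classmethod, so no instance is needed; onnx.helper
    # assumed available):
    #
    #   from onnx import helper
    #   t = helper.make_tensor('w', TensorProto.FLOAT, dims=[2], vals=[1.0, 2.0])
    #   op = Caffe2Backend._create_tensor_filling_op(t)
    #   # op.type == 'GivenTensorFill'; running it fills blob 'w' with [1., 2.]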
@classmethod
def _rnn_shape_inference(cls, init_model, pred_model, n, input_blob, W):
# ad-hoc, informally-specified, bug-ridden, slow
# implementation of shape inference
# if the weight matrices are directly provided as
# initializers, their dimensions should be available in the
# init net model.
for x in init_model.graph.input:
if x.name == W:
return x.type.tensor_type.shape.dim[1].dim_value
# otherwise, assume that the input_blob is either a direct
# graph input, or another rnn op of the same type. This
# matches the pattern produced by exporting from pytorch
# (where the weight matrices are unusable for this purpose due
# to reshaping operations that lose shape information).
for x in pred_model.graph.input:
if x.name == input_blob:
return x.type.tensor_type.shape.dim[2].dim_value
curr = n
while True:
for x in pred_model.graph.input:
if x.name == curr.inputs[0] and curr.op_type == 'Gather':
return x.type.tensor_type.shape.dim[1].dim_value
prev = [x for x in map(OnnxNode, pred_model.graph.node) if x.outputs[0] == curr.inputs[0]]
if len(prev) != 1:
return
prev = prev[0]
if prev.op_type == n.op_type:
return prev.attrs['hidden_size']
if prev.op_type == 'Transpose':
for x in pred_model.graph.input:
if x.name == prev.inputs[0]:
return x.type.tensor_type.shape.dim[2].dim_value
curr = prev
@classmethod
def _create_rnn(cls, init_model, pred_model, n, opset_version):
assert init_model is not None, "cannot convert RNNs without access to the full model"
assert pred_model is not None, "cannot convert RNNs without access to the full model"
attrs = dict(n.attrs) # make a copy, which is safe to mutate
hidden_size = attrs.pop('hidden_size')
activation = force_unicode(attrs.pop('activations', ('tanh',))[0])
direction = force_unicode(attrs.pop('direction', 'forward'))
assert not attrs, "unsupported RNN attributes: " + str(attrs.keys())
assert direction in ['forward', 'bidirectional'], "unsupported backwards RNN"
input_blob, W, R, B, sequence_lens, initial_h = n.inputs
if sequence_lens == "":
sequence_lens = None
input_size = cls._rnn_shape_inference(init_model, pred_model, n, input_blob, W)
if input_size is None:
raise RuntimeError("best-effort shape inference for RNN input failed")
init_net = core.Net("init-net")
pred_mh = ModelHelper()
def make_rnn(direction_offset):
name = dummy_name()
# input and recurrence biases are squashed together in
# onnx but not in caffe2
bias_offset = 2 * direction_offset * hidden_size
init_net.Slice(B, name + "/i2h_b",
starts=[bias_offset + 0 * hidden_size],
ends =[bias_offset + 1 * hidden_size])
init_net.Slice(B, name + "/gates_t_b",
starts=[bias_offset + 1 * hidden_size],
ends =[bias_offset + 2 * hidden_size])
weight_offset = direction_offset * hidden_size
init_net.Slice(W, name + '/i2h_w',
starts=[weight_offset + 0 * hidden_size, 0],
ends =[weight_offset + 1 * hidden_size,-1])
init_net.Slice(R, name + '/gates_t_w',
starts=[weight_offset + 0 * hidden_size, 0],
ends =[weight_offset + 1 * hidden_size,-1])
initial_h_sliced = name + '/initial_h'
init_net.Slice(initial_h, initial_h_sliced,
starts=[direction_offset + 0, 0, 0],
ends =[direction_offset + 1,-1,-1])
if direction_offset == 1:
input = pred_mh.net.ReversePackedSegs(
[input_blob, sequence_lens], name + "/input-reversed")
else:
input = input_blob
hidden_t_all, hidden_t_last = rnn_cell.BasicRNN(
pred_mh,
input,
sequence_lens,
[initial_h_sliced],
input_size,
hidden_size,
name,
drop_states=False,
forward_only=True,
activation=activation
)
if direction_offset == 1:
hidden_t_all = pred_mh.net.ReversePackedSegs(
[hidden_t_all, sequence_lens], name + "/output-reversed")
return hidden_t_all, hidden_t_last
if direction == 'forward':
hidden_t_all, hidden_t_last = make_rnn(0)
# in the forward case, storage is shared between the two
# outputs. We need to decouple them so that the
# VariableLengthSequencePadding only mutates n.outputs[0]
pred_mh.net.Copy(hidden_t_last, n.outputs[1])
pred_mh.net = pred_mh.net.Clone(
"dummy-clone-net",
blob_remap={ hidden_t_all: n.outputs[0] }
)
elif direction == 'bidirectional':
hidden_t_all_f, hidden_t_last_f = make_rnn(0)
hidden_t_all_b, hidden_t_last_b = make_rnn(1)
pred_mh.net.Concat([hidden_t_all_f, hidden_t_all_b],
[n.outputs[0], dummy_name()], axis=2)
pred_mh.net.Concat([hidden_t_last_f, hidden_t_last_b],
[n.outputs[1], dummy_name()], axis=0)
if sequence_lens is not None:
pred_mh.net.VariableLengthSequencePadding(
[n.outputs[0], sequence_lens], [n.outputs[0]])
return Caffe2Ops(list(pred_mh.Proto().op),
list(init_net.Proto().op),
list(pred_mh.Proto().external_input))
@classmethod
def _create_lstm(cls, init_model, pred_model, n, opset_version):
assert init_model is not None, "cannot convert LSTMs without access to the full model"
assert pred_model is not None, "cannot convert LSTMs without access to the full model"
attrs = dict(n.attrs) # make a copy, which is safe to mutate
hidden_size = attrs.pop('hidden_size')
direction = force_unicode(attrs.pop('direction', 'forward'))
assert not attrs, "unsupported LSTM attributes: " + str(attrs.keys())
assert direction in ['forward', 'bidirectional'], "unsupported backwards LSTM"
input_blob, W, R, B, sequence_lens, initial_h, initial_c = n.inputs
if sequence_lens == "":
sequence_lens = None
input_size = cls._rnn_shape_inference(init_model, pred_model, n, input_blob, W)
if input_size is None:
raise RuntimeError("best-effort shape inference for LSTM input failed")
init_net = core.Net("init-net")
pred_mh = ModelHelper()
def make_lstm(direction_offset):
name = dummy_name()
# input and recurrence biases are squashed together in
# onnx but not in caffe2
bias_offset = 8 * direction_offset * hidden_size
Bi = init_net.Slice(B, name + "_bias_i2h",
starts=[bias_offset + 0 * hidden_size],
ends =[bias_offset + 4 * hidden_size])
Br = init_net.Slice(B, name + "_bias_gates",
starts=[bias_offset + 4 * hidden_size],
ends =[bias_offset + 8 * hidden_size])
weight_offset = 4 * direction_offset * hidden_size
W_ = init_net.Slice(W, name + '/i2h_w_pre',
starts=[weight_offset + 0 * hidden_size, 0],
ends =[weight_offset + 4 * hidden_size,-1])
R_ = init_net.Slice(R, name + '/gates_t_w_pre',
starts=[weight_offset + 0 * hidden_size, 0],
ends =[weight_offset + 4 * hidden_size,-1])
# caffe2 has a different order from onnx. We need to rearrange
# i o f c -> i f o c
reforms = ((W_, 'i2h_w', [(0, -1)]),
(R_, 'gates_t_w', [(0, -1)]),
(Bi, 'i2h_b' , []),
(Br, 'gates_t_b', []))
for name_from, name_to, extra_dims in reforms:
xi, xo, xf, xc = [name_from + suffix for suffix in ("_i", "_o", "_f", "_c")]
for i, x in enumerate([xi, xo, xf, xc]):
dim0 = i * hidden_size, (i+1) * hidden_size
starts, ends = zip(dim0, *extra_dims)
init_net.Slice(name_from, x, starts=starts, ends=ends)
init_net.Concat([xi, xf, xo, xc], ['%s/%s' % (name, name_to), dummy_name()], axis=0)
initial_h_sliced = name + '/initial_h'
init_net.Slice(initial_h, initial_h_sliced,
starts=[direction_offset + 0, 0, 0],
ends =[direction_offset + 1,-1,-1])
initial_c_sliced = name + '/initial_c'
init_net.Slice(initial_c, initial_c_sliced,
starts=[direction_offset + 0, 0, 0],
ends =[direction_offset + 1,-1,-1])
if direction_offset == 1:
input = pred_mh.net.ReversePackedSegs(
[input_blob, sequence_lens], name + "/input-reversed")
else:
input = input_blob
hidden_t_all, hidden_t_last, _, cell_last, params = rnn_cell.LSTM(
pred_mh,
input,
sequence_lens,
[initial_h_sliced, initial_c_sliced],
input_size,
hidden_size,
name,
drop_states=False,
forward_only=True,
return_params=True
)
if direction_offset == 1:
hidden_t_all = pred_mh.net.ReversePackedSegs(
[hidden_t_all, sequence_lens], name + "/output-reversed")
return hidden_t_all, hidden_t_last, cell_last
if direction == 'forward':
hidden_t_all, hidden_t_last, cell_last = make_lstm(0)
# in the forward case, storage is shared between the three
# outputs. We need to decouple them so that the
# VariableLengthSequencePadding only mutates n.outputs[0]
pred_mh.net.Copy(hidden_t_last, n.outputs[1])
pred_mh.net.Copy(cell_last, n.outputs[2])
pred_mh.net = pred_mh.net.Clone(
"dummy-clone-net",
blob_remap={ hidden_t_all: n.outputs[0] }
)
elif direction == 'bidirectional':
hidden_t_all_f, hidden_t_last_f, cell_last_f = make_lstm(0)
hidden_t_all_b, hidden_t_last_b, cell_last_b = make_lstm(1)
pred_mh.net.Concat([hidden_t_all_f, hidden_t_all_b],
[n.outputs[0], dummy_name()], axis=2)
pred_mh.net.Concat([hidden_t_last_f, hidden_t_last_b],
[n.outputs[1], dummy_name()], axis=0)
pred_mh.net.Concat([cell_last_f, cell_last_b],
[n.outputs[2], dummy_name()], axis=0)
if sequence_lens is not None:
pred_mh.net.VariableLengthSequencePadding(
[n.outputs[0], sequence_lens], [n.outputs[0]])
return Caffe2Ops(list(pred_mh.Proto().op),
list(init_net.Proto().op),
list(pred_mh.Proto().external_input))
@classmethod
def _create_gru(cls, init_model, pred_model, n, opset_version):
assert init_model is not None, "cannot convert GRUs without access to the full model"
assert pred_model is not None, "cannot convert GRUs without access to the full model"
attrs = dict(n.attrs) # make a copy, which is safe to mutate
hidden_size = attrs.pop('hidden_size')
linear_before_reset = attrs.pop('linear_before_reset', 0)
direction = force_unicode(attrs.pop('direction', 'forward'))
assert not attrs, "unsupported GRU attributes: " + str(attrs.keys())
assert direction in ['forward', 'bidirectional'], "unsupported backwards GRU"
input_blob, W, R, B, sequence_lens, initial_h = n.inputs
if sequence_lens == "":
sequence_lens = None
input_size = cls._rnn_shape_inference(init_model, pred_model, n, input_blob, W)
if input_size is None:
raise RuntimeError("best-effort shape inference for GRU input failed")
init_net = core.Net("init-net")
pred_mh = ModelHelper()
def make_gru(direction_offset):
name = dummy_name()
# input and recurrence biases are squashed together in
# onnx but not in caffe2
bias_offset = 6 * direction_offset * hidden_size
Bi = init_net.Slice(B, name + "_bias_i2h",
starts=[bias_offset + 0 * hidden_size],
ends =[bias_offset + 3 * hidden_size])
Br = init_net.Slice(B, name + "_bias_gates",
starts=[bias_offset + 3 * hidden_size],
ends =[bias_offset + 6 * hidden_size])
weight_offset = 3 * direction_offset * hidden_size
W_ = init_net.Slice(W, name + '/i2h_w_pre',
starts=[weight_offset + 0 * hidden_size, 0],
ends =[weight_offset + 3 * hidden_size,-1])
R_ = init_net.Slice(R, name + '/gates_t_w_pre',
starts=[weight_offset + 0 * hidden_size, 0],
ends =[weight_offset + 3 * hidden_size,-1])
# caffe2 has a different order from onnx. We need to rearrange
# z r h -> r z h
reforms = ((W_, 'i2h_w', True, [(0,-1)]),
(R_, 'gate_t_w', False, [(0,-1)]),
(Bi, 'i2h_b', True, []),
(Br, 'gate_t_b', False, []))
for name_from, name_to, do_concat, extra_dims in reforms:
xz, xr, xh = ['%s/%s_%s' % (name, prefix, name_to) for prefix in ('update', 'reset', 'output')]
for i, x in enumerate([xz, xr, xh]):
dim0 = i * hidden_size, (i+1) * hidden_size
starts, ends = zip(dim0, *extra_dims)
init_net.Slice(name_from, x, starts=starts, ends=ends)
if do_concat:
init_net.Concat([xr, xz, xh], ['%s/%s' % (name, name_to), dummy_name()], axis=0)
initial_h_sliced = name + '/initial_h'
init_net.Slice(initial_h, initial_h_sliced,
starts=[direction_offset + 0, 0, 0],
ends =[direction_offset + 1,-1,-1])
if direction_offset == 1:
input = pred_mh.net.ReversePackedSegs(
[input_blob, sequence_lens], name + "/input-reversed")
else:
input = input_blob
hidden_t_all, hidden_t_last = gru_cell.GRU(
pred_mh,
input,
sequence_lens,
[initial_h_sliced],
input_size,
hidden_size,
name,
drop_states=False,
forward_only=True,
linear_before_reset=linear_before_reset
)
if direction_offset == 1:
hidden_t_all = pred_mh.net.ReversePackedSegs(
[hidden_t_all, sequence_lens], name + "/output-reversed")
return hidden_t_all, hidden_t_last
if direction == 'forward':
hidden_t_all, hidden_t_last = make_gru(0)
# in the forward case, storage is shared between the two
# outputs. We need to decouple them so that the
# VariableLengthSequencePadding only mutates n.outputs[0]
pred_mh.net.Copy(hidden_t_last, n.outputs[1])
pred_mh.net = pred_mh.net.Clone(
"dummy-clone-net",
blob_remap={ hidden_t_all: n.outputs[0] }
)
elif direction == 'bidirectional':
hidden_t_all_f, hidden_t_last_f = make_gru(0)
hidden_t_all_b, hidden_t_last_b = make_gru(1)
pred_mh.net.Concat([hidden_t_all_f, hidden_t_all_b],
[n.outputs[0], dummy_name()], axis=2)
pred_mh.net.Concat([hidden_t_last_f, hidden_t_last_b],
[n.outputs[1], dummy_name()], axis=0)
if sequence_lens is not None:
pred_mh.net.VariableLengthSequencePadding(
[n.outputs[0], sequence_lens], [n.outputs[0]])
return Caffe2Ops(list(pred_mh.Proto().op),
list(init_net.Proto().op),
list(pred_mh.Proto().external_input))
@classmethod
def _substitute_raw_value(cls, tp, raw_values_dict):
        if tp.HasField('raw_data') and tp.raw_data == b'__EXTERNAL':
if tp.name not in raw_values_dict:
raise RuntimeError('TensorProto for value {} referenced raw data but it was not found!'.format(tp.name))
else:
tp.raw_data = raw_values_dict[tp.name]
@classmethod
def _visit_and_substitute_raw_values(cls, nodes, raw_values_dict):
for node in nodes:
for attr in node.attribute:
if attr.HasField('t'):
cls._substitute_raw_value(attr.t, raw_values_dict)
for t in attr.tensors:
cls._substitute_raw_value(t, raw_values_dict)
if attr.HasField('g'):
cls._visit_and_substitute_raw_values(attr.g.node, raw_values_dict)
for g in attr.graphs:
cls._visit_and_substitute_raw_values(g.node, raw_values_dict)
@classmethod
def _external_value_resolution_pass(cls, model, raw_values_dict):
for init in model.graph.initializer:
cls._substitute_raw_value(init, raw_values_dict)
cls._visit_and_substitute_raw_values(model.graph.node, raw_values_dict)
@classmethod
def _direct_initialize_parameters(cls, initializer, ws, device_option):
for tp in initializer:
ws.FeedBlob(tp.name, onnx.numpy_helper.to_array(tp), device_option)
@classmethod
def _direct_initialize_inputs(cls, inputs, initialized, ws, device_option):
for value_info in inputs:
if value_info.name in initialized:
continue
shape = list(d.dim_value for d in value_info.type.tensor_type.shape.dim)
ws.FeedBlob(value_info.name, np.ones(shape), device_option)
@staticmethod
def optimize_onnx(input, init=False, predict=False):
passes = ['fuse_consecutive_transposes',
'eliminate_nop_transpose',
'fuse_transpose_into_gemm']
if init:
passes.append('split_init')
if predict:
passes.append('split_predict')
out = onnx.optimizer.optimize(input, passes)
return out
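    # Hedged example: the same serialized model can be split into its init and
    # predict halves via the optimizer passes above:
    #
    #   init_str = Caffe2Backend.optimize_onnx(model.SerializeToString(), init=True)
    #   pred_str = Caffe2Backend.optimize_onnx(model.SerializeToString(), predict=True)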
@classmethod
def prepare_zip_archive(cls, file, device='CPU', **kwargs):
with zipfile.ZipFile(file, mode='r') as z:
with z.open('__MODEL_PROTO', 'r') as f:
                model = onnx.load(f)
            # exclude the model proto entry itself from the blob listing
            blob_names = set(z.namelist()) - {'__MODEL_PROTO'}
# TODO: make this more efficient
raw_values_dict = {}
for name in blob_names:
with z.open(name, 'r') as blob_file:
raw_values_dict[name] = blob_file.read()
cls._external_value_resolution_pass(model, raw_values_dict)
return cls.prepare(model, device, **kwargs)
@classmethod
def prepare(cls, model, device='CPU', **kwargs):
'''
        For Onnx Caffe2Backend, we require that the init_graph does not initialize the actual input of the predict_graph.
        For example, if "img" is the input blob for the predict_net, we require that neither the init_graph nor the
        initializers of the predict_graph initialize "img". We don't have a check for this, since
        there is no way to know which blob is the input of the predict_graph.
'''
super(Caffe2Backend, cls).prepare(model, device, **kwargs)
opset_version = None
for imp in model.opset_import:
if not imp.HasField("domain") or imp.domain == "":
opset_version = imp.version
if imp.version > cls._known_opset_version:
warnings.warn("This version of onnx-caffe2 targets ONNX operator set version {}, but the model we are trying to import uses version {}. We will try to import it anyway, but if the model uses operators which had BC-breaking changes in the intervening versions, import will fail.".format(cls._known_opset_version, imp.version))
else:
warnings.warn("Unrecognized operator set {}".format(imp.domain))
if opset_version is None:
if model.ir_version >= 0x00000003:
raise RuntimeError("Model with IR version >= 3 did not specify ONNX operator set version (onnx-caffe2 requires it)")
else:
opset_version = 1
# Check whether we have RNN related ops
pred_model = ModelProto()
pred_model.ParseFromString(cls.optimize_onnx(model.SerializeToString(), predict=True))
rnn_nodes = []
for node in pred_model.graph.node:
if node.op_type in {'LSTM', 'GRU', 'RNN'}:
rnn_nodes.append(node)
# Build the C++ backend
# TODO: build a predictor that supports GPU
# And for RNN nets, we need to avoid adding init_net
use_cpp_backend = device == 'CPU' and not rnn_nodes
# use python backend for now
use_cpp_backend = False
if use_cpp_backend:
c2_rnn_ops = []
if rnn_nodes:
init_model = ModelProto()
init_model.ParseFromString(cls.optimize_onnx(model.SerializeToString(), init=True))
for node in rnn_nodes:
c2ops = cls._onnx_node_to_caffe2_op(
init_model, pred_model, node, opset_version)
init_ops = [x.SerializeToString() for x in c2ops.init_ops]
ops = [x.SerializeToString() for x in c2ops.ops]
external_inputs = c2ops.interface_blobs
c2_rnn_ops.append(C.Caffe2Ops(init_ops, ops, external_inputs))
del init_model
cbackend = C.Caffe2Backend()
rep = cbackend.prepare(model.SerializeToString(), device, c2_rnn_ops)
# For testing
# Dump the net descriptions to file for comparison with the Python ones
if "ONNX_CAFFE2_DEBUG" in os.environ:
pred_net_str = rep.pred_net()
pn = caffe2_pb2.NetDef()
pn.ParseFromString(pred_net_str)
init_net_str = rep.init_net()
inn = caffe2_pb2.NetDef()
inn.ParseFromString(init_net_str)
with open("cpp.txt", "w") as f:
f.write("pred_net: \n{}".format(pn))
rep_wrapper = Caffe2CppRep(rep)
return rep_wrapper
else:
ws = Workspace()
device_option = get_device_option(Device(device))
# Directly load initializer data into blobs in workspace
cls._direct_initialize_parameters(
model.graph.initializer,
ws,
device_option,
)
initialized = {init.name for init in model.graph.initializer}
cls._direct_initialize_inputs(
model.graph.input,
initialized,
ws,
device_option,
)
uninitialized = [value_info.name for value_info in model.graph.input if value_info.name not in initialized]
init_net, predict_net = cls._onnx_model_to_caffe2_net(model, device, opset_version, False)
if "ONNX_CAFFE2_DEBUG" in os.environ:
with open("python.txt", "w") as f:
f.write("pred_net: \n{}".format(predict_net))
retval = Caffe2Rep(init_net, predict_net, ws, uninitialized)
return retval
@classmethod
# TODO: This method needs a refactor for clarity
def _onnx_node_to_caffe2_op(cls, init_model, pred_model, node_def, opset_version):
cbackend = C.Caffe2Backend()
if cbackend.support_onnx_import(node_def.op_type):
op_strs = cbackend.convert_node(node_def.SerializeToString(), opset_version)
init_ops = []
for s in op_strs[0]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
init_ops.append(op)
ops = []
for s in op_strs[1]:
op = caffe2_pb2.OperatorDef()
op.ParseFromString(s)
ops.append(op)
return Caffe2Ops(ops, init_ops, [])
if node_def.op_type in cls._special_operators:
translator = getattr(cls, cls._special_operators[node_def.op_type])
else:
translator = cls._common_onnx_node_to_caffe2_op
ops = translator(init_model, pred_model, OnnxNode(node_def), opset_version)
if isinstance(ops, Caffe2Ops):
return ops
if not isinstance(ops, collections.Iterable):
ops = [ops]
return Caffe2Ops(ops, [], [])
@classmethod
def _common_onnx_node_to_caffe2_op(cls, init_model, pred_model, onnx_node, opset_version):
"""
This translator performs the basic translation of ONNX nodes into
Caffe2 operators. Besides doing a straightforward marshalling from
one format to another, it also does these extra things:
- Renames operators based on '_renamed_operators'
- Renames attributes based on '_global_renamed_attrs' and
'_per_op_renamed_attrs'
If you're writing a custom translator, consider calling this first,
and then fixing things up further.
"""
c2_op = caffe2_pb2.OperatorDef()
c2_op.input.extend(onnx_node.inputs)
c2_op.output.extend(onnx_node.outputs)
c2_op.name = onnx_node.name
onnx_op_type = onnx_node.op_type
broken_version = cls._broken_operators.get(onnx_op_type, float('Inf'))
if broken_version <= opset_version:
raise ValueError(
"Don't know how to translate op {} in ONNX operator set v{} (I only support prior to v{})".format(onnx_op_type, opset_version, broken_version))
c2_op.type = cls._renamed_operators.get(onnx_op_type, onnx_op_type)
if not core.IsOperator(c2_op.type):
raise ValueError(
"Don't know how to translate op {}".format(onnx_op_type))
def kmap(k):
if (onnx_op_type in cls._per_op_renamed_attrs and
k in cls._per_op_renamed_attrs[onnx_op_type]):
return cls._per_op_renamed_attrs[onnx_op_type][k]
if k in cls._global_renamed_attrs:
return cls._global_renamed_attrs[k]
return k
c2_op.arg.extend(onnx_node.attrs.caffe2(kmap=kmap))
return c2_op
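    # Hedged example of the renaming tables in action (onnx.helper assumed
    # available):
    #
    #   node = helper.make_node('Unsqueeze', ['x'], ['y'], axes=[0])
    #   c2_op = Caffe2Backend._common_onnx_node_to_caffe2_op(
    #       None, None, OnnxNode(node), opset_version=6)
    #   # c2_op.type == 'ExpandDims', with an arg named 'dims' equal to [0]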
@staticmethod
def _all_names_in_graph(graph):
if graph is None:
return set()
names = set()
names.update(value_info.name for value_info in graph.input)
names.update(value_info.name for value_info in graph.output)
for node in graph.node:
names.update(node.input)
names.update(node.output)
return names
@classmethod
def _onnx_model_to_caffe2_net(cls, onnx_model, device, opset_version, include_initializers):
device_option = get_device_option(Device(device))
init_model = ModelProto()
init_model.ParseFromString(cls.optimize_onnx(onnx_model.SerializeToString(), init=True))
pred_model = ModelProto()
pred_model.ParseFromString(cls.optimize_onnx(onnx_model.SerializeToString(), predict=True))
init_net = caffe2_pb2.NetDef()
pred_net = caffe2_pb2.NetDef()
init_net.name = onnx_model.graph.name + '_init'
pred_net.name = onnx_model.graph.name + '_predict'
if include_initializers:
init_net.op.extend(cls._create_tensor_filling_op(tp) for tp in onnx_model.graph.initializer)
dummy_name(cls._all_names_in_graph(init_model.graph) | cls._all_names_in_graph(pred_model.graph))
success = True
for net, model in ( (init_net, init_model), (pred_net, pred_model) ):
net.device_option.CopyFrom(device_option)
for node in model.graph.node:
try:
c2ops = cls._onnx_node_to_caffe2_op(
init_model, pred_model, node, opset_version)
except Exception as e:
success = False
print('ONNX FATAL:', e)
continue
(init_net if include_initializers else net).op.extend(c2ops.init_ops)
net.op.extend(c2ops.ops)
net.external_input.extend(c2ops.interface_blobs)
net.external_output.extend(
value_info.name for value_info in model.graph.output)
net.external_input.extend(
value_info.name for value_info in model.graph.input)
if not success:
raise RuntimeError('ONNX conversion failed')
return init_net, pred_net
    # wrapper for backwards compatibility
@classmethod
def onnx_graph_to_caffe2_net(cls, model, device="CPU", opset_version=_known_opset_version):
return cls._onnx_model_to_caffe2_net(model, device=device, opset_version=opset_version, include_initializers=True)
@classmethod
def supports_device(cls, device_str):
device = Device(device_str)
if device.type == DeviceType.CPU:
return True
elif device.type == DeviceType.CUDA:
return workspace.has_gpu_support
return False
prepare = Caffe2Backend.prepare
prepare_zip_archive = Caffe2Backend.prepare_zip_archive
run_node = Caffe2Backend.run_node
run_model = Caffe2Backend.run_model
supports_device = Caffe2Backend.supports_device # noqa
|
## @package onnx
# Module caffe2.python.onnx.error
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class BaseException(Exception): pass
class Unsupported(BaseException): pass
|
## @package onnx
# Module caffe2.python.onnx.frontend
"""Caffe2 Protobuf to ONNX converter
To run this, you will need to have Caffe2 installed as well.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import itertools
import collections
import logging
import re
from caffe2.python import core as caffe2_core
from caffe2.proto import caffe2_legacy_pb2
from enum import Enum
from onnx import (defs, checker, helper, numpy_helper, mapping,
ModelProto, GraphProto, NodeProto, AttributeProto, TensorProto, OperatorSetIdProto)
from onnx.helper import make_tensor, make_tensor_value_info, make_attribute, make_model
import numpy as np
from caffe2.python.onnx.helper import c2_native_run_net, dummy_name
from caffe2.python.onnx.error import Unsupported
import caffe2.python._import_c_extension as C
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Caffe2Frontend(object):
    # This number controls the semantics of the operators we target. Whenever
    # ONNX makes a BC-breaking change to the semantics of operators, having this
    # set to an accurate number will prevent our models from exporting. However,
    # we should strive to keep this as up-to-date as possible.
target_opset_version = 6
_renamed_operators = {
'SpatialBN': 'BatchNormalization',
'Conv1D': 'Conv',
'Conv2D': 'Conv',
'Conv3D': 'Conv',
'ConvTranspose1D': 'ConvTranspose',
'ConvTranspose2D': 'ConvTranspose',
'ConvTranspose3D': 'ConvTranspose',
'MaxPool1D': 'MaxPool',
'MaxPool2D': 'MaxPool',
'MaxPool3D': 'MaxPool',
'AveragePool1D': 'AveragePool',
'AveragePool2D': 'AveragePool',
'AveragePool3D': 'AveragePool',
}
# caffe2 arguments that are completely removed in onnx
_blacklist_caffe2_args = {
'order': {b'NCHW'},
'cudnn_exhaustive_search': {0, 1},
'use_cudnn': {0, 1},
}
_global_renamed_args = {
'kernels': 'kernel_shape',
}
_per_op_renamed_args = {
'Squeeze': {'dims': 'axes'},
'Transpose': {'axes': 'perm'},
}
_special_operators = {}
@classmethod
def _common_caffe2_arg_to_onnx_attr(cls, op_def, arg):
# name
op_type = op_def.type
if op_type in cls._per_op_renamed_args:
name = cls._per_op_renamed_args[op_type].get(
arg.name, arg.name)
else:
name = cls._global_renamed_args.get(arg.name, arg.name)
# value
if arg.HasField('f'):
value = arg.f
elif arg.HasField('i'):
value = arg.i
elif arg.HasField('s'):
value = arg.s
elif arg.floats:
value = arg.floats
elif arg.ints:
value = arg.ints
elif arg.strings:
value = arg.strings
else:
raise ValueError('Could not find data field in arg: {}'.format(arg))
if name in cls._blacklist_caffe2_args:
            assert value in cls._blacklist_caffe2_args[name]
return None
return helper.make_attribute(name, value)
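    # Hedged example (op_def is any OperatorDef; caffe2.python.utils assumed
    # available): a Caffe2 'kernels' arg is renamed to the ONNX 'kernel_shape'
    # attribute, while blacklisted args such as order=NCHW are dropped (None).
    #
    #   from caffe2.python import utils
    #   arg = utils.MakeArgument('kernels', [3, 3])
    #   attr = Caffe2Frontend.caffe2_arg_to_onnx_attr(op_def, arg)
    #   # attr.name == 'kernel_shape', list(attr.ints) == [3, 3]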
@classmethod
def caffe2_arg_to_onnx_attr(cls, op_def, arg):
return cls._common_caffe2_arg_to_onnx_attr(op_def, arg)
@classmethod
def _common_caffe2_op_to_onnx_node(cls, op_def, shapes):
node_def = NodeProto()
node_def.name = op_def.name
node_def.op_type = cls._renamed_operators.get(op_def.type, op_def.type)
node_def.input.extend(op_def.input)
node_def.output.extend(op_def.output)
attrs = filter(None, [cls.caffe2_arg_to_onnx_attr(op_def, arg)
for arg in op_def.arg])
node_def.attribute.extend(attrs)
return node_def
@classmethod
def caffe2_op_to_onnx_node(cls, op_def, shapes):
if C.support_onnx_export(op_def.type):
shape_list = list(shapes.values())
node_strs, tensor_strs = C.export_to_onnx(op_def.SerializeToString(), shapes)
nodes = []
for s in node_strs:
node = NodeProto()
node.ParseFromString(s)
nodes.append(node)
const_tensors = []
for s in tensor_strs:
tensor = TensorProto()
tensor.ParseFromString(s)
const_tensors.append(tensor)
return nodes, const_tensors
elif op_def.type in cls._special_operators:
translator = getattr(cls, cls._special_operators[op_def.type])
else:
translator = cls._common_caffe2_op_to_onnx_node
nodes = translator(op_def, shapes)
const_tensors = []
if isinstance(nodes, tuple):
nodes, const_tensors = nodes
if not isinstance(nodes, collections.Iterable):
nodes = [nodes]
return nodes, const_tensors
@staticmethod
def _all_names_in_net(net):
if net is None:
return set()
names = set()
names.update(net.external_input)
names.update(net.external_output)
for op in net.op:
names.update(op.input)
names.update(op.output)
return names
@staticmethod
def _extract_value_info(tensor):
return make_tensor_value_info(
name=tensor.name,
elem_type=tensor.data_type,
shape=tensor.dims)
@classmethod
def caffe2_net_to_onnx_graph(cls,
predict_net,
init_net=None,
value_info=None):
if value_info is None:
value_info = {}
if not isinstance(value_info, dict):
raise ValueError('Please pass value_info as a '
'name -> (type, shape) dictionary')
cls._filter_fake_init(init_net, value_info)
cls._ssa_rewrite(predict_net, init_net, value_info)
if init_net:
initializer = cls.caffe2_init_net_to_initializer(init_net)
value_info.update({init.name: (init.data_type, init.dims)
for init in initializer})
else:
initializer = []
# Check whether we have got type shape info of all input
missing = (set(list(predict_net.external_input)) -
set(value_info.keys()))
if missing:
raise RuntimeError('Could not find value info of inputs: {}'.format(
', '.join(missing)))
inputs = {}
for name in predict_net.external_input:
elem_type, shape = value_info[name]
inputs[name] = np.random.randn(*shape).astype(
mapping.TENSOR_TYPE_TO_NP_TYPE[elem_type])
ws, outputs = c2_native_run_net(
init_net,
predict_net,
inputs)
for name in predict_net.external_output:
output = outputs[name]
elem_type = mapping.NP_TYPE_TO_TENSOR_TYPE[output.dtype]
shape = output.shape
value_info[name] = (elem_type, shape)
graph_def = GraphProto()
graph_def.name = predict_net.name
graph_def.initializer.extend(initializer)
# This is a mapping from Caffe2 names to ONNX names
graph_def.input.extend(
make_tensor_value_info(
name=name,
elem_type=value_info[name][0],
shape=value_info[name][1])
for name in predict_net.external_input)
dummy_name(cls._all_names_in_net(predict_net) |
cls._all_names_in_net(init_net))
for op in predict_net.op:
shapes = {}
for name in itertools.chain(op.input, op.output):
blob = ws.FetchBlob(name)
if hasattr(blob, 'shape'):
shapes[name] = blob.shape
nodes, const_tensors = cls.caffe2_op_to_onnx_node(op, shapes=shapes)
graph_def.node.extend(nodes)
graph_def.initializer.extend(const_tensors)
graph_def.input.extend([cls._extract_value_info(tensor) for tensor in const_tensors])
all_output = set(sum((list(node.output) for node in graph_def.node),
[init.name for init in graph_def.initializer]))
redundant_output = set(vi.name for vi in graph_def.output) - all_output
if redundant_output:
            logger.warning(
                'There are graph outputs not produced by any node or '
                'initializer: {}! Will drop them.'.format(', '.join(redundant_output)))
graph_def.output.extend(
make_tensor_value_info(
name=name,
elem_type=value_info[name][0],
shape=value_info[name][1])
for name in predict_net.external_output
if name in all_output)
checker.check_graph(graph_def)
return graph_def
@classmethod
def caffe2_init_net_to_initializer(cls, init_net):
initializer = []
for op in init_net.op:
assert not op.input
try:
data_type, field_name = {
'GivenTensorFill': (TensorProto.FLOAT, 'floats'),
'GivenTensorInt64Fill': (TensorProto.INT64, 'ints'),
'GivenTensorIntFill': (TensorProto.INT32, 'ints'),
'GivenTensorBoolFill': (TensorProto.BOOL, 'ints'),
'GivenTensorStringFill': (TensorProto.STRING, 'strings'),
}[op.type]
except KeyError:
raise RuntimeError(
"Can not translate init_net with operator '{}' "
"to initializer".format(op.type)
)
raw = (data_type != TensorProto.STRING)
args = {a.name: a for a in op.arg}
vals = getattr(args['values'], field_name)
if raw:
vals = np.asarray(
vals,
dtype=mapping.TENSOR_TYPE_TO_NP_TYPE[data_type]).tobytes()
initializer.append(make_tensor(
name=op.output[0],
data_type=data_type,
dims=args['shape'].ints,
vals=vals,
raw=raw,
))
return initializer
@classmethod
def _filter_fake_init(cls, init_net, value_info):
if init_net:
fake_inits = [op for op in init_net.op
if len(op.output) == 1 and op.output[0] in value_info and
re.match('GivenTensor.*Fill|ConstantFill', op.type)]
for fake_init in fake_inits:
init_net.op.remove(fake_init)
del fake_inits[:]
del fake_inits
@classmethod
def _ssa_rewrite(cls, net, init_net, value_info):
def ssa_name(name, version):
return '{}_{}'.format(name, version)
if init_net:
for op in init_net.op:
assert re.match('GivenTensor.*Fill', op.type), "type is {}, \n{}".format(op.type, op)
assert len(op.output) == 1
op.output[0] = ssa_name(op.output[0], 0)
init_net.external_input[:] = [ssa_name(name, 0)
for name in init_net.external_input]
init_net.external_output[:] = [ssa_name(name, 0)
for name in init_net.external_output]
if value_info:
ssa_value_info = {ssa_name(name, 0): value
for name, value in value_info.items()}
value_info.clear()
value_info.update(ssa_value_info)
net.external_input[:] = [ssa_name(name, 0)
for name in net.external_input]
ssa, blob_versions = caffe2_core.get_ssa(net)
assert len(net.op) == len(ssa)
for op, (versioned_inputs, versioned_outputs) in zip(net.op, ssa):
op.input[:] = [ssa_name(name, version)
for name, version in versioned_inputs]
op.output[:] = [ssa_name(name, version)
for name, version in versioned_outputs]
net.external_output[:] = [ssa_name(name, blob_versions[name])
for name in net.external_output]
@classmethod
def caffe2_net_to_onnx_model(cls, *args, **kwargs):
opset_id = OperatorSetIdProto()
opset_id.domain = '' # ONNX default domain
opset_id.version = cls.target_opset_version
model = make_model(cls.caffe2_net_to_onnx_graph(*args, **kwargs),
opset_imports=[opset_id], # current supported opset version
producer_name='onnx-caffe2', # producer name
)
checker.check_model(model)
return model
caffe2_net_to_onnx_graph = Caffe2Frontend.caffe2_net_to_onnx_graph
caffe2_net_to_onnx_model = Caffe2Frontend.caffe2_net_to_onnx_model
caffe2_init_net_to_initializer = Caffe2Frontend.caffe2_init_net_to_initializer
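# Hedged usage sketch: convert a Caffe2 predict/init net pair into an ONNX
# model; value_info maps every uninitialized external input to its
# (elem_type, shape) pair.
#
#   value_info = {'data': (TensorProto.FLOAT, (1, 3, 224, 224))}
#   onnx_model = caffe2_net_to_onnx_model(predict_net, init_net, value_info)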
|
## @package onnx
# Module caffe2.python.onnx.helper
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.proto import caffe2_pb2
from onnx.backend.base import namedtupledict
from caffe2.python.onnx.workspace import Workspace
import caffe2.python._import_c_extension as C
import io
import logging
import time
log = logging.getLogger(__name__)
def dummy_name(used_names=None):
if used_names is None:
return C.new_dummy_name()
else:
C.reset_dummy_name(set(used_names))
return None
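# Hedged example of the two calling conventions above: pass a set of used
# names to reset the generator, then call with no argument for fresh,
# collision-free names (the exact name format is internal to the C extension).
#
#   dummy_name(set())                  # reset
#   a, b = dummy_name(), dummy_name()
#   assert a != b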
def c2_native_run_op(op_def, inputs):
ws = Workspace()
if isinstance(inputs, dict):
for key, value in inputs.items():
ws.FeedBlob(key, value, op_def.device_option)
else:
assert(len(op_def.input) == len(inputs))
for key, value in zip(op_def.input, inputs):
ws.FeedBlob(key, value, op_def.device_option)
ws.RunOperatorOnce(op_def)
output_names = op_def.output
output_values = [ws.FetchBlob(name) for name in output_names]
return ws, namedtupledict('Outputs', output_names)(*output_values)
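# Hedged example (numpy assumed imported as np): run a single operator in a
# scratch workspace and read the outputs by name.
#
#   from caffe2.python import core
#   op = core.CreateOperator('Relu', ['X'], ['Y'])
#   _, out = c2_native_run_op(op, [np.array([-1., 2.], dtype=np.float32)])
#   # out.Y -> array([0., 2.], dtype=float32)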
def c2_native_run_net(init_net, predict_net, inputs):
ws = Workspace()
if init_net:
ws.RunNetOnce(init_net)
if isinstance(inputs, dict):
for key, value in inputs.items():
ws.FeedBlob(key, value, predict_net.device_option)
else:
uninitialized = [input_name
for input_name in predict_net.external_input
if not ws.HasBlob(input_name)]
if len(uninitialized) == len(inputs):
for key, value in zip(uninitialized, inputs):
ws.FeedBlob(key, value, predict_net.device_option)
else:
            # If everything is already initialized, we just feed the first
            # len(inputs) external inputs in order.
assert(len(inputs) <= len(predict_net.external_input))
for i in range(len(inputs)):
ws.FeedBlob(predict_net.external_input[i], inputs[i],
predict_net.device_option)
ws.RunNetOnce(predict_net)
output_names = predict_net.external_output
output_values = [ws.FetchBlob(name) for name in output_names]
return ws, namedtupledict('Outputs', output_names)(*output_values)
def load_caffe2_net(file):
net = caffe2_pb2.NetDef()
with open(file, "rb") as f:
net.ParseFromString(f.read())
return net
def save_caffe2_net(net, file, output_txt=False):
with open(file, "wb") as f:
f.write(net.SerializeToString())
if output_txt:
with open(file + "txt", "w") as f:
f.write(str(net))
def benchmark_caffe2_model(init_net, predict_net, warmup_iters=3, main_iters=10, layer_details=True):
'''
Run the benchmark net on the target model.
Return the execution time per iteration (millisecond).
'''
ws = Workspace()
if init_net:
ws.RunNetOnce(init_net)
ws.CreateNet(predict_net)
results = ws.BenchmarkNet(predict_net.name, warmup_iters, main_iters, layer_details)
del ws
return results[0]
def benchmark_pytorch_model(model, inputs, training=False, warmup_iters=3,
main_iters=10, verbose=False):
'''
Run the model several times, and measure the execution time.
Return the execution time per iteration (millisecond).
'''
for _i in range(warmup_iters):
model(*inputs)
total_pytorch_time = 0.0
for _i in range(main_iters):
ts = time.time()
model(*inputs)
te = time.time()
total_pytorch_time += te - ts
log.info("The PyTorch model execution time per iter is {} milliseconds, "
"{} iters per second.".format(total_pytorch_time / main_iters * 1000,
main_iters / total_pytorch_time))
return total_pytorch_time * 1000 / main_iters
|
## @package onnx
# Module caffe2.python.onnx.workspace
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import uuid
from caffe2.python import workspace
class Workspace(object):
"""
An object representing a Caffe2 workspace. It is a context manager,
so you can say 'with workspace:' to use the represented workspace
as your global workspace. It also supports every method supported
by caffe2.python.workspace, but instead of running these operations
in the global workspace, it runs them in the workspace represented
by this object. When this object goes dead, the workspace (and all
nets and blobs within it) are freed.
Why do we need this class? Caffe2's workspace model is very "global state"
oriented, in that there is always some ambient global workspace you are
working in which holds on to all of your networks and blobs. This class
makes it possible to work with workspaces more locally, and without
forgetting to deallocate everything in the end.
"""
def __init__(self):
# Caffe2 (apparently) doesn't provide any native method of generating
# a fresh, unused workspace, so we have to fake it by generating
# a unique ID and hoping it's not used already / will not be used
# directly in the future.
self.workspace_id = str(uuid.uuid4())
# A stack, so that the context manager is reentrant.
self.workspace_stack = []
def __getattr__(self, attr):
def f(*args, **kwargs):
with self:
return getattr(workspace, attr)(*args, **kwargs)
return f
def __enter__(self):
self.workspace_stack.append(workspace.CurrentWorkspace())
workspace.SwitchWorkspace(self.workspace_id, create_if_missing=True)
def __exit__(self, exc_type, exc_value, traceback):
w = self.workspace_stack.pop()
# Strictly speaking, create_if_missing here is unnecessary, since a user
# is not supposed to be allowed to destruct a workspace while we're in
# it. However, empirically, it has been observed that during abnormal
# shutdown, Caffe2 deletes its default workspace fairly early in the
# final calls to destructors. In this case, we may attempt to exit
# to a default workspace which no longer exists. create_if_missing=True
        # will (harmlessly) recreate the workspace before we finally quit.
workspace.SwitchWorkspace(w, create_if_missing=True)
def __del__(self):
# NB: This is a 'self' call because we need to switch into the workspace
# we want to reset before we actually reset it. A direct call to
# workspace.ResetWorkspace() will reset the ambient workspace, which
        # is not what we want.
self.ResetWorkspace()
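# Hedged usage sketch: run a few workspace calls in isolation; the previous
# global workspace is restored on exit and the scratch workspace is freed
# when the object is collected.
#
#   scratch = Workspace()
#   with scratch:
#       workspace.FeedBlob('x', some_array)
#   # or, equivalently, proxy calls directly: scratch.FeedBlob('x', some_array)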
|
## @package onnx
# Module caffe2.python.onnx.backend_rep
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.proto import caffe2_pb2
from onnx.backend.base import BackendRep, namedtupledict
class Caffe2Rep(BackendRep):
def __init__(self, init_net, predict_net, workspace, uninitialized):
super(Caffe2Rep, self).__init__()
self.init_net = init_net
self.predict_net = predict_net
self.workspace = workspace
        # The list of uninitialized external_inputs in the workspace; we need
        # this to pair the names with positionally-given sequence inputs.
self.uninitialized = uninitialized
self.nets_created = False
self.ran_init_net = False
@property
def _name_scope(self):
if self.predict_net.device_option.device_type == caffe2_pb2.CUDA:
return 'gpu_{}'.format(self.predict_net.device_option.cuda_gpu_id)
return ''
def run(self, inputs, **kwargs):
super(Caffe2Rep, self).run(inputs, **kwargs)
with self.workspace:
with core.DeviceScope(self.predict_net.device_option):
if isinstance(inputs, dict):
with core.NameScope(self._name_scope):
for key, value in inputs.items():
workspace.FeedBlob(key, value)
elif isinstance(inputs, list) or isinstance(inputs, tuple):
if len(self.uninitialized) != len(inputs):
raise RuntimeError('Expected {} values for uninitialized '
'graph inputs ({}), but got {}.'.format(
len(self.uninitialized),
', '.join(self.uninitialized),
len(inputs)))
for i, value in enumerate(inputs):
# namescope already baked into protobuf
workspace.FeedBlob(self.uninitialized[i], value)
else:
# single input
workspace.FeedBlob(self.uninitialized[0], inputs)
if not self.nets_created:
workspace.CreateNet(self.init_net)
workspace.CreateNet(self.predict_net)
self.nets_created = True
if not self.ran_init_net:
workspace.RunNet(self.init_net.name)
self.ran_init_net = True
workspace.RunNet(self.predict_net.name)
output_values = [workspace.FetchBlob(name)
for name in self.predict_net.external_output]
return namedtupledict('Outputs',
self.predict_net.external_output)(*output_values)
|
## @package onnx
# Module caffe2.python.onnx.bin.conversion
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
from caffe2.proto import caffe2_pb2
import click
import numpy as np
from onnx import checker, ModelProto
from caffe2.python.onnx.backend import Caffe2Backend as c2
import caffe2.python.onnx.frontend as c2_onnx
@click.command(
help='convert caffe2 net to onnx model',
context_settings={
'help_option_names': ['-h', '--help']
}
)
@click.argument('caffe2_net', type=click.File('rb'))
@click.option('--caffe2-net-name',
type=str,
help="Name of the caffe2 net")
@click.option('--caffe2-init-net',
type=click.File('rb'),
help="Path of the caffe2 init net pb file")
@click.option('--value-info',
type=str,
help='A json string providing the '
'type and shape information of the inputs')
@click.option('-o', '--output', required=True,
type=click.File('wb'),
help='Output path for the onnx model pb file')
def caffe2_to_onnx(caffe2_net,
caffe2_net_name,
caffe2_init_net,
value_info,
output):
c2_net_proto = caffe2_pb2.NetDef()
c2_net_proto.ParseFromString(caffe2_net.read())
if not c2_net_proto.name and not caffe2_net_name:
raise click.BadParameter(
'The input caffe2 net does not have name, '
'--caffe2-net-name must be provided')
c2_net_proto.name = caffe2_net_name or c2_net_proto.name
if caffe2_init_net:
c2_init_net_proto = caffe2_pb2.NetDef()
c2_init_net_proto.ParseFromString(caffe2_init_net.read())
c2_init_net_proto.name = '{}_init'.format(caffe2_net_name)
else:
c2_init_net_proto = None
if value_info:
value_info = json.loads(value_info)
onnx_model = c2_onnx.caffe2_net_to_onnx_model(
predict_net=c2_net_proto,
init_net=c2_init_net_proto,
value_info=value_info)
output.write(onnx_model.SerializeToString())
@click.command(
help='convert onnx model to caffe2 net',
context_settings={
'help_option_names': ['-h', '--help']
}
)
@click.argument('onnx_model', type=click.File('rb'))
@click.option('-o', '--output', required=True,
type=click.File('wb'),
help='Output path for the caffe2 net file')
@click.option('--init-net-output',
required=True,
type=click.File('wb'),
help='Output path for the caffe2 init net file')
def onnx_to_caffe2(onnx_model, output, init_net_output):
onnx_model_proto = ModelProto()
onnx_model_proto.ParseFromString(onnx_model.read())
init_net, predict_net = c2.onnx_graph_to_caffe2_net(onnx_model_proto)
init_net_output.write(init_net.SerializeToString())
output.write(predict_net.SerializeToString())
|
## @package onnx
# Module caffe2.python.onnx.tests.onnx_backend_test
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import unittest
import onnx.backend.test
import caffe2.python.onnx.backend as c2
# This is a pytest magic variable to load extra plugins
pytest_plugins = 'onnx.backend.test.report',
backend_test = onnx.backend.test.BackendTest(c2, __name__)
backend_test.exclude(r'(test_hardsigmoid' # Does not support Hardsigmoid.
'|test_mean|test_hardmax' # Does not support Mean and Hardmax.
'|test_cast.*FLOAT16.*' # Does not support Cast on Float16.
'|test_depthtospace.*' # Does not support DepthToSpace.
'|test_.*pool_.*same.*)') # Does not support pool same.
# Skip vgg to speed up CI
if 'JENKINS_URL' in os.environ:
backend_test.exclude(r'(test_vgg19|test_vgg)')
# import all test cases at global scope to make them visible to python.unittest
globals().update(backend_test
.enable_report()
.test_cases)
if __name__ == '__main__':
unittest.main()
|
## @package onnx
# Module caffe2.python.onnx.tests.test_utils
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import numpy as np
class TestCase(unittest.TestCase):
def setUp(self):
np.random.seed(seed=0)
def assertSameOutputs(self, outputs1, outputs2, decimal=7):
self.assertEqual(len(outputs1), len(outputs2))
for o1, o2 in zip(outputs1, outputs2):
np.testing.assert_almost_equal(o1, o2, decimal=decimal)
    def add_test_case(self, name, test_func):
if not name.startswith('test_'):
raise ValueError('Test name must start with test_: {}'.format(name))
if hasattr(self, name):
raise ValueError('Duplicated test name: {}'.format(name))
setattr(self, name, test_func)
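    # A minimal usage sketch (hypothetical test function): inside a test,
    # e.g.
    #
    #   self.add_test_case('test_generated', lambda: None)
    #
    # Names must start with 'test_' so unittest discovery finds them, and
    # duplicate names are rejected rather than silently shadowing a test.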
|
## @package onnx
# Module caffe2.python.onnx.tests.helper_test
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from caffe2.python.onnx.helper import dummy_name
from caffe2.python.onnx.tests.test_utils import TestCase
class TestCaffe2Basic(TestCase):
def test_dummy_name(self):
dummy_name([])
names_1 = [dummy_name() for _ in range(3)]
dummy_name([])
names_2 = [dummy_name() for _ in range(3)]
self.assertEqual(names_1, names_2)
dummy_name(names_1)
names_3 = [dummy_name() for _ in range(3)]
self.assertFalse(set(names_1) & set(names_3))
dummy_name(set(names_1))
names_4 = [dummy_name() for _ in range(3)]
self.assertFalse(set(names_1) & set(names_4))
if __name__ == '__main__':
unittest.main()
|
## @package onnx
# Module caffe2.python.onnx.tests.ssa_test
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import onnx
import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core
from onnx import helper, TensorProto
import caffe2.python.onnx.frontend as c2_onnx
from caffe2.python.onnx.helper import c2_native_run_net
from caffe2.python.onnx.tests.test_utils import TestCase
class TestFrontendSSAConversion(TestCase):
def test_ssa(self):
X = np.random.randn(4, 2).astype(np.float32)
W = np.random.randn(3, 2).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
s = np.random.randn(1).astype(np.float32)
np_result = X.dot(W.transpose()) + b + s
net = caffe2_pb2.NetDef()
net.name = 'test-ssa'
net.external_input[:] = ['W', 'X', 'b', 's']
net.op.extend([
core.CreateOperator(
'FC',
['X', 'W', 'b'],
['Y']
),
core.CreateOperator(
'Add',
['Y', 's'],
['Y'],
broadcast=True,
)
])
net.external_output[:] = ['Y']
init_net = caffe2_pb2.NetDef()
init_net.name = 'test-ssa-init'
init_net.op.extend([
core.CreateOperator(
'GivenTensorFill',
[],
['W'],
values=W,
shape=W.shape,
),
core.CreateOperator(
'GivenTensorFill',
[],
['b'],
values=b,
shape=b.shape,
),
core.CreateOperator(
'GivenTensorFill',
[],
['s'],
values=s,
shape=s.shape,
)
])
init_net.external_output[:] = ['W', 'b', 's']
_, orig_output = c2_native_run_net(
predict_net=net,
init_net=init_net,
inputs=[X])
value_info = {'X': (TensorProto.FLOAT, X.shape)}
c2_onnx.Caffe2Frontend._ssa_rewrite(
net,
init_net,
value_info)
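        # After the rewrite the net is in SSA form: every blob is written
        # exactly once. External inputs get a _0 version suffix, and each
        # re-definition of Y bumps its version (Y -> Y_1 -> Y_2), which the
        # assertions below verify.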
self.assertEqual(net.external_input, ['W_0', 'X_0', 'b_0', 's_0'])
self.assertEqual(net.op[0].input, ['X_0', 'W_0', 'b_0'])
self.assertEqual(net.op[0].output, ['Y_1'])
self.assertEqual(net.op[1].input, ['Y_1', 's_0'])
self.assertEqual(net.op[1].output, ['Y_2'])
self.assertEqual(net.external_output, ['Y_2'])
self.assertEqual(init_net.external_input, [])
self.assertEqual(init_net.op[0].input, [])
self.assertEqual(init_net.op[0].output, ['W_0'])
self.assertEqual(init_net.op[1].input, [])
self.assertEqual(init_net.op[1].output, ['b_0'])
self.assertEqual(init_net.op[2].input, [])
self.assertEqual(init_net.op[2].output, ['s_0'])
self.assertEqual(init_net.external_output, ['W_0', 'b_0', 's_0'])
self.assertEqual(value_info, {'X_0': (TensorProto.FLOAT, X.shape)})
_, ssa_output = c2_native_run_net(
predict_net=net,
init_net=init_net,
inputs=[X])
self.assertSameOutputs(ssa_output, orig_output)
self.assertSameOutputs(ssa_output, [np_result])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
|
## @package onnx
# Module caffe2.python.onnx.tests.conversion_test
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import tempfile
import textwrap
import traceback
import unittest
import zipfile
from caffe2.proto import caffe2_pb2
from caffe2.python import brew, core
from caffe2.python.model_helper import ModelHelper
from click.testing import CliRunner
import numpy as np
from onnx import helper, ModelProto, TensorProto
from caffe2.python.onnx.helper import dummy_name, c2_native_run_net
from caffe2.python.onnx.bin.conversion import caffe2_to_onnx, onnx_to_caffe2
import caffe2.python.onnx.backend as c2
from caffe2.python.onnx.tests.test_utils import TestCase
class TestConversion(TestCase):
def _run_command(self, cmd, *args, **kwargs):
runner = CliRunner()
result = runner.invoke(cmd, *args, **kwargs)
self.assertEqual(result.exit_code, 0, textwrap.dedent('''
Command exited with non-zero exit code:
output: {}
exception: {}
exc_info: {}
'''.format(result.output,
result.exception,
traceback.format_exception(*result.exc_info))))
return result
def test_caffe2_to_onnx(self):
caffe2_net = tempfile.NamedTemporaryFile()
caffe2_init_net = tempfile.NamedTemporaryFile()
output = tempfile.NamedTemporaryFile()
model = ModelHelper(name='caffe2-to-onnx-test')
brew.relu(model, ["X"], "Y")
caffe2_net.write(model.net.Proto().SerializeToString())
caffe2_net.flush()
init_model = ModelHelper(name='caffe2-to-onnx-init-test')
init_model.net.GivenTensorFill([], 'X', shape=[2, 2],
values=np.zeros((2, 2)).flatten().astype(float))
caffe2_init_net.write(init_model.net.Proto().SerializeToString())
caffe2_init_net.flush()
result = self._run_command(
caffe2_to_onnx, [
caffe2_net.name,
'--caffe2-init-net', caffe2_init_net.name,
'--output', output.name,
],
catch_exceptions=False,
)
onnx_model = ModelProto()
onnx_model.ParseFromString(output.read())
self.assertEqual(len(onnx_model.graph.node), 1)
self.assertEqual(onnx_model.graph.node[0].op_type, 'Relu')
self.assertEqual(len(onnx_model.graph.initializer), 1)
self.assertEqual(onnx_model.graph.initializer[0].name, onnx_model.graph.input[0].name)
def test_caffe2_to_onnx_value_info(self):
caffe2_net = tempfile.NamedTemporaryFile()
output = tempfile.NamedTemporaryFile()
model = ModelHelper(name='caffe2-to-onnx-test')
brew.relu(model, ["X"], "Y")
caffe2_net.write(model.net.Proto().SerializeToString())
caffe2_net.flush()
args = [caffe2_net.name, '--output', output.name]
self.assertRaisesRegexp(Exception,
'value info',
self._run_command, caffe2_to_onnx, args)
args.extend([
'--value-info',
json.dumps({
'X': (TensorProto.FLOAT, (2, 2)),
})])
result = self._run_command(caffe2_to_onnx, args)
onnx_model = ModelProto()
onnx_model.ParseFromString(output.read())
self.assertEqual(len(onnx_model.graph.node), 1)
self.assertEqual(onnx_model.graph.node[0].op_type, 'Relu')
self.assertEqual(len(onnx_model.graph.initializer), 0)
def test_onnx_to_caffe2(self):
onnx_model = tempfile.NamedTemporaryFile()
output = tempfile.NamedTemporaryFile()
init_net_output = tempfile.NamedTemporaryFile()
node_def = helper.make_node(
"Mul", ["X", "W"], ["Y"])
graph_def = helper.make_graph(
[node_def],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, (3, 2))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
initializer=[helper.make_tensor("W",
TensorProto.FLOAT,
[3, 2],
np.zeros((3, 2)).flatten().astype(float))])
model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
onnx_model.write(model_def.SerializeToString())
onnx_model.flush()
result = self._run_command(
onnx_to_caffe2, [
onnx_model.name,
'--output', output.name,
'--init-net-output', init_net_output.name,
])
caffe2_net = caffe2_pb2.NetDef()
caffe2_net.ParseFromString(output.read())
self.assertEqual(len(caffe2_net.op), 1)
self.assertEqual(caffe2_net.op[0].type, 'Mul')
caffe2_init_net = caffe2_pb2.NetDef()
caffe2_init_net.ParseFromString(init_net_output.read())
self.assertEqual(len(caffe2_init_net.op), 1)
self.assertEqual(set(sum([list(init_op.output)
for init_op in caffe2_init_net.op], [])),
{'W'})
def test_onnx_to_caffe2_zipfile(self):
buf = tempfile.NamedTemporaryFile()
onnx_model = zipfile.ZipFile(buf, 'w')
output = tempfile.NamedTemporaryFile()
init_net_output = tempfile.NamedTemporaryFile()
node_def = helper.make_node(
"MatMul", ["X", "W"], ["Y"])
X = np.random.rand(2, 3).astype(np.float32)
W = np.random.rand(3, 2).flatten().astype(np.float32)
graph_def = helper.make_graph(
[node_def],
"test",
[helper.make_tensor_value_info("X", TensorProto.FLOAT, (2, 3)),
helper.make_tensor_value_info("W", TensorProto.FLOAT, (3, 2))],
[helper.make_tensor_value_info("Y", TensorProto.FLOAT, (2, 2))],
initializer=[helper.make_tensor("W",
TensorProto.FLOAT,
[3, 2],
b'__EXTERNAL',
raw=True)])
model_def = helper.make_model(graph_def, producer_name='onnx-to-caffe2-test')
onnx_model.writestr('__MODEL_PROTO', model_def.SerializeToString())
onnx_model.writestr('W', W.tobytes())
onnx_model.close()
W = W.reshape((3, 2))
Y_expect = np.matmul(X, W)
c2_model = c2.prepare_zip_archive(buf)
Y = c2_model.run(X).Y
np.testing.assert_allclose(Y, Y_expect)
    # TODO: investigate why this is failing after changing the Reshape
    # operator from taking the new shape as an attribute to taking it
    # as an input.
    @unittest.skip('Reshape was changed to take the new shape as an input; '
                   'see the TODO above')
def test_convert_end2end(self):
predict_net_f = tempfile.NamedTemporaryFile()
init_net_f = tempfile.NamedTemporaryFile()
onnx_model_f = tempfile.NamedTemporaryFile()
x = 'X'
w = 'W'
b = 'b'
y = 'Y'
predict_net = caffe2_pb2.NetDef()
predict_net.name = 'test-convert-end2end'
predict_net.external_input[:] = [x, w, b]
predict_net.external_output[:] = [y]
predict_net.op.extend([
core.CreateOperator(
'FC',
inputs=[x, w, b],
outputs=[y],
axis=2,
),
])
predict_net_f.write(predict_net.SerializeToString())
predict_net_f.flush()
init_net = caffe2_pb2.NetDef()
init_net.name = 'test-convert-end2end-init'
init_net.external_output[:] = [w, b]
x_val = np.random.randn(1, 3, 2).astype(np.float32)
w_val = np.random.randn(4, 2).astype(np.float32)
b_val = np.random.randn(4).astype(np.float32)
init_net.op.extend([
core.CreateOperator(
'GivenTensorFill',
[],
[w],
values=w_val,
shape=w_val.shape,
),
core.CreateOperator(
'GivenTensorFill',
[],
[b],
values=b_val,
shape=b_val.shape,
),
])
init_net_f.write(init_net.SerializeToString())
init_net_f.flush()
y_val = np.matmul(x_val, w_val.transpose()) + b_val
for _ in range(5):
self._run_command(
caffe2_to_onnx, [
predict_net_f.name,
'--caffe2-init-net', init_net_f.name,
'--output', onnx_model_f.name,
'--value-info',
json.dumps({
x: (TensorProto.FLOAT, (1, 3, 2)),
}),
],
catch_exceptions=False,
)
onnx_model_f.seek(0)
onnx_model = ModelProto()
onnx_model.ParseFromString(onnx_model_f.read())
np.testing.assert_almost_equal(
c2.run_model(
onnx_model, {onnx_model.graph.input[0].name: x_val}),
[y_val])
self._run_command(
onnx_to_caffe2, [
onnx_model_f.name,
'--output', predict_net_f.name,
'--init-net-output', init_net_f.name,
])
predict_net_f.seek(0)
predict_net = caffe2_pb2.NetDef()
predict_net.ParseFromString(predict_net_f.read())
init_net_f.seek(0)
init_net = caffe2_pb2.NetDef()
init_net.ParseFromString(init_net_f.read())
x = predict_net.external_input[0]
np.testing.assert_almost_equal(c2_native_run_net(init_net=init_net,
predict_net=predict_net,
inputs={x: x_val})[1],
[y_val])
|
## @package onnx
# Module caffe2.python.onnx.tests.c2_ref_test
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import json
import os
import unittest
from caffe2.python import core
from caffe2.proto import caffe2_pb2
import onnx
from onnx.helper import make_node, make_graph, make_tensor, make_tensor_value_info, make_model
from caffe2.python.onnx.helper import c2_native_run_net, c2_native_run_op
from onnx import defs, mapping
import caffe2.python.onnx.frontend as c2_onnx
import caffe2.python.onnx.backend as c2
import numpy as np
from caffe2.python.models.download import downloadFromURLToFile, getURLFromName, deleteDirectory
from caffe2.python.onnx.helper import dummy_name
from caffe2.python.onnx.tests.test_utils import TestCase
class TestCaffe2Basic(TestCase):
def test_dummy_name(self):
n1 = dummy_name()
n2 = dummy_name()
assert n1 != n2, "Got same names in different calls: {}".format(n1)
def test_relu_graph(self):
X = np.random.randn(3, 2).astype(np.float32)
Y_ref = np.clip(X, 0, np.inf)
node_def = make_node(
"Relu", ["X"], ["Y"])
output = c2.run_node(
node_def, {"X": X})
np.testing.assert_almost_equal(output.Y, Y_ref)
graph_def = make_graph(
[node_def],
name="test",
inputs=[make_tensor_value_info("X", onnx.TensorProto.FLOAT, [3, 2])],
outputs=[make_tensor_value_info("Y", onnx.TensorProto.FLOAT, [3, 2])])
c2_rep = c2.prepare(make_model(graph_def, producer_name='caffe2-ref-test'))
output = c2_rep.run(X)
np.testing.assert_almost_equal(output.Y, Y_ref)
def test_initializer(self):
X = np.array([[1, 2], [3, 4]]).astype(np.float32)
Y = np.array([[1, 2], [3, 4]]).astype(np.float32)
weight = np.array([[1, 0], [0, 1]])
graph_def = make_graph(
[make_node("Add", ["X", "Y"], ["Z0"]),
make_node("Cast", ["Z0"], ["Z"], to="float"),
make_node("Mul", ["Z", "weight"], ["W0"]),
make_node("Tanh", ["W0"], ["W1"]),
make_node("Sigmoid", ["W1"], ["W2"]),
make_node("Scale", ["W2"], ["W3"], scale=-1.0)],
name="test_initializer",
inputs=[
make_tensor_value_info("X", onnx.TensorProto.FLOAT, (2, 2)),
make_tensor_value_info("Y", onnx.TensorProto.FLOAT, (2, 2)),
make_tensor_value_info("weight", onnx.TensorProto.FLOAT, (2, 2)),
],
outputs=[
make_tensor_value_info("W3", onnx.TensorProto.FLOAT, (2, 2))
],
initializer=[make_tensor("weight",
onnx.TensorProto.FLOAT,
[2, 2],
weight.flatten().astype(float))]
)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
W_ref = -sigmoid(np.tanh((X + Y) * weight))
c2_rep = c2.prepare(make_model(graph_def, producer_name='caffe2-ref-test'))
output = c2_rep.run({"X": X, "Y": Y})
np.testing.assert_almost_equal(output["W3"], W_ref)
def test_gemm(self):
# simple
A = np.random.randn(3, 2).astype(np.float32)
B = np.random.randn(2, 4).astype(np.float32)
C = np.random.randn(3, 4).astype(np.float32)
node_def = make_node(
'Gemm',
['A', 'B', 'C'],
["Y"])
output = c2.run_node(node_def, [A, B, C])
np.testing.assert_almost_equal(output["Y"], np.dot(A, B) + C)
# transA
A = np.transpose(A)
node_def = make_node(
'Gemm',
['A', 'B', 'C'],
["Y"],
transA=True)
output = c2.run_node(node_def, [A, B, C])
np.testing.assert_almost_equal(
output["Y"],
np.dot(np.transpose(A), B) + C)
# revert A
A = np.transpose(A)
# transB
B = np.transpose(B)
node_def = make_node(
'Gemm',
['A', 'B', 'C'],
["Y"],
transB=True)
output = c2.run_node(node_def, [A, B, C])
np.testing.assert_almost_equal(
output["Y"],
np.dot(A, np.transpose(B)) + C)
        # revert B
B = np.transpose(B)
# scale
alpha = np.random.random()
beta = np.random.random()
node_def = make_node(
'Gemm',
['A', 'B', 'C'],
["Y"],
alpha=alpha,
beta=beta)
output = c2.run_node(node_def, [A, B, C])
np.testing.assert_almost_equal(
output["Y"],
alpha * np.dot(A, B) + beta * C)
# broadcast
C = np.random.randn(4).astype(np.float32)
node_def = make_node(
'Gemm',
['A', 'B', 'C'],
["Y"],
alpha=alpha,
beta=beta,
broadcast=1)
output = c2.run_node(node_def, [A, B, C])
np.testing.assert_almost_equal(
output["Y"],
alpha * np.dot(A, B) + beta * C)
def test_tensor_filling_ops(self):
for dtype in [
onnx.TensorProto.FLOAT,
onnx.TensorProto.DOUBLE,
onnx.TensorProto.BOOL,
onnx.TensorProto.INT8,
onnx.TensorProto.INT16,
onnx.TensorProto.INT32,
onnx.TensorProto.INT64,
onnx.TensorProto.UINT8,
onnx.TensorProto.UINT16,
onnx.TensorProto.UINT32,
]:
shape = (1, 2, 3)
vals = np.random.randn(*shape)
if dtype != onnx.TensorProto.BOOL:
vals *= 5
vals = vals.astype(
mapping.TENSOR_TYPE_TO_NP_TYPE[dtype])
tensor = make_tensor(
name='test-tensor-{}'.format(dtype),
data_type=dtype,
dims=[1, 2, 3],
vals=vals.flatten().tolist(),
)
op = c2.Caffe2Backend._create_tensor_filling_op(tensor)
self.assertEqual(len(op.input), 0)
self.assertEqual(op.output, [tensor.name])
ws, output = c2_native_run_op(op, inputs=[])
self.assertEqual(len(output), 1)
np.testing.assert_almost_equal(output[0], vals)
np.testing.assert_almost_equal(ws.FetchBlob(op.output[0]), vals)
def test_slice(self):
X = np.random.randn(1, 2, 3).astype(np.float32)
starts = np.array([0, 1, 0], dtype=np.int32)
ends = np.array([-1, 2, 3], dtype=np.int32)
predict_net = caffe2_pb2.NetDef()
predict_net.name = 'test-slice-net'
predict_net.external_input[:] = ['X']
predict_net.external_output[:] = ['Y']
predict_net.op.extend([
core.CreateOperator(
'Slice',
inputs=['X'],
outputs=['Y'],
starts=starts,
ends=ends,
),
])
ws, (Y,) = c2_native_run_net(
init_net=None,
predict_net=predict_net,
inputs=[X])
onnx_model = c2_onnx.caffe2_net_to_onnx_model(
predict_net=predict_net,
value_info={
'X': (onnx.mapping.NP_TYPE_TO_TENSOR_TYPE[X.dtype], X.shape)
})
Y, = c2.run_model(onnx_model, inputs=[X])
np.testing.assert_almost_equal(Y, X[:, 1:2, :])
class TestCaffe2End2End(TestCase):
def _model_dir(self, model):
caffe2_home = os.path.expanduser(os.getenv('ONNX_HOME', '~/.caffe2'))
models_dir = os.getenv('ONNX_MODELS', os.path.join(caffe2_home, 'models'))
return os.path.join(models_dir, model)
def _test_net(self,
net_name,
input_blob_dims=(1, 3, 224, 224),
decimal=7):
np.random.seed(seed=0)
model_dir = self._model_dir(net_name)
if not os.path.exists(model_dir):
self._download(net_name)
c2_predict_pb = os.path.join(model_dir, 'predict_net.pb')
c2_predict_net = caffe2_pb2.NetDef()
with open(c2_predict_pb, 'rb') as f:
c2_predict_net.ParseFromString(f.read())
c2_predict_net.name = net_name
c2_init_pb = os.path.join(model_dir, 'init_net.pb')
c2_init_net = caffe2_pb2.NetDef()
with open(c2_init_pb, 'rb') as f:
c2_init_net.ParseFromString(f.read())
c2_init_net.name = net_name + '_init'
n, c, h, w = input_blob_dims
data = np.random.randn(n, c, h, w).astype(np.float32)
inputs = [data]
_, c2_outputs = c2_native_run_net(c2_init_net, c2_predict_net, inputs)
del _
model = c2_onnx.caffe2_net_to_onnx_model(
predict_net=c2_predict_net,
init_net=c2_init_net,
value_info=json.load(open(os.path.join(model_dir, 'value_info.json'))))
c2_ir = c2.prepare(model)
onnx_outputs = c2_ir.run(inputs)
self.assertSameOutputs(c2_outputs, onnx_outputs, decimal=decimal)
def _download(self, model):
model_dir = self._model_dir(model)
assert not os.path.exists(model_dir)
os.makedirs(model_dir)
for f in ['predict_net.pb', 'init_net.pb', 'value_info.json']:
url = getURLFromName(model, f)
dest = os.path.join(model_dir, f)
try:
try:
downloadFromURLToFile(url, dest,
show_progress=False)
except TypeError:
# show_progress not supported prior to
# Caffe2 78c014e752a374d905ecfb465d44fa16e02a28f1
# (Sep 17, 2017)
downloadFromURLToFile(url, dest)
except Exception as e:
print("Abort: {reason}".format(reason=e))
print("Cleaning up...")
deleteDirectory(model_dir)
exit(1)
def test_alexnet(self):
self._test_net('bvlc_alexnet', decimal=4)
def test_resnet50(self):
self._test_net('resnet50')
@unittest.skipIf(
os.environ.get('JENKINS_URL'),
'Taking too long to download!')
def test_vgg16(self):
self._test_net('vgg16')
@unittest.skipIf(
os.environ.get('JENKINS_URL'),
'Running vgg19 on Travis with Python 2 keeps getting OOM!')
def test_vgg19(self):
self._test_net('vgg19')
def test_inception_v1(self):
self._test_net('inception_v1', decimal=2)
def test_inception_v2(self):
self._test_net('inception_v2')
@unittest.skip('Need to add support for ConstantFill operator')
def test_squeezenet(self):
self._test_net('squeezenet')
def test_shufflenet(self):
self._test_net('shufflenet')
def test_densenet121(self):
self._test_net('densenet121')
def test_bvlc_googlenet(self):
self._test_net('bvlc_googlenet')
def test_bvlc_reference_caffenet(self):
self._test_net('bvlc_reference_caffenet')
def test_bvlc_reference_rcnn_ilsvrc13(self):
self._test_net('bvlc_reference_rcnn_ilsvrc13')
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import inspect
import numpy as np
from hypothesis import assume, given, settings
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestMatMul(hu.HypothesisTestCase):
@given(
M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
trans_a=st.booleans(),
trans_b=st.booleans(),
**hu.gcs
)
def test_matmul(self, M, K, N, trans_a, trans_b, gc, dc):
X = np.random.rand(M, K).astype(np.float32) - 0.5
if trans_a:
X = X.transpose()
Y = np.random.rand(K, N).astype(np.float32) - 0.5
if trans_b:
Y = Y.transpose()
op = core.CreateOperator(
'MatMul', ['X', 'Y'], 'out', trans_a=trans_a, trans_b=trans_b
)
def matmul_ref(X, Y, trans_a, trans_b):
XX = X.transpose() if trans_a else X
YY = Y.transpose() if trans_b else Y
return (XX.dot(YY), )
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, Y, trans_a, trans_b], matmul_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X, Y], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
# Gradient check wrt Y
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
@given(
M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
axis_a=st.sampled_from([-3, -2, -1, 1, 2, 3]),
axis_b=st.sampled_from([-3, -2, -1, 1, 2, 3]),
trans_a=st.booleans(),
trans_b=st.booleans(),
**hu.gcs
)
def test_matmul_axis(
self, M, K, N, axis_a, axis_b, trans_a, trans_b, gc, dc
):
X = np.random.rand(M, K).astype(np.float32) - 0.5
if trans_a:
X = X.transpose()
shape_x = [X.shape[0], 1, 1, 1]
shape_x[axis_a] = X.shape[1]
X = X.reshape(*shape_x)
Y = np.random.rand(K, N).astype(np.float32) - 0.5
if trans_b:
Y = Y.transpose()
shape_y = [Y.shape[0], 1, 1, 1]
shape_y[axis_b] = Y.shape[1]
Y = Y.reshape(*shape_y)
op = core.CreateOperator(
'MatMul', ['X', 'Y'],
'out',
axis_a=axis_a,
axis_b=axis_b,
trans_a=trans_a,
trans_b=trans_b
)
def size_to_dim(X, axis):
dim = 1
for i in range(axis):
dim *= X.shape[i]
return dim
def size_from_dim(X, axis):
dim = 1
for i in range(axis, X.ndim):
dim *= X.shape[i]
return dim
def reshape(X, axis):
dim_0, dim_1 = size_to_dim(X, axis), size_from_dim(X, axis)
return X.reshape(dim_0, dim_1)
def canonical_axis(axis, ndim):
return ndim + axis if axis < 0 else axis
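        # For example (a sketch): with X of shape (2, 3, 4, 5) and axis=2,
        # size_to_dim gives 2*3 == 6 and size_from_dim gives 4*5 == 20, so
        # the operator treats X as a 6x20 matrix -- Caffe2's canonical 2D
        # view of an N-D tensor around an axis.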
def matmul_ref(X, Y, axis_a, axis_b, trans_a, trans_b):
can_axis_a = canonical_axis(axis_a, X.ndim)
can_axis_b = canonical_axis(axis_b, Y.ndim)
X, Y = reshape(X, can_axis_a), reshape(Y, can_axis_b)
XX = X.transpose() if trans_a else X
YY = Y.transpose() if trans_b else Y
return (XX.dot(YY), )
# Check against numpy reference
self.assertReferenceChecks(
gc, op, [X, Y, axis_a, axis_b, trans_a, trans_b], matmul_ref
)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X, Y], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X, Y], 0, [0])
# Gradient check wrt Y
self.assertGradientChecks(gc, op, [X, Y], 1, [0])
class TestBatchMatMul(hu.HypothesisTestCase):
@settings(max_examples=30)
@given(
C=st.integers(min_value=0, max_value=3), # number of batch dims
M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
trans_a=st.booleans(),
trans_b=st.booleans(),
dtype=st.sampled_from([np.float32, np.float16]),
**hu.gcs
)
def test_batch_matmul(self, C, M, K, N, trans_a, trans_b, dtype, gc, dc):
if dtype == np.float16:
# fp16 is only supported with CUDA
assume(gc.device_type == caffe2_pb2.CUDA)
dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]
batch_dims = np.random.randint(
low=1,
high=3,
size=C,
dtype=np.int64).tolist()
X = np.random.rand(*(batch_dims + [M, K])).astype(dtype) - 0.5
if trans_a:
X = X.swapaxes(-1, -2)
Y = np.random.rand(*(batch_dims + [K, N])).astype(dtype) - 0.5
if trans_b:
Y = Y.swapaxes(-1, -2)
op = core.CreateOperator(
'BatchMatMul', ['X', 'Y'], 'out', trans_a=trans_a, trans_b=trans_b
)
def matmul_ref(X, Y, trans_a, trans_b, dtype):
XX = (X.swapaxes(-1, -2) if trans_a else X).astype(np.float32)
YY = (Y.swapaxes(-1, -2) if trans_b else Y).astype(np.float32)
return (np.matmul(XX, YY).astype(dtype),)
# relaxing the "threshold" for fp16 to 150x of the default
def relax_fp16_check(check_func, *args, **kwargs):
# inspect the default "threshold" value in check_func
argspec = inspect.getargspec(check_func)
threshold = argspec.defaults[
argspec.args.index('threshold') -
(len(argspec.args) - len(argspec.defaults))]
if dtype == np.float16:
threshold = 150 * threshold
check_func(*args, threshold=threshold, **kwargs)
# Check against numpy reference
relax_fp16_check(self.assertReferenceChecks, gc, op, [X, Y, trans_a, trans_b, dtype], matmul_ref)
# Check over multiple devices
relax_fp16_check(self.assertDeviceChecks, dc, op, [X, Y], [0])
# Gradient check wrt X
relax_fp16_check(self.assertGradientChecks, gc, op, [X, Y], 0, [0])
# Gradient check wrt Y
relax_fp16_check(self.assertGradientChecks, gc, op, [X, Y], 1, [0])
def _test_batch_matmul_with_broadcast_common(
self,
X,
Y,
dtype,
gc,
dc,
trans_a=None,
trans_b=None,
):
if trans_a is not None and trans_b is not None:
op = core.CreateOperator(
'BatchMatMul', ['X', 'Y'], 'out', trans_a=trans_a, trans_b=trans_b, broadcast=1
)
else:
op = core.CreateOperator(
'BatchMatMul', ['X', 'Y'], 'out', broadcast=1
)
def matmul_ref(X, Y, trans_a, trans_b, dtype):
XX = (X.swapaxes(-1, -2) if trans_a else X).astype(np.float32)
YY = (Y.swapaxes(-1, -2) if trans_b else Y).astype(np.float32)
return (np.matmul(XX, YY).astype(dtype),)
# Check against numpy reference
self.assertReferenceChecks(gc, op, [X, Y, trans_a, trans_b, dtype], matmul_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X, Y], [0])
@given(
C_1=st.integers(min_value=0, max_value=3), # number of batch dims
C_2=st.integers(min_value=0, max_value=3),
M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
trans_a=st.booleans(),
trans_b=st.booleans(),
**hu.gcs
)
def test_numpy_batch_matmul(self, C_1, C_2, M, K, N, trans_a, trans_b, gc, dc):
np.set_printoptions(threshold=np.nan)
dtype = np.float32
batch_dims = np.random.randint(
low=0,
high=3,
size=max(C_1, C_2),
dtype=np.int64).tolist()
lbd = len(batch_dims)
X = np.random.rand(*(batch_dims[lbd - C_1:] + [M, K])).astype(dtype) - 0.5
if trans_a:
X = X.swapaxes(-1, -2)
Y = np.random.rand(*(batch_dims[lbd - C_2:] + [K, N])).astype(dtype) - 0.5
if trans_b:
Y = Y.swapaxes(-1, -2)
self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc, trans_a, trans_b)
@settings(max_examples=30)
@given(
K=st.integers(min_value=1, max_value=10),
**hu.gcs
)
def test_numpy_batch_matmul_1d(self, K, gc, dc):
np.set_printoptions(threshold=np.nan)
dtype = np.float32
X = np.random.rand(K).astype(dtype) - 0.5
# TODO: test trans_a and trans_b
Y = np.random.rand(K).astype(dtype) - 0.5
self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)
@settings(max_examples=30)
@given(
K=st.integers(min_value=1, max_value=10),
N=st.integers(min_value=1, max_value=10),
**hu.gcs
)
def test_numpy_batch_matmul_1d_2d(self, K, N, gc, dc):
np.set_printoptions(threshold=np.nan)
dtype = np.float32
X = np.random.rand(K).astype(dtype) - 0.5
# TODO: test trans_a and trans_b
Y = np.random.rand(*[K, N]).astype(dtype) - 0.5
self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)
@settings(max_examples=30)
@given(
M=st.integers(min_value=1, max_value=10),
K=st.integers(min_value=1, max_value=10),
**hu.gcs
)
def test_numpy_batch_matmul_2d_1d(self, M, K, gc, dc):
np.set_printoptions(threshold=np.nan)
dtype = np.float32
X = np.random.rand(*[M, K]).astype(dtype) - 0.5
# TODO: test trans_a and trans_b
Y = np.random.rand(K).astype(dtype) - 0.5
self._test_batch_matmul_with_broadcast_common(X, Y, dtype, gc, dc)
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
import numpy.testing as npt
from hypothesis import given
import hypothesis.strategies as st
import functools
def primefac(n):
ret = []
divisor = 2
while divisor * divisor <= n:
while (n % divisor) == 0:
ret.append(divisor)
n = n // divisor
divisor = divisor + 1
if n > 1:
ret.append(n)
return ret
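# For example, primefac(12) == [2, 2, 3] and primefac(7) == [7]; factors
# are returned in non-decreasing order with multiplicity.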
class TestReBatchingQueue(TestCase):
def test_rebatching_queue_single_enqueue_dequeue(self):
net = core.Net('net')
tensors = [
net.ConstantFill([], 1, value=1.0, run_once=False)
            for _ in range(3)
]
queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
net.EnqueueRebatchingQueue([queue, tensors[0]], [])
net.EnqueueRebatchingQueue([queue, tensors[1]], [])
net.EnqueueRebatchingQueue([queue, tensors[2]], [])
results = [
net.DequeueRebatchingQueue([queue], 1),
net.DequeueRebatchingQueue([queue], 1),
net.DequeueRebatchingQueue([queue], 1),
]
workspace.RunNetOnce(net)
for idx in range(3):
            self.assertEqual(workspace.FetchBlob(results[idx]), [1.0])
def test_rebatching_queue_multi_enqueue_dequeue(self):
net = core.Net('net')
workspace.FeedBlob(
"tensors", np.array([x for x in range(10)], np.int32)
)
queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)
results = [
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
]
workspace.RunNetOnce(net)
npt.assert_array_equal(
workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
)
npt.assert_array_equal(
workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
)
def test_rebatching_queue_closes_properly(self):
net = core.Net('net')
workspace.FeedBlob(
"tensors", np.array([x for x in range(10)], np.int32)
)
queue = net.CreateRebatchingQueue([], 1, capacity=10, num_blobs=1)
net.EnqueueRebatchingQueue([queue, "tensors"], 0, enqueue_batch=True)
net.CloseRebatchingQueue([queue], 0)
results = [
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
]
workspace.RunNetOnce(net)
npt.assert_array_equal(
workspace.FetchBlob(results[0]), workspace.FetchBlob("tensors")[:5]
)
npt.assert_array_equal(
workspace.FetchBlob(results[1]), workspace.FetchBlob("tensors")[5:]
)
# Enqueuing more should fail now since the queue is closed
net.EnqueueRebatchingQueue([queue, "tensors"], [], enqueue_batch=True)
with self.assertRaises(RuntimeError):
workspace.RunNetOnce(net)
# Dequeuing more should fail now since the queue is closed
results = [
net.DequeueRebatchingQueue([queue], 1, num_elements=5),
]
with self.assertRaises(RuntimeError):
workspace.RunNetOnce(net)
def test_rebatching_queue_multiple_components(self):
NUM_BLOBS = 4
NUM_ELEMENTS = 10
net = core.Net('net')
workspace.blobs['complex_tensor'] = np.array(
[[x, x + 1] for x in range(NUM_ELEMENTS)], dtype=np.int32
)
tensors = [
net.GivenTensorIntFill(
[],
1,
shape=[NUM_ELEMENTS],
values=[x for x in range(NUM_ELEMENTS)]
),
net.GivenTensorFill(
[],
1,
shape=[NUM_ELEMENTS],
values=[x * 1.0 for x in range(NUM_ELEMENTS)]
),
net.GivenTensorBoolFill(
[],
1,
shape=[NUM_ELEMENTS],
values=[(x % 2 == 0) for x in range(NUM_ELEMENTS)]
),
'complex_tensor',
]
queue = net.CreateRebatchingQueue(
[], 1, capacity=10, num_blobs=NUM_BLOBS
)
net.EnqueueRebatchingQueue([queue] + tensors, [], enqueue_batch=True)
results = net.DequeueRebatchingQueue([queue], NUM_BLOBS, num_elements=5)
workspace.RunNetOnce(net)
for idx in range(NUM_BLOBS):
npt.assert_array_equal(
workspace.FetchBlob(results[idx]),
workspace.FetchBlob(tensors[idx])[:5]
)
@given(
num_producers=st.integers(1, 5),
num_consumers=st.integers(1, 5),
producer_input_size=st.integers(1, 10),
producer_num_iterations=st.integers(1, 10),
capacity=st.integers(1, 10)
)
def test_rebatching_parallel_producer_consumer(
self, num_producers, num_consumers, producer_input_size,
producer_num_iterations, capacity
):
### Init ###
total_inputs = producer_num_iterations * producer_input_size * num_producers
inputs = []
init_net = core.Net('init_net')
queue = init_net.CreateRebatchingQueue(
[], 1, capacity=capacity, num_blobs=1
)
### Producers ###
producer_steps = []
for i in range(num_producers):
name = 'producer_%d' % i
net = core.Net(name)
values = [
producer_input_size * i + x for x in range(producer_input_size)
]
for _ in range(producer_num_iterations):
inputs.extend(values)
tensors = net.GivenTensorIntFill(
[], 1, shape=[producer_input_size], values=values
)
net.EnqueueRebatchingQueue([queue, tensors], [], enqueue_batch=True)
step = core.execution_step(
name, net, num_iter=producer_num_iterations
)
producer_steps.append(step)
producer_step = core.execution_step(
'producer', [
core.execution_step(
'producers', producer_steps, concurrent_substeps=True
)
]
)
### Consumers ###
outputs = []
def append(ins, outs):
# Extend is atomic
outputs.extend(ins[0].data.tolist())
consumer_steps = []
for i in range(num_consumers):
            # This is just one way of deterministically reading all the
            # elements. We make `num_consumers` almost-equal splits
            # (the remainder goes to the last consumer).
num_elements_to_read = total_inputs // num_consumers
if i == num_consumers - 1:
                num_elements_to_read += total_inputs % num_consumers
            # If we have nothing to read, this consumer will be idle.
            if num_elements_to_read == 0:
continue
# Now we have to make a split on number of iterations and the read
# size for each iteration. This is again just one of many
# deterministic ways of doing it. We factorize the total number of
# elements we have to read and assign half of the factors to the
# iterations half to the read size.
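            # For example, with num_elements_to_read == 12 the factors are
            # [2, 2, 3]; the first half ([2]) gives num_iterations == 2 and
            # the second half ([2, 3]) gives num_elements_per_iteration == 6,
            # so 2 iterations * 6 elements == 12 elements in total.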
factors = list(primefac(num_elements_to_read))
num_elements_per_iteration = functools.reduce(
lambda x, y: x * y, factors[len(factors) // 2:], 1
)
num_iterations = functools.reduce(
lambda x, y: x * y, factors[:len(factors) // 2], 1
)
name = 'consumer_%d' % i
net = core.Net(name)
blobs = net.DequeueRebatchingQueue(
[queue], 1, num_elements=num_elements_per_iteration
)
net.Python(append)([blobs], 0)
consumer_steps.append(
core.execution_step(name, net, num_iter=num_iterations)
)
consumer_step = core.execution_step(
'consumer', consumer_steps, concurrent_substeps=True
)
init_step = core.execution_step('init', init_net)
worker_step = core.execution_step(
'worker', [consumer_step, producer_step], concurrent_substeps=True
)
### Execute Plan ###
plan = core.Plan('test')
plan.AddStep(init_step)
plan.AddStep(worker_step)
self.ws.run(plan)
### Check Results ###
# We check that the outputs are a permutation of inputs
inputs.sort()
outputs.sort()
        self.assertEqual(inputs, outputs)
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import workspace, core, rnn_cell
from caffe2.python.model_helper import ModelHelper
from caffe2.python.rnn.rnn_cell_test_util import tanh
import caffe2.python.hypothesis_test_util as hu
from hypothesis import given
from hypothesis import settings as ht_settings
import hypothesis.strategies as st
import numpy as np
import unittest
def basic_rnn_reference(input, hidden_initial,
i2h_w, i2h_b,
gate_w, gate_b,
seq_lengths,
drop_states,
use_sequence_lengths):
D = hidden_initial.shape[-1]
T = input.shape[0]
N = input.shape[1]
if seq_lengths is not None:
seq_lengths = (np.ones(shape=(N, D)) *
seq_lengths.reshape(N, 1)).astype(np.int32)
ret = []
hidden_prev = hidden_initial
for t in range(T):
input_fc = np.dot(input[t], i2h_w.T) + i2h_b
recur_fc = np.dot(hidden_prev, gate_w.T) + gate_b
hidden_t = tanh(input_fc + recur_fc)
if seq_lengths is not None:
valid = (t < seq_lengths).astype(np.int32)
assert valid.shape == (N, D), (valid.shape, (N, D))
hidden_t = hidden_t * valid + \
hidden_prev * (1 - valid) * (1 - drop_states)
ret.append(hidden_t)
hidden_prev = hidden_t
return ret
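# The reference recurrence, written out:
#   h_t = tanh(x_t . i2h_w^T + i2h_b + h_{t-1} . gate_w^T + gate_b)
# With sequence lengths, rows that have finished (t >= seq_length) either
# carry h_{t-1} forward (drop_states=False) or are zeroed (drop_states=True).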
class BasicRNNCellTest(hu.HypothesisTestCase):
@given(
seed=st.integers(0, 2**32 - 1),
seq_length=st.integers(min_value=1, max_value=5),
batch_size=st.integers(min_value=1, max_value=5),
input_size=st.integers(min_value=1, max_value=5),
hidden_size=st.integers(min_value=1, max_value=5),
drop_states=st.booleans(),
sequence_lengths=st.booleans(),
**hu.gcs
)
@ht_settings(max_examples=15)
def test_basic_rnn(self, seed, seq_length, batch_size, input_size, hidden_size,
drop_states, sequence_lengths, gc, dc):
np.random.seed(seed)
seq_lengths_data = np.random.randint(
1, seq_length + 1, size=(batch_size,)).astype(np.int32)
input_blob_data = np.random.randn(
seq_length, batch_size, input_size).astype(np.float32)
initial_h_data = np.random.randn(
batch_size, hidden_size).astype(np.float32)
gates_t_w_data = np.random.randn(
hidden_size, hidden_size).astype(np.float32)
gates_t_b_data = np.random.randn(
hidden_size).astype(np.float32)
i2h_w_data = np.random.randn(
hidden_size, input_size).astype(np.float32)
i2h_b_data = np.random.randn(
hidden_size).astype(np.float32)
with core.DeviceScope(gc):
with hu.temp_workspace():
workspace.FeedBlob(
'input_blob', input_blob_data, device_option=gc)
workspace.FeedBlob(
'seq_lengths', seq_lengths_data, device_option=gc)
workspace.FeedBlob(
'initial_h', initial_h_data, device_option=gc)
workspace.FeedBlob(
'basic_rnn/gates_t_w', gates_t_w_data, device_option=gc)
workspace.FeedBlob(
'basic_rnn/gates_t_b', gates_t_b_data, device_option=gc)
workspace.FeedBlob(
'basic_rnn/i2h_w', i2h_w_data, device_option=gc)
workspace.FeedBlob(
'basic_rnn/i2h_b', i2h_b_data, device_option=gc)
model = ModelHelper(name='model')
hidden_t_all, _ = rnn_cell.BasicRNN(
model,
'input_blob',
'seq_lengths' if sequence_lengths else None,
['initial_h'],
input_size,
hidden_size,
"basic_rnn",
activation='tanh',
forward_only=True,
drop_states=drop_states)
workspace.RunNetOnce(model.net)
result = workspace.FetchBlob(hidden_t_all)
reference = basic_rnn_reference(
input_blob_data,
initial_h_data,
i2h_w_data,
i2h_b_data,
gates_t_w_data,
gates_t_b_data,
seq_lengths_data if sequence_lengths else None,
drop_states=drop_states,
use_sequence_lengths=sequence_lengths
)
np.testing.assert_allclose(result, reference, atol=1e-4, rtol=1e-4)
if __name__ == "__main__":
workspace.GlobalInit([
'caffe2',
'--caffe2_log_level=0',
])
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestListwiseL2rOps(hu.HypothesisTestCase):
def ref_lambda_rank_ndcg_loss(self, y, r):
n = len(y)
def get_discounts(v):
x = np.argsort(v)
d = [0 for _ in range(n)]
for i in range(n):
d[x[i]] = 1. / np.log2(n - i + 1.)
return d
def sigm(x):
return 1 / (1 + np.exp(-x))
def log_sigm(x):
return -np.log(1 + np.exp(-x))
g = [2**r[i] for i in range(n)]
d = get_discounts(r)
idcg = sum([g[i] * d[i] for i in range(n)])
d = get_discounts(y)
loss = 0
dy = np.zeros(n)
for i in range(n):
for j in range(n):
if i == j:
continue
lambda_weight = np.abs((2**r[i] - 2**r[j]) * (d[i] - d[j]))
rank_loss = -log_sigm(
y[i] - y[j] if r[i] > r[j] else y[j] - y[i]
)
rank_dy = (0. if r[i] > r[j] else 1.) - sigm(-y[i] + y[j])
loss += lambda_weight * rank_loss / idcg
dy[i] += lambda_weight * rank_dy / idcg
return loss, dy
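    # In formula form (a sketch of LambdaRank with an NDCG gain), for each
    # ordered pair (i, j) with i != j:
    #   lambda_ij = |(2^r_i - 2^r_j) * (d_i - d_j)|
    #   loss += lambda_ij * -log_sigm(y_hi - y_lo) / idcg
    # where y_hi is the score of the higher-relevance item, d_k is the
    # position discount, and idcg normalizes by the ideal DCG computed
    # from the true relevances r.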
@given(n=st.integers(1, 20), k=st.integers(2, 5))
def test_lambda_rank_ndcg_loss(self, n, k):
y = np.random.rand(n).astype(np.float32)
r = np.random.randint(k, size=n).astype(np.float32)
dloss = np.random.random(1).astype(np.float32)
workspace.blobs['y'] = y
workspace.blobs['r'] = r
workspace.blobs['dloss'] = dloss
op = core.CreateOperator('LambdaRankNdcg', ['y', 'r'], ['loss', 'dy'])
workspace.RunOperatorOnce(op)
loss = workspace.blobs['loss']
dy = workspace.blobs['dy']
ref_loss, ref_dy = self.ref_lambda_rank_ndcg_loss(y, r)
self.assertAlmostEqual(np.asscalar(loss), ref_loss, delta=1e-4)
np.testing.assert_allclose(dy, ref_dy, rtol=1e-5, atol=1e-6)
op = core.CreateOperator(
'LambdaRankNdcgGradient', ['y', 'dy', 'dloss'], ['dy_back']
)
workspace.RunOperatorOnce(op)
dy_back = workspace.blobs['dy_back']
np.testing.assert_allclose(
dy_back, np.asscalar(dloss) * ref_dy, rtol=1e-5, atol=1e-6
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestClipTensorByScalingOp(hu.HypothesisTestCase):
@given(n=st.integers(5, 8), d=st.integers(2, 4),
threshold=st.floats(0.1, 10),
additional_threshold=st.floats(0.1, 10),
use_additional_threshold=st.booleans(),
inplace=st.booleans(),
**hu.gcs_cpu_only)
def test_clip_tensor_by_scaling(self, n, d, threshold, additional_threshold,
use_additional_threshold, inplace, gc, dc):
tensor = np.random.rand(n, d).astype(np.float32)
val = np.array(np.linalg.norm(tensor))
additional_threshold = np.array([additional_threshold]).astype(np.float32)
def clip_tensor_by_scaling_ref(tensor_data, val_data,
additional_threshold=None):
if additional_threshold is not None:
final_threshold = threshold * additional_threshold
else:
final_threshold = threshold
if val_data > final_threshold:
ratio = final_threshold / float(val_data)
tensor_data = tensor_data * ratio
return [tensor_data]
op = core.CreateOperator(
"ClipTensorByScaling",
["tensor", "val"] if not use_additional_threshold else (
["tensor", "val", "additional_threshold"]),
['Y'] if not inplace else ["tensor"],
threshold=threshold,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[tensor, val] if not use_additional_threshold else (
[tensor, val, additional_threshold]),
reference=clip_tensor_by_scaling_ref,
)
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
import hypothesis.extra.numpy as hnp
@st.composite
def id_list_batch(draw):
num_inputs = draw(st.integers(1, 3))
batch_size = draw(st.integers(5, 10))
values_dtype = draw(st.sampled_from([np.int32, np.int64]))
inputs = []
for _ in range(num_inputs):
size = draw(st.integers(5, 10))
values = draw(hnp.arrays(values_dtype, size, st.integers(1, 10)))
lengths = draw(hu.lengths(len(values),
min_segments=batch_size,
max_segments=batch_size))
inputs.append(lengths)
inputs.append(values)
return inputs
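# ID-lists use the Caffe2 (lengths, values) encoding: for instance,
# lengths=[2, 1] with values=[3, 7, 4] represents a batch of two rows,
# [3, 7] and [4]. Each generated input contributes one such pair.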
def merge_id_lists_ref(*args):
n = len(args)
assert n > 0
assert n % 2 == 0
batch_size = len(args[0])
    num_inputs = n // 2
lengths = np.array([np.insert(args[2 * i], 0, 0)
for i in range(num_inputs)])
values = [args[2 * i + 1] for i in range(num_inputs)]
offsets = [np.cumsum(lengths[j]) for j in range(num_inputs)]
def merge_arrays(vs, offs, j):
concat = np.concatenate([vs[i][offs[i][j]:offs[i][j + 1]]
for i in range(num_inputs)])
return np.sort(np.unique(concat))
merged = [merge_arrays(values, offsets, j) for j in range(batch_size)]
merged_lengths = np.array([len(x) for x in merged])
merged_values = np.concatenate(merged)
return merged_lengths, merged_values
class TestMergeIdListsOp(hu.HypothesisTestCase):
def test_merge_id_lists_ref(self):
# Verify that the reference implementation is correct!
lengths_0 = np.array([3, 0, 4], dtype=np.int32)
values_0 = np.array([1, 5, 6, 2, 4, 5, 6], dtype=np.int64)
lengths_1 = np.array([3, 2, 1], dtype=np.int32)
values_1 = np.array([5, 8, 9, 14, 9, 5], dtype=np.int64)
merged_lengths, merged_values = merge_id_lists_ref(
lengths_0, values_0, lengths_1, values_1)
expected_lengths = np.array([5, 2, 4], dtype=np.int32)
expected_values = np.array([1, 5, 6, 8, 9, 9, 14, 2, 4, 5, 6], dtype=np.int64)
np.testing.assert_array_equal(merged_lengths, expected_lengths)
np.testing.assert_array_equal(merged_values, expected_values)
@given(inputs=id_list_batch(),
**hu.gcs_cpu_only)
def test_merge_id_lists_op(self, inputs, gc, dc):
        num_inputs = len(inputs) // 2
op = core.CreateOperator(
"MergeIdLists",
["{prefix}_{i}".format(prefix=p, i=i)
for i in range(num_inputs)
for p in ["lengths", "values"]],
["merged_lengths", "merged_values"]
)
self.assertDeviceChecks(dc, op, inputs, [0])
self.assertReferenceChecks(gc, op, inputs, merge_id_lists_ref)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import unittest
class TestSoftplus(hu.HypothesisTestCase):
@given(X=hu.tensor(),
**hu.gcs)
def test_softplus(self, X, gc, dc):
op = core.CreateOperator("Softplus", ["X"], ["Y"])
self.assertDeviceChecks(dc, op, [X], [0])
self.assertGradientChecks(gc, op, [X], 0, [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import numpy as np
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestFlatten(hu.HypothesisTestCase):
@given(X=hu.tensor(min_dim=2, max_dim=4),
**hu.gcs)
def test_flatten(self, X, gc, dc):
for axis in range(X.ndim + 1):
op = core.CreateOperator(
"Flatten",
["X"],
["Y"],
axis=axis)
def flatten_ref(X):
shape = X.shape
outer = np.prod(shape[:axis]).astype(int)
inner = np.prod(shape[axis:]).astype(int)
return np.copy(X).reshape(outer, inner),
self.assertReferenceChecks(gc, op, [X], flatten_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import assume, given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
from caffe2.proto import caffe2_pb2
class TestReductionOps(hu.HypothesisTestCase):
@given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_sum(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def sum_op(X):
return [np.sum(X)]
op = core.CreateOperator(
"SumElements",
["X"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sum_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
@given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_int_sum(self, n, gc, dc):
X = np.random.rand(n).astype(np.int32)
def sum_op(X):
return [np.sum(X)]
op = core.CreateOperator(
"SumElementsInt",
["X"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sum_op,
)
@given(n=st.integers(1, 65536),
dtype=st.sampled_from([np.float32, np.float16]),
**hu.gcs)
def test_elementwise_sqrsum(self, n, dtype, gc, dc):
if dtype == np.float16:
# fp16 is only supported with CUDA
assume(gc.device_type == caffe2_pb2.CUDA)
dc = [d for d in dc if d.device_type == caffe2_pb2.CUDA]
X = np.random.rand(n).astype(dtype)
def sumsqr_op(X):
return [np.sum(X * X)]
op = core.CreateOperator(
"SumSqrElements",
["X"],
["y"]
)
threshold = 0.01 if dtype == np.float16 else 0.005
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=sumsqr_op,
threshold=threshold,
)
@given(n=st.integers(5, 8), **hu.gcs)
def test_elementwise_avg(self, n, gc, dc):
X = np.random.rand(n).astype(np.float32)
def avg_op(X):
return [np.mean(X)]
op = core.CreateOperator(
"SumElements",
["X"],
["y"],
average=1
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=avg_op,
)
self.assertGradientChecks(
device_option=gc,
op=op,
inputs=[X],
outputs_to_check=0,
outputs_with_grads=[0],
)
@given(batch_size=st.integers(1, 3),
m=st.integers(1, 3),
n=st.integers(1, 4),
**hu.gcs)
def test_rowwise_max(self, batch_size, m, n, gc, dc):
X = np.random.rand(batch_size, m, n).astype(np.float32)
def rowwise_max(X):
return [np.max(X, axis=2)]
op = core.CreateOperator(
"RowwiseMax",
["x"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=rowwise_max,
)
@given(batch_size=st.integers(1, 3),
m=st.integers(1, 3),
n=st.integers(1, 4),
**hu.gcs)
def test_columnwise_max(self, batch_size, m, n, gc, dc):
X = np.random.rand(batch_size, m, n).astype(np.float32)
def columnwise_max(X):
return [np.max(X, axis=1)]
op = core.CreateOperator(
"ColwiseMax",
["x"],
["y"]
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X],
reference=columnwise_max,
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestClip(hu.HypothesisTestCase):
@given(X=hu.tensor(),
min_=st.floats(min_value=-1, max_value=0),
max_=st.floats(min_value=0, max_value=1),
inplace=st.booleans(),
**hu.gcs)
def test_clip(self, X, min_, max_, inplace, gc, dc):
# go away from the origin point to avoid kink problems
X[np.abs(X - min_) < 0.05] += 0.1
X[np.abs(X - max_) < 0.05] += 0.1
def clip_ref(X):
X = X.clip(min_, max_)
return (X,)
op = core.CreateOperator(
"Clip",
["X"], ["Y" if not inplace else "X"],
min=min_,
max=max_)
self.assertReferenceChecks(gc, op, [X], clip_ref)
# Check over multiple devices
self.assertDeviceChecks(dc, op, [X], [0])
# Gradient check wrt X
self.assertGradientChecks(gc, op, [X], 0, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import hypothesis.strategies as st
import numpy as np
from caffe2.python import brew, core, workspace
import caffe2.python.hypothesis_test_util as hu
from caffe2.python.model_helper import ModelHelper
import unittest
class TestSpatialBN(hu.HypothesisTestCase):
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
inplace=st.sampled_from([True, False]),
**hu.gcs)
def test_spatialbn_test_mode_3d(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
order=order,
is_test=True,
epsilon=epsilon,
engine="CUDNN",
)
def reference_spatialbn_test(X, scale, bias, mean, var):
if order == "NCHW":
scale = scale[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
var = var[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(batch_size, input_channels, size, size, size)\
.astype(np.float32) - 0.5
if order == "NHWC":
X = X.transpose(0, 2, 3, 4, 1)
self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
reference_spatialbn_test)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
inplace=st.sampled_from([True, False]),
**hu.gcs)
def test_spatialbn_test_mode_1d(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
order=order,
is_test=True,
epsilon=epsilon,
engine="CUDNN",
)
def reference_spatialbn_test(X, scale, bias, mean, var):
if order == "NCHW":
scale = scale[np.newaxis, :, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis]
var = var[np.newaxis, :, np.newaxis]
return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2)
self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
reference_spatialbn_test)
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
engine=st.sampled_from(["", "CUDNN"]),
inplace=st.sampled_from([True, False]),
**hu.gcs)
def test_spatialbn_test_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, engine, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["X" if inplace else "Y"],
order=order,
is_test=True,
epsilon=epsilon,
engine=engine
)
def reference_spatialbn_test(X, scale, bias, mean, var):
if order == "NCHW":
scale = scale[np.newaxis, :, np.newaxis, np.newaxis]
bias = bias[np.newaxis, :, np.newaxis, np.newaxis]
mean = mean[np.newaxis, :, np.newaxis, np.newaxis]
var = var[np.newaxis, :, np.newaxis, np.newaxis]
return ((X - mean) / np.sqrt(var + epsilon) * scale + bias,)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
self.assertReferenceChecks(gc, op, [X, scale, bias, mean, var],
reference_spatialbn_test)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var], [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(1e-5, 1e-2),
engine=st.sampled_from(["", "CUDNN"]),
inplace=st.sampled_from([True, False]),
**hu.gcs)
def test_spatialbn_train_mode(
self, size, input_channels, batch_size, seed, order, epsilon,
inplace, engine, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "running_mean", "running_var"],
["X" if inplace else "Y",
"running_mean", "running_var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
engine=engine,
)
np.random.seed(1701)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
self.assertDeviceChecks(dc, op, [X, scale, bias, mean, var],
[0, 1, 2, 3, 4])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_spatialbn_train_mode_gradient_check(
self, size, input_channels, batch_size, seed, order, epsilon,
engine, gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y", "mean", "var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
engine=engine
)
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2).swapaxes(2, 3)
for input_to_check in [0, 1, 2]: # dX, dScale, dBias
self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
input_to_check, [0])
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
order=st.sampled_from(["NCHW", "NHWC"]),
epsilon=st.floats(min_value=1e-5, max_value=1e-2),
**hu.gcs)
def test_spatialbn_train_mode_gradient_check_1d(
self, size, input_channels, batch_size, seed, order, epsilon,
gc, dc):
op = core.CreateOperator(
"SpatialBN",
["X", "scale", "bias", "mean", "var"],
["Y", "mean", "var", "saved_mean", "saved_var"],
order=order,
is_test=False,
epsilon=epsilon,
engine="CUDNN",
)
np.random.seed(seed)
scale = np.random.rand(input_channels).astype(np.float32) + 0.5
bias = np.random.rand(input_channels).astype(np.float32) - 0.5
mean = np.random.randn(input_channels).astype(np.float32)
var = np.random.rand(input_channels).astype(np.float32) + 0.5
X = np.random.rand(
batch_size, input_channels, size).astype(np.float32) - 0.5
if order == "NHWC":
X = X.swapaxes(1, 2)
for input_to_check in [0, 1, 2]: # dX, dScale, dBias
self.assertGradientChecks(gc, op, [X, scale, bias, mean, var],
input_to_check, [0], stepsize=0.01)
@given(size=st.integers(7, 10),
input_channels=st.integers(1, 10),
batch_size=st.integers(1, 3),
seed=st.integers(0, 65535),
epsilon=st.floats(1e-5, 1e-2),
engine=st.sampled_from(["", "CUDNN"]),
**hu.gcs)
def test_spatialbn_brew_wrapper(
self, size, input_channels, batch_size, seed, epsilon,
engine, gc, dc):
np.random.seed(seed)
X = np.random.rand(
batch_size, input_channels, size, size).astype(np.float32)
workspace.FeedBlob('X', X)
model = ModelHelper(name='test_spatialbn_brew_wrapper')
brew.spatial_bn(
model,
'X',
'Y',
input_channels,
epsilon=epsilon,
is_test=False,
)
workspace.RunNetOnce(model.param_init_net)
workspace.RunNetOnce(model.net)
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import assume, given
import hypothesis.strategies as st
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import caffe2.python.hypothesis_test_util as hu
import unittest
def _cudnn_supports(
dilation=False,
nhwc=False,
):
"""Return True if cuDNN supports this configuration."""
v = workspace.GetCuDNNVersion()
if dilation and v < 6000:
# Dilation not supported until v6
return False
if dilation and nhwc:
# Dilation and NHWC not supported together
return False
return True
def _conv_1d_output_size(size, kernel, pad, dilation, stride):
return max(
1,
int((size + pad * 2 - (dilation * (kernel - 1) + 1)) / stride) + 1
)
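# Worked example of the output-size formula (illustrative values): with
# size=7, kernel=3, pad=1, dilation=2, stride=2 the effective kernel extent
# is dilation * (kernel - 1) + 1 = 5, so the output length is
# int((7 + 2 * 1 - 5) / 2) + 1 = 3.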
def _conv_2d_output_size(size, kernel, pad_h, pad_w, dilation,
stride_h, stride_w):
return [
_conv_1d_output_size(size, kernel, pad_h, dilation, stride_h),
_conv_1d_output_size(size, kernel, pad_w, dilation, stride_w)
]
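# The DeformConv offset blob carries one (dy, dx) pair per kernel tap per
# deformable group, laid out channel-first:
# [batch, 2 * kernel * kernel * deformable_group, out_h, out_w].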
def _conv_2d_offsets_dims(
batch_size,
size,
kernel,
pad_h,
pad_w,
dilation,
stride_h,
stride_w,
deformable_group
):
dims = [batch_size, 2 * kernel * kernel * deformable_group]
dims.extend(_conv_2d_output_size(size, kernel, pad_h, pad_w,
dilation, stride_h, stride_w))
return dims
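# Builds offsets where every output position shares the same fractional
# (dy, dx) displacement per kernel tap; fractional parts in (0.05, 0.95)
# keep the sampling points off the integer grid, where the bilinearly
# interpolated surface is least smooth.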
def _conv_2d_random_offsets(
batch_size,
kernel,
dims,
num_deformable_group
):
o = []
for y0 in range(0, kernel):
for x0 in range(0, kernel):
# stay away from integer offsets which correspond to "ridges" on the
# interpolated surface resulting in less precise estimates
x = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
y = np.random.randint(0, kernel) + np.random.uniform(0.05, 0.95)
o.append(y - y0)
o.append(x - x0)
o = o * num_deformable_group
e = []
for v in o:
e.append([[v] * dims[1]] * dims[0])
return np.array([e] * batch_size).astype(np.float32)
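# Builds integer offsets that remap each kernel tap onto a random tap of the
# same window, plus reference weights w0 holding the tap multiplicities, so
# that a plain Conv with w0 should reproduce the DeformConv output for
# all-ones weights.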
def _conv_2d_shuffle_offsets(
batch_size,
kernel,
dims,
num_deformable_group,
input_channels,
output_channels
):
o = []
w0 = [[0 for x in range(kernel)] for y in range(kernel)]
for y0 in range(0, kernel):
for x0 in range(0, kernel):
x = np.random.randint(0, kernel)
y = np.random.randint(0, kernel)
o.append(y - y0)
o.append(x - x0)
w0[y][x] += 1
o = o * num_deformable_group
e = []
for v in o:
e.append([[v] * int(dims[1])] * int(dims[0]))
w0 = [[w0] * input_channels] * output_channels
return (
np.array([e] * batch_size).astype(np.float32),
np.array(w0).astype(np.float32).transpose((0, 2, 3, 1))
)
class TestConvolution(hu.HypothesisTestCase):
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only)
def test_null_offset_convolution(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size,
order, engine, use_bias, deformable_group,
gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
offset_dims = _conv_2d_offsets_dims(batch_size, size, kernel, pad, pad,
dilation, stride, stride,
deformable_group)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
o = np.zeros(tuple(offset_dims), np.float32)
w = np.random.rand(
output_channels, kernel, kernel, input_channels).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
reference_op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 3),
pad=st.integers(0, 0),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 4),
**hu.gcs_gpu_only)
def test_flat_input_convolution(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size,
order, engine, use_bias,
deformable_group, gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = np.ones((batch_size, size, size, input_channels), np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad, pad,
dilation, stride, stride)
o = _conv_2d_random_offsets(batch_size, kernel, output_size,
deformable_group)
w = np.ones((output_channels, kernel, kernel, input_channels), np.float32) - 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
reference_op = core.CreateOperator(
"Conv",
["X", "w", "b"] if use_bias else ["X", "w"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 1),
pad=st.integers(0, 0),
kernel=st.integers(1, 5),
dilation=st.integers(1, 1),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 4),
**hu.gcs_gpu_only)
def test_shuffle_input_convolution(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size,
order, engine, use_bias,
deformable_group, gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad, pad,
dilation, stride, stride)
o, w0 = _conv_2d_shuffle_offsets(batch_size, kernel, output_size,
deformable_group, input_channels,
output_channels)
w = np.ones((output_channels, kernel, kernel, input_channels), np.float32)
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
w0 = w0.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
def reference_conv_op(*args):
with core.DeviceScope(gc):
workspace.FeedBlob("w0", w0)
reference_op = core.CreateOperator(
"Conv",
["X", "w0", "b"] if use_bias else ["X", "w0"],
["Y0"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
device_option=gc
)
workspace.RunOperatorOnce(reference_op)
reference_blob = workspace.FetchBlob("Y0")
return (reference_blob,)
self.assertReferenceChecks(gc, op, inputs, reference_conv_op)
# CUDNN does NOT support different padding values and we skip it
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride_h=st.integers(1, 3),
stride_w=st.integers(1, 3),
pad_h=st.integers(0, 3),
pad_w=st.integers(0, 3),
kernel=st.integers(2, 5),
size=st.integers(1, 8),
input_channels=st.integers(1, 3),
output_channels=st.integers(1, 3),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "EIGEN"]),
shared_buffer=st.booleans(),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only)
def test_conv_separate_stride_pad_gradients(self, stride_h, stride_w,
pad_h, pad_w, kernel, size,
input_channels, output_channels,
batch_size, order, engine,
shared_buffer, use_bias,
deformable_group, gc, dc):
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride_h=stride_h,
stride_w=stride_w,
pad_t=pad_h,
pad_l=pad_w,
pad_b=pad_h,
pad_r=pad_w,
kernel=kernel,
order=order,
engine=engine,
shared_buffer=int(shared_buffer),
deformable_group=deformable_group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad_h, pad_w, 1,
stride_h, stride_w)
o = _conv_2d_random_offsets(batch_size, kernel, output_size,
deformable_group)
w = np.random.rand(
output_channels, kernel, kernel, input_channels).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
if size + pad_h < kernel or size + pad_w < kernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
@unittest.skipIf(not workspace.has_gpu_support, "No gpu support")
@given(stride=st.integers(1, 3),
pad=st.integers(0, 3),
kernel=st.integers(1, 5),
dilation=st.integers(1, 3),
size=st.integers(7, 10),
input_channels=st.integers(1, 8),
output_channels=st.integers(1, 8),
batch_size=st.integers(1, 3),
order=st.sampled_from(["NCHW"]),
engine=st.sampled_from(["", "CUDNN", "MKLDNN"]),
use_bias=st.booleans(),
deformable_group=st.integers(1, 3),
**hu.gcs_gpu_only)
def test_conv_gradients(self, stride, pad, kernel, dilation, size,
input_channels, output_channels, batch_size, order,
engine, use_bias, deformable_group, gc, dc):
dkernel = dilation * (kernel - 1) + 1
if gc.device_type == caffe2_pb2.CUDA and engine == 'CUDNN':
assume(_cudnn_supports(dilation=(dilation > 1),
nhwc=(order == 'NHWC')))
assume(engine != "MKLDNN" or use_bias is True)
op = core.CreateOperator(
"DeformConv",
["X", "o", "w", "b"] if use_bias else ["X", "o", "w"],
["Y"],
stride=stride,
kernel=kernel,
dilation=dilation,
pad=pad,
order=order,
engine=engine,
deformable_group=deformable_group,
)
X = np.random.rand(
batch_size, size, size, input_channels).astype(np.float32) - 0.5
output_size = _conv_2d_output_size(size, kernel, pad, pad,
dilation, stride, stride)
o = _conv_2d_random_offsets(batch_size, kernel, output_size, deformable_group)
w = np.random.rand(
output_channels, kernel, kernel, input_channels).astype(np.float32)\
- 0.5
b = np.random.rand(output_channels).astype(np.float32) - 0.5
if order == "NCHW":
X = X.transpose((0, 3, 1, 2))
w = w.transpose((0, 3, 1, 2))
inputs = [X, o, w, b] if use_bias else [X, o, w]
# Error handling path.
        if size + pad + pad < dkernel:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if input_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
if output_channels % deformable_group != 0:
with self.assertRaises(RuntimeError):
self.assertDeviceChecks(dc, op, inputs, [0])
return
self.assertDeviceChecks(dc, op, inputs, [0])
for i in range(len(inputs)):
self.assertGradientChecks(gc, op, inputs, i, [0])
if __name__ == "__main__":
    unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
from scipy.sparse import coo_matrix
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestSparseGradient(hu.HypothesisTestCase):
@given(M=st.integers(min_value=5, max_value=20),
N=st.integers(min_value=5, max_value=20),
K=st.integers(min_value=5, max_value=15),
sparsity=st.floats(min_value=0.1, max_value=1.0),
**hu.gcs_cpu_only)
def test_sparse_gradient(self, M, N, K, sparsity, gc, dc):
X = np.random.randn(M, K).astype(np.float32)
X[X > sparsity] = 0
X_coo = coo_matrix(X)
val, key, seg = X_coo.data, X_coo.col, X_coo.row
val = val.astype(np.float32)
key = key.astype(np.int64)
seg = seg.astype(np.int32)
Y = np.random.randn(K, N).astype(np.float32)
op = core.CreateOperator(
'SparseUnsortedSegmentWeightedSum',
['Y', 'val', 'key', 'seg'],
['out'],
num_segments=M)
# Gradient check wrt Y
self.assertGradientChecks(
gc, op, [Y, val, key, seg], 0, [0])
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core
from hypothesis import given
from hypothesis import strategies as st
import caffe2.python.hypothesis_test_util as hu
import numpy as np
import unittest
class TestMathOps(hu.HypothesisTestCase):
@given(X=hu.tensor(),
exponent=st.floats(min_value=2.0, max_value=3.0),
**hu.gcs)
def test_elementwise_power(self, X, exponent, gc, dc):
def powf(X):
return (X ** exponent,)
def powf_grad(g_out, outputs, fwd_inputs):
return (exponent * (fwd_inputs[0] ** (exponent - 1)) * g_out,)
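        # Reference gradient via the power rule: d/dX (X ** e) =
        # e * X ** (e - 1), chained with the incoming gradient g_out.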
op = core.CreateOperator(
"Pow", ["X"], ["Y"], exponent=exponent)
        self.assertReferenceChecks(gc, op, [X], powf,
                                   output_to_grad="Y",
                                   grad_reference=powf_grad)
@given(X=hu.tensor(),
exponent=st.floats(min_value=-3.0, max_value=3.0),
**hu.gcs)
def test_sign(self, X, exponent, gc, dc):
def signf(X):
return [np.sign(X)]
op = core.CreateOperator(
"Sign", ["X"], ["Y"])
        self.assertReferenceChecks(gc, op, [X], signf)
self.assertDeviceChecks(dc, op, [X], [0])
if __name__ == "__main__":
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import given, assume
import hypothesis.strategies as st
from caffe2.python import core, model_helper, brew
import caffe2.python.hypothesis_test_util as hu
class TestInstanceNorm(hu.HypothesisTestCase):
def _get_inputs(self, N, C, H, W, order):
if order == 'NCHW':
input_data = np.random.rand(N, C, H, W).astype(np.float32)
elif order == 'NHWC':
# Allocate in the same order as NCHW and transpose to make sure
# the inputs are identical on freshly-seeded calls.
input_data = np.random.rand(N, C, H, W).astype(np.float32)
input_data = np.transpose(input_data, axes=(0, 2, 3, 1))
else:
raise Exception('unknown order type ({})'.format(order))
scale_data = np.random.rand(C).astype(np.float32)
bias_data = np.random.rand(C).astype(np.float32)
return input_data, scale_data, bias_data
def _get_op(self, device_option, store_mean, store_inv_stdev, epsilon,
order, inplace=False):
outputs = ['output' if not inplace else "input"]
if store_mean or store_inv_stdev:
outputs += ['mean']
if store_inv_stdev:
outputs += ['inv_stdev']
op = core.CreateOperator(
'InstanceNorm',
['input', 'scale', 'bias'],
outputs,
order=order,
epsilon=epsilon,
device_option=device_option)
return op
def _feed_inputs(self, input_blobs, device_option):
names = ['input', 'scale', 'bias']
for name, blob in zip(names, input_blobs):
self.ws.create_blob(name).feed(blob, device_option=device_option)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 3),
C=st.integers(2, 3),
H=st.integers(2, 3),
W=st.integers(2, 3),
order=st.sampled_from(['NCHW', 'NHWC']),
epsilon=st.floats(1e-6, 1e-4),
store_mean=st.booleans(),
seed=st.integers(0, 1000),
store_inv_stdev=st.booleans())
def test_instance_norm_gradients(
self, gc, dc, N, C, H, W, order, store_mean, store_inv_stdev,
epsilon, seed):
np.random.seed(seed)
        # force store_inv_stdev whenever store_mean is set, to match the
        # existing forward-pass implementation
store_inv_stdev |= store_mean
op = self._get_op(
device_option=gc,
store_mean=store_mean,
store_inv_stdev=store_inv_stdev,
epsilon=epsilon,
order=order)
input_blobs = self._get_inputs(N, C, H, W, order)
output_indices = [0]
# if store_inv_stdev is turned on, store_mean must also be forced on
if store_mean or store_inv_stdev:
output_indices += [1]
if store_inv_stdev:
output_indices += [2]
self.assertDeviceChecks(dc, op, input_blobs, output_indices)
# The gradient only flows from output #0 since the other two only
# store the temporary mean and inv_stdev buffers.
# Check dl/dinput
self.assertGradientChecks(gc, op, input_blobs, 0, [0], stepsize=0.005,
threshold=0.01)
# Check dl/dscale
self.assertGradientChecks(gc, op, input_blobs, 1, [0])
# Check dl/dbias
self.assertGradientChecks(gc, op, input_blobs, 2, [0])
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
seed=st.integers(0, 1000),
epsilon=st.floats(1e-6, 1e-4),
store_mean=st.booleans(),
store_inv_stdev=st.booleans())
def test_instance_norm_layout(self, gc, dc, N, C, H, W, store_mean,
store_inv_stdev, epsilon, seed):
        # force store_inv_stdev whenever store_mean is set, to match the
        # existing forward-pass implementation
store_inv_stdev |= store_mean
outputs = {}
for order in ('NCHW', 'NHWC'):
np.random.seed(seed)
input_blobs = self._get_inputs(N, C, H, W, order)
self._feed_inputs(input_blobs, device_option=gc)
op = self._get_op(
device_option=gc,
store_mean=store_mean,
store_inv_stdev=store_inv_stdev,
epsilon=epsilon,
order=order)
self.ws.run(op)
outputs[order] = self.ws.blobs['output'].fetch()
np.testing.assert_allclose(
outputs['NCHW'],
outputs['NHWC'].transpose((0, 3, 1, 2)),
atol=1e-4,
rtol=1e-4)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
epsilon=st.floats(1e-6, 1e-4),
store_mean=st.booleans(),
seed=st.integers(0, 1000),
store_inv_stdev=st.booleans(),
inplace=st.booleans())
def test_instance_norm_reference_check(
self, gc, dc, N, C, H, W, order, store_mean, store_inv_stdev,
epsilon, seed, inplace):
np.random.seed(seed)
        # force store_inv_stdev whenever store_mean is set, to match the
        # existing forward-pass implementation
store_inv_stdev |= store_mean
if order != "NCHW":
assume(not inplace)
inputs = self._get_inputs(N, C, H, W, order)
op = self._get_op(
device_option=gc,
store_mean=store_mean,
store_inv_stdev=store_inv_stdev,
epsilon=epsilon,
order=order,
inplace=inplace)
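        # Reference instance norm: mean and variance are taken per (n, c)
        # over the spatial axes, then Y = scale * (X - mean) * inv_stdev
        # + bias; NHWC inputs are transposed to NCHW and back.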
def ref(input_blob, scale_blob, bias_blob):
if order == 'NHWC':
input_blob = np.transpose(input_blob, axes=(0, 3, 1, 2))
mean_blob = input_blob.reshape((N, C, -1)).mean(axis=2)
inv_stdev_blob = 1.0 / \
np.sqrt(input_blob.reshape((N, C, -1)).var(axis=2) + epsilon)
# _bc indicates blobs that are reshaped for broadcast
scale_bc = scale_blob[np.newaxis, :, np.newaxis, np.newaxis]
mean_bc = mean_blob[:, :, np.newaxis, np.newaxis]
inv_stdev_bc = inv_stdev_blob[:, :, np.newaxis, np.newaxis]
bias_bc = bias_blob[np.newaxis, :, np.newaxis, np.newaxis]
normalized_blob = scale_bc * (input_blob - mean_bc) * inv_stdev_bc \
+ bias_bc
if order == 'NHWC':
normalized_blob = np.transpose(
normalized_blob, axes=(0, 2, 3, 1))
if not store_mean and not store_inv_stdev:
return normalized_blob,
elif not store_inv_stdev:
return normalized_blob, mean_blob
else:
return normalized_blob, mean_blob, inv_stdev_blob
self.assertReferenceChecks(gc, op, inputs, ref)
@given(gc=hu.gcs['gc'],
dc=hu.gcs['dc'],
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
epsilon=st.floats(1e-6, 1e-4),
store_mean=st.booleans(),
seed=st.integers(0, 1000),
store_inv_stdev=st.booleans())
def test_instance_norm_device_check(
self, gc, dc, N, C, H, W, order, store_mean, store_inv_stdev,
epsilon, seed):
np.random.seed(seed)
        # force store_inv_stdev whenever store_mean is set, to match the
        # existing forward-pass implementation
store_inv_stdev |= store_mean
inputs = self._get_inputs(N, C, H, W, order)
op = self._get_op(
device_option=gc,
store_mean=store_mean,
store_inv_stdev=store_inv_stdev,
epsilon=epsilon,
order=order)
self.assertDeviceChecks(dc, op, inputs, [0])
@given(is_test=st.booleans(),
N=st.integers(2, 10),
C=st.integers(3, 10),
H=st.integers(5, 10),
W=st.integers(7, 10),
order=st.sampled_from(['NCHW', 'NHWC']),
epsilon=st.floats(1e-6, 1e-4),
seed=st.integers(0, 1000))
def test_instance_norm_model_helper(
self, N, C, H, W, order, epsilon, seed, is_test):
np.random.seed(seed)
model = model_helper.ModelHelper(name="test_model")
brew.instance_norm(
model,
'input',
'output',
C,
epsilon=epsilon,
order=order,
is_test=is_test)
input_blob = np.random.rand(N, C, H, W).astype(np.float32)
if order == 'NHWC':
input_blob = np.transpose(input_blob, axes=(0, 2, 3, 1))
self.ws.create_blob('input').feed(input_blob)
self.ws.create_net(model.param_init_net).run()
self.ws.create_net(model.net).run()
if is_test:
scale = self.ws.blobs['output_s'].fetch()
assert scale is not None
assert scale.shape == (C, )
bias = self.ws.blobs['output_b'].fetch()
assert bias is not None
assert bias.shape == (C, )
output_blob = self.ws.blobs['output'].fetch()
if order == 'NHWC':
output_blob = np.transpose(output_blob, axes=(0, 3, 1, 2))
assert output_blob.shape == (N, C, H, W)
if __name__ == '__main__':
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from caffe2.python import core, workspace
from caffe2.python.test_util import TestCase
import numpy as np
lengths = [[0], [1, 2], [1, 0, 2, 0]]
features1 = [[],
[1, 2, 2],
[[1, 1], [2, 2], [2, 2]]
]
features2 = [[],
[2, 4, 4],
[[2, 2], [4, 4], [4, 4]]
]
lengths_exp = [[1], [1, 2], [1, 1, 2, 1]]
features1_exp = [[0],
[1, 2, 2],
[[1, 1], [0, 0], [2, 2], [2, 2], [0, 0]]]
features2_exp = [[0],
[2, 4, 4],
[[2, 2], [0, 0], [4, 4], [4, 4], [0, 0]]]
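# As the expected outputs above illustrate, PadEmptySamples gives every
# zero-length sample a single zero-filled element, so lengths of 0 become 1
# and a padding row is inserted at the matching positions of each feature
# tensor.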
class TestEmptySampleOps(TestCase):
def test_emptysample(self):
for i in range(0, 3):
PadEmptyTest = core.CreateOperator(
'PadEmptySamples',
['lengths', 'features1', 'features2'],
['out_lengths', 'out_features1', 'out_features2'],
)
workspace.FeedBlob(
'lengths',
np.array(lengths[i], dtype=np.int32))
workspace.FeedBlob(
'features1',
np.array(features1[i], dtype=np.int64))
workspace.FeedBlob(
'features2',
np.array(features2[i], dtype=np.int64))
workspace.RunOperatorOnce(PadEmptyTest)
np.testing.assert_allclose(
lengths_exp[i],
workspace.FetchBlob('out_lengths'),
atol=1e-4, rtol=1e-4, err_msg='Mismatch in lengths')
np.testing.assert_allclose(
features1_exp[i],
workspace.FetchBlob('out_features1'),
atol=1e-4, rtol=1e-4, err_msg='Mismatch in features1')
np.testing.assert_allclose(
features2_exp[i],
workspace.FetchBlob('out_features2'),
atol=1e-4, rtol=1e-4, err_msg='Mismatch in features2')
if __name__ == "__main__":
import unittest
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from hypothesis import given
import hypothesis.strategies as st
from caffe2.python import core
import caffe2.python.hypothesis_test_util as hu
class TestAssert(hu.HypothesisTestCase):
@given(
dtype=st.sampled_from(['bool_', 'int32', 'int64']),
shape=st.lists(elements=st.integers(1, 10), min_size=1, max_size=4),
**hu.gcs)
def test_assert(self, dtype, shape, gc, dc):
test_tensor = np.random.rand(*shape).astype(np.dtype(dtype))
op = core.CreateOperator('Assert', ['X'], [])
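        # The Assert op raises at run time if any element of X is falsy, so
        # the except branch below treats a failed reference check as evidence
        # that the tensor was not all nonzero.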
def assert_ref(X):
return []
try:
self.assertReferenceChecks(gc, op, [test_tensor], assert_ref)
except Exception:
assert(not np.all(test_tensor))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from hypothesis import given
import numpy as np
import unittest
from caffe2.proto import caffe2_pb2, hsm_pb2
from caffe2.python import workspace, core, gradient_checker
import caffe2.python.hypothesis_test_util as hu
import caffe2.python.hsm_util as hsmu
# The tree is specified via a protobuf file or, as in this test, via the
# python utils below.
# The hierarchy in this test looks as shown below. Note that the final subtrees
# (with word_ids as leaves) have been collapsed for visualization
# *
# / \
# * 5,6,7,8
# / \
# 0,1,2 3,4
tree = hsm_pb2.TreeProto()
words = [[0, 1, 2], [3, 4], [5, 6, 7, 8]]
node1 = hsmu.create_node_with_words(words[0], "node1")
node2 = hsmu.create_node_with_words(words[1], "node2")
node3 = hsmu.create_node_with_words(words[2], "node3")
node4 = hsmu.create_node_with_nodes([node1, node2], "node4")
node = hsmu.create_node_with_nodes([node4, node3], "node5")
tree.root_node.MergeFrom(node)
# structure:
# node5: [0, 2, ["node4", "node3"]] # offset, length, "node4, node3"
# node4: [2, 2, ["node1", "node2"]]
# node1: [4, 3, [0, 1 ,2]]
# node2: [7, 2, [3, 4]]
# node3: [9, 4, [5, 6, 7, 8]]
struct = [[0, 2, ["node4", "node3"], "node5"],
[2, 2, ["node1", "node2"], "node4"],
[4, 3, [0, 1, 2], "node1"],
[7, 2, [3, 4], "node2"],
[9, 4, [5, 6, 7, 8], "node3"]]
# Internal util to translate the input tree to a list of (word_id, path). The
# serialized hierarchy is passed into the operator_def as a string argument.
hierarchy_proto = hsmu.create_hierarchy(tree)
arg = caffe2_pb2.Argument()
arg.name = "hierarchy"
arg.s = hierarchy_proto.SerializeToString()
beam = 5
args_search = []
arg_search = caffe2_pb2.Argument()
arg_search.name = "tree"
arg_search.s = tree.SerializeToString()
args_search.append(arg_search)
arg_search = caffe2_pb2.Argument()
arg_search.name = "beam"
arg_search.f = beam
args_search.append(arg_search)
class TestHsm(hu.HypothesisTestCase):
def test_hsm_search(self):
samples = 10
dim_in = 5
X = np.random.rand(samples, dim_in).astype(np.float32) - 0.5
w = np.random.rand(hierarchy_proto.size, dim_in) \
.astype(np.float32) - 0.5
b = np.random.rand(hierarchy_proto.size).astype(np.float32) - 0.5
labels = np.array([np.random.randint(0, 8) for i in range(samples)]) \
.astype(np.int32)
workspace.GlobalInit(['caffe2'])
workspace.FeedBlob("data", X)
workspace.FeedBlob("weights", w)
workspace.FeedBlob("bias", b)
workspace.FeedBlob("labels", labels)
op = core.CreateOperator(
'HSoftmaxSearch',
['data', 'weights', 'bias'],
['names', 'scores'],
'HSoftmaxSearch',
arg=args_search)
workspace.RunOperatorOnce(op)
names = workspace.FetchBlob('names')
scores = workspace.FetchBlob('scores')
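        # Numpy re-implementation of the operator's beam search: accumulate
        # per-node negative log-softmax scores along each path and prune
        # candidates whose score exceeds the parent's by more than `beam`
        # (an absolute threshold of `beam` at the root).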
def simulation_hsm_search():
names = []
scores = []
for line in struct:
s, e = line[0], line[0] + line[1]
score = np.dot(X, w[s:e].transpose()) + b[s:e]
score = np.exp(score - np.max(score, axis=1, keepdims=True))
score /= score.sum(axis=1, keepdims=True)
score = -np.log(score)
score = score.transpose()
idx = -1
for j, n in enumerate(names):
if n == line[3]:
idx = j
score += scores[j]
if idx == -1:
score[score > beam] = np.inf
else:
score[score - scores[idx] > beam] = np.inf
for i, name in enumerate(line[2]):
scores.append(score[i])
names.append(name)
scores = np.vstack(scores)
return names, scores.transpose()
p_names, p_scores = simulation_hsm_search()
idx = np.argsort(p_scores, axis=1)
p_scores = np.sort(p_scores, axis=1)
p_names = np.array(p_names)[idx]
for i in range(names.shape[0]):
for j in range(names.shape[1]):
if names[i][j]:
                    self.assertEqual(
names[i][j], p_names[i][j].item().encode('utf-8'))
self.assertAlmostEqual(
scores[i][j], p_scores[i][j], delta=0.001)
def test_hsm_run_once(self):
workspace.GlobalInit(['caffe2'])
workspace.FeedBlob("data",
np.random.randn(1000, 100).astype(np.float32))
workspace.FeedBlob("weights",
np.random.randn(1000, 100).astype(np.float32))
workspace.FeedBlob("bias", np.random.randn(1000).astype(np.float32))
workspace.FeedBlob("labels", np.random.rand(1000).astype(np.int32) * 9)
op = core.CreateOperator(
'HSoftmax',
['data', 'weights', 'bias', 'labels'],
['output', 'intermediate_output'],
'HSoftmax',
arg=[arg])
self.assertTrue(workspace.RunOperatorOnce(op))
    # Test to check the value of the sum of squared losses in the forward
    # pass for a given input
def test_hsm_forward(self):
cpu_device_option = caffe2_pb2.DeviceOption()
grad_checker = gradient_checker.GradientChecker(
0.01, 0.05, cpu_device_option, "default")
samples = 9
dim_in = 5
X = np.zeros((samples, dim_in)).astype(np.float32) + 1
w = np.zeros((hierarchy_proto.size, dim_in)).astype(np.float32) + 1
b = np.array([i for i in range(hierarchy_proto.size)])\
.astype(np.float32)
labels = np.array([i for i in range(samples)]).astype(np.int32)
workspace.GlobalInit(['caffe2'])
workspace.FeedBlob("data", X)
workspace.FeedBlob("weights", w)
workspace.FeedBlob("bias", b)
workspace.FeedBlob("labels", labels)
op = core.CreateOperator(
'HSoftmax',
['data', 'weights', 'bias', 'labels'],
['output', 'intermediate_output'],
'HSoftmax',
arg=[arg])
grad_ops, g_input = core.GradientRegistry.GetGradientForOp(
op, [s + '_grad' for s in op.output])
loss, _ = grad_checker.GetLossAndGrad(
op, grad_ops, X, op.input[0], g_input[0], [0]
)
self.assertAlmostEqual(loss, 44.269, delta=0.001)
    # Test to compare the gradient computed by the gradient operator with the
    # symmetric numerical derivative (central differences)
    # TODO: convert to both cpu and gpu test when ready.
@given(**hu.gcs_cpu_only)
def test_hsm_gradient(self, gc, dc):
samples = 10
dim_in = 5
X = np.random.rand(samples, dim_in).astype(np.float32) - 0.5
w = np.random.rand(hierarchy_proto.size, dim_in) \
.astype(np.float32) - 0.5
b = np.random.rand(hierarchy_proto.size).astype(np.float32) - 0.5
labels = np.array([np.random.randint(0, 8) for i in range(samples)]) \
.astype(np.int32)
workspace.GlobalInit(['caffe2'])
workspace.FeedBlob("data", X)
workspace.FeedBlob("weights", w)
workspace.FeedBlob("bias", b)
workspace.FeedBlob("labels", labels)
op = core.CreateOperator(
'HSoftmax',
['data', 'weights', 'bias', 'labels'],
['output', 'intermediate_output'],
'HSoftmax',
arg=[arg])
self.assertDeviceChecks(dc, op, [X, w, b, labels], [0])
for i in range(3):
self.assertGradientChecks(gc, op, [X, w, b, labels], i, [0])
def test_huffman_tree_hierarchy(self):
workspace.GlobalInit(['caffe2'])
labelSet = list(range(0, 6))
counts = [1, 2, 3, 4, 5, 6]
labels = sum([[l] * c for (l, c) in zip(labelSet, counts)], [])
Y = np.array(labels).astype(np.int64)
workspace.FeedBlob("labels", Y)
arg = caffe2_pb2.Argument()
arg.name = 'num_classes'
arg.i = 6
op = core.CreateOperator(
'HuffmanTreeHierarchy',
['labels'],
['huffman_tree'],
'HuffmanTreeHierarchy',
arg=[arg])
workspace.RunOperatorOnce(op)
huffmanTreeOutput = workspace.FetchBlob('huffman_tree')
treeOutput = hsm_pb2.TreeProto()
treeOutput.ParseFromString(huffmanTreeOutput[0])
treePathOutput = hsmu.create_hierarchy(treeOutput)
label_to_path = {}
for path in treePathOutput.paths:
label_to_path[path.word_id] = path
def checkPath(label, indices, code):
path = label_to_path[label]
            self.assertEqual(len(path.path_nodes), len(code))
for path_node, index, target in \
zip(path.path_nodes, indices, code):
self.assertEqual(path_node.index, index)
self.assertEqual(path_node.target, target)
checkPath(0, [0, 4, 6, 8], [1, 0, 0, 0])
checkPath(1, [0, 4, 6, 8], [1, 0, 0, 1])
checkPath(2, [0, 4, 6], [1, 0, 1])
checkPath(3, [0, 2], [0, 0])
checkPath(4, [0, 2], [0, 1])
checkPath(5, [0, 4], [1, 1])
if __name__ == '__main__':
unittest.main()
|