#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import torch
from crypten.config import cfg
from crypten.debug import configure_logging, pdb
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
class TestDebug(MultiProcessTestCase):
def setUp(self):
super().setUp()
# We don't want the main process (rank -1) to initialize the communicator
if self.rank >= 0:
crypten.init()
# Testing debug mode
cfg.debug.debug_mode = True
cfg.debug.validation_mode = True
def testLogging(self):
configure_logging()
def testPdb(self):
self.assertTrue(hasattr(pdb, "set_trace"))
def test_wrap_error_detection(self):
"""Force a wrap error and test whether it raises in debug mode."""
encrypted_tensor = crypten.cryptensor(0)
encrypted_tensor.share = torch.tensor(2**63 - 1)
with self.assertRaises(ValueError):
encrypted_tensor.div(2)
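    def _wrap_around_demo(self):
        # Sketch (not a test) of the wrap forced above: shares live in the
        # 64-bit integer ring, so a share at 2**63 - 1 wraps around on the
        # next increment. Plain-torch illustration, not a CrypTen API:
        share = torch.tensor(2**63 - 1, dtype=torch.int64)
        self.assertEqual((share + 1).item(), -(2**63))  # wraps modulo 2**64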
def test_correctness_validation(self):
for grad_enabled in [False, True]:
crypten.set_grad_enabled(grad_enabled)
tensor = get_random_test_tensor(size=(2, 2), is_float=True)
encrypted_tensor = crypten.cryptensor(tensor)
# Test properties (non-tensor outputs)
_ = encrypted_tensor.size()
# Ensure correct validation works properly
encrypted_tensor.add(1)
# Ensure incorrect validation works properly for size
encrypted_tensor.add = lambda y: crypten.cryptensor(0)
with self.assertRaises(ValueError):
encrypted_tensor.add(10)
# Ensure incorrect validation works properly for value
encrypted_tensor.add = lambda y: crypten.cryptensor(tensor)
with self.assertRaises(ValueError):
encrypted_tensor.add(10)
# Test matmul in validation mode
x = get_random_test_tensor(size=(3, 5), is_float=True)
y = get_random_test_tensor(size=(1, 3), is_float=True)
x = crypten.cryptensor(x)
y = crypten.cryptensor(y)
_ = y.matmul(x)
# ==== CrypTen-main | test/test_debug.py ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import logging
import crypten
import torch
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from crypten.nn.privacy import DPSplitModel, SkippedLoss
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
class TestMLP(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear1 = torch.nn.Linear(in_features, 100)
self.linear2 = torch.nn.Linear(100, 50)
self.linear3 = torch.nn.Linear(50, out_features)
def forward(self, x):
out = self.linear1(x).relu()
out = self.linear2(out).relu()
out = self.linear3(out)
return out
class TestMLPBN(torch.nn.Module):
def __init__(self, in_features, out_features):
super().__init__()
self.linear1 = torch.nn.Linear(in_features, 100)
self.linear2 = torch.nn.Linear(100, 50)
self.linear3 = torch.nn.Linear(50, out_features)
self.bn1 = torch.nn.BatchNorm1d(100)
self.bn2 = torch.nn.BatchNorm1d(50)
def forward(self, x):
out = self.linear1(x)
out = self.bn1(out).relu()
out = self.linear2(out)
out = self.bn2(out).relu()
out = self.linear3(out)
return out
# TODO: Add more model types
TEST_MODELS = [
# (model, input_size)
(torch.nn.Linear(4, 2), (3, 4)),
(torch.nn.Linear(100, 10), (150, 100)),
(torch.nn.Linear(30, 1), (50, 30)),
(torch.nn.Linear(1, 10), (30, 1)),
# TODO: Figure out what the conditions are for input sizes - pseudo-inverse loses information
# (torch.nn.Linear(1, 1), (5, 1)),
(TestMLP(100, 10), (20, 100)),
(TestMLPBN(100, 10), (20, 100)),
]
RR_PROBS = [None, 0.00001]
RAPPOR_PROBS = [None, 0.1, 0.4]
def RAPPOR_loss(alpha):
def rappor_loss(logits, targets):
p = logits.sigmoid()
r = alpha * p + (1 - alpha) * (1 - p)
return torch.nn.functional.binary_cross_entropy(r, targets)
return rappor_loss
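# Quick plain-torch sanity check (a sketch, not part of the original suite):
# with alpha = 1 the flipped probability r reduces to sigmoid(logits), so
# rappor_loss should agree with BCEWithLogitsLoss.
def _rappor_loss_sanity_check():
    torch.manual_seed(0)
    logits = torch.randn(8, 1)
    targets = torch.randint(0, 2, (8, 1)).float()
    reference = torch.nn.BCEWithLogitsLoss()(logits, targets)
    assert torch.allclose(RAPPOR_loss(1.0)(logits, targets), reference, atol=1e-5)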
class TestPrivacyModels(MultiProcessTestCase):
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
self.assertTrue(is_float_tensor(reference), "reference must be a float")
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
test_passed = test_passed.gt(0).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def _check_gradients_with_dp(self, model, dp_model, std, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.07)
grad = torch.cat([p.grad.flatten() for p in model.parameters()])
dp_grad = torch.cat([p.grad.flatten() for p in dp_model.parameters()])
if std == 0:
self.assertTrue(
torch.allclose(grad, dp_grad, rtol=tolerance, atol=tolerance * 0.2)
)
else:
errors = grad - dp_grad
sample_mean = errors.mean()
sample_std = errors.std()
self.assertTrue(sample_mean.item() < tolerance)
self.assertTrue(sample_std.sub(std).abs() < tolerance)
def test_dp_split_mpc(self):
# TODO: Vary Noise Magnitude
NOISE_MAGNITUDE = 0
FEATURE_SRC = 0
LABEL_SRC = 1
# TODO: Fix full_jacobian protocol
# PROTOCOLS = ["full_jacobian", "layer_estimation"]
PROTOCOLS = ["layer_estimation"]
# TODO: Run multiple batches
# TODO: ensure this works with other rr_prob values
for (
model_tuple,
protocol,
rr_prob,
rappor_prob,
skip_forward,
) in itertools.product(
TEST_MODELS, PROTOCOLS, RR_PROBS, RAPPOR_PROBS, [False, True]
):
logging.info(f"Model: {model_tuple}; Protocol: {protocol}")
cfg.nn.dpsmpc.protocol = protocol
cfg.nn.dpsmpc.skip_loss_forward = skip_forward
model, size = model_tuple
# TODO test multiclass using CrossEntropyLoss()
if rappor_prob is None:
loss_pt = torch.nn.BCEWithLogitsLoss()
else:
loss_pt = RAPPOR_loss(rappor_prob)
# Compute model gradients without DP
features = get_random_test_tensor(size=size, is_float=True)
features.requires_grad = True
# Get reference logits from plaintext model
logits = model(features)
# TODO: Write code to generate labels for CrossEntropyLoss
labels = get_random_test_tensor(2, 0, logits.size(), is_float=False)
labels = labels.float()
labels_enc = crypten.cryptensor(labels, src=LABEL_SRC)
# Compute reference loss
loss = loss_pt(logits, labels)
# Run reference backward pass
model.zero_grad()
loss.backward()
# Delete plaintext model and features and labels for parties without access
labels = None
if self.rank != FEATURE_SRC:
model = None
features = None
# Run split models
for noise_src in [None, 0, 1]:
# Copy model so gradients do not overwrite original model for comparison
model_ = copy.deepcopy(model)
dp_model = DPSplitModel(
model_,
FEATURE_SRC,
LABEL_SRC,
NOISE_MAGNITUDE,
noise_src=noise_src,
randomized_response_prob=rr_prob,
rappor_prob=rappor_prob,
)
dp_logits = dp_model(features)
# Check forward pass
if self.rank == FEATURE_SRC:
self.assertTrue(
dp_logits.eq(logits).all(), "model outputs do not match"
)
dp_model.compute_loss(labels_enc)
if skip_forward:
self.assertTrue(isinstance(dp_model.loss, SkippedLoss))
else:
# communicate loss from feature_src party since other parties will
# have different losses.
torch.distributed.broadcast(loss, src=FEATURE_SRC)
self._check(
dp_model.loss,
loss,
"DP-Model loss is incorrect",
tolerance=0.15,
)
# Test zero_grad()
dp_model.zero_grad()
for p in dp_model.parameters():
self.assertIsNone(p.grad)
# Test backward()
dp_model.backward()
if self.rank == FEATURE_SRC:
self._check_gradients_with_dp(model, dp_model, NOISE_MAGNITUDE)
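# Sketch (not a test) of the statistics checked in _check_gradients_with_dp:
# adding Gaussian noise with standard deviation `std` to gradients should
# leave per-coordinate errors with near-zero mean and a sample std close to
# `std`. Plain-torch illustration with an assumed sample size:
def _dp_noise_stats_demo(std=0.5, n=100000):
    torch.manual_seed(0)
    errors = torch.randn(n) * std  # stand-in for grad - dp_grad
    assert errors.mean().abs().item() < 0.01
    assert errors.std().sub(std).abs().item() < 0.01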
# ==== CrypTen-main | test/test_privacy_models.py ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import queue
import sys
import threading
import unittest
from functools import wraps
from threading import Thread
import crypten
class MultiThreadTestCase(unittest.TestCase):
MAIN_PROCESS_RANK = -1
@property
def rank(self):
from crypten.communicator import InProcessCommunicator
if threading.current_thread() == threading.main_thread():
return self.MAIN_PROCESS_RANK
return InProcessCommunicator.get().rank
@property
def world_size(self):
return 2
def __init__(self, methodName):
super().__init__(methodName)
@classmethod
def setUpClass(cls):
for attr in dir(cls):
if attr.startswith("test"):
fn = getattr(cls, attr)
setattr(cls, attr, cls.join_or_run(fn))
@staticmethod
def join_or_run(fn):
@wraps(fn)
def wrapper(self):
if threading.current_thread() == threading.main_thread():
self._join_threads()
else:
fn(self)
return wrapper
def _join_threads(self):
for t in self.threads:
t.join()
try:
exception_info = self.exception_queue.get_nowait()
except queue.Empty:
pass
else:
sys.excepthook(*exception_info)
raise RuntimeError(
"Exception found in one of the parties. Look at past logs."
)
def _current_test_name(self):
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
return self.id().split(".")[-1]
def setUp(self):
super().setUp()
if threading.current_thread() != threading.main_thread():
return
test_name = self._current_test_name()
test_fn = getattr(self, test_name)
self.exception_queue = queue.Queue()
self.threads = [
Thread(target=self._run, args=(test_fn, rank, self.world_size))
for rank in range(self.world_size)
]
for t in self.threads:
t.start()
def _run(self, test_fn, rank, world_size):
crypten.init_thread(rank, world_size)
self.setUp()
try:
test_fn()
except Exception:
self.exception_queue.put(sys.exc_info())
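# Minimal standalone sketch of the exception-propagation pattern used by
# _join_threads above: a worker pushes sys.exc_info() into a queue, and the
# main thread inspects it after joining (illustrative only):
def _thread_exception_demo():
    exception_queue = queue.Queue()

    def worker():
        try:
            raise ValueError("boom")
        except Exception:
            exception_queue.put(sys.exc_info())

    t = Thread(target=worker)
    t.start()
    t.join()
    assert not exception_queue.empty()  # the worker's exception was captured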
# ==== CrypTen-main | test/multithread_test_case.py ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import crypten
import crypten.communicator as comm
import numpy
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
from crypten.common import serial
from crypten.config import cfg
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
# TODO: Commenting this out until we figure out why `thread.join()` hangs
# Perhaps the thread to be joined has somehow exited
# from test.multithread_test_case import MultiThreadTestCase
INVALID_SERIALIZED_OBJECTS = [
b"cos\nsystem\n(S'echo hello world'\ntR.",
b'\x80\x03cbuiltins\neval\n(Vprint("I should not print")\ntRctorch._utils\n_rebuild_tensor_v2\n(ctorch.storage\n_load_from_bytes\nB\x01\x01\x00\x00\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9\x03.\x80\x02}q\x00(X\x10\x00\x00\x00protocol_versionq\x01M\xe9\x03X\r\x00\x00\x00little_endianq\x02\x88X\n\x00\x00\x00type_sizesq\x03}q\x04(X\x05\x00\x00\x00shortq\x05K\x02X\x03\x00\x00\x00intq\x06K\x04X\x04\x00\x00\x00longq\x07K\x04uu.\x80\x02(X\x07\x00\x00\x00storageq\x00ctorch\nFloatStorage\nq\x01X\x0f\x00\x00\x00140436995850160q\x02X\x03\x00\x00\x00cpuq\x03K\x01Ntq\x04Q.\x80\x02]q\x00X\x0f\x00\x00\x00140436995850160q\x01a.\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80?\x85RK\x00K\x01\x85K\x01\x85\x89ccollections\nOrderedDict\n)RtR.',
b'\x80\x04\x8c\x12torch.__builtins__\x94\x8c\x03get\x94\x93\x8c\x04eval\x94\x85\x94R\x8c\x1bprint("I should not print")\x94\x85\x94R.',
b"\x80\x04\x8c$torch.nn.modules._functions.torch.os\x94\x8c\x05execl\x94\x93\x8c\x0c/usr/bin/vim\x94\x8c\x0c/usr/bin/vim\x94\x86\x94R.",
b"\x80\x04\x8c\rtorch.storage\x94\x8c\x10_load_from_bytes\x94\x93C2\x80\x04\x8c\x02os\x94\x8c\x05execl\x94\x93\x8c\x0c/usr/bin/vim\x94\x8c\x0c/usr/bin/vim\x94\x86\x94R.\x94\x85\x94R.",
]
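# The byte strings above execute code under a naive `pickle.loads`. A minimal
# sketch of the allowlist idea behind a restricted loader (an assumption about
# the approach, not the actual crypten.common.serial implementation),
# following the pattern in the Python pickle documentation:
import io
import pickle

class _RestrictedUnpickler(pickle.Unpickler):
    _SAFE_CLASSES = {("collections", "OrderedDict")}  # hypothetical allowlist

    def find_class(self, module, name):
        if (module, name) in self._SAFE_CLASSES:
            return super().find_class(module, name)
        raise ValueError(f"Unpickling {module}.{name} is forbidden")

def _restricted_loads(data):
    # e.g. raises ValueError on INVALID_SERIALIZED_OBJECTS[0] (os.system call)
    return _RestrictedUnpickler(io.BytesIO(data)).load()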
class TestCommunicator:
"""
    This class tests all member functions of the crypten communicator
"""
def test_send_recv(self):
tensor = torch.tensor([self.rank], dtype=torch.long)
# Send forward, receive backward
dst = (self.rank + 1) % self.world_size
src = (self.rank - 1) % self.world_size
if self.rank == 0:
comm.get().send(tensor, dst=dst)
result = comm.get().recv(tensor, src=src)
if self.rank > 0:
comm.get().send(tensor, dst=dst)
self.assertTrue(torch.is_tensor(result))
self.assertEqual(result.item(), src)
def test_scatter(self):
for rank in range(self.world_size):
tensor = []
if self.rank == rank:
tensor = [torch.tensor(i) for i in range(self.world_size)]
result = comm.get().scatter(tensor, rank, size=())
self.assertTrue(torch.is_tensor(result))
self.assertEqual(result.item(), self.rank)
def test_reduce(self):
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
for rank in range(self.world_size):
for size in sizes:
tensor = get_random_test_tensor(size=size)
result = comm.get().reduce(tensor, rank)
if rank == self.rank:
self.assertTrue((result == (tensor * self.world_size)).all())
# NOTE: torch.distributed has undefined behavior for non-dst rank
# else:
# self.assertTrue((result == tensor).all())
def test_all_reduce(self):
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
for size in sizes:
tensor = get_random_test_tensor(size=size)
result = comm.get().all_reduce(tensor)
self.assertTrue((result == (tensor * self.world_size)).all())
def test_gather(self):
tensor = torch.tensor([self.rank])
for rank in range(self.world_size):
result = comm.get().gather(tensor, rank)
if rank == self.rank:
self.assertEqual(result, [torch.tensor([0]), torch.tensor([1])])
else:
self.assertIsNone(result[0])
def test_gather_random(self):
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5), (1000,)]
for rank in range(self.world_size):
for size in sizes:
tensor = get_random_test_tensor(size=size)
result = comm.get().gather(tensor, rank)
if rank == self.rank:
self.assertTrue(isinstance(result, list))
for res in result:
self.assertTrue((res == tensor).all())
else:
self.assertIsNone(result[0])
def test_all_gather(self):
tensor = torch.tensor([self.rank])
result = comm.get().all_gather(tensor)
self.assertEqual(
result, [torch.tensor([rank]) for rank in range(self.world_size)]
)
def test_mutation(self):
for _ in range(10):
tensor = torch.tensor([self.rank])
result = comm.get().all_gather(tensor)
# Mutate the tensor, which should have no effect since the gather
# has finished. If we don't clone the tensor though, this might
# mutate one of the tensors received by the other party.
tensor += 1
self.assertEqual(
result, [torch.tensor([rank]) for rank in range(self.world_size)]
)
def test_all_gather_random(self):
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
for size in sizes:
tensor = get_random_test_tensor(size=size)
result = comm.get().all_gather(tensor)
self.assertTrue(isinstance(result, list))
for res in result:
self.assertTrue((res == tensor).all())
def test_broadcast(self):
for rank in range(self.world_size):
tensor = torch.tensor([0], dtype=torch.long)
if self.rank == rank:
tensor += 1
tensor = comm.get().broadcast(tensor, rank)
self.assertTrue(torch.is_tensor(tensor))
self.assertEqual(tensor.item(), 1)
def test_get_world_size(self):
self.assertEqual(comm.get().get_world_size(), self.world_size)
def test_get_rank(self):
self.assertEqual(comm.get().get_rank(), self.rank)
def test_batched_all_reduce(self):
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
tensors = [get_random_test_tensor(size=size) for size in sizes]
results = comm.get().all_reduce(tensors, batched=True)
self.assertTrue(isinstance(results, list))
for i, result in enumerate(results):
self.assertTrue((result == (tensors[i] * self.world_size)).all())
def test_batched_reduce(self):
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
for rank in range(self.world_size):
tensors = [get_random_test_tensor(size=size) for size in sizes]
results = comm.get().reduce(tensors, rank, batched=True)
if rank == self.rank:
self.assertTrue(isinstance(results, list))
for i, result in enumerate(results):
self.assertTrue((result == (tensors[i] * self.world_size)).all())
# NOTE: torch.distributed has undefined behavior for non-dst rank
# else:
# self.assertTrue((result == tensor).all())
def test_batched_broadcast(self):
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
for rank in range(self.world_size):
if self.rank == rank:
tensors = [torch.ones(size) for size in sizes]
else:
tensors = [torch.zeros(size) for size in sizes]
tensors = comm.get().broadcast(tensors, rank, batched=True)
self.assertTrue(isinstance(tensors, list))
for tensor in tensors:
self.assertTrue(torch.is_tensor(tensor))
self.assertTrue(tensor.eq(1).all())
def test_send_recv_obj(self):
TEST_OBJECTS = [
{"a": 1, "b": 2, "c": 3},
torch.tensor(1),
torch.nn.Linear(10, 5),
CNN(),
]
for param in TEST_OBJECTS[2].parameters():
param.data.fill_(1.0)
for param in TEST_OBJECTS[3].parameters():
param.data.fill_(1.0)
serial.register_safe_class(CNN)
for reference in TEST_OBJECTS:
for src in range(self.world_size):
if self.rank == src:
test_obj = reference
comm.get().send_obj(test_obj, 1 - self.rank)
else:
test_obj = comm.get().recv_obj(1 - self.rank)
if isinstance(reference, torch.nn.Module):
test_obj_params = list(test_obj.parameters())
reference_params = list(reference.parameters())
for i, param in enumerate(reference_params):
self.assertTrue(
                        test_obj_params[i].eq(param).all(), "recv_obj failed"
)
else:
                    self.assertEqual(test_obj, reference, "recv_obj failed")
# Test that the restricted loader will raise an error for code injection
for invalid_obj in INVALID_SERIALIZED_OBJECTS:
for src in range(self.world_size):
if self.rank == src:
# Mimic send_obj without pickling invalid bytestream
size = torch.tensor(len(invalid_obj), dtype=torch.int32)
arr = torch.from_numpy(
numpy.frombuffer(invalid_obj, dtype=numpy.int8)
)
r0 = dist.isend(
size, dst=(1 - self.rank), group=comm.get().main_group
)
r1 = dist.isend(
arr, dst=(1 - self.rank), group=comm.get().main_group
)
r0.wait()
r1.wait()
else:
with self.assertRaises(ValueError):
comm.get().recv_obj(1 - self.rank)
def test_broadcast_obj(self):
TEST_OBJECTS = [
{"a": 1, "b": 2, "c": 3},
torch.tensor(1),
torch.nn.Linear(10, 5),
CNN(),
]
for param in TEST_OBJECTS[2].parameters():
param.data.fill_(1.0)
for param in TEST_OBJECTS[3].parameters():
param.data.fill_(1.0)
serial.register_safe_class(CNN)
for reference in TEST_OBJECTS:
for src in range(self.world_size):
test_obj = reference if self.rank == src else None
test_obj = comm.get().broadcast_obj(test_obj, src)
if isinstance(reference, torch.nn.Module):
test_obj_params = list(test_obj.parameters())
reference_params = list(reference.parameters())
for i, param in enumerate(reference_params):
self.assertTrue(
test_obj_params[i].eq(param).all(), "broadcast_obj failed"
)
else:
self.assertEqual(test_obj, reference, "broadcast_obj failed")
# Test that the restricted loader will raise an error for code injection
for invalid_obj in INVALID_SERIALIZED_OBJECTS:
for src in range(self.world_size):
if self.rank == src:
# Mimic broadcast_obj without pickling invalid bytestream
size = torch.tensor(len(invalid_obj), dtype=torch.int32)
arr = torch.from_numpy(
numpy.frombuffer(invalid_obj, dtype=numpy.int8)
)
dist.broadcast(size, src, group=comm.get().main_group)
dist.broadcast(arr, src, group=comm.get().main_group)
else:
with self.assertRaises(ValueError):
test_obj = None
comm.get().broadcast_obj(test_obj, src)
@unittest.skip("Skipping for now as it keeps timing out") # FIXME
def test_name(self):
# Test default name is correct
self.assertEqual(comm.get().get_name(), f"rank{comm.get().get_rank()}")
# Test name set / get
comm.get().set_name(f"{comm.get().get_rank()}")
self.assertEqual(comm.get().get_name(), f"{comm.get().get_rank()}")
# Test initialization using crypten.init()
name = f"init_{comm.get().get_rank()}"
crypten.uninit()
crypten.init(party_name=name)
self.assertEqual(comm.get().get_name(), f"init_{comm.get().get_rank()}")
# Test failure on bad input
for improper_input in [0, None, ["name"], ("name",)]:
with self.assertRaises(AssertionError):
comm.get().set_name(improper_input)
# TODO: Commenting this out until we figure out why `thread.join()` hangs
# Perhaps the thread to be joined has somehow exited
# class TestCommunicatorMultiThread(TestCommunicator, MultiThreadTestCase):
# pass
class TestCommunicatorMultiProcess(TestCommunicator, MultiProcessTestCase):
def test_logging(self):
# Assert initialization resets comm.get() stats
self.assertEqual(comm.get().comm_rounds, 0)
self.assertEqual(comm.get().comm_bytes, 0)
# Test verbosity True setting and logging
cfg.communicator.verbose = True
sizes = [(), (1,), (5,), (5, 5), (5, 5, 5)]
# Test send / recv:
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=False)
crypten.reset_communication_stats()
# Send forward, receive backward
dst = (self.rank + 1) % self.world_size
src = (self.rank - 1) % self.world_size
if self.rank == 0:
comm.get().send(tensor, dst=dst)
tensor = comm.get().recv(tensor, src=src)
if self.rank > 0:
comm.get().send(tensor, dst=dst)
self.assertEqual(comm.get().comm_rounds, 2)
self.assertEqual(comm.get().comm_bytes, tensor.numel() * 8 * 2)
# Test all other ops:
ops = ["all_reduce", "all_gather", "broadcast", "gather", "reduce", "scatter"]
for size in sizes:
for op in ops:
tensor = get_random_test_tensor(size=size, is_float=False)
nbytes = tensor.numel() * 8
crypten.reset_communication_stats()
# Setup op-specific kwargs / inputs
args = ()
if op in ["gather", "reduce"]:
args = (0,) # dst arg
if op == "broadcast":
args = (0,) # dst arg
if op == "scatter":
tensor = [tensor] * self.world_size
args = (0,) # src arg
tensor = getattr(comm.get(), op)(tensor, *args)
self.assertEqual(comm.get().comm_rounds, 1)
if op in ["all_reduce", "all_gather"]:
reference = 2 * nbytes * (self.world_size - 1)
else:
reference = nbytes * (self.world_size - 1)
self.assertEqual(comm.get().comm_bytes, reference)
# Test reset_communication_stats
crypten.reset_communication_stats()
self.assertEqual(comm.get().comm_rounds, 0)
self.assertEqual(comm.get().comm_bytes, 0)
# test retrieving communication stats:
stats = comm.get().get_communication_stats()
self.assertIsInstance(stats, dict)
for key in ["rounds", "bytes", "time"]:
self.assertIn(key, stats)
self.assertEqual(stats[key], 0)
# Test verbosity False setting and no logging
cfg.communicator.verbose = False
tensor = get_random_test_tensor(size=size, is_float=False)
tensor = comm.get().broadcast(tensor, 0)
self.assertEqual(comm.get().comm_rounds, 0)
self.assertEqual(comm.get().comm_bytes, 0)
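# Sketch of the byte accounting asserted in test_logging above: every element
# travels as an 8-byte int64, counted from this party's perspective. These
# formulas are inferred from the assertions (an assumption, not an official
# CrypTen API):
def _expected_comm_bytes(op, numel, world_size):
    nbytes = numel * 8
    if op in ("all_reduce", "all_gather"):
        return 2 * nbytes * (world_size - 1)
    return nbytes * (world_size - 1)
# e.g. _expected_comm_bytes("broadcast", 25, 2) == 200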
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=1)
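        # Assuming a 28x28 input: conv(k=5, p=1) -> 26x26, max_pool2d(2) -> 13x13,
        # which explains the 16 * 13 * 13 features expected by fc1 below.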
self.fc1 = nn.Linear(16 * 13 * 13, 100)
self.fc2 = nn.Linear(100, 2)
def forward(self, x):
out = self.conv1(x)
out = F.relu(out)
out = F.max_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
out = F.softmax(out, dim=1)
return out
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
# ==== CrypTen-main | test/test_communicator.py ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .multiprocess_test_case import MultiProcessTestCase
from .pytorch_backend import PyTorchTensor
# expose classes and functions in package:
__all__ = ["MultiProcessTestCase", "PyTorchTensor"]
# ==== CrypTen-main | test/__init__.py ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import crypten
import torch
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
class TestOptim:
"""
This class tests the crypten.optim package.
"""
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
if is_float_tensor(reference):
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.2)
test_passed = test_passed.gt(0).all().item() == 1
else:
test_passed = (tensor == reference).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result: %s" % tensor)
logging.info("Reference: %s" % reference)
self.assertTrue(test_passed, msg=msg)
def test_sgd(self):
lr_vals = [0.01, 0.1, 0.5]
momentum_vals = [0.0, 0.1, 0.9]
dampening_vals = [0.0, 0.01, 0.1]
weight_decay_vals = [0.0, 0.9, 1.0]
nesterov_vals = [False, True]
torch_model = torch.nn.Linear(10, 2)
torch_model.weight = torch.nn.Parameter(
get_random_test_tensor(size=torch_model.weight.size(), is_float=True)
)
torch_model.bias = torch.nn.Parameter(
get_random_test_tensor(size=torch_model.bias.size(), is_float=True)
)
crypten_model = crypten.nn.Linear(10, 2)
crypten_model.set_parameter("weight", torch_model.weight)
crypten_model.set_parameter("bias", torch_model.bias)
crypten_model.encrypt()
for lr, momentum, dampening, weight_decay, nesterov in itertools.product(
lr_vals, momentum_vals, dampening_vals, weight_decay_vals, nesterov_vals
):
kwargs = {
"lr": lr,
"momentum": momentum,
"weight_decay": weight_decay,
"dampening": dampening,
"nesterov": nesterov,
}
if nesterov and (momentum <= 0 or dampening != 0):
with self.assertRaises(ValueError):
crypten.optim.SGD(crypten_model.parameters(), **kwargs)
continue
torch_optimizer = torch.optim.SGD(torch_model.parameters(), **kwargs)
crypten_optimizer = crypten.optim.SGD(crypten_model.parameters(), **kwargs)
x = get_random_test_tensor(size=(10,), is_float=True)
y = torch_model(x).sum()
y.backward()
xx = crypten.cryptensor(x)
yy = crypten_model(xx).sum()
yy.backward()
torch_optimizer.step()
crypten_optimizer.step()
torch_params = list(torch_model.parameters())
crypten_params = list(crypten_model.parameters())
for i in range(len(torch_params)):
self._check(
crypten_params[i], torch_params[i], "Parameter update mismatch"
)
torch_optimizer.zero_grad()
crypten_optimizer.zero_grad()
for i in range(len(crypten_params)):
self.assertIsNone(crypten_params[i].grad, "Optimizer zero_grad failed")
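# The ValueError branch in test_sgd above mirrors torch.optim.SGD's own
# constraint: Nesterov momentum requires momentum > 0 and zero dampening.
# A small plain-torch illustration (a sketch, independent of CrypTen):
def _nesterov_constraint_demo():
    params = [torch.nn.Parameter(torch.zeros(1))]
    try:
        torch.optim.SGD(params, lr=0.1, momentum=0.0, nesterov=True)
    except ValueError:
        return True  # expected: invalid Nesterov configuration is rejected
    return False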
class TestTFP(MultiProcessTestCase, TestOptim):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestOptim):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTTP, self).tearDown()
# ==== CrypTen-main | test/test_optim.py ====
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import logging
import unittest
import crypten
import crypten.communicator as comm
import torch
import torch.nn.functional as F
from crypten.common.rng import generate_random_ring_element
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from crypten.encoder import FixedPointEncoder
from test.multiprocess_test_case import (
get_random_linear,
get_random_test_tensor,
MultiProcessTestCase,
onehot,
)
def linear_to_crypten(pytorch_linear):
"""
Converts torch.nn.Linear module into crypten.nn.Linear module.
"""
assert isinstance(pytorch_linear, torch.nn.Linear)
out_channels, in_channels = pytorch_linear.weight.size()
crypten_linear = crypten.nn.Linear(in_channels, out_channels)
crypten_linear.set_parameter("weight", pytorch_linear.weight)
crypten_linear.set_parameter("bias", pytorch_linear.bias)
return crypten_linear
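# Usage sketch for linear_to_crypten (assumes an initialized CrypTen session,
# e.g. after crypten.init(); illustrative only, not part of the test suite):
def _linear_to_crypten_demo():
    torch_linear = torch.nn.Linear(4, 2)
    encr_linear = linear_to_crypten(torch_linear).encrypt()
    x = torch.randn(3, 4)
    out = encr_linear(crypten.cryptensor(x)).get_plain_text()
    assert torch.allclose(out, torch_linear(x), atol=1e-3)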
class TestNN:
"""
This class tests the crypten.nn package.
"""
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
if is_float_tensor(reference):
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.2)
test_passed = test_passed.gt(0).all().item() == 1
else:
test_passed = (tensor == reference).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def _compute_reference_parameters(self, init_name, reference, model, learning_rate):
for name, param in model.named_parameters(recurse=False):
local_name = init_name + "_" + name
reference[local_name] = (
param.get_plain_text() - learning_rate * param.grad.get_plain_text()
)
for name, module in model.named_children():
local_name = init_name + "_" + name
reference = self._compute_reference_parameters(
local_name, reference, module, learning_rate
)
return reference
def _check_reference_parameters(self, init_name, reference, model):
for name, param in model.named_parameters(recurse=False):
local_name = init_name + "_" + name
self._check(param, reference[local_name], "parameter update failed")
for name, module in model.named_children():
local_name = init_name + "_" + name
self._check_reference_parameters(local_name, reference, module)
def setUp(self):
super().setUp()
# We don't want the main process (rank -1) to initialize the communicator
if self.rank >= 0:
crypten.init()
def test_from_shares(self):
"""Tests crypten.nn.Module.set_parameter_from_shares() functionality."""
# create simple model:
input_size, output_size = 3, 10
model = crypten.nn.Linear(input_size, output_size)
# helper function that creates arithmetically shared tensor of some size:
def _generate_parameters(size):
num_parties = int(self.world_size)
reference = get_random_test_tensor(size=size, is_float=False)
zero_shares = generate_random_ring_element((num_parties, *size))
zero_shares = zero_shares - zero_shares.roll(1, dims=0)
shares = list(zero_shares.unbind(0))
shares[0] += reference
return shares, reference
# generate new set of parameters:
all_shares, all_references = {}, {}
for name, param in model.named_parameters():
shares, reference = _generate_parameters(param.size())
share = comm.get().scatter(shares, 0)
all_shares[name] = share
all_references[name] = reference
# cannot load parameters from share when model is not encrypted:
with self.assertRaises(AssertionError):
for name, share in all_shares.items():
model.set_parameter_from_shares(name, share)
# cannot load shares into non-existent parameters:
model.encrypt()
with self.assertRaises(ValueError):
model.set_parameter_from_shares("__DUMMY__", None)
# load parameter shares into model and check results:
for name, share in all_shares.items():
model.set_parameter_from_shares(name, share)
model.decrypt()
encoder = FixedPointEncoder()
for name, param in model.named_parameters():
reference = encoder.decode(all_references[name])
self.assertTrue(torch.allclose(param, reference))
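    def _zero_share_demo(self):
        # Sketch (not a test) of the zero-sharing trick in _generate_parameters
        # above: subtracting a rolled copy makes the shares sum to zero across
        # the party dimension, so adding the secret to share 0 yields a valid
        # additive sharing. Plain-torch check with illustrative sizes:
        shares = torch.randint(-(2**31), 2**31, (3, 4, 4))
        zero_shares = shares - shares.roll(1, dims=0)
        self.assertTrue(zero_shares.sum(dim=0).eq(0).all())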
def test_global_avg_pool_module(self):
"""
Tests the global average pool module with fixed 4-d test tensors
"""
# construct basic input
base_tensor = torch.tensor([[2, 1], [3, 0]])
all_init = []
for i in range(-2, 3):
all_init.append(torch.add(base_tensor, i))
init_tensor = torch.stack(all_init, dim=2)
init_tensor = init_tensor.unsqueeze(-1)
reference = base_tensor.unsqueeze(-1).unsqueeze(-1)
# create module
encr_module = crypten.nn.GlobalAveragePool().encrypt()
self.assertTrue(encr_module.encrypted, "module not encrypted")
# check correctness for a variety of input sizes
for i in range(1, 10):
input = init_tensor.repeat(1, 1, i, i)
encr_input = crypten.cryptensor(input)
encr_output = encr_module(encr_input)
self._check(encr_output, reference, "GlobalAveragePool failed")
@unittest.skip("ONNX converter for Dropout is broken.") # FIXME
def test_dropout_module(self):
"""Tests the dropout module"""
input_size = [3, 3, 3]
prob_list = [0.2 * x for x in range(1, 5)]
module_name = "Dropout"
for prob in prob_list:
for compute_gradients in [True, False]:
# generate inputs:
input = get_random_test_tensor(
size=input_size, is_float=True, ex_zero=True
)
input.requires_grad = True
encr_input = crypten.cryptensor(input)
encr_input.requires_grad = compute_gradients
# create PyTorch module:
module = getattr(torch.nn, module_name)(prob)
module.train()
# create encrypted CrypTen module:
encr_module = crypten.nn.from_pytorch(module, input)
# check that module properly encrypts / decrypts and
# check that encrypting with current mode properly
# performs no-op
for encrypted in [False, True, True, False, True]:
encr_module.encrypt(mode=encrypted)
if encrypted:
self.assertTrue(encr_module.encrypted, "module not encrypted")
else:
self.assertFalse(encr_module.encrypted, "module encrypted")
# compare model outputs:
# compare the zero and non-zero entries of the encrypted tensor
# with a directly constructed plaintext tensor, since we cannot
# ensure that the randomization produces the same output
# for both encrypted and plaintext tensors
self.assertTrue(encr_module.training, "training value incorrect")
encr_output = encr_module(encr_input)
plaintext_output = encr_output.get_plain_text()
scaled_tensor = input / (1 - prob)
reference = plaintext_output.where(plaintext_output == 0, scaled_tensor)
self._check(encr_output, reference, "Dropout forward failed")
# check backward
# compare the zero and non-zero entries of the grad in
# the encrypted tensor with a directly constructed plaintext
# tensor: we do this because we cannot ensure that the
# randomization produces the same output for the input
# encrypted and plaintext tensors and so we cannot ensure
# that the grad in the input tensor is populated identically
all_ones = torch.ones(reference.size())
ref_grad = plaintext_output.where(plaintext_output == 0, all_ones)
ref_grad_input = ref_grad / (1 - prob)
encr_output.sum().backward()
if compute_gradients:
self._check(
encr_input.grad,
ref_grad_input,
"dropout backward on input failed",
)
# check testing mode for Dropout module
encr_module.train(mode=False)
encr_output = encr_module(encr_input)
result = encr_input.eq(encr_output)
result_plaintext = result.get_plain_text().bool()
self.assertTrue(result_plaintext.all(), "dropout failed in test mode")
def test_non_pytorch_modules(self):
"""
Tests all non-container Modules in crypten.nn that do not have
equivalent modules in PyTorch.
"""
# input arguments for modules and input sizes:
no_input_modules = ["Constant", "ConstantOfShape", "Range"]
binary_modules = [
"Add",
"Concat",
"Div",
"Equal",
"MatMul",
"Mul",
"Sub",
]
ex_zero_modules = []
module_args = {
"Add": (),
"Concat": (0,),
"Constant": (1.2,),
"ConstantOfShape": (1.4,),
"Div": (),
"Erf": (),
"Equal": (),
"Exp": (),
"Expand": (),
"Gather": (0,),
"Gemm": (1.0, 1.0),
"MatMul": (),
"Mean": ([0], True),
"Mul": (),
"Pow": (),
"Range": (),
"Reshape": ((2, 2),),
"Shape": (),
"Slice": ([1], [4]),
"Sqrt": (),
"Sub": (),
"Sum": ([0], True),
"Squeeze": (0,),
"Transpose": ([1, 3, 0, 2],),
"Unsqueeze": (0,),
"Where": (),
}
module_lambdas = {
"Add": lambda x: x[0] + x[1],
"Concat": lambda x: torch.cat((x[0], x[1])),
"Constant": lambda _: torch.tensor(module_args["Constant"][0]),
"ConstantOfShape": lambda x: torch.tensor(
module_args["ConstantOfShape"][0]
).expand(x[0]),
"Div": lambda x: torch.div(x[0], x[1]),
"Erf": lambda x: torch.erf(x),
"Equal": lambda x: x[0].eq(x[1]),
"Exp": lambda x: torch.exp(x),
"Expand": lambda x: x[0].expand(x[1]),
"Gather": lambda x: torch.from_numpy(
x[0].numpy().take(x[1], module_args["Gather"][0])
),
"Gemm": lambda x: x[0].matmul(x[1]).add(x[2]),
"MatMul": lambda x: torch.matmul(x[0], x[1]),
"Mean": lambda x: torch.mean(
x, dim=module_args["Mean"][0], keepdim=(module_args["Mean"][1] == 1)
),
"Mul": lambda x: x[0].mul(x[1]),
"Pow": lambda x: x[0].pow(x[1]),
"Range": lambda x: torch.arange(x[0], x[1], x[2]),
"Reshape": lambda x: x[0].reshape(module_args["Reshape"][0]),
"Shape": lambda x: torch.tensor(x.size()).float(),
"Slice": lambda x: x[
module_args["Slice"][0][0] : module_args["Slice"][1][0], :
],
"Sqrt": lambda x: x.sqrt(),
"Sub": lambda x: x[0] - x[1],
"Sum": lambda x: torch.sum(
x, dim=module_args["Sum"][0], keepdim=(module_args["Sum"][1] == 1)
),
"Squeeze": lambda x: x.squeeze(module_args["Squeeze"][0]),
"Transpose": lambda x: torch.from_numpy(
x.numpy().transpose(module_args["Transpose"][0])
),
"Unsqueeze": lambda x: x.unsqueeze(module_args["Unsqueeze"][0]),
"Where": lambda x: torch.where(x[0].byte(), x[1], x[2]),
}
additional_inputs = {
"ConstantOfShape": ([2, 4],),
"Expand": ([2, 4],),
"Gather": (torch.tensor([[1, 2], [0, 3]]),),
"Pow": (2,),
"Range": (1, 6, 2),
}
input_sizes = {
"Add": (10, 12),
"Concat": (2, 2),
"Constant": (1,),
"Div": (3, 4),
"Erf": (1, 2),
"Equal": (2, 5, 3),
"Exp": (10, 10, 10),
"Expand": (1, 1),
"Gather": (4, 4, 4, 4),
"Gemm": (3, 4, 4),
"MatMul": (2, 4, 4),
"Mul": (4, 3, 2),
"Mean": (3, 3, 3),
"Pow": (4, 2),
"Reshape": (1, 4),
"Shape": (8, 3, 2),
"Slice": (5, 2),
"Sqrt": (2, 3),
"Sub": (10, 12),
"Sum": (3, 3, 3),
"Squeeze": (1, 12, 6),
"Transpose": (1, 2, 3, 4),
"Unsqueeze": (8, 3),
"Where": (3, 4, 2),
}
module_attributes = {
# each attribute has two parameters: the name, and a bool indicating
# whether the value should be wrapped into a list when the module is created
"Concat": [("axis", False)],
"Constant": [("value", False)],
"ConstantOfShape": [("value", False)],
"Gather": [("axis", False)],
"Gemm": [("alpha", False), ("beta", False)],
"Mean": [("axes", False), ("keepdims", False)],
"Slice": [("starts", False), ("ends", False)],
"Sum": [("axes", False), ("keepdims", False)],
"Squeeze": [("axes", True)],
"Transpose": [("perm", False)],
"Unsqueeze": [("axes", True)],
}
# loop over all modules:
for module_name in module_args.keys():
# create encrypted CrypTen module:
encr_module = getattr(crypten.nn, module_name)(*module_args[module_name])
encr_module.encrypt()
self.assertTrue(encr_module.encrypted, "module not encrypted")
# generate inputs:
inputs, encr_inputs = None, None
ex_zero_values = module_name in ex_zero_modules
if module_name in binary_modules:
inputs = [
get_random_test_tensor(
size=input_sizes[module_name],
is_float=True,
ex_zero=ex_zero_values,
max_value=1.0,
)
for _ in range(2)
]
encr_inputs = [crypten.cryptensor(input) for input in inputs]
elif module_name not in no_input_modules:
inputs = get_random_test_tensor(
size=input_sizes[module_name],
is_float=True,
ex_zero=ex_zero_values,
max_value=1.0,
)
if module_name == "Where": # Where condition is binary input
inputs = (inputs > 0.5).float()
if module_name == "Sqrt": # Sqrt requires positive input
inputs = inputs.abs()
encr_inputs = crypten.cryptensor(inputs)
# some modules take additional inputs:
if module_name in additional_inputs:
# base inputs:
if inputs is None:
inputs, encr_inputs = [], []
elif not isinstance(inputs, (list, tuple)):
inputs, encr_inputs = [inputs], [encr_inputs]
# add additional inputs:
for add_inp in additional_inputs[module_name]:
inputs.append(add_inp)
# encrypt only torch tensor inputs, not shapes or indices:
if torch.is_tensor(add_inp):
encr_inputs.append(crypten.cryptensor(add_inp))
else:
encr_inputs.append(add_inp)
# some modules cannot work with encrypted inputs:
if module_name in ["Gather"]:
with self.assertRaises(ValueError):
encr_output = encr_module(encr_inputs)
# but they can work using unencrypted indices:
encr_inputs[1] = additional_inputs[module_name][0]
# compare model outputs:
reference = module_lambdas[module_name](inputs)
encr_output = encr_module(encr_inputs)
if torch.is_tensor(encr_output):
self.assertTrue(
encr_module.SUPPORTS_PLAINTEXT_INPUTS,
msg=f"{module_name} has incorrect SUPPORTS_PLAINTEXT_INPUTS value",
)
encr_output = crypten.cryptensor(encr_output)
self._check(encr_output, reference, "%s failed" % module_name)
# create attributes for static from_onnx function
local_attr = {}
for i, attr_tuple in enumerate(module_attributes.get(module_name, [])):
attr_name, wrap_attr_in_list = attr_tuple
if wrap_attr_in_list:
local_attr[attr_name] = [module_args[module_name][i]]
else:
local_attr[attr_name] = module_args[module_name][i]
# Update ReduceSum/ReduceMean module attributes, since the module and
# from_onnx path are different
if module_name == "ReduceSum":
local_attr["keepdims"] = 1 if module_args["ReduceSum"][1] is True else 0
if module_name == "ReduceMean":
local_attr["keepdims"] = (
1 if module_args["ReduceMean"][1] is True else 0
)
if module_name == "Reshape":
local_attr["shape"] = module_args["Reshape"][0]
# compare model outputs using the from_onnx static function
module = getattr(crypten.nn, module_name).from_onnx(attributes=local_attr)
encr_module_onnx = module.encrypt()
encr_output = encr_module_onnx(encr_inputs)
if torch.is_tensor(encr_output):
self.assertTrue(
encr_module_onnx.SUPPORTS_PLAINTEXT_INPUTS,
msg=f"{module_name} has incorrect SUPPORTS_PLAINTEXT_INPUTS value",
)
encr_output = crypten.cryptensor(encr_output)
self._check(encr_output, reference, "%s failed" % module_name)
def test_pytorch_modules(self):
"""
Tests all non-container Modules in crypten.nn that have equivalent
modules in PyTorch.
"""
# input arguments for modules and input sizes:
module_args = {
"AdaptiveAvgPool2d": ((8, 8),),
"AdaptiveMaxPool2d": ((2, 2),),
"AvgPool2d": (2,),
"BatchNorm1d": (25,),
"BatchNorm2d": (3,),
"BatchNorm3d": (6,),
# "ConstantPad1d": (3, 1.0),
# "ConstantPad2d": (2, 2.0),
# "ConstantPad3d": (1, 0.0), # TODO: Support negative steps in Slice.
"Conv1d": (3, 6, 5),
"Conv2d": (3, 6, 5),
"Hardtanh": (-3, 1),
"Linear": (400, 120),
"MaxPool2d": (2,),
"ReLU": (),
"ReLU6": (),
"Sigmoid": (),
"Softmax": (0,),
"LogSoftmax": (0,),
}
input_sizes = {
"AdaptiveAvgPool2d": (1, 2, 24, 24),
"AdaptiveMaxPool2d": (1, 3, 8, 8),
"AvgPool2d": (1, 3, 32, 32),
"BatchNorm1d": (8, 25),
"BatchNorm2d": (8, 3, 7, 9),
"BatchNorm3d": (8, 6, 3, 4, 2),
"ConstantPad1d": (9,),
"ConstantPad2d": (3, 6),
"ConstantPad3d": (4, 2, 7),
"Conv1d": (1, 3, 32),
"Conv2d": (1, 3, 32, 32),
"Hardtanh": (1, 3, 32, 32),
"Linear": (1, 400),
"LogSoftmax": (5, 5, 5),
"MaxPool2d": (1, 2, 32, 32),
"ReLU": (1, 3, 32, 32),
"ReLU6": (1, 3, 32, 32),
"Sigmoid": (8, 3, 32, 32),
"Softmax": (5, 5, 5),
}
not_produced_by_onnx = [
"BatchNorm1d",
"BatchNorm2d",
"BatchNorm3d",
"Conv1d",
"Conv2d",
"Linear",
]
# loop over all modules:
for module_name, from_pytorch, compute_gradients in itertools.product(
module_args.keys(), [False, True], [False, True]
):
# some modules cannot be produced by the ONNX exporter:
if from_pytorch and module_name in not_produced_by_onnx:
continue
# generate inputs:
input = get_random_test_tensor(size=input_sizes[module_name], is_float=True)
input.requires_grad = True
encr_input = crypten.cryptensor(input)
encr_input.requires_grad = compute_gradients
# create PyTorch module:
args = module_args[module_name]
kwargs = {"ceil_mode": True} if module_name == "MaxPool2d" else {}
module = getattr(torch.nn, module_name)(*args, **kwargs)
module.train()
# create encrypted CrypTen module:
if from_pytorch:
encr_module = crypten.nn.from_pytorch(module, input)
else:
encr_module = getattr(crypten.nn, module_name)(*args, **kwargs)
for name, param in module.named_parameters():
setattr(encr_module, name, param)
# check that module properly encrypts / decrypts and
# check that encrypting with current mode properly performs no-op
for encrypted in [False, True, True, False, True]:
encr_module.encrypt(mode=encrypted)
if encrypted:
self.assertTrue(encr_module.encrypted, "module not encrypted")
else:
self.assertFalse(encr_module.encrypted, "module encrypted")
# check value of parameters:
for key in ["weight", "bias"]:
if hasattr(module, key): # if PyTorch model has key
# find that key in the crypten.nn.Graph:
if from_pytorch:
for encr_node in encr_module.modules():
if hasattr(encr_node, key):
encr_param = getattr(encr_node, key)
break
# or get it from the crypten Module directly:
else:
encr_param = getattr(encr_module, key)
# compare with reference:
# NOTE: Because some parameters are initialized randomly
# with different values on each process, we only want to
# check that they are consistent with source parameter value
reference = getattr(module, key)
src_reference = comm.get().broadcast(reference, 0)
msg = "parameter %s in %s incorrect" % (key, module_name)
if isinstance(encr_param, crypten.nn.Parameter):
encr_param = encr_param.data
if not crypten.is_encrypted_tensor(encr_param):
encr_param = crypten.cryptensor(encr_param, src=0)
self._check(encr_param, src_reference, msg)
# Forward Pass
self.assertTrue(encr_module.training, "training value incorrect")
reference = module(input)
encr_output = encr_module(encr_input)
msg = "from_pytorch" if from_pytorch else ""
self._check(encr_output, reference, f"{module_name} forward failed {msg}")
# Backward Pass
reference.sum().backward()
encr_output.sum().backward()
# Check input gradients
if compute_gradients:
self.assertIsNotNone(
encr_input.grad, f"{module_name} grad failed to populate {msg}."
)
self._check(
encr_input.grad,
input.grad,
f"{module_name} backward on input failed {msg}",
)
else:
self.assertIsNone(encr_input.grad)
# Check parameter gradients
for name, encr_param in encr_module.named_parameters():
name = name.split(".")[-1]
torch_param = getattr(module, name)
self._check(
encr_param.grad,
torch_param.grad,
f"{module_name} backward on parameter {name} failed {msg}",
)
def test_conv(self):
"""
Tests crypten.nn.Conv module.
"""
# try different dimensionalities:
for dim in range(1, 3):
for compute_gradients in [True, False]:
# fixed attributes of convolution:
stride = tuple([1] * dim)
padding = tuple([0] * dim)
dilation = tuple([1] * dim)
groups = 1
# generate input:
in_channels, out_channels = 4, 5
size = tuple([1, in_channels] + ([16] * dim))
input = get_random_test_tensor(size=size, is_float=True)
input.requires_grad = True
encr_input = crypten.cryptensor(input)
encr_input.requires_grad = compute_gradients
# generate kernel:
kernel_shape = tuple([out_channels, in_channels] + ([3] * dim))
kernel = get_random_test_tensor(size=kernel_shape, is_float=True)
kernel.requires_grad = True
encr_kernel = crypten.cryptensor(kernel)
encr_kernel.requires_grad = compute_gradients
# create "encrypted" CrypTen module:
module = crypten.nn.Conv(stride, padding, dilation, groups)
module.encrypt()
# compute PyTorch output:
func = getattr(torch.nn.functional, f"conv{dim}d", None)
reference = func(
input,
kernel,
None,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
# compare model outputs:
encr_output = module((encr_input, encr_kernel))
self._check(encr_output, reference, "Conv forward failed")
# test backward pass:
reference.backward(torch.ones(reference.size()))
encr_output.backward(encr_output.new(torch.ones(encr_output.size())))
if compute_gradients:
self._check(
encr_input.grad, input.grad, "Conv backward on input failed"
)
self._check(
encr_kernel.grad, kernel.grad, "Conv backward on weight failed"
)
else:
self.assertIsNone(encr_input.grad)
self.assertIsNone(encr_kernel.grad)
def test_linear(self):
"""
Tests crypten.nn.Linear module.
"""
dims = [(40, 80), (80, 1), (10, 10)]
sizes = [(1, 40), (4, 80), (6, 4, 2, 10)]
for compute_gradients in [True, False]:
for dim, size in zip(dims, sizes):
# generate inputs:
input = get_random_test_tensor(size=size, is_float=True)
input.requires_grad = True
encr_input = crypten.cryptensor(input)
encr_input.requires_grad = compute_gradients
# create PyTorch module:
module = torch.nn.Linear(*dim)
module.train()
# create encrypted CrypTen module:
encr_module = crypten.nn.Linear(*dim)
for n, p in module.named_parameters():
p = comm.get().broadcast(p, 0)
encr_module.set_parameter(n, p)
encr_module.encrypt().train()
# compare model outputs:
self.assertTrue(encr_module.training, "training value incorrect")
reference = module(input)
encr_output = encr_module(encr_input)
self._check(encr_output, reference, "Linear forward failed")
# test backward pass:
reference.backward(torch.ones(reference.size()))
encr_output.backward(encr_output.new(torch.ones(encr_output.size())))
if compute_gradients:
self._check(
encr_input.grad, input.grad, "Linear backward on input failed"
)
else:
self.assertIsNone(encr_input.grad)
for name, param in module.named_parameters():
encr_param = getattr(encr_module, name)
self._check(
encr_param.grad,
param.grad,
"Linear backward on %s failed" % name,
)
def test_parameter_module(self):
"""
Tests the crypten.nn.Parameter module.
"""
for trainable in [False, True]:
for ttype in [
torch.tensor,
torch.nn.parameter.Parameter,
crypten.cryptensor,
]:
# check creation of Parameter:
num_rows, num_cols = 5, 4
size = (num_rows, num_cols)
reference = get_random_test_tensor(size=size, is_float=True)
param = crypten.nn.Parameter(ttype(reference), trainable=trainable)
self.assertTrue(hasattr(param, "data"))
self.assertTrue(hasattr(param, "requires_grad"))
self.assertTrue(hasattr(param, "encrypted"))
self.assertEqual(
param.requires_grad,
trainable,
msg=f"requires_grad incorrect for type {ttype}",
)
# check that parameter is registered:
self.assertEqual(len(list(param.parameters())), 1 if trainable else 0)
self.assertEqual(len(list(param.buffers())), 0 if trainable else 1)
# check value of parameter:
for value in [param.data, param(None)]:
if ttype == crypten.cryptensor:
self.assertEqual(value.size(), reference.size())
equal_elem = (value == reference).sum().get_plain_text()
self.assertEqual(equal_elem.item(), num_rows * num_cols)
self.assertTrue(param.encrypted)
else:
self.assertEqual(value.size(), reference.size())
self.assertTrue((value == reference).all().item())
self.assertFalse(param.encrypted)
# check that we can encrypt parameter:
if ttype != crypten.cryptensor:
param = param.encrypt()
self.assertTrue(param.encrypted)
self._check(
param.data,
reference,
f"encryption of parameter failed for {ttype} with trainable = {trainable}",
)
for value in [param.data, param(None)]:
self.assertTrue(crypten.is_encrypted_tensor(value))
# check that we can decrypt parameter:
param = param.decrypt()
self.assertFalse(param.encrypted)
for value in [param.data, param(None)]:
self.assertTrue(torch.is_tensor(value))
# check that we cannot initialize with other types:
with self.assertRaises(AssertionError):
param = crypten.nn.Parameter(list(range(5)))
def test_inplace_warning(self):
"""Tests that a warning is thrown that indicates that the `inplace` kwarg
is ignored when a function is called with `inplace=True`
"""
modules = [
"Dropout",
"DropoutNd",
"Dropout2d",
"Dropout3d",
"Hardtanh",
"ReLU",
"ReLU6",
]
for module in modules:
module_str = (
module if module not in ["Dropout2d", "Dropout3d"] else "DropoutNd"
)
warning_str = (
f"CrypTen {module_str} module does not support inplace computation."
)
with self.assertLogs(logger=logging.getLogger(), level="WARNING") as cm:
getattr(crypten.nn, module)(inplace=True)
self.assertTrue(f"WARNING:root:{warning_str}" in cm.output)
def test_sequential(self):
"""
Tests crypten.nn.Sequential module.
"""
# try networks of different depth:
for num_layers in range(1, 6):
for compute_gradients in [True, False]:
# construct sequential container:
input_size = (3, 10)
output_size = (input_size[0], input_size[1] - num_layers)
layer_idx = range(input_size[1], output_size[1], -1)
# Construct module list
torch_module_list = [
torch.nn.Linear(num_feat, num_feat - 1) for num_feat in layer_idx
]
crypten_module_list = [
crypten.nn.Linear(num_feat, num_feat - 1) for num_feat in layer_idx
]
# Coordinate parameter values:
for i in range(len(torch_module_list)):
torch_module_list[i].weight = torch.nn.Parameter(
get_random_test_tensor(
size=torch_module_list[i].weight.size(), is_float=True
)
)
torch_module_list[i].bias = torch.nn.Parameter(
get_random_test_tensor(
size=torch_module_list[i].bias.size(), is_float=True
)
)
crypten_module_list[i].weight = torch_module_list[i].weight
crypten_module_list[i].bias = torch_module_list[i].bias
# Construct sequential modules
torch_sequential = torch.nn.Sequential(*torch_module_list)
crypten_sequential = crypten.nn.Sequential(*crypten_module_list)
crypten_sequential.encrypt()
# check container:
self.assertTrue(
crypten_sequential.encrypted, "nn.Sequential not encrypted"
)
for module in crypten_sequential.modules():
self.assertTrue(module.encrypted, "module not encrypted")
assert len(list(crypten_sequential.modules())) == len(
list(torch_sequential.modules())
), "nn.Sequential contains incorrect number of modules"
# construct test input and run through sequential container:
input = get_random_test_tensor(size=input_size, is_float=True)
encr_input = crypten.cryptensor(input)
encr_input.requires_grad = compute_gradients
encr_output = crypten_sequential(encr_input)
# compute reference output:
reference = torch_sequential(input)
# compare output to reference:
self._check(encr_output, reference, "nn.Sequential forward failed")
def test_graph(self):
"""
Tests crypten.nn.Graph module.
"""
for compute_gradients in [True, False]:
for num_inputs in [1, 2]:
# define test case:
input_size = (3, 10)
input = get_random_test_tensor(size=input_size, is_float=True)
input2 = get_random_test_tensor(size=input_size, is_float=True)
encr_input = crypten.cryptensor(input)
encr_input2 = crypten.cryptensor(input2)
encr_input.requires_grad = compute_gradients
encr_input2.requires_grad = compute_gradients
# for two inputs, sum the inputs first:
if num_inputs == 1:
graph = crypten.nn.Graph("input", "output")
elif num_inputs == 2:
graph = crypten.nn.Graph(["input1", "input2"], "output")
graph.add_module("input", crypten.nn.Add(), ["input1", "input2"])
else:
raise ValueError(f"Unsupported value of inputs: {num_inputs}")
# test residual block with subsequent linear layer:
linear1 = get_random_linear(input_size[1], input_size[1])
linear2 = get_random_linear(input_size[1], input_size[1])
graph.add_module("linear", linear_to_crypten(linear1), ["input"])
graph.add_module("residual", crypten.nn.Add(), ["input", "linear"])
graph.add_module("output", linear_to_crypten(linear2), ["residual"])
graph.encrypt()
# check container:
self.assertTrue(graph.encrypted, "nn.Graph not encrypted")
for module in graph.modules():
self.assertTrue(module.encrypted, "module not encrypted")
# compare output to reference:
if num_inputs == 1:
encr_output = graph(encr_input)
reference = linear2(linear1(input) + input)
elif num_inputs == 2:
encr_output = graph(encr_input, encr_input2)
reference = linear2(linear1(input + input2) + input + input2)
else:
raise ValueError(f"Unsupported value of inputs: {num_inputs}")
self._check(encr_output, reference, "nn.Graph forward failed")
def test_losses(self):
"""
Tests all Losses implemented in crypten.nn.
"""
# create test tensor:
input = get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001
target = get_random_test_tensor(max_value=0.999, is_float=True).abs() + 0.001
encrypted_input = crypten.cryptensor(input)
encrypted_target = crypten.cryptensor(target)
losses = [
"BCELoss",
"BCEWithLogitsLoss",
"L1Loss",
"MSELoss",
]
# test forward() function of all simple losses:
for loss_name in losses:
for skip_forward in [False, True]:
enc_loss_object = getattr(crypten.nn, loss_name)(
skip_forward=skip_forward
)
self.assertEqual(
enc_loss_object.reduction, "mean", "Reduction used is not 'mean'"
)
input.requires_grad = True
input.grad = None
loss = getattr(torch.nn, loss_name)()(input, target)
encrypted_loss = enc_loss_object(encrypted_input, encrypted_target)
if not skip_forward:
self._check(
encrypted_loss, loss, f"{loss_name} failed forward w/o grad"
)
encrypted_input.requires_grad = True
encrypted_input.grad = None
encrypted_loss = enc_loss_object(encrypted_input, encrypted_target)
if not skip_forward:
self._check(
encrypted_loss, loss, f"{loss_name} failed forward with grad"
)
# Check backward
loss.backward()
encrypted_loss.backward()
self._check(
encrypted_input.grad, input.grad, f"{loss_name} grad failed"
)
# test forward() function of cross-entropy loss:
batch_size, num_targets = 16, 5
input = get_random_test_tensor(size=(batch_size, num_targets), is_float=True)
target = get_random_test_tensor(
size=(batch_size,), max_value=num_targets - 1
).abs()
encrypted_input = crypten.cryptensor(input)
encrypted_target = crypten.cryptensor(onehot(target, num_targets=num_targets))
enc_loss_object = crypten.nn.CrossEntropyLoss()
self.assertEqual(
enc_loss_object.reduction, "mean", "Reduction used is not 'mean'"
)
loss = torch.nn.CrossEntropyLoss()(input, target)
encrypted_loss = crypten.nn.CrossEntropyLoss()(
encrypted_input, encrypted_target
)
self._check(encrypted_loss, loss, "cross-entropy loss failed")
encrypted_input.requires_grad = True
encrypted_target.requires_grad = True
encrypted_loss = crypten.nn.CrossEntropyLoss()(
encrypted_input, encrypted_target
)
self._check(encrypted_loss, loss, "cross-entropy loss failed")
def test_distances(self):
distances = ["CosineSimilarity"]
for distance in distances:
x1 = get_random_test_tensor(is_float=True)
x2 = get_random_test_tensor(is_float=True)
x1.requires_grad = True
x2.requires_grad = True
x1_enc = crypten.cryptensor(x1, requires_grad=True)
x2_enc = crypten.cryptensor(x2, requires_grad=True)
dist_fn = getattr(torch.nn, distance)()
enc_dist_fn = getattr(crypten.nn, distance)()
# Forward Pass
dist = dist_fn(x1, x2)
dist_enc = enc_dist_fn(x1_enc, x2_enc)
self._check(dist_enc, dist, f"{distance} failed in forward")
# Backward Pass
grad_output = get_random_test_tensor(size=dist.size(), is_float=True).abs()
grad_output_enc = crypten.cryptensor(grad_output)
dist.backward(grad_output)
dist_enc.backward(grad_output_enc)
self._check(x1_enc.grad, x1.grad, f"{distance} 1st arg grad failed")
self._check(x2_enc.grad, x2.grad, f"{distance} 2nd arg grad failed")
def test_getattr_setattr(self):
"""Tests the __getattr__ and __setattr__ functions"""
tensor1 = get_random_test_tensor(size=(3, 3), is_float=True)
tensor2 = get_random_test_tensor(size=(3, 3), is_float=True)
class ExampleNet(crypten.nn.Module):
def __init__(self):
super(ExampleNet, self).__init__()
self.fc1 = crypten.nn.Linear(20, 1)
sample_buffer = tensor1
self.register_buffer("sample_buffer", sample_buffer)
sample_param = tensor2
self.register_parameter("sample_param", sample_param)
def forward(self, x):
out = self.fc1(x)
return out
model = ExampleNet()
model.encrypt()
self.assertTrue("fc1" in model._modules.keys(), "modules __setattr__ failed")
self._check(model.sample_buffer, tensor1, "buffer __getattr__ failed")
self._check(model.sample_param, tensor2, "parameter __getattr__ failed")
self.assertTrue(
isinstance(model.fc1, crypten.nn.Linear), "modules __getattr__ failed"
)
"""
assign to model.weight should change model._parameters["weight"]
"""
model.fc1.weight = torch.nn.Parameter(torch.zeros((2, 3)))
self.assertEqual(
model.fc1._parameters["weight"].tolist(),
torch.nn.Parameter(torch.zeros((2, 3))).tolist(),
)
"""
assign to model._parameters["weight"] should change model.weight
"""
model.fc1._parameters["weight"] = torch.nn.Parameter(torch.ones((2, 3)))
self.assertEqual(
model.fc1.weight.tolist(), torch.nn.Parameter(torch.ones((2, 3))).tolist()
)
"""
assign to model._buffers["bufferedItem"] should change model.bufferedItem
"""
model.fc1._buffers["bufferedItem"] = torch.nn.Parameter(torch.ones((2, 3)))
self.assertEqual(
model.fc1.bufferedItem.tolist(),
torch.nn.Parameter(torch.ones((2, 3))).tolist(),
)
"""
assign to model.weight should change model._parameters["weight"]
"""
model.fc1.bufferedItem = torch.nn.Parameter(torch.zeros((2, 3)))
self.assertEqual(
model.fc1._buffers["bufferedItem"].tolist(),
torch.nn.Parameter(torch.zeros((2, 3))).tolist(),
)
def test_training(self):
"""
Tests training of simple model in crypten.nn.
"""
# create MLP with one hidden layer:
learning_rate = 0.1
batch_size, num_inputs, num_intermediate, num_outputs = 8, 10, 5, 1
model = crypten.nn.Sequential(
crypten.nn.Linear(num_inputs, num_intermediate),
crypten.nn.ReLU(),
crypten.nn.Linear(num_intermediate, num_outputs),
)
model.train()
model.encrypt()
loss = crypten.nn.MSELoss()
# perform training iterations:
for _ in range(10):
for compute_gradients in [True, False]:
# get training sample:
input = get_random_test_tensor(
size=(batch_size, num_inputs), is_float=True
)
target = input.mean(dim=1, keepdim=True)
# encrypt training sample:
input = crypten.cryptensor(input)
target = crypten.cryptensor(target)
if compute_gradients:
input.requires_grad = True
target.requires_grad = True
# perform forward pass:
output = model(input)
loss_value = loss(output, target)
# set gradients to "zero" (setting to None is more efficient):
model.zero_grad()
for param in model.parameters():
self.assertIsNone(param.grad, "zero_grad did not reset gradients")
# perform backward pass:
loss_value.backward()
# perform parameter update:
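                # the reference is computed as a plain SGD step
                # (w <- w - lr * grad) to compare against update_parameters()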
reference = {}
reference = self._compute_reference_parameters(
"", reference, model, learning_rate
)
model.update_parameters(learning_rate)
self._check_reference_parameters("", reference, model)
def test_custom_module_training(self):
"""Tests training CrypTen models created directly using the crypten.nn.Module"""
BATCH_SIZE = 32
NUM_FEATURES = 3
class ExampleNet(crypten.nn.Module):
def __init__(self):
super(ExampleNet, self).__init__()
self.fc1 = crypten.nn.Linear(NUM_FEATURES, BATCH_SIZE)
self.fc2 = crypten.nn.Linear(BATCH_SIZE, 2)
def forward(self, x):
out = self.fc1(x)
out = self.fc2(out)
return out
model = ExampleNet()
x_orig = get_random_test_tensor(size=(BATCH_SIZE, NUM_FEATURES), is_float=True)
# y is a linear combo of x to ensure network can easily learn pattern
y_orig = (2 * x_orig.mean(dim=1)).gt(0).long()
y_one_hot = onehot(y_orig, num_targets=2)
# encrypt training sample:
x_train = crypten.cryptensor(x_orig, requires_grad=True)
y_train = crypten.cryptensor(y_one_hot)
for loss_name in ["BCELoss", "CrossEntropyLoss", "MSELoss"]:
# create loss function
loss = getattr(crypten.nn, loss_name)()
# create encrypted model
model.train()
model.encrypt()
num_epochs = 3
learning_rate = 0.001
for i in range(num_epochs):
output = model(x_train)
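                # normalize logits into probabilities for BCELoss and
                # CrossEntropyLoss; MSELoss consumes raw outputs directly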
if loss_name == "MSELoss":
output_norm = output
else:
output_norm = output.softmax(1)
loss_value = loss(output_norm, y_train)
# set gradients to "zero"
model.zero_grad()
for param in model.parameters():
self.assertIsNone(param.grad, "zero_grad did not reset gradients")
# perform backward pass:
loss_value.backward()
for param in model.parameters():
if param.requires_grad:
self.assertIsNotNone(
param.grad, "required parameter gradient not created"
)
# update parameters
orig_parameters, upd_parameters = {}, {}
orig_parameters = self._compute_reference_parameters(
"", orig_parameters, model, 0
)
model.update_parameters(learning_rate)
upd_parameters = self._compute_reference_parameters(
"", upd_parameters, model, learning_rate
)
parameter_changed = False
                for name, value in orig_parameters.items():
                    unchanged = torch.allclose(upd_parameters[name], value)
                    if not unchanged:
                        parameter_changed = True
self.assertTrue(
parameter_changed, "no parameter changed in training step"
)
# record initial and current loss
if i == 0:
orig_loss = loss_value.get_plain_text()
curr_loss = loss_value.get_plain_text()
# check that the loss has decreased after training
self.assertTrue(
curr_loss.item() < orig_loss.item(),
"loss has not decreased after training",
)
def test_batchnorm_module(self):
"""
        Tests that BatchNorm modules correctly set and update running stats.
        Also tests the stateless BatchNormalization module.
"""
batchnorm_fn_and_size = (
("BatchNorm1d", (100, 5, 4)),
("BatchNorm2d", (100, 7, 4, 20)),
("BatchNorm3d", (100, 5, 4, 8, 15)),
)
for batchnorm_fn, size in batchnorm_fn_and_size:
for is_training in (True, False):
# create random input tensor:
tensor = get_random_test_tensor(size=size, is_float=True)
tensor.requires_grad = True
encrypted_input = crypten.cryptensor(tensor, requires_grad=True)
# sample random weight and bias:
C = size[1]
params = {
"weight": get_random_test_tensor(
size=[C], max_value=1, is_float=True
),
"bias": get_random_test_tensor(
size=[C], max_value=1, is_float=True
),
}
for param in params.values():
param.requires_grad = True
# dimensions for mean and variance:
stats_dimensions = list(range(tensor.dim()))
# perform on C dimension for tensor of shape (N, C, +)
stats_dimensions.pop(1)
# create models:
enc_model = getattr(crypten.nn.module, batchnorm_fn)(C)
plain_model = getattr(torch.nn.modules, batchnorm_fn)(C)
for key, param in params.items():
enc_model.set_parameter(key, param)
setattr(plain_model, key, torch.nn.Parameter(param))
enc_model = enc_model.encrypt()
# check initial running statistics:
stats = ["running_mean", "running_var"]
for stat in stats:
self._check(
enc_model._buffers[stat],
plain_model._buffers[stat],
f"{stat} initial module value incorrect with train={is_training}",
)
orig_buffers = copy.deepcopy(plain_model._buffers)
# set training mode
plain_model.train(is_training)
enc_model.train(is_training)
# check output and running_stats update:
encr_output = enc_model.forward(encrypted_input)
output = plain_model.forward(tensor)
self._check(
encr_output,
output,
f"output of module incorrect with train={is_training}",
)
for stat in stats:
self._check(
enc_model._buffers[stat],
plain_model._buffers[stat],
f"{stat} momentum update in module incorrect with train={is_training}",
)
# compute output of stateless batchnorm module:
stateless_model = crypten.nn.module.BatchNormalization().encrypt()
stateless_model.train(is_training)
encr_output = stateless_model(
(
encrypted_input,
crypten.cryptensor(params["weight"]),
crypten.cryptensor(params["bias"]),
crypten.cryptensor(orig_buffers["running_mean"]),
crypten.cryptensor(orig_buffers["running_var"]),
)
)
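                # in training mode the stateless module returns a 5-tuple
                # (output plus updated statistics); in eval mode it returns
                # only the normalized output, so wrap it for uniform indexing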
if not is_training:
encr_output = (encr_output,)
# check that output of stateless batchnorm module is correct:
self.assertEqual(len(encr_output), 5 if is_training else 1)
self._check(
encr_output[0],
output,
f"output of BatchNormalization incorrect with train={is_training}",
)
if is_training: # statistics are only updated at training time
for idx, stat in enumerate(stats):
self._check(
encr_output[1 + idx],
plain_model._buffers[stat],
f"{stat} update in BatchNormalization module incorrect",
)
def test_unencrypted_modules(self):
"""Tests crypten.Modules without encrypting them."""
# generate input:
input_size = (32, 16)
output_size = (input_size[0], 8)
sample = get_random_test_tensor(size=input_size, is_float=True)
target = get_random_test_tensor(size=output_size, is_float=True)
# create model and criterion:
linear = crypten.nn.Linear(input_size[1], output_size[1])
criterion = crypten.nn.MSELoss()
# function running the actual test:
def _run_test(_sample, _target):
# forward pass fails when feeding encrypted input into unencrypted model:
linear.zero_grad()
if not linear.encrypted and not torch.is_tensor(_sample):
with self.assertRaises(RuntimeError):
output = linear(_sample)
return
# when model is encrypted, feeding unencrypted input is not supported:
if linear.encrypted and torch.is_tensor(_sample):
with self.assertRaises(NotImplementedError):
output = linear(_sample)
return
# forward pass succeeds in other cases:
output = linear(_sample)
loss = criterion(output, _target)
self.assertIsNotNone(loss)
# backward pass succeeds in other cases:
loss.backward()
for param in linear.parameters():
self.assertIsNotNone(param.grad)
# test parameter update:
original_params = [param.clone() for param in linear.parameters()]
linear.update_parameters(1.0)
for idx, param in enumerate(linear.parameters()):
diff = param.sub(original_params[idx]).abs().mean()
if isinstance(diff, crypten.CrypTensor):
diff = diff.get_plain_text()
self.assertGreater(diff.item(), 1e-4)
# test both tensor types in models with and without encryption:
for encrypted in [False, True, False, True]:
linear.encrypt(mode=encrypted)
_run_test(sample, target)
_run_test(crypten.cryptensor(sample), crypten.cryptensor(target))
def test_state_dict(self):
"""
Tests dumping and loading of state dicts.
"""
import io
def _check_equal(t1, t2):
"""
            Checks whether two tensors are identical.
"""
if isinstance(t1, torch.nn.parameter.Parameter):
t1 = t1.data
if isinstance(t2, torch.nn.parameter.Parameter):
t2 = t2.data
self.assertEqual(type(t1), type(t2))
if isinstance(t1, crypten.CrypTensor):
t1 = t1.get_plain_text()
t2 = t2.get_plain_text()
self.assertTrue(t1.eq(t2).all())
def _check_state_dict(model, state_dict):
"""
Checks if state_dict matches parameters in model.
"""
# get all parameters, buffers, and names from model:
params_buffers = {}
for func in ["named_parameters", "named_buffers"]:
params_buffers.update({k: v for k, v in getattr(model, func)()})
# do all the checks:
self.assertEqual(len(params_buffers), len(state_dict))
for name, param_or_buffer in params_buffers.items():
self.assertIn(name, state_dict)
_check_equal(state_dict[name], param_or_buffer)
# test for individual modules:
module_args = {
"BatchNorm1d": (400,),
"BatchNorm2d": (3,),
"BatchNorm3d": (6,),
"Conv1d": (3, 6, 5),
"Conv2d": (3, 6, 5),
"Linear": (400, 120),
}
for module_name, args in module_args.items():
for encrypt in [False, True]:
# create module and get state dict:
module = getattr(crypten.nn, module_name)(*args)
if encrypt:
module.encrypt()
state_dict = module.state_dict()
_check_state_dict(module, state_dict)
# load state dict into fresh module:
new_module = getattr(crypten.nn, module_name)(*args)
if encrypt:
with self.assertRaises(AssertionError):
new_module.load_state_dict(state_dict)
new_module.encrypt()
new_module.load_state_dict(state_dict)
_check_state_dict(new_module, state_dict)
# check saving and loading from file for encrypted modules
if encrypt:
f = io.BytesIO()
crypten.save(module.state_dict(), f)
f.seek(0)
new_module2 = getattr(crypten.nn, module_name)(*args)
new_module2.encrypt()
new_module2.load_state_dict(crypten.load(f))
_check_state_dict(new_module2, state_dict)
# tests for model that is sequence of modules:
for num_layers in range(1, 6):
for encrypt in [False, True]:
# some variables that we need:
input_size = (3, 10)
output_size = (input_size[0], input_size[1] - num_layers)
layer_idx = range(input_size[1], output_size[1], -1)
# construct sequential model:
module_list = [
crypten.nn.Linear(num_feat, num_feat - 1) for num_feat in layer_idx
]
model = crypten.nn.Sequential(*module_list)
if encrypt:
model.encrypt()
# check state dict:
state_dict = model.state_dict()
_check_state_dict(model, state_dict)
# load state dict into fresh model:
state_dict = model.state_dict()
module_list = [
crypten.nn.Linear(num_feat, num_feat - 1) for num_feat in layer_idx
]
new_model = crypten.nn.Sequential(*module_list)
if encrypt:
with self.assertRaises(AssertionError):
new_model.load_state_dict(state_dict)
new_model.encrypt()
new_model.load_state_dict(state_dict)
# check new model:
                _check_state_dict(new_model, state_dict)
# check saving and loading from file for encrypted modules
if encrypt:
f = io.BytesIO()
crypten.save(model.state_dict(), f)
f.seek(0)
module_list = [
crypten.nn.Linear(num_feat, num_feat - 1)
for num_feat in layer_idx
]
new_model2 = crypten.nn.Sequential(*module_list)
new_model2.encrypt()
new_model2.load_state_dict(crypten.load(f))
_check_state_dict(new_model2, state_dict)
def test_to(self):
"""Test Module.to, Module.cpu, and Module.cuda"""
module_list = [crypten.nn.Linear(10, 10) for _ in range(3)]
model = crypten.nn.Sequential(*module_list)
model_cpu = model.to("cpu")
cpu = torch.device("cpu")
for param in model_cpu.parameters():
self.assertEqual(param.device, cpu)
for buffer in model_cpu.buffers():
self.assertEqual(buffer.device, cpu)
model_cpu = model.cpu()
for param in model_cpu.parameters():
self.assertEqual(param.device, cpu)
for buffer in model_cpu.buffers():
self.assertEqual(buffer.device, cpu)
if torch.cuda.is_available():
cuda = torch.device("cuda:0")
model_cuda = model.cuda()
for param in model_cuda.parameters():
self.assertEqual(param.device, cuda)
for buffer in model_cuda.buffers():
self.assertEqual(buffer.device, cuda)
model_cuda = model.to("cuda:0")
for param in model_cuda.parameters():
self.assertEqual(param.device, cuda)
for buffer in model_cuda.buffers():
self.assertEqual(buffer.device, cuda)
def test_module_dict(self):
"""Test ModuleDict module"""
module_dict = crypten.nn.ModuleDict()
self.assertEqual(len(module_dict), 0, "ModuleDict initialized incorrect size")
# Test initialization
module_dict = crypten.nn.ModuleDict(
{"conv2d": crypten.nn.Conv2d(10, 10, 3), "pool": crypten.nn.MaxPool2d(3)}
)
self.assertEqual(len(module_dict), 2, "ModuleDict initialized incorrect size")
self.assertTrue("conv2d" in module_dict.keys(), "ModuleDict init failed")
self.assertTrue(
isinstance(module_dict["conv2d"], crypten.nn.Conv2d),
"ModuleDict init failed",
)
self.assertTrue("pool" in module_dict.keys(), "ModuleDict init failed")
self.assertTrue(
isinstance(module_dict["pool"], crypten.nn.MaxPool2d),
"ModuleDict init failed",
)
# Test setitem
module_dict["conv1d"] = crypten.nn.Conv1d(5, 5, 3)
self.assertEqual(len(module_dict), 3, "ModuleDict setitem failed")
self.assertTrue("conv1d" in module_dict.keys(), "ModuleDict setitem failed")
self.assertTrue(
isinstance(module_dict["conv1d"], crypten.nn.Conv1d),
"ModuleDict setitem failed",
)
# Test pop
conv = module_dict.pop("conv2d")
self.assertTrue(isinstance(conv, crypten.nn.Conv2d), "ModuleDict pop failed")
self.assertEqual(len(module_dict), 2, "ModuleDict pop failed")
self.assertFalse("conv2d" in module_dict.keys(), "ModuleDict pop failed")
# Test list initialization
module_dict = crypten.nn.ModuleDict(
[["relu", crypten.nn.ReLU()], ["sigmoid", crypten.nn.Sigmoid()]]
)
self.assertEqual(len(module_dict), 2, "ModuleDict initialized incorrect size")
self.assertTrue("relu" in module_dict.keys(), "ModuleDict init failed")
self.assertTrue(
isinstance(module_dict["relu"], crypten.nn.ReLU), "ModuleDict init failed"
)
self.assertTrue("sigmoid" in module_dict.keys(), "ModuleDict init failed")
self.assertTrue(
isinstance(module_dict["sigmoid"], crypten.nn.Sigmoid),
"ModuleDict init failed",
)
# Test clear
module_dict.clear()
self.assertEqual(len(module_dict), 0, "ModuleDict clear failed")
def test_module_list(self):
"""Test ModuleDict module"""
module_list = crypten.nn.ModuleList()
self.assertEqual(len(module_list), 0, "ModuleList initialized incorrect size")
# Test initialization
module_list = crypten.nn.ModuleList(
[crypten.nn.Conv2d(10, 10, 3), crypten.nn.MaxPool2d(3)]
)
self.assertEqual(len(module_list), 2, "ModuleList initialized incorrect size")
self.assertTrue(
isinstance(module_list[0], crypten.nn.Conv2d),
"ModuleList init failed",
)
self.assertTrue(
isinstance(module_list[1], crypten.nn.MaxPool2d),
"ModuleList init failed",
)
# Test append
module_list.append(crypten.nn.ReLU())
self.assertEqual(len(module_list), 3, "ModuleList append failed")
self.assertTrue(
isinstance(module_list[2], crypten.nn.ReLU),
"ModuleList append failed",
)
# Test extend
module_list.extend([crypten.nn.Linear(10, 5), crypten.nn.ReLU()])
msg = "ModuleList append failed"
self.assertEqual(len(module_list), 5, msg)
self.assertTrue(isinstance(module_list[3], crypten.nn.Linear), msg)
self.assertTrue(isinstance(module_list[4], crypten.nn.ReLU), msg)
# Test insert
module_list.insert(1, crypten.nn.Sigmoid())
msg = "ModuleList append failed"
self.assertEqual(len(module_list), 6, msg)
self.assertTrue(isinstance(module_list[1], crypten.nn.Sigmoid), msg)
# Test __delitem__
del module_list[1]
msg = "ModuleList delitem failed"
self.assertEqual(len(module_list), 5, msg)
self.assertTrue(isinstance(module_list[1], crypten.nn.MaxPool2d), msg)
# Test __delitem__ with slice
del module_list[1:3]
msg = "ModuleList delitem failed with slice input"
self.assertEqual(len(module_list), 3, msg)
self.assertTrue(isinstance(module_list[0], crypten.nn.Conv2d), msg)
self.assertTrue(isinstance(module_list[1], crypten.nn.Linear), msg)
def test_parameter_initializations(self):
"""Test crypten.nn.init initializations"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
deterministic = ["constant_", "dirac_", "ones_", "zeros_"]
non_deterministic = [
"kaiming_normal_",
"kaiming_uniform_",
"normal_",
"orthogonal_",
"sparse_",
"trunc_normal_",
"uniform_",
"xavier_normal_",
"xavier_uniform_",
]
requires_more_dims = [
"dirac_",
"kaiming_normal_",
"kaiming_uniform_",
"orthogonal_",
"xavier_normal_",
"xavier_uniform_",
]
only_two_dims = ["sparse_"]
args_dict = {"constant_": (0.5,), "sparse_": (0.2,)}
for init, size, private in itertools.product(
deterministic + non_deterministic, sizes, [False, True]
):
if len(size) < 3 and init in requires_more_dims:
continue
if len(size) != 2 and init in only_two_dims:
continue
args = args_dict.get(init, ())
tensor = torch.empty(size)
encrypted = crypten.cryptensor(tensor) if private else tensor.clone()
# Set seed to assert values (and therefore distributions) are the same
torch.manual_seed(0)
reference = getattr(torch.nn.init, init)(tensor, *args)
torch.manual_seed(0)
encrypted_out = getattr(crypten.nn.init, init)(encrypted, *args)
self.assertTrue(
encrypted_out.size() == reference.size(),
f"crypten.nn.init.{init} size mismatch",
)
if private:
self._check(
encrypted_out,
reference,
f"private crypten.nn.init.{init} failed.",
)
elif init in deterministic:
self.assertTrue(
encrypted_out.eq(reference).all(),
f"public crypten.nn.init.{init} failed.",
)
def test_tutorial_modules(self):
"""Tests that all modules from tutorial 5 properly convert to crypten modules using from_pytorch"""
input_sizes = {
AliceNet: (1, 50),
AliceNet2: (1, 1, 28, 28),
}
for torch_class, input_size in input_sizes.items():
# Create torch model
torch_model = torch_class()
torch_model.eval()
# Coordinate model weights across parties
with torch.no_grad():
for p in torch_model.parameters():
p.set_(
get_random_test_tensor(
max_value=1.0, size=p.size(), is_float=True
)
)
# Create CrypTen model
dummy_input = torch.empty(input_size)
crypten_model = crypten.nn.from_pytorch(torch_model, dummy_input)
crypten_model.encrypt()
# Create test inputs
test_input = get_random_test_tensor(
max_value=2.0, size=input_size, is_float=True
)
test_input_encr = crypten.cryptensor(test_input)
# Test model forward function
torch_output = torch_model(test_input)
crypten_output = crypten_model(test_input_encr)
self._check(
crypten_output, torch_output, f"from_pytorch failed for {torch_class}"
)
class AliceNet(torch.nn.Module):
def __init__(self):
super(AliceNet, self).__init__()
self.fc1 = torch.nn.Linear(50, 20)
self.fc2 = torch.nn.Linear(20, 2)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
return out
class AliceNet2(torch.nn.Module):
def __init__(self):
super(AliceNet2, self).__init__()
self.conv1 = torch.nn.Conv2d(1, 16, kernel_size=5, padding=0)
self.conv2 = torch.nn.Conv2d(16, 16, kernel_size=5, padding=0)
self.fc1 = torch.nn.Linear(16 * 4 * 4, 100)
self.fc2 = torch.nn.Linear(100, 10)
self.batchnorm1 = torch.nn.BatchNorm2d(16)
self.batchnorm2 = torch.nn.BatchNorm2d(16)
self.batchnorm3 = torch.nn.BatchNorm1d(100)
def forward(self, x):
out = self.conv1(x)
out = self.batchnorm1(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = self.conv2(out)
out = self.batchnorm2(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = out.view(out.size(0), -1)
out = self.fc1(out)
out = self.batchnorm3(out)
out = F.relu(out)
out = self.fc2(out)
return out
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestNN):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestNN):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTTP, self).tearDown()
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_nn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import unittest
import crypten
import torch
from crypten.common.tensor_types import is_int_tensor
from crypten.mpc.primitives import BinarySharedTensor
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
class TestBinary(MultiProcessTestCase):
"""
This class tests all functions of BinarySharedTensor.
"""
def setUp(self):
super().setUp()
        # We don't want the main process (rank -1) to initialize the communicator
if self.rank >= 0:
crypten.init()
def _check(self, encrypted_tensor, reference, msg, dst=None, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text(dst=dst)
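        # when dst is given, only that rank receives the plaintext; all
        # other ranks must get None back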
if dst is not None and dst != self.rank:
self.assertIsNone(tensor)
return
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
self.assertTrue(is_int_tensor(reference), "reference must be a long")
test_passed = (tensor == reference).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def test_encrypt_decrypt(self):
"""
Tests tensor encryption and decryption for both positive
and negative values.
"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
# encryption and decryption without source:
reference = get_random_test_tensor(size=size, is_float=False)
encrypted_tensor = BinarySharedTensor(reference)
self._check(encrypted_tensor, reference, "en/decryption failed")
for dst in range(self.world_size):
self._check(
encrypted_tensor, reference, "en/decryption failed", dst=dst
)
# encryption and decryption with source:
for src in range(self.world_size):
input_tensor = reference if src == self.rank else []
encrypted_tensor = BinarySharedTensor(
input_tensor, src=src, broadcast_size=True
)
for dst in range(self.world_size):
self._check(
encrypted_tensor,
reference,
"en/decryption with broadcast_size failed",
dst=dst,
)
def test_transpose(self):
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=False)
encrypted_tensor = BinarySharedTensor(tensor)
if len(size) == 2: # t() asserts dim == 2
reference = tensor.t()
encrypted_out = encrypted_tensor.t()
self._check(encrypted_out, reference, "t() failed")
for dim0 in range(len(size)):
for dim1 in range(len(size)):
reference = tensor.transpose(dim0, dim1)
encrypted_out = encrypted_tensor.transpose(dim0, dim1)
self._check(encrypted_out, reference, "transpose failed")
def test_permute(self):
"""Test the permute operations"""
sizes = [
(1,),
(5,),
(1, 5),
(1, 5, 7),
(7, 1, 5),
(5, 7, 1),
(1, 3, 5, 7),
(5, 3, 32, 32),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=False)
encrypted_tensor = BinarySharedTensor(tensor)
# test reversing the dimensions
dim_arr = [x - 1 for x in range(tensor.dim(), 0, -1)]
reference = tensor.permute(dim_arr)
encrypted_out = encrypted_tensor.permute(dim_arr)
self._check(encrypted_out, reference, "permute failed")
# test one particular non-reversed permutation
if tensor.dim() == 4:
dim_arr = [1, 3, 0, 2]
reference = tensor.permute(dim_arr)
encrypted_out = encrypted_tensor.permute(dim_arr)
self._check(encrypted_out, reference, "permute failed")
def test_XOR(self):
"""Test bitwise-XOR function on BinarySharedTensor"""
for tensor_type in [lambda x: x, BinarySharedTensor]:
tensor = get_random_test_tensor(is_float=False)
tensor2 = get_random_test_tensor(is_float=False)
reference = tensor ^ tensor2
encrypted_tensor = BinarySharedTensor(tensor)
encrypted_tensor2 = tensor_type(tensor2)
encrypted_out = encrypted_tensor ^ encrypted_tensor2
self._check(encrypted_out, reference, "%s XOR failed" % tensor_type)
def test_AND(self):
"""Test bitwise-AND function on BinarySharedTensor"""
for tensor_type in [lambda x: x, BinarySharedTensor]:
tensor = get_random_test_tensor(is_float=False)
tensor2 = get_random_test_tensor(is_float=False)
reference = tensor & tensor2
encrypted_tensor = BinarySharedTensor(tensor)
encrypted_tensor2 = tensor_type(tensor2)
encrypted_out = encrypted_tensor & encrypted_tensor2
self._check(encrypted_out, reference, "%s AND failed" % tensor_type)
def test_OR(self):
"""Test bitwise-OR function on BinarySharedTensor"""
for tensor_type in [lambda x: x, BinarySharedTensor]:
tensor = get_random_test_tensor(is_float=False)
tensor2 = get_random_test_tensor(is_float=False)
reference = tensor | tensor2
encrypted_tensor = BinarySharedTensor(tensor)
encrypted_tensor2 = tensor_type(tensor2)
encrypted_out = encrypted_tensor | encrypted_tensor2
self._check(encrypted_out, reference, "%s OR failed" % tensor_type)
def test_bitwise_broadcasting(self):
"""Tests bitwise function broadcasting"""
bitwise_ops = ["__and__", "__or__", "__xor__"]
sizes = [
(),
(1,),
(2,),
(1, 1),
(1, 2),
(2, 1),
(2, 2),
(1, 1, 1),
(1, 1, 2),
(1, 2, 1),
(2, 1, 1),
(2, 2, 2),
(1, 1, 1, 1),
(1, 1, 1, 2),
(1, 1, 2, 1),
(1, 2, 1, 1),
(2, 1, 1, 1),
(2, 2, 2, 2),
]
for tensor_type in [lambda x: x, BinarySharedTensor]:
for op in bitwise_ops:
for size1, size2 in itertools.combinations(sizes, 2):
tensor1 = get_random_test_tensor(size=size1, is_float=False)
tensor2 = get_random_test_tensor(size=size2, is_float=False)
encrypted_tensor1 = BinarySharedTensor(tensor1)
tensor2_transformed = tensor_type(tensor2)
if isinstance(tensor2_transformed, BinarySharedTensor):
tensor2_transformed_type = "private"
else:
tensor2_transformed_type = "public"
self._check(
getattr(encrypted_tensor1, op)(tensor2_transformed),
getattr(tensor1, op)(tensor2),
f"{tensor2_transformed_type} {op} broadcasting "
f"failed with sizes {size1}, {size2}",
)
def test_invert(self):
"""Test bitwise-invert function on BinarySharedTensor"""
tensor = get_random_test_tensor(is_float=False)
encrypted_tensor = BinarySharedTensor(tensor)
reference = ~tensor
encrypted_out = ~encrypted_tensor
self._check(encrypted_out, reference, "invert failed")
def test_add(self):
"""Tests add using binary shares"""
for tensor_type in [lambda x: x, BinarySharedTensor]:
tensor = get_random_test_tensor(is_float=False)
tensor2 = get_random_test_tensor(is_float=False)
reference = tensor + tensor2
encrypted_tensor = BinarySharedTensor(tensor)
encrypted_tensor2 = tensor_type(tensor2)
encrypted_out = encrypted_tensor + encrypted_tensor2
self._check(encrypted_out, reference, "%s add failed" % tensor_type)
def test_comparators(self):
"""Test comparators (>, >=, <, <=, ==, !=)"""
for _scale in [False, True]:
for comp in ["gt", "ge", "lt", "le", "eq", "ne"]:
for tensor_type in [lambda x: x, BinarySharedTensor]:
tensor = get_random_test_tensor(is_float=False)
tensor2 = get_random_test_tensor(is_float=False)
encrypted_tensor = BinarySharedTensor(tensor)
encrypted_tensor2 = tensor_type(tensor2)
reference = getattr(tensor, comp)(tensor2).long()
encrypted_out = getattr(encrypted_tensor, comp)(encrypted_tensor2)
self._check(encrypted_out, reference, "%s comparator failed" % comp)
def test_sum(self):
"""Tests sum using binary shares"""
tensor = get_random_test_tensor(size=(5, 5, 5), is_float=False)
encrypted = BinarySharedTensor(tensor)
self._check(encrypted.sum(), tensor.sum(), "sum failed")
for dim in [0, 1, 2]:
reference = tensor.sum(dim)
encrypted_out = encrypted.sum(dim)
self._check(encrypted_out, reference, "sum failed")
def test_get_set(self):
for tensor_type in [lambda x: x, BinarySharedTensor]:
for size in range(1, 5):
# Test __getitem__
tensor = get_random_test_tensor(size=(size, size), is_float=False)
reference = tensor[:, 0]
encrypted_tensor = BinarySharedTensor(tensor)
encrypted_out = encrypted_tensor[:, 0]
self._check(encrypted_out, reference, "getitem failed")
reference = tensor[0, :]
encrypted_out = encrypted_tensor[0, :]
self._check(encrypted_out, reference, "getitem failed")
# Test __setitem__
tensor2 = get_random_test_tensor(size=(size,), is_float=False)
reference = tensor.clone()
reference[:, 0] = tensor2
encrypted_out = BinarySharedTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[:, 0] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
reference = tensor.clone()
reference[0, :] = tensor2
encrypted_out = BinarySharedTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[0, :] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
def test_share_attr(self):
"""Tests share attribute getter and setter"""
for is_float in (True, False):
reference = get_random_test_tensor(is_float=is_float)
encrypted_tensor = BinarySharedTensor(reference)
self.assertTrue(
torch.equal(encrypted_tensor.share, encrypted_tensor.share),
"share getter failed",
)
new_share = get_random_test_tensor(is_float=False)
encrypted_tensor.share = new_share
self.assertTrue(
torch.equal(encrypted_tensor.share, new_share), "share setter failed"
)
def test_inplace(self):
"""Test inplace vs. out-of-place functions"""
for op in ["__xor__", "__and__", "__or__"]:
for tensor_type in [lambda x: x, BinarySharedTensor]:
tensor1 = get_random_test_tensor(is_float=False)
tensor2 = get_random_test_tensor(is_float=False)
reference = getattr(tensor1, op)(tensor2)
encrypted1 = BinarySharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
input_plain_id = id(encrypted1.share)
input_encrypted_id = id(encrypted1)
# Test that out-of-place functions do not modify the input
private = isinstance(encrypted2, BinarySharedTensor)
encrypted_out = getattr(encrypted1, op)(encrypted2)
self._check(
encrypted1,
tensor1,
"%s out-of-place %s modifies input"
% ("private" if private else "public", op),
)
self._check(
encrypted_out,
reference,
"%s out-of-place %s produces incorrect output"
% ("private" if private else "public", op),
)
self.assertFalse(id(encrypted_out.share) == input_plain_id)
self.assertFalse(id(encrypted_out) == input_encrypted_id)
# Test that in-place functions modify the input
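                # derive the in-place dunder name, e.g. "__xor__" -> "__ixor__"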
inplace_op = op[:2] + "i" + op[2:]
encrypted_out = getattr(encrypted1, inplace_op)(encrypted2)
self._check(
encrypted1,
reference,
"%s in-place %s does not modify input"
% ("private" if private else "public", inplace_op),
)
self._check(
encrypted_out,
reference,
"%s in-place %s produces incorrect output"
% ("private" if private else "public", inplace_op),
)
self.assertTrue(id(encrypted_out.share) == input_plain_id)
self.assertTrue(id(encrypted_out) == input_encrypted_id)
def test_control_flow_failure(self):
"""Tests that control flow fails as expected"""
tensor = get_random_test_tensor(is_float=False)
encrypted_tensor = BinarySharedTensor(tensor)
with self.assertRaises(RuntimeError):
if encrypted_tensor:
pass
with self.assertRaises(RuntimeError):
tensor = 5 if encrypted_tensor else 0
with self.assertRaises(RuntimeError):
if False:
pass
elif encrypted_tensor:
pass
def test_src_failure(self):
"""Tests that out-of-bounds src fails as expected"""
tensor = get_random_test_tensor(is_float=True)
for src in [None, "abc", -2, self.world_size]:
with self.assertRaises(AssertionError):
BinarySharedTensor(tensor, src=src)
def test_src_match_input_data(self):
"""Tests incorrect src in BinarySharedTensor fails as expected"""
tensor = get_random_test_tensor(is_float=True)
tensor.src = 0
for testing_src in [None, "abc", -2, self.world_size]:
with self.assertRaises(AssertionError):
BinarySharedTensor(tensor, src=testing_src)
def test_where(self):
"""Tests where() conditional element selection"""
sizes = [(10,), (5, 10), (1, 5, 10)]
y_types = [lambda x: x, BinarySharedTensor]
for size, y_type in itertools.product(sizes, y_types):
tensor1 = get_random_test_tensor(size=size, is_float=False)
encrypted_tensor1 = BinarySharedTensor(tensor1)
tensor2 = get_random_test_tensor(size=size, is_float=False)
encrypted_tensor2 = y_type(tensor2)
condition_tensor = (
get_random_test_tensor(max_value=1, size=[1], is_float=False) + 1
)
condition_encrypted = BinarySharedTensor(condition_tensor)
condition_bool = condition_tensor.bool()
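            # emulate where() arithmetically on plaintext: c * x + (1 - c) * y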
reference_out = tensor1 * condition_tensor + tensor2 * (
1 - condition_tensor
)
encrypted_out = encrypted_tensor1.where(condition_bool, encrypted_tensor2)
y_is_private = y_type == BinarySharedTensor
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with public condition",
)
encrypted_out = encrypted_tensor1.where(
condition_encrypted, encrypted_tensor2
)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with private condition",
)
def test_gather(self):
"""Test gather function of encrypted tensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
for dim in range(len(size)):
tensor = get_random_test_tensor(size=size, is_float=False)
index = get_random_test_tensor(size=size, is_float=False)
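                # gather indices must lie in [0, size) along each dimension;
                # clamp the random values into range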
index = index.abs().clamp(0, 4)
encrypted = BinarySharedTensor(tensor)
reference = tensor.gather(dim, index)
encrypted_out = encrypted.gather(dim, index)
self._check(encrypted_out, reference, f"gather failed with size {size}")
def test_scatter(self):
"""Test scatter function of encrypted tensor"""
funcs = ["scatter", "scatter_"]
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for func in funcs:
for size in sizes:
for tensor_type in [lambda x: x, BinarySharedTensor]:
for dim in range(len(size)):
tensor1 = get_random_test_tensor(size=size, is_float=False)
tensor2 = get_random_test_tensor(size=size, is_float=False)
index = get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
encrypted = BinarySharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(dim, index, tensor2)
encrypted_out = getattr(encrypted, func)(dim, index, encrypted2)
private = tensor_type == BinarySharedTensor
self._check(
encrypted_out,
reference,
"%s %s failed" % ("private" if private else "public", func),
)
if func.endswith("_"):
# Check in-place scatter modified input
self._check(
encrypted,
reference,
"%s %s failed to modify input"
% ("private" if private else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s unintendedly modified input"
% ("private" if private else "public", func),
)
def test_split(self):
"""Test gather function of encrypted tensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
for dim in range(len(size)):
tensor = get_random_test_tensor(size=size, is_float=False)
encrypted = BinarySharedTensor(tensor)
for idx in range(6):
split = (idx, 5 - idx)
reference0, reference1 = tensor.split(split, dim=dim)
encrypted_out0, encrypted_out1 = encrypted.split(split, dim=dim)
self._check(
encrypted_out0, reference0, f"split failed with input {split}"
)
self._check(
encrypted_out1, reference1, f"split failed with input {split}"
)
split = (5,)
(reference,) = tensor.split(split, dim=dim)
(encrypted_out,) = encrypted.split(split, dim=dim)
self._check(
encrypted_out, reference, f"split failed with input {split}"
)
with self.assertRaises(RuntimeError):
encrypted_out.split((5, 1))
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_binary.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import os
import unittest
import crypten
import crypten.communicator as comm
import torch
import torch.nn.functional as F
from crypten.common.functions.pooling import _pool2d_reshape
from crypten.common.rng import generate_kbit_random_tensor, generate_random_ring_element
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from crypten.mpc import MPCTensor, ptype as Ptype
from crypten.mpc.primitives import ArithmeticSharedTensor, BinarySharedTensor
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
class TestMPC:
"""
This class tests all functions of MPCTensor.
"""
def _get_random_test_tensor(self, *args, **kwargs):
return get_random_test_tensor(device=self.device, *args, **kwargs)
def _check(self, encrypted_tensor, reference, msg, dst=None, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text(dst=dst)
if dst is not None and dst != self.rank:
self.assertIsNone(tensor)
return
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
self.assertTrue(is_float_tensor(reference), "reference must be a float")
if tensor.device != reference.device:
tensor = tensor.cpu()
reference = reference.cpu()
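        # pass if the relative error is within tolerance OR the absolute
        # error is tiny (0.2 * tolerance), which handles values near zero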
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.2)
test_passed = test_passed.gt(0).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Reference %s" % reference)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def _check_tuple(self, encrypted_tuple, reference, msg, tolerance=None):
self.assertTrue(isinstance(encrypted_tuple, tuple))
self.assertEqual(len(encrypted_tuple), len(reference))
for i in range(len(reference)):
self._check(encrypted_tuple[i], reference[i], msg, tolerance=tolerance)
def test_repr(self):
a = self._get_random_test_tensor(size=(1,))
arithmetic = MPCTensor(a, ptype=Ptype.arithmetic)
binary = MPCTensor(a, ptype=Ptype.binary)
# Make sure these don't crash
print(arithmetic)
repr(arithmetic)
print(binary)
repr(binary)
def test_from_shares(self):
"""Tests MPCTensor.from_shares() functionality."""
# settings for test:
num_parties = int(self.world_size)
size = (5, 4)
def _generate_tensor(ptype):
reference = self._get_random_test_tensor(size=size, is_float=False)
# generate arithmetic sharing of reference tensor:
if ptype == Ptype.arithmetic:
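                # shares z_i - z_{i-1} telescope to zero when summed across
                # the party dimension, so adding the reference to share 0
                # yields a valid additive sharing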
zero_shares = generate_random_ring_element(
(num_parties, *size), device=self.device
)
zero_shares = zero_shares - zero_shares.roll(1, dims=0)
shares = list(zero_shares.unbind(0))
shares[0] += reference
# generate binary sharing of reference tensor:
else:
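                # XOR analogue: z_i ^ z_{i-1} cancels to zero when XORed
                # across parties, so XORing the reference into share 0
                # yields a valid binary sharing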
zero_shares = generate_kbit_random_tensor(
(num_parties, *size), device=self.device
)
zero_shares = zero_shares ^ zero_shares.roll(1, dims=0)
shares = list(zero_shares.unbind(0))
shares[0] ^= reference
# return shares and reference:
return shares, reference
# test both types:
for ptype in [Ptype.arithmetic, Ptype.binary]:
# generate shares, sync them between parties, and create tensor:
shares, reference = _generate_tensor(ptype)
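            # rank 0 scatters the shares so that each party holds exactly one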
share = comm.get().scatter(shares, 0)
encrypted_tensor = MPCTensor.from_shares(share, ptype=ptype)
# check resulting tensor:
self.assertIsInstance(encrypted_tensor, MPCTensor)
self.assertEqual(encrypted_tensor.ptype, ptype)
self.assertIsInstance(encrypted_tensor._tensor, ptype.to_tensor())
decrypted_tensor = encrypted_tensor.reveal()
self.assertTrue(torch.all(decrypted_tensor.eq(reference)).item())
def test_share_attr(self):
"""Tests share attribute getter and setter"""
for is_float in (True, False):
reference = self._get_random_test_tensor(is_float=is_float)
encrypted_tensor = MPCTensor(reference)
underlying_tensor = encrypted_tensor.share
self.assertTrue(
torch.equal(encrypted_tensor.share, underlying_tensor),
"share getter failed",
)
new_share = self._get_random_test_tensor(is_float=False)
encrypted_tensor.share = new_share
self.assertTrue(
torch.equal(encrypted_tensor.share, new_share), "share setter failed"
)
def test_encrypt_decrypt(self):
"""
Tests tensor encryption and decryption for both positive
and negative values.
"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
# encryption and decryption without source:
reference = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(reference)
self._check(encrypted_tensor, reference, "en/decryption failed")
for dst in range(self.world_size):
self._check(
encrypted_tensor, reference, "en/decryption failed", dst=dst
)
# test creation via new() function:
encrypted_tensor2 = encrypted_tensor.new(reference)
self.assertIsInstance(
encrypted_tensor2, MPCTensor, "new() returns incorrect type"
)
self._check(encrypted_tensor2, reference, "en/decryption failed")
# TODO: Implement broadcast_size on GPU
if self.device.type == "cuda":
continue
# encryption and decryption with source:
for src in range(self.world_size):
input_tensor = reference if src == self.rank else []
encrypted_tensor = MPCTensor(input_tensor, src=src, broadcast_size=True)
for dst in range(self.world_size):
self._check(
encrypted_tensor,
reference,
"en/decryption with broadcast_size failed",
dst=dst,
)
# MPCTensors cannot be initialized with None:
with self.assertRaises(ValueError):
_ = MPCTensor(None)
def test_arithmetic(self):
"""Tests arithmetic functions on encrypted tensor."""
arithmetic_functions = ["add", "add_", "sub", "sub_", "mul", "mul_"]
for func in arithmetic_functions:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True)
encrypted = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted, func)(encrypted2)
self._check(
encrypted_out,
reference,
"%s %s failed"
% ("private" if tensor_type == MPCTensor else "public", func),
)
if "_" in func:
# Check in-place op worked
self._check(
encrypted,
reference,
"%s %s failed"
% ("private" if tensor_type == MPCTensor else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed"
% ("private" if tensor_type == MPCTensor else "public", func),
)
            # Check that ops between an encrypted vector and an encrypted scalar work.
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True, size=(1,))
encrypted1 = MPCTensor(tensor1)
encrypted2 = MPCTensor(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted1, func)(encrypted2)
self._check(encrypted_out, reference, "private %s failed" % func)
tensor = self._get_random_test_tensor(is_float=True)
reference = tensor * tensor
encrypted = MPCTensor(tensor)
encrypted_out = encrypted.square()
self._check(encrypted_out, reference, "square failed")
# Test radd, rsub, and rmul
reference = 2 + tensor1
encrypted = MPCTensor(tensor1)
encrypted_out = 2 + encrypted
self._check(encrypted_out, reference, "right add failed")
reference = 2 - tensor1
encrypted_out = 2 - encrypted
self._check(encrypted_out, reference, "right sub failed")
reference = 2 * tensor1
encrypted_out = 2 * encrypted
self._check(encrypted_out, reference, "right mul failed")
def test_sum(self):
"""Tests sum reduction on encrypted tensor."""
tensor = self._get_random_test_tensor(size=(100, 100), is_float=True)
encrypted = MPCTensor(tensor)
self._check(encrypted.sum(), tensor.sum(), "sum failed")
for dim in [0, 1]:
reference = tensor.sum(dim)
encrypted_out = encrypted.sum(dim)
self._check(encrypted_out, reference, "sum failed")
def test_prod(self):
"""Tests prod reduction on encrypted tensor."""
tensor = self._get_random_test_tensor(size=(3, 3), max_value=3, is_float=False)
encrypted = MPCTensor(tensor)
self._check(encrypted.prod(), tensor.prod().float(), "prod failed")
tensor = self._get_random_test_tensor(
size=(5, 5, 5), max_value=3, is_float=False
)
encrypted = MPCTensor(tensor)
for dim in [0, 1, 2]:
reference = tensor.prod(dim).float()
encrypted_out = encrypted.prod(dim)
self._check(encrypted_out, reference, "prod failed")
def test_ptype(self):
"""Test that ptype attribute creates the correct type of encrypted tensor"""
ptype_values = [crypten.mpc.arithmetic, crypten.mpc.binary]
tensor_types = [ArithmeticSharedTensor, BinarySharedTensor]
for i, curr_ptype in enumerate(ptype_values):
tensor = self._get_random_test_tensor(is_float=False)
encr_tensor = crypten.cryptensor(tensor, ptype=curr_ptype)
assert isinstance(encr_tensor._tensor, tensor_types[i]), "ptype test failed"
def test_div(self):
"""Tests division of encrypted tensor by scalar and tensor."""
for function in ["div", "div_"]:
for scalar in [2, 2.0]:
tensor = self._get_random_test_tensor(is_float=True)
reference = tensor.float().div(scalar)
encrypted_tensor = MPCTensor(tensor)
encrypted_tensor = getattr(encrypted_tensor, function)(scalar)
self._check(encrypted_tensor, reference, "scalar division failed")
# multiply denominator by 10 to avoid dividing by small num
divisor = self._get_random_test_tensor(is_float=True, ex_zero=True) * 10
reference = tensor.div(divisor)
encrypted_tensor = MPCTensor(tensor)
encrypted_tensor = getattr(encrypted_tensor, function)(divisor)
self._check(encrypted_tensor, reference, "tensor division failed")
def test_mean(self):
"""Tests computing means of encrypted tensors."""
tensor = self._get_random_test_tensor(size=(5, 10, 15), is_float=True)
encrypted = MPCTensor(tensor)
self._check(encrypted.mean(), tensor.mean(), "mean failed")
for dim in [0, 1, 2]:
reference = tensor.mean(dim)
encrypted_out = encrypted.mean(dim)
self._check(encrypted_out, reference, "mean failed")
def test_var(self):
"""Tests computing variances of encrypted tensors."""
tensor = self._get_random_test_tensor(size=(5, 10, 15), is_float=True)
encrypted = MPCTensor(tensor)
self._check(encrypted.var(), tensor.var(), "var failed")
for dim in [0, 1, 2]:
reference = tensor.var(dim)
encrypted_out = encrypted.var(dim)
self._check(encrypted_out, reference, "var failed")
def test_matmul(self):
"""Test matrix multiplication."""
for tensor_type in [lambda x: x, MPCTensor]:
tensor = self._get_random_test_tensor(max_value=7, is_float=True)
for width in range(2, tensor.nelement()):
matrix_size = (tensor.nelement(), width)
matrix = self._get_random_test_tensor(
max_value=7, size=matrix_size, is_float=True
)
reference = tensor.matmul(matrix)
encrypted_tensor = MPCTensor(tensor)
matrix = tensor_type(matrix)
encrypted_tensor = encrypted_tensor.matmul(matrix)
self._check(
encrypted_tensor,
reference,
"Private-%s matrix multiplication failed"
% ("private" if tensor_type == MPCTensor else "public"),
)
def test_dot_ger(self):
"""Test dot product of vector and encrypted tensor."""
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True).squeeze()
tensor2 = self._get_random_test_tensor(is_float=True).squeeze()
dot_reference = tensor1.dot(tensor2)
ger_reference = torch.outer(tensor1, tensor2)
tensor2 = tensor_type(tensor2)
# dot
encrypted_tensor = MPCTensor(tensor1)
encrypted_out = encrypted_tensor.dot(tensor2)
            self._check(
                encrypted_out,
                dot_reference,
                "%s dot product failed"
                % ("private" if tensor_type == MPCTensor else "public"),
            )
# ger
encrypted_tensor = MPCTensor(tensor1)
encrypted_out = encrypted_tensor.ger(tensor2)
            self._check(
                encrypted_out,
                ger_reference,
                "%s outer product failed"
                % ("private" if tensor_type == MPCTensor else "public"),
            )
def test_squeeze(self):
tensor = self._get_random_test_tensor(is_float=True)
for dim in [0, 1, 2]:
# Test unsqueeze
reference = tensor.unsqueeze(dim)
encrypted = MPCTensor(tensor)
encrypted_out = encrypted.unsqueeze(dim)
self._check(encrypted_out, reference, "unsqueeze failed")
# Test squeeze
encrypted = MPCTensor(tensor.unsqueeze(0))
encrypted_out = encrypted.squeeze()
self._check(encrypted_out, reference.squeeze(), "squeeze failed")
            # Check that encrypted_out and encrypted share the same
            # underlying data.
encrypted_out[0:2] = torch.tensor(
[0, 1], dtype=torch.float, device=self.device
)
ref = encrypted.squeeze().get_plain_text()
self._check(encrypted_out, ref, "squeeze failed")
def test_transpose(self):
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
if len(size) == 2: # t() asserts dim == 2
reference = tensor.t()
encrypted_out = encrypted_tensor.t()
self._check(encrypted_out, reference, "t() failed")
for dim0 in range(len(size)):
for dim1 in range(len(size)):
reference = tensor.transpose(dim0, dim1)
encrypted_out = encrypted_tensor.transpose(dim0, dim1)
self._check(encrypted_out, reference, "transpose failed")
def test_conv1d_smaller_signal_one_channel(self):
self._conv1d(5, 1)
def test_conv1d_smaller_signal_many_channels(self):
self._conv1d(5, 5)
def test_conv1d_larger_signal_one_channel(self):
self._conv1d(16, 1)
def test_conv1d_larger_signal_many_channels(self):
self._conv1d(16, 5)
def _conv1d(self, signal_size, in_channels):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
kernel_sizes = [1, 2, 3]
ochannels = [1, 3, 6]
paddings = [0, 1]
strides = [1, 2]
dilations = [1, 2]
groupings = [1, 2]
for func_name in ["conv1d", "conv_transpose1d"]:
for kernel_type in [lambda x: x, MPCTensor]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches,
kernel_sizes,
ochannels,
paddings,
strides,
dilations,
groupings,
):
# group convolution is not supported on GPU
if self.device.type == "cuda" and groups > 1:
continue
input_size = (batches, in_channels * groups, signal_size)
signal = self._get_random_test_tensor(
size=input_size, is_float=True
)
if func_name == "conv1d":
k_size = (out_channels * groups, in_channels, kernel_size)
else:
k_size = (in_channels * groups, out_channels, kernel_size)
kernel = self._get_random_test_tensor(size=k_size, is_float=True)
reference = getattr(F, func_name)(
signal,
kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
encrypted_signal = MPCTensor(signal)
encrypted_kernel = kernel_type(kernel)
encrypted_conv = getattr(encrypted_signal, func_name)(
encrypted_kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
self._check(encrypted_conv, reference, f"{func_name} failed")
def test_conv2d_square_image_one_channel(self):
self._conv2d((5, 5), 1, "conv2d")
def test_conv_transpose2d_square_image_one_channel(self):
self._conv2d((5, 5), 1, "conv_transpose2d")
def test_conv2d_square_image_many_channels(self):
self._conv2d((5, 5), 5, "conv2d")
def test_conv_transpose2d_square_image_many_channels(self):
self._conv2d((5, 5), 5, "conv_transpose2d")
def test_conv2d_rectangular_image_one_channel(self):
self._conv2d((16, 7), 1, "conv2d")
def test_conv_transpose2d_rectangular_image_one_channel(self):
self._conv2d((16, 7), 1, "conv_transpose2d")
def test_conv2d_rectangular_image_many_channels(self):
self._conv2d((16, 7), 5, "conv2d")
def test_conv_transpose2d_rectangular_image_many_channels(self):
self._conv2d((16, 7), 5, "conv_transpose2d")
def _conv2d(self, image_size, in_channels, func_name):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
kernel_sizes = [(1, 1), (2, 2), (2, 3)]
ochannels = [1, 3]
paddings = [0, 1, (0, 1)]
strides = [1, 2, (1, 2)]
dilations = [1, 2]
groupings = [1, 2]
assert func_name in [
"conv2d",
"conv_transpose2d",
], f"Invalid func_name: {func_name}"
for kernel_type in [lambda x: x, MPCTensor]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches,
kernel_sizes,
ochannels,
paddings,
strides,
dilations,
groupings,
):
# group convolution is not supported on GPU
if self.device.type == "cuda" and groups > 1:
continue
# sample input:
input_size = (batches, in_channels * groups, *image_size)
input = self._get_random_test_tensor(size=input_size, is_float=True)
# sample filtering kernel:
if func_name == "conv2d":
k_size = (out_channels * groups, in_channels, *kernel_size)
else:
k_size = (in_channels * groups, out_channels, *kernel_size)
kernel = self._get_random_test_tensor(size=k_size, is_float=True)
# perform filtering:
encr_matrix = MPCTensor(input)
encr_kernel = kernel_type(kernel)
encr_conv = getattr(encr_matrix, func_name)(
encr_kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
# check that result is correct:
reference = getattr(F, func_name)(
input,
kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
self._check(encr_conv, reference, "%s failed" % func_name)
def test_max_pooling(self):
"""Test max_pool of encrypted tensor."""
def _assert_index_match(
indices,
encrypted_indices,
matrix_size,
kernel_size,
**kwargs,
):
# Assert each kernel is one-hot
self.assertTrue(
encrypted_indices.get_plain_text()
.sum(-1)
.sum(-1)
.eq(torch.ones_like(indices))
.all(),
"Encrypted indices are not one-hot",
)
# Populate tensor with kernel indices
arange_size = matrix_size[-2:]
index_values = torch.arange(arange_size.numel(), device=indices.device)
index_values = index_values.view(arange_size)
index_values = index_values.expand(matrix_size)
# Ensure encrypted indices are correct
index_mask, size = _pool2d_reshape(index_values, kernel_size, **kwargs)
index_mask = index_mask.view(*size, kernel_size, kernel_size)
crypten_indices = encrypted_indices.mul(index_mask).sum(-1).sum(-1)
self._check(
crypten_indices, indices.float(), "max_pool2d indexing is incorrect"
)
dilations = [1, 2]
for width in range(2, 5):
for kernel_size in range(1, width):
matrix_size = (1, 4, 5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
strides = list(range(1, kernel_size + 1)) + [(1, kernel_size)]
paddings = range(kernel_size // 2 + 1)
for (
stride,
padding,
dilation,
ceil_mode,
return_indices,
) in itertools.product(
strides,
paddings,
dilations,
[False, True],
[False, True],
):
kwargs = {
"stride": stride,
"padding": padding,
"dilation": dilation,
"ceil_mode": ceil_mode,
"return_indices": return_indices,
}
# Skip kernels that lead to 0-size outputs
if (kernel_size - 1) * dilation > width - 1:
continue
reference = F.max_pool2d(matrix, kernel_size, **kwargs)
encrypted_matrix = MPCTensor(matrix)
encrypted_pool = encrypted_matrix.max_pool2d(kernel_size, **kwargs)
if return_indices:
indices = reference[1]
encrypted_indices = encrypted_pool[1]
kwargs.pop("return_indices")
_assert_index_match(
indices,
encrypted_indices,
matrix.size(),
kernel_size,
**kwargs,
)
encrypted_pool = encrypted_pool[0]
reference = reference[0]
self._check(encrypted_pool, reference, "max_pool2d failed")
def test_avg_pooling(self):
"""Test avg_pool of encrypted tensor."""
for width in range(2, 5):
for kernel_size in range(1, width):
matrix_size = (1, 4, 5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
strides = list(range(1, kernel_size + 1)) + [(1, kernel_size)]
paddings = range(kernel_size // 2 + 1)
for stride, padding in itertools.product(strides, paddings):
kwargs = {"stride": stride, "padding": padding}
reference = F.avg_pool2d(matrix, kernel_size, **kwargs)
encrypted_matrix = MPCTensor(matrix)
encrypted_pool = encrypted_matrix.avg_pool2d(kernel_size, **kwargs)
self._check(encrypted_pool, reference, "avg_pool2d failed")
def test_adaptive_pooling(self):
"""test adaptive_avg_pool2d and adaptive_max_pool2d"""
for in_size in range(1, 11):
for out_size in list(range(1, in_size + 1)) + [None]:
input_size = (1, in_size, in_size)
output_size = (out_size, out_size)
tensor = self._get_random_test_tensor(
size=input_size, is_float=True
).unsqueeze(0)
encrypted = MPCTensor(tensor)
# Test adaptive_avg_pool2d
reference = F.adaptive_avg_pool2d(tensor, output_size)
encrypted_out = encrypted.adaptive_avg_pool2d(output_size)
self._check(encrypted_out, reference, "adaptive_avg_pool2d failed")
# Test adaptive_max_pool2d
for return_indices in [False, True]:
reference = F.adaptive_max_pool2d(
tensor, output_size, return_indices=return_indices
)
encrypted_out = encrypted.adaptive_max_pool2d(
output_size, return_indices=return_indices
)
if return_indices:
encrypted_out = encrypted_out[0]
reference = reference[0]
self._check(encrypted_out, reference, "adaptive_max_pool2d failed")
def test_take(self):
"""Tests take function on encrypted tensor"""
tensor_size = [5, 5, 5, 5]
index = torch.tensor(
[[[1, 2], [3, 4]], [[4, 2], [1, 3]]], dtype=torch.long, device=self.device
)
tensor = self._get_random_test_tensor(size=tensor_size, is_float=True)
# Test when dimension != None
for dimension in range(0, 4):
ndarray = tensor.cpu().numpy()
reference = torch.from_numpy(ndarray.take(index.cpu(), dimension))
encrypted_tensor = MPCTensor(tensor)
encrypted_out = encrypted_tensor.take(index, dimension)
self._check(encrypted_out, reference, "take function failed: dimension set")
# Test when dimension is default (i.e. None)
sizes = [(15,), (5, 10), (15, 10, 5)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
take_indices = [[0], [10], [0, 5, 10]]
for indices in take_indices:
indices = torch.tensor(indices, device=self.device)
self._check(
encrypted_tensor.take(indices),
tensor.take(indices),
f"take failed with indices {indices}",
)
def test_neg(self):
"""Test negative on encrypted tensor."""
for width in range(2, 5):
matrix_size = (5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
encrypted_matrix = MPCTensor(matrix)
self._check(-encrypted_matrix, -matrix, "__neg__ failed")
for func_name in ["neg", "neg_"]:
reference = getattr(matrix, func_name)()
encrypted_output = getattr(encrypted_matrix, func_name)()
self._check(encrypted_output, reference, "%s failed" % func_name)
def test_relu(self):
"""Test relu on encrypted tensor."""
for width in range(2, 5):
matrix_size = (5, width)
matrix = self._get_random_test_tensor(size=matrix_size, is_float=True)
# Generate some negative values
matrix2 = self._get_random_test_tensor(size=matrix_size, is_float=True)
matrix = matrix - matrix2
encrypted_matrix = MPCTensor(matrix)
reference = F.relu_(matrix)
encrypted_matrix = encrypted_matrix.relu()
self._check(encrypted_matrix, reference, "relu failed")
def test_comparators(self):
"""Test comparators (>, >=, <, <=, ==, !=)"""
for comp in ["gt", "ge", "lt", "le", "eq", "ne"]:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True)
encrypted_tensor1 = MPCTensor(tensor1)
encrypted_tensor2 = tensor_type(tensor2)
reference = getattr(tensor1, comp)(tensor2).float()
encrypted_out = getattr(encrypted_tensor1, comp)(encrypted_tensor2)
self._check(encrypted_out, reference, "%s comparator failed" % comp)
# Check a deterministic example to guarantee all comparison outcomes (<, ==, >) occur
tensor1 = torch.tensor([2.0, 3.0, 1.0, 2.0, 2.0])
tensor2 = torch.tensor([2.0, 2.0, 2.0, 3.0, 1.0])
encrypted_tensor1 = MPCTensor(tensor1)
encrypted_tensor2 = tensor_type(tensor2)
reference = getattr(tensor1, comp)(tensor2).float()
encrypted_out = getattr(encrypted_tensor1, comp)(encrypted_tensor2)
self._check(encrypted_out, reference, "%s comparator failed" % comp)
def test_max_min_pairwise(self):
"""Tests max and min for the deterministic constant (n^2) algorithm"""
self._max_min("pairwise")
def test_max_min_log_reduction(self):
"""Tests max and min for log reduction algorithm"""
self._max_min("log_reduction")
def test_max_min_double_log_reduction(self):
"""Tests max and min for double log reduction algorithm"""
self._max_min("double_log_reduction")
def test_max_min_accelerated_cascade(self):
"""Tests max and min for accelerated cascading algorithm"""
self._max_min("accelerated_cascade")
def _max_min(self, method):
"""Test max and min for the specified algorithm"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
test_cases = [
torch.tensor(
[[1, 1, 2, 1, 4, 1, 3, 4]], dtype=torch.float, device=self.device
)
] + [self._get_random_test_tensor(size=size, is_float=False) for size in sizes]
for tensor in test_cases:
tensor = tensor.float()
encrypted_tensor = MPCTensor(tensor)
for comp in ["max", "min"]:
reference = getattr(tensor, comp)()
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)()
self._check(encrypted_out, reference, "%s reduction failed" % comp)
for dim in range(tensor.dim()):
for keepdim in [False, True]:
reference = getattr(tensor, comp)(dim, keepdim=keepdim)
# Test with one_hot = False
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=False
)
# Check max / min values are correct
self._check(
encrypted_out[0], reference[0], "%s reduction failed" % comp
)
# Test argmax / argmin values are correct
out_encr = encrypted_out[1]
out_decr = out_encr.get_plain_text().long()
argmax_ref = reference[1]
# Must index into tensor since ties are broken randomly
# so crypten and PyTorch can return different indices.
# This checks that they index to the same value.
if not keepdim:
out_decr = out_decr.unsqueeze(dim)
argmax_ref = argmax_ref.unsqueeze(dim)
mpc_result = tensor.gather(dim, out_decr)
torch_result = tensor.gather(dim, argmax_ref)
self.assertTrue(
(mpc_result == torch_result).all().item(),
"%s reduction failed" % comp,
)
# Test indices with one_hot = True
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=True
)
# Check argmax results
val_ref = reference[0]
out_encr = encrypted_out[1]
out_decr = out_encr.get_plain_text()
self.assertTrue((out_decr.sum(dim) == 1).all())
self.assertTrue(
(
out_decr.mul(tensor).sum(dim, keepdim=keepdim)
== val_ref
).all()
)
def test_argmax_argmin_pairwise(self):
"""Tests argmax and argmin for the deterministic constant (n^2) algorithm"""
self._argmax_argmin("pairwise")
def test_argmax_argmin_log_reduction(self):
"""Tests argmax and argmin for log reduction algorithm"""
self._argmax_argmin("log_reduction")
def test_argmax_argmin_double_log_reduction(self):
"""Tests argmax and argmin for double log reduction algorithm"""
self._argmax_argmin("double_log_reduction")
def test_argmax_argmin_accelerated_cascade(self):
"""Tests max and min for accelerated cascading algorithm"""
self._max_min("accelerated_cascade")
def _argmax_argmin(self, method):
"""Test argmax and argmin for specified algorithm"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
test_cases = [
torch.tensor(
[[1, 1, 2, 1, 4, 1, 3, 4]], dtype=torch.float, device=self.device
)
] + [self._get_random_test_tensor(size=size, is_float=False) for size in sizes]
for tensor in test_cases:
tensor = tensor.float()
encrypted_tensor = MPCTensor(tensor)
for comp in ["argmax", "argmin"]:
cmp = comp[3:]
value = getattr(tensor, cmp)()
# test with one_hot = False
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(one_hot=False)
# Must index into tensor since ties are broken randomly
# so crypten and PyTorch can return different indices.
# This checks that they index to the same value.
decrypted_out = encrypted_out.get_plain_text()
if tensor.dim() == 0: # if input is 0-d, argmax should be 0
self.assertEqual(decrypted_out, 0)
else:
decrypted_val = tensor.flatten()[decrypted_out.long()]
self.assertTrue(decrypted_val.eq(value).all().item())
# test with one_hot = True
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(one_hot=True)
one_hot_indices = (tensor == value).float()
decrypted_out = encrypted_out.get_plain_text()
self.assertTrue(decrypted_out.sum() == 1)
self.assertTrue(decrypted_out.mul(one_hot_indices).sum() == 1)
for dim in range(tensor.dim()):
for keepdim in [False, True]:
# Compute plaintext argmax/argmin reference values and indices
values, indices = getattr(tensor, cmp)(dim, keepdim=keepdim)
# test with one_hot = False
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=False
)
# Must index into tensor since ties are broken randomly
# so crypten and PyTorch can return different indices.
# This checks that they index to the same value.
decrypted_out = encrypted_out.get_plain_text()
if not keepdim:
decrypted_out = decrypted_out.unsqueeze(dim)
indices = indices.unsqueeze(dim)
decrypted_val = tensor.gather(dim, decrypted_out.long())
reference = tensor.gather(dim, indices)
self.assertTrue(decrypted_val.eq(reference).all().item())
# test with one_hot = True
with cfg.temp_override({"functions.max_method": method}):
encrypted_out = getattr(encrypted_tensor, comp)(
dim, keepdim=keepdim, one_hot=True
)
decrypted_out = encrypted_out.get_plain_text()
if not keepdim:
values = values.unsqueeze(dim)
one_hot_indices = tensor.eq(values).float()
self.assertTrue(decrypted_out.sum(dim).eq(1).all())
self.assertTrue(
decrypted_out.mul(one_hot_indices).sum(dim).eq(1).all()
)
def test_abs_sign(self):
"""Test absolute value function"""
for op in ["abs", "sign"]:
tensor = self._get_random_test_tensor(is_float=True)
if op == "sign":
# do not test on 0 since torch.tensor([0]).sign() = 0
tensor = tensor + (tensor == 0).float()
encrypted_tensor = MPCTensor(tensor)
reference = getattr(tensor, op)()
encrypted_out = getattr(encrypted_tensor, op)()
self._check(encrypted_out, reference, "%s failed" % op)
def test_approximations(self):
"""Test appoximate functions (exp, log, sqrt, reciprocal, pos_pow)"""
def test_with_inputs(func, input):
encrypted_tensor = MPCTensor(input)
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted_tensor, func)()
self._check(encrypted_out, reference, "%s failed" % func)
# Test on [-10, 10] range
full_range_cases = ["exp"]
tensor = torch.tensor(
[0.01 * i for i in range(-1000, 1001, 1)], device=self.device
)
for func in full_range_cases:
test_with_inputs(func, tensor)
# Test on [-10, 10] range with zeros replaced, since reciprocal is undefined at 0
tensor[tensor == 0] = 1.0
non_zero_cases = ["reciprocal"]
for func in non_zero_cases:
test_with_inputs(func, tensor)
# Test on (0, 10] range (positive values only)
tensor = tensor[1001:]
pos_cases = ["log", "sqrt"]
for func in pos_cases:
test_with_inputs(func, tensor)
# Test pos_pow with several exponents
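# pos_pow assumes a positive base: for non-integer exponents it computes
# x^p as exp(p * log(x)), which is why it is only exercised on positive inputs.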
encrypted_tensor = MPCTensor(tensor)
# Reduced the max_value so approximations have less absolute error
tensor_exponent = self._get_random_test_tensor(
max_value=2, size=tensor.size(), is_float=True
)
exponents = [-3, -2, -1, 0, 1, 2, 3, tensor_exponent]
exponents += [MPCTensor(tensor_exponent)]
for p in exponents:
if isinstance(p, MPCTensor):
reference = tensor.pow(p.get_plain_text())
else:
reference = tensor.pow(p)
encrypted_out = encrypted_tensor.pos_pow(p)
self._check(encrypted_out, reference, f"pos_pow failed with power {p}")
def test_norm(self):
"""Tests p-norm"""
for p in [1, 1.5, 2, 3, float("inf"), "fro"]:
for dim in [None, 0, 1, 2]:
tensor = self._get_random_test_tensor(size=(3, 3, 3), is_float=True) / 5
if dim is None:
reference = tensor.norm(p=p)
else:
reference = tensor.norm(p=p, dim=dim)
encrypted = MPCTensor(tensor)
encrypted_out = encrypted.norm(p=p, dim=dim)
self._check(encrypted_out, reference, f"{p}-norm failed", tolerance=0.5)
def test_logistic(self):
"""Tests logistic functions (sigmoid, tanh)"""
tensor = torch.tensor(
[0.01 * i for i in range(-1000, 1001, 1)], device=self.device
)
encrypted_tensor = MPCTensor(tensor)
cases = ["sigmoid", "tanh"]
for func in cases:
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted_tensor, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_hardtanh(self):
tensor = torch.arange(-10, 10, dtype=torch.float32)
encrypted = MPCTensor(tensor)
for minval in range(-10, 10):
for maxval in range(minval, 11):
reference = torch.nn.functional.hardtanh(tensor, minval, maxval)
encrypted_out = encrypted.hardtanh(minval, maxval)
self._check(encrypted_out, reference, "hardtanh failed")
def test_inplace_warning(self):
"""Tests that a warning is thrown that indicates that the `inplace` kwarg
is ignored when a function is called with `inplace=True`
"""
tensor = get_random_test_tensor(is_float=True)
encrypted = MPCTensor(tensor)
functions = ["dropout", "_feature_dropout"]
for func in functions:
warning_str = (
f"CrypTen {func} does not support inplace computation during training."
)
with self.assertLogs(logger=logging.getLogger(), level="WARNING") as cm:
getattr(encrypted, func)(inplace=True)
self.assertTrue(f"WARNING:root:{warning_str}" in cm.output)
def test_cos_sin(self):
"""Tests trigonometric functions (cos, sin)"""
tensor = torch.tensor(
[0.01 * i for i in range(-1000, 1001, 1)], device=self.device
)
encrypted_tensor = MPCTensor(tensor)
cases = ["cos", "sin"]
for func in cases:
reference = getattr(tensor, func)()
encrypted_out = getattr(encrypted_tensor, func)()
self._check(encrypted_out, reference, "%s failed" % func)
def test_softmax(self):
"""Test softmax and log_softmax function"""
for softmax_fn in ["softmax", "log_softmax"]:
# Test 0-dim tensor:
tensor = self._get_random_test_tensor(size=(), is_float=True)
reference = getattr(tensor, softmax_fn)(0)
encrypted_tensor = MPCTensor(tensor)
encrypted_out = getattr(encrypted_tensor, softmax_fn)(0)
self._check(encrypted_out, reference, "0-dim tensor %s failed" % softmax_fn)
# Test all other sizes
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 5, 5, 5),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True) / 5
encrypted_tensor = MPCTensor(tensor)
for dim in range(tensor.dim()):
reference = getattr(tensor, softmax_fn)(dim)
encrypted_out = getattr(encrypted_tensor, softmax_fn)(dim)
self._check(encrypted_out, reference, "%s failed" % softmax_fn)
def test_get_set(self):
"""Tests element setting and getting by index"""
for tensor_type in [lambda x: x, MPCTensor]:
for size in range(1, 5):
# Test __getitem__
tensor = self._get_random_test_tensor(size=(size, size), is_float=True)
reference = tensor[:, 0]
encrypted_tensor = MPCTensor(tensor)
encrypted_out = encrypted_tensor[:, 0]
self._check(encrypted_out, reference, "getitem failed")
reference = tensor[0, :]
encrypted_out = encrypted_tensor[0, :]
self._check(encrypted_out, reference, "getitem failed")
# Test __setitem__
tensor2 = self._get_random_test_tensor(size=(size,), is_float=True)
reference = tensor.clone()
reference[:, 0] = tensor2
encrypted_out = MPCTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[:, 0] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
reference = tensor.clone()
reference[0, :] = tensor2
encrypted_out = MPCTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[0, :] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
def test_pad(self):
"""Tests padding"""
sizes = [(1,), (5,), (1, 1), (5, 5), (5, 5, 5), (5, 3, 32, 32)]
pads = [
(0, 0, 0, 0),
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 1, 1, 1),
(2, 2, 1, 1),
(2, 2, 2, 2),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for pad in pads:
for value in [0, 1, 10]:
if tensor.dim() < 2:
pad = pad[:2]
reference = torch.nn.functional.pad(tensor, pad, value=value)
encrypted_value = MPCTensor(value, device=self.device)
encrypted_out = encrypted_tensor.pad(pad, value=encrypted_value)
encrypted_out2 = encrypted_tensor.pad(pad, value=value)
self._check(encrypted_out, reference, "pad failed")
self._check(encrypted_out2, reference, "pad failed")
def test_index_add(self):
"""Test index_add function of encrypted tensor"""
index_add_functions = ["index_add", "index_add_"]
tensor_size1 = [5, 5, 5, 5]
index = torch.tensor(
[1, 2, 3, 4, 4, 2, 1, 3], dtype=torch.long, device=self.device
)
for dimension in range(0, 4):
tensor_size2 = [5, 5, 5, 5]
tensor_size2[dimension] = index.size(0)
for func in index_add_functions:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(
size=tensor_size1, is_float=True
)
tensor2 = self._get_random_test_tensor(
size=tensor_size2, is_float=True
)
encrypted = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(dimension, index, tensor2)
encrypted_out = getattr(encrypted, func)(
dimension, index, encrypted2
)
private_type = tensor_type == MPCTensor
self._check(
encrypted_out,
reference,
"%s %s failed"
% ("private" if private_type else "public", func),
)
if func.endswith("_"):
# Check in-place index_add worked
self._check(
encrypted,
reference,
"%s %s failed"
% ("private" if private_type else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed"
% (
"private" if tensor_type == MPCTensor else "public",
func,
),
)
def test_scatter(self):
"""Test scatter/scatter_add function of encrypted tensor"""
funcs = ["scatter", "scatter_", "scatter_add", "scatter_add_"]
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for func in funcs:
for size in sizes:
for tensor_type in [lambda x: x, MPCTensor]:
for dim in range(len(size)):
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
index = self._get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
encrypted = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(dim, index, tensor2)
encrypted_out = getattr(encrypted, func)(dim, index, encrypted2)
private = tensor_type == MPCTensor
self._check(
encrypted_out,
reference,
"%s %s failed" % ("private" if private else "public", func),
)
if func.endswith("_"):
# Check in-place scatter/scatter_add worked
self._check(
encrypted,
reference,
"%s %s failed"
% ("private" if private else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed"
% ("private" if private else "public", func),
)
def test_broadcast_arithmetic_ops(self):
"""Test broadcast of arithmetic functions."""
arithmetic_functions = ["add", "sub", "mul", "div"]
# TODO: Add broadcasting for pos_pow since it can take a tensor argument
arithmetic_sizes = [
(),
(1,),
(2,),
(1, 1),
(1, 2),
(2, 1),
(2, 2),
(1, 1, 1),
(1, 1, 2),
(1, 2, 1),
(2, 1, 1),
(2, 2, 2),
(1, 1, 1, 1),
(1, 1, 1, 2),
(1, 1, 2, 1),
(1, 2, 1, 1),
(2, 1, 1, 1),
(2, 2, 2, 2),
]
for tensor_type in [lambda x: x, MPCTensor]:
for func in arithmetic_functions:
for size1, size2 in itertools.combinations(arithmetic_sizes, 2):
exclude_zero = True if func == "div" else False
# multiply denominator by 10 to avoid dividing by small num
const = 10 if func == "div" else 1
tensor1 = self._get_random_test_tensor(size=size1, is_float=True)
tensor2 = self._get_random_test_tensor(
size=size2, is_float=True, ex_zero=exclude_zero
)
tensor2 *= const
encrypted1 = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted1, func)(encrypted2)
private = isinstance(encrypted2, MPCTensor)
self._check(
encrypted_out,
reference,
"%s %s broadcast failed"
% ("private" if private else "public", func),
)
# Test with integer tensor
tensor2 = self._get_random_test_tensor(
size=size2, is_float=False, ex_zero=exclude_zero
)
tensor2 *= const
reference = getattr(tensor1, func)(tensor2.float())
encrypted_out = getattr(encrypted1, func)(tensor2)
self._check(
encrypted_out,
reference,
"%s broadcast failed with public integer tensor" % func,
)
def test_broadcast_matmul(self):
"""Test broadcast of matmul."""
matmul_sizes = [(1, 1), (1, 5), (5, 1), (5, 5)]
batch_dims = [(), (1,), (5,), (1, 1), (1, 5), (5, 5)]
for tensor_type in [lambda x: x, MPCTensor]:
for size in matmul_sizes:
for batch1, batch2 in itertools.combinations(batch_dims, 2):
size1 = (*batch1, *size)
size2 = (*batch2, *size)
tensor1 = self._get_random_test_tensor(size=size1, is_float=True)
tensor2 = self._get_random_test_tensor(size=size2, is_float=True)
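# Transpose the trailing dims so the inner dimensions line up:
# (..., m, n) @ (..., n, m) is a valid batched matmul after broadcasting.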
tensor2 = tensor2.transpose(-2, -1)
encrypted1 = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = tensor1.matmul(tensor2)
encrypted_out = encrypted1.matmul(encrypted2)
private = isinstance(encrypted2, MPCTensor)
self._check(
encrypted_out,
reference,
"%s matmul broadcast failed"
% ("private" if private else "public"),
)
# Test with integer tensor
tensor2 = self._get_random_test_tensor(size=size2, is_float=False)
tensor2 = tensor2.float().transpose(-2, -1)
reference = tensor1.matmul(tensor2)
encrypted_out = encrypted1.matmul(tensor2)
self._check(
encrypted_out,
reference,
"matmul broadcast failed with public integer tensor",
)
def test_inplace(self):
"""Test inplace vs. out-of-place functions"""
for op in ["add", "sub", "mul", "div"]:
for tensor_type in [lambda x: x, MPCTensor]:
tensor1 = self._get_random_test_tensor(is_float=True)
tensor2 = self._get_random_test_tensor(is_float=True)
reference = getattr(torch, op)(tensor1, tensor2)
encrypted1 = MPCTensor(tensor1)
encrypted2 = tensor_type(tensor2)
input_tensor_id = id(encrypted1._tensor)
input_encrypted_id = id(encrypted1)
# Test that out-of-place functions do not modify the input
private = isinstance(encrypted2, MPCTensor)
encrypted_out = getattr(encrypted1, op)(encrypted2)
self._check(
encrypted1,
tensor1,
"%s out-of-place %s modifies input"
% ("private" if private else "public", op),
)
self._check(
encrypted_out,
reference,
"%s out-of-place %s produces incorrect output"
% ("private" if private else "public", op),
)
self.assertFalse(id(encrypted_out._tensor) == input_tensor_id)
self.assertFalse(id(encrypted_out) == input_encrypted_id)
# Test that in-place functions modify the input
encrypted_out = getattr(encrypted1, op + "_")(encrypted2)
self._check(
encrypted1,
reference,
"%s in-place %s_ does not modify input"
% ("private" if private else "public", op),
)
self._check(
encrypted_out,
reference,
"%s in-place %s_ produces incorrect output"
% ("private" if private else "public", op),
)
self.assertTrue(id(encrypted_out._tensor) == input_tensor_id)
self.assertTrue(id(encrypted_out) == input_encrypted_id)
def test_copy_clone(self):
"""Tests shallow_copy and clone of encrypted tensors."""
sizes = [(5,), (1, 5), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
# test shallow_copy
encrypted_tensor_shallow = encrypted_tensor.shallow_copy()
self.assertEqual(
id(encrypted_tensor_shallow._tensor), id(encrypted_tensor._tensor)
)
self._check(encrypted_tensor_shallow, tensor, "shallow_copy failed")
# test clone
encrypted_tensor_clone = encrypted_tensor.clone()
self.assertNotEqual(
id(encrypted_tensor_clone._tensor), id(encrypted_tensor._tensor)
)
self._check(encrypted_tensor_clone, tensor, "clone failed")
def test_copy_(self):
"""Tests copy_ function."""
sizes = [(5,), (1, 5), (5, 10, 15)]
for size in sizes:
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor1 = MPCTensor(tensor1)
encrypted_tensor2 = MPCTensor(tensor2)
encrypted_tensor1.copy_(encrypted_tensor2)
self._check(encrypted_tensor1, tensor2, "copy_ failed")
def test_index_select(self):
"""Tests index_select of encrypted tensors."""
sizes = [(5,), (5, 10), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
indices = [[0], [0, 3], [0, 2, 4]]
for dim in range(tensor.dim()):
for index in indices:
index_tensor = torch.tensor(
index, dtype=torch.long, device=self.device
)
reference = tensor.index_select(dim, index_tensor)
encrypted_out = encrypted_tensor.index_select(dim, index_tensor)
self._check(
encrypted_out,
reference,
"index_select failed at dim {dim} and index {index}",
)
def test_narrow(self):
"""Tests narrow function."""
sizes = [(5, 6), (5, 6, 7), (6, 7, 8, 9)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encr_tensor = MPCTensor(tensor)
for dim in range(len(size)):
for start in range(size[dim] - 2):
for length in range(1, size[dim] - start):
tensor_narrow = tensor.narrow(dim, start, length)
encr_tensor_narrow = encr_tensor.narrow(dim, start, length)
self._check(
encr_tensor_narrow,
tensor_narrow,
"narrow failed along dimension %d" % dim,
)
def test_repeat_expand(self):
"""Tests repeat and expand of encrypted tensors."""
sizes = [(1, 8), (4, 1, 8)]
repeat_dims = [(4, 2, 1), (4, 2, 10)]
expand_dims = [(4, 2, 8), (4, 5, 8), (10, 4, 5, 8)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for dims in repeat_dims:
encrypted_tensor_repeated = encrypted_tensor.repeat(*dims)
# test that repeat copies tensor's data
self.assertNotEqual(
id(encrypted_tensor_repeated._tensor), id(encrypted_tensor._tensor)
)
self._check(
encrypted_tensor_repeated,
tensor.repeat(*dims),
f"repeat failed with dims {dims}",
)
for dims in expand_dims:
encrypted_tensor_expanded = encrypted_tensor.expand(*dims)
# test that expand creates a view into the same underlying tensor
self.assertNotEqual(
id(encrypted_tensor_expanded.share), id(encrypted_tensor.share)
)
self._check(
encrypted_tensor_expanded,
tensor.expand(*dims),
f"repeat failed with dims {dims}",
)
def test_view_flatten(self):
"""Tests view and flatten of encrypted tensors."""
sizes = [(100,), (4, 25), (2, 5, 10)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for dim in range(tensor.dim()):
self._check(
encrypted_tensor.flatten(start_dim=dim),
tensor.flatten(start_dim=dim),
f"flatten failed with dim {dim}",
)
shapes = [100, (5, 20), (10, 2, 5), (-1, 10)]
for shape in shapes:
self._check(
encrypted_tensor.view(shape),
tensor.view(shape),
f"view failed with shape {shape}",
)
def test_roll(self):
"""Tests roll of encrypted tensors."""
sizes = [(10, 1), (5, 2), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
roll_shifts = [1, 2, 3, (2, 1)]
roll_dims = [0, 1, 0, (0, 1)]
for shifts, dims in zip(roll_shifts, roll_dims):
encrypted_tensor_rolled = encrypted_tensor.roll(shifts, dims=dims)
self.assertEqual(encrypted_tensor_rolled.numel(), tensor.numel())
self._check(
encrypted_tensor_rolled,
tensor.roll(shifts, dims=dims),
f"roll failed with shift {shifts} and dims {dims}",
)
def test_unfold(self):
"""Tests unfold of encrypted tensors."""
tensor_sizes = [(8,), (15, 10, 5), (5, 10, 15, 20)]
for tensor_size in tensor_sizes:
tensor = self._get_random_test_tensor(size=tensor_size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for size, step in itertools.product(range(1, 4), range(1, 4)):
# check unfold along higher dimension if possible
for dim in range(tensor.dim()):
self._check(
encrypted_tensor.unfold(dim, size, step),
tensor.unfold(dim, size, step),
"unfold failed with dim "
f"{dim}, size {size}, and step {step}",
)
def test_to(self):
"""Tests Arithemetic/Binary SharedTensor type conversions."""
from crypten.mpc.ptype import ptype as Ptype
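# Arithmetic shares are additive secret shares modulo 2^64 (efficient for
# addition and multiplication); binary shares are XOR shares of the bits
# (efficient for comparisons). `to(Ptype.*)` converts between the two.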
tensor_sizes = [(), (1,), (5,), (1, 1), (5, 5), (1, 1, 1), (5, 5, 5)]
for size in tensor_sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
binary_encrypted_tensor = encrypted_tensor.to(Ptype.binary)
self.assertEqual(binary_encrypted_tensor.ptype, Ptype.binary)
# check original encrypted_tensor was not modified after conversion
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to BinarySharedTensor.",
)
encrypted_from_binary = binary_encrypted_tensor.to(Ptype.arithmetic)
self._check(
encrypted_from_binary,
tensor,
"to failed from BinarySharedTensor to ArithmeticSharedTensor",
)
# Test API
tensor = self._get_random_test_tensor(size=(5,), is_float=True)
encrypted_tensor = MPCTensor(tensor)
if torch.cuda.is_available():
encrypted_tensor = encrypted_tensor.to("cuda")
self.assertEqual(encrypted_tensor.device.type, "cuda")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cuda",
)
encrypted_tensor = encrypted_tensor.to(device="cuda")
self.assertEqual(encrypted_tensor.device.type, "cuda")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cuda",
)
encrypted_tensor = encrypted_tensor.to("cpu")
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cpu",
)
encrypted_tensor = encrypted_tensor.to(device="cpu")
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to cpu",
)
encrypted_tensor = encrypted_tensor.to(ptype=Ptype.binary)
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.binary)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to BinarySharedTensor.",
)
encrypted_tensor = encrypted_tensor.to(ptype=Ptype.arithmetic)
self.assertEqual(encrypted_tensor.device.type, "cpu")
self.assertEqual(encrypted_tensor.ptype, Ptype.arithmetic)
self._check(
encrypted_tensor,
tensor,
"encrypted_tensor was modified during conversion to ArithmeticSharedTensor.",
)
def test_cumsum(self):
"""Tests cumulative sum on encrypted tensors."""
sizes = [(8,), (5, 10), (15, 10, 5)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
for dim in range(tensor.dim()):
self._check(
encrypted_tensor.cumsum(dim),
tensor.cumsum(dim),
f"cumsum failed along {dim} dim",
)
def test_trace(self):
"""Tests trace operation on 2D encrypted tensors."""
sizes = [(3, 3), (10, 10), (2, 3)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
self._check(encrypted_tensor.trace(), tensor.trace(), "trace failed")
def test_flip(self):
"""Tests flip operation on encrypted tensors."""
sizes = [(5,), (5, 10), (5, 10, 15)]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = MPCTensor(tensor)
flip_dims = [(0,), (0, 1), (0, 1, 2)]
for dims in flip_dims:
if len(dims) <= tensor.dim():
self._check(
encrypted_tensor.flip(dims),
tensor.flip(dims),
f"flip failed with {dims} dims",
)
def test_control_flow_failure(self):
"""Tests that control flow fails as expected"""
tensor = self._get_random_test_tensor(is_float=True)
encrypted_tensor = MPCTensor(tensor)
with self.assertRaises(RuntimeError):
if encrypted_tensor:
pass
with self.assertRaises(RuntimeError):
tensor = 5 if encrypted_tensor else 0
with self.assertRaises(RuntimeError):
if False:
pass
elif encrypted_tensor:
pass
def test_where(self):
"""Tests where() conditional element selection"""
sizes = [(10,), (5, 10), (1, 5, 10)]
y_types = [lambda x: x, MPCTensor]
for size, y_type in itertools.product(sizes, y_types):
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor1 = MPCTensor(tensor1)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
encrypted_tensor2 = y_type(tensor2)
condition_tensor = (
self._get_random_test_tensor(max_value=1, size=size, is_float=False) + 1
)
condition_encrypted = MPCTensor(condition_tensor)
condition_bool = condition_tensor.bool()
reference_out = tensor1.where(condition_bool, tensor2)
encrypted_out = encrypted_tensor1.where(condition_bool, encrypted_tensor2)
y_is_private = y_type == MPCTensor
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with public condition",
)
encrypted_out = encrypted_tensor1.where(
condition_encrypted, encrypted_tensor2
)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with private condition",
)
# test scalar y
scalar = self._get_random_test_tensor(max_value=0, size=[1], is_float=True)
self._check(
encrypted_tensor1.where(condition_bool, scalar),
tensor1.where(condition_bool, scalar),
"where failed against scalar y with public condition",
)
self._check(
encrypted_tensor1.where(condition_encrypted, scalar),
tensor1.where(condition_bool, scalar),
"where failed against scalar y with private condition",
)
def test_unbind(self):
"""Tests unbind"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted = MPCTensor(tensor)
for dim in range(tensor.dim()):
reference = tensor.unbind(dim)
encrypted_out = encrypted.unbind(dim)
self._check_tuple(encrypted_out, reference, "unbind failed")
def test_split(self):
"""Tests split"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, is_float=True)
encrypted = MPCTensor(tensor)
for dim in range(tensor.dim()):
# Get random split
split = self._get_random_test_tensor(
size=(), max_value=tensor.size(dim)
)
split = split.abs().clamp(0, tensor.size(dim) - 1)
split = split.item()
# Test int split
int_split = 1 if split == 0 else split
reference = tensor.split(int_split, dim=dim)
encrypted_out = encrypted.split(int_split, dim=dim)
self._check_tuple(encrypted_out, reference, "split failed")
# Test list split
split = [split, tensor.size(dim) - split]
reference = tensor.split(split, dim=dim)
encrypted_out = encrypted.split(split, dim=dim)
self._check_tuple(encrypted_out, reference, "split failed")
def test_set(self):
"""Tests set correctly re-assigns encrypted shares"""
sizes = [(1, 5), (5, 10), (15, 10, 5)]
for size in sizes:
tensor1 = self._get_random_test_tensor(size=size, is_float=True)
encrypted1 = MPCTensor(tensor1)
tensor2 = self._get_random_test_tensor(size=size, is_float=True)
encrypted2 = MPCTensor(tensor2)
# check encrypted set
encrypted1.set(encrypted2)
self._check(
encrypted1, tensor2, f"set with encrypted other failed with size {size}"
)
# check plain text set
encrypted1 = MPCTensor(tensor1)
encrypted1.set(tensor2)
self._check(
encrypted1,
tensor2,
f"set with unencrypted other failed with size {size}",
)
def test_polynomial(self):
"""Tests polynomial function"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = self._get_random_test_tensor(size=size, max_value=3, is_float=True)
encrypted = MPCTensor(tensor)
for terms in range(1, 5):
coeffs = self._get_random_test_tensor(
size=(terms,), max_value=3, is_float=True
)
reference = torch.zeros(size=tensor.size(), device=self.device)
for i, term in enumerate(coeffs.tolist()):
reference += term * tensor.pow(i + 1)
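# Note: polynomial() has no constant term; coeffs[i] multiplies x^(i + 1).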
# Test list coeffs
encrypted_out = encrypted.polynomial(coeffs.tolist())
self._check(encrypted_out, reference, "polynomial failed")
# Test plaintext tensor coeffs
encrypted_out = encrypted.polynomial(coeffs)
self._check(encrypted_out, reference, "polynomial failed")
# Test encrypted tensor coeffs
coeffs_enc = MPCTensor(coeffs)
encrypted_out = encrypted.polynomial(coeffs_enc)
self._check(encrypted_out, reference, "polynomial failed")
def test_gather(self):
"""Test gather function of encrypted tensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
for dim in range(len(size)):
tensor = self._get_random_test_tensor(size=size, is_float=True)
index = self._get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
encrypted = MPCTensor(tensor)
reference = tensor.gather(dim, index)
encrypted_out = encrypted.gather(dim, index)
self._check(encrypted_out, reference, f"gather failed with size {size}")
def test_dropout(self):
"""
Tests the dropout functions. Directly compares the zero and non-zero
entries of the input tensor, since we cannot force the encrypted and
unencrypted versions to generate identical random output. Also confirms
that the number of zeros in the encrypted dropout function is as expected.
"""
all_prob_values = [x * 0.2 for x in range(5)]
def get_first_nonzero_value(x):
x = x.flatten()
x = x[x.abs().ge(1e-4)]
x = x.take(torch.tensor(0))
return x
# check that the encrypted and plaintext versions scale
# identically, by testing on an all-ones tensor
for prob in all_prob_values:
tensor = torch.ones([10, 10, 10], device=self.device).float()
encr_tensor = MPCTensor(tensor)
dropout_encr = encr_tensor.dropout(prob, training=True)
dropout_decr = dropout_encr.get_plain_text()
dropout_plain = F.dropout(tensor, prob, training=True)
# All non-zero values should be identical in both tensors, so
# compare any one of them
decr_nonzero_value = get_first_nonzero_value(dropout_decr)
plaintext_nonzero_value = get_first_nonzero_value(dropout_plain)
self.assertTrue(
math.isclose(
decr_nonzero_value,
plaintext_nonzero_value,
rel_tol=1e-2,
abs_tol=1e-2,
)
)
for dropout_fn in ["dropout", "_feature_dropout"]:
for prob in all_prob_values:
for size in [(5, 10), (5, 10, 15), (5, 10, 15, 20)]:
for inplace in [False, True]:
for training in [False, True]:
tensor = self._get_random_test_tensor(
size=size, ex_zero=True, min_value=1.0, is_float=True
)
encr_tensor = MPCTensor(tensor)
dropout_encr = getattr(encr_tensor, dropout_fn)(
prob, inplace=inplace, training=training
)
if training:
# Check the scaling for non-zero elements
dropout_decr = dropout_encr.get_plain_text()
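# Inverted dropout scales surviving entries by 1 / (1 - p) so that the
# expected value of each entry is unchanged by dropout.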
scaled_tensor = tensor / (1 - prob)
reference = dropout_decr.where(
dropout_decr == 0, scaled_tensor
)
else:
reference = tensor
self._check(
dropout_encr,
reference,
f"dropout failed with size {size} and probability "
f"{prob}",
)
if inplace:
self._check(
encr_tensor,
reference,
f"in-place dropout failed with size {size} and "
f"probability {prob}",
)
else:
self._check(
encr_tensor,
tensor,
"out-of-place dropout modifies input",
)
# Check that channels that are zeroed are all zeros
if dropout_fn == "_feature_dropout":
dropout_encr_flat = dropout_encr.flatten(
start_dim=0, end_dim=1
)
dropout_flat = dropout_encr_flat.get_plain_text()
for i in range(0, dropout_flat.size(0)):
all_zeros = (dropout_flat[i] == 0).all()
all_nonzeros = (dropout_flat[i] != 0).all()
self.assertTrue(
all_zeros or all_nonzeros,
f"{dropout_fn} failed for size {size} with "
f"training {training} and inplace {inplace}",
)
# Check the expected number of zero elements
# For speed, restrict test to single p = 0.4
encr_tensor = MPCTensor(torch.empty((int(1e5), 2, 2)).fill_(1).to(self.device))
dropout_encr = encr_tensor.dropout(0.4)
dropout_tensor = dropout_encr.get_plain_text()
frac_zero = float((dropout_tensor == 0).sum()) / dropout_tensor.nelement()
self.assertTrue(math.isclose(frac_zero, 0.4, rel_tol=1e-2, abs_tol=1e-2))
def _test_cache_save_load(self):
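# The tuple provider precomputes correlated randomness (e.g. Beaver
# triples) into an in-memory cache. save_cache() should write one file per
# rank and clear the in-memory caches; load_cache() should restore them
# and delete the files. The assertions below pin down that contract.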
# Determine expected filepaths
provider = crypten.mpc.get_default_provider()
request_path = provider._DEFAULT_CACHE_PATH + f"/request_cache-{self.rank}"
tuple_path = provider._DEFAULT_CACHE_PATH + f"/tuple_cache-{self.rank}"
# Clear any existing files in the cache location
if os.path.exists(request_path):
os.remove(request_path)
if os.path.exists(tuple_path):
os.remove(tuple_path)
# Store cache values for reference
requests = provider.request_cache
tuple_cache = provider.tuple_cache
# Save cache to file
provider.save_cache()
# Assert cache files exist
self.assertTrue(
os.path.exists(request_path), "request_cache file not found after save"
)
self.assertTrue(
os.path.exists(tuple_path), "tuple_cache file not found after save"
)
# Assert cache empty
self.assertEqual(
len(provider.request_cache), 0, "cache save did not clear request cache"
)
self.assertEqual(
len(provider.tuple_cache), 0, "cache save did not clear tuple cache"
)
# Ensure test is working properly by not clearing references
self.assertTrue(len(requests) > 0, "reference requests cleared during save")
self.assertTrue(len(tuple_cache) > 0, "reference tuples cleared during save")
# Load cache from file
provider.load_cache()
# Assert files are deleted
self.assertFalse(
os.path.exists(request_path), "request_cache filepath exists after load"
)
self.assertFalse(
os.path.exists(tuple_path), "tuple_cache filepath exists after load"
)
# Assert request cache is loaded as expected
self.assertEqual(
provider.request_cache, requests, "loaded request_cache is incorrect"
)
# Assert loaded tuple dict is as expected
tc = [(k, v) for k, v in provider.tuple_cache.items()]
ref = [(k, v) for k, v in tuple_cache.items()]
for i in range(len(tc)):
t, r = tc[i], ref[i]
t_key, r_key = t[0], r[0]
t_tuples, r_tuples = t[1], r[1]
# Check keys
self.assertEqual(t_key, r_key, "Loaded tuple_cache key is incorrect")
# Check tuple values
for j in range(len(t_tuples)):
t_tuple, r_tuple = t_tuples[j], r_tuples[j]
for k in range(len(t_tuple)):
t_tensor = t_tuple[k]._tensor
r_tensor = r_tuple[k]._tensor
self.assertTrue(
t_tensor.eq(r_tensor).all(),
"Loaded tuple_cache tuple tensor incorrect",
)
def test_tuple_cache(self):
# Skip RSS setting since it does not generate tuples
if cfg.mpc.protocol == "replicated":
return
# TODO: incorporate wrap_rng for 3PC+ settings
if comm.get().get_world_size() > 2:
return
provider = crypten.mpc.get_default_provider()
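# While tracing is enabled, every request for correlated randomness is
# recorded as a (function name, arg sizes, kwargs) entry so fill_cache()
# can later precompute the corresponding tuples offline.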
# Test tracing attribute
crypten.trace()
self.assertTrue(provider.tracing)
x = get_random_test_tensor(is_float=True)
x = crypten.cryptensor(x)
_ = x.square()
_ = x * x
_ = x.matmul(x.t())
_ = x.relu()
y = x.unsqueeze(0)
_ = y.conv1d(y, stride=2)
# Populate reference requests
ref_names = ["square"]
ref_names += ["generate_additive_triple"] * 2
ref_names += ["generate_binary_triple"] * 7 + ["B2A_rng"]
ref_names += ["generate_additive_triple"] * 2
ref_args = [
(torch.Size([1, 5]),),
(torch.Size([1, 5]), torch.Size([1, 5]), "mul"),
(torch.Size([1, 5]), torch.Size([5, 1]), "matmul"),
(torch.Size([1, 1, 5]), torch.Size([1, 1, 5])),
]
ref_args += [(torch.Size([2, 1, 1, 5]), torch.Size([2, 1, 1, 5]))] * 6
ref_args += [(torch.Size([1, 5]),)]
ref_args += [(torch.Size([1, 5]), torch.Size([1, 5]), "mul")]
ref_args += [(torch.Size([1, 1, 5]), torch.Size([1, 1, 5]), "conv1d")]
kwargs = {"device": torch.device("cpu")}
conv_kwargs = {"device": torch.device("cpu"), "stride": 2}
requests = [(ref_names[i], ref_args[i], kwargs) for i in range(12)]
requests += [(ref_names[12], ref_args[12], conv_kwargs)]
self.assertEqual(
provider.request_cache,
requests,
"TupleProvider request cache incorrect",
)
crypten.trace(False)
self.assertFalse(provider.tracing)
# Check that cache populates as expected
crypten.fill_cache()
kwargs = frozenset(kwargs.items())
conv_kwargs = frozenset(conv_kwargs.items())
keys = [(ref_names[i], ref_args[i], kwargs) for i in range(12)]
keys += [(ref_names[12], ref_args[12], conv_kwargs)]
self.assertEqual(
set(provider.tuple_cache.keys()),
set(keys),
"TupleProvider tuple_cache populated incorrectly",
)
# Test saving from / loading to cache
self._test_cache_save_load()
# Test that function calls return from cache when trace is off
crypten.trace(False)
_ = x.square()
_ = x * x
_ = x.matmul(x.t())
_ = x.relu()
y = x.unsqueeze(0)
_ = y.conv1d(y, stride=2)
for v in provider.tuple_cache.values():
self.assertEqual(
len(v), 0, msg="TupleProvider is not popping tuples properly from cache"
)
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestMPC):
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestMPC):
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTTP, self).tearDown()
class Test3PC(MultiProcessTestCase, TestMPC):
def setUp(self):
super(Test3PC, self).setUp(world_size=3)
def tearDown(self):
super(Test3PC, self).tearDown()
class TestRSS(MultiProcessTestCase, TestMPC):
def setUp(self):
self._original_protocol = cfg.mpc.protocol
cfg.mpc.protocol = "replicated"
super(TestRSS, self).setUp(world_size=3)
def tearDown(self):
cfg.mpc.protocol = self._original_protocol
super(TestRSS, self).tearDown()
# This code only runs when executing the file outside the test harness (e.g.
# via the buck target of another test)
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_mpc.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import crypten
try:
from ..benchmarks import benchmark
except ValueError:
# ValueError is raised for relative import
# when calling $python -m unittest test/test_benchmark.py
from benchmarks import benchmark
class TestBenchmark(unittest.TestCase):
def setUp(self):
crypten.init()
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_func_benchmarks_run(self):
"""Ensure function benchmarks run without an exception"""
func_benchmarks = benchmark.FuncBenchmarks()
func_benchmarks.run()
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_model_benchmarks_run(self):
"""Ensure model benchmarks run without an exception"""
model_benchmarks = benchmark.ModelBenchmarks()
for model in model_benchmarks.models:
model.epochs = 2
model_benchmarks.run()
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_func_benchmarks_data(self):
"""Sanity check length and columns of function benchmarks"""
func_benchmarks = benchmark.FuncBenchmarks()
func_benchmarks.run()
expected_n_rows = len(benchmark.FuncBenchmarks.UNARY)
expected_n_rows += len(benchmark.FuncBenchmarks.BINARY)
expected_n_rows += len(benchmark.FuncBenchmarks.LAYERS)
n_rows = func_benchmarks.df.shape[0]
self.assertEqual(
n_rows,
expected_n_rows,
msg=f"function benchmarks {n_rows} rows. Expected {expected_n_rows}",
)
self.assertGreater(
func_benchmarks.df["total abs error"].sum(),
0,
msg="total abs error should be greater than 0",
)
self.assertTrue(
all(func_benchmarks.df["runtime"] > 0),
msg="runtime is less than or equal to zero",
)
self.assertTrue(
all(func_benchmarks.df["runtime crypten"] > 0),
msg="crypten runtime is less than or equal to zero",
)
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_model_benchmarks_data(self):
"""Sanity check length and columns of model benchmarks"""
model_benchmarks = benchmark.ModelBenchmarks()
for model in model_benchmarks.models:
model.epochs = 2
model_benchmarks.run()
expected_n_rows = 2 * len(model_benchmarks.models)
n_rows = model_benchmarks.df.shape[0]
self.assertEqual(
n_rows,
expected_n_rows,
msg=f"model benchmarks have {n_rows} rows. Expected {expected_n_rows}",
)
self.assertTrue(
all(model_benchmarks.df["seconds per epoch"] > 0),
msg="seconds per epoch should be greater than 0",
)
self.assertTrue(
all(model_benchmarks.df["inference time"] > 0),
msg="inference time should be greater than 0",
)
self.assertTrue(
all(model_benchmarks.df["accuracy"] > 0)
and all(model_benchmarks.df["accuracy"] < 1.0),
msg="accuracy should be between 0 and 1.0",
)
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_advanced_model_benchmarks(self):
"""Tests advanced models are added with flag"""
model_benchmarks = benchmark.ModelBenchmarks(advanced_models=False)
self.assertTrue(all(not model.advanced for model in model_benchmarks.models))
all_model_benchmarks = benchmark.ModelBenchmarks(advanced_models=True)
self.assertGreater(
len(all_model_benchmarks.models), len(model_benchmarks.models)
)
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_benchmark.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import math
import random
import unittest
from collections import defaultdict
import crypten
import crypten.communicator as comm
import torch
import torch.nn.functional as F
from crypten.common import serial
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
from torch import nn
class TestCrypten(MultiProcessTestCase):
"""
This class tests all member functions of the crypten package
"""
def setUp(self):
super().setUp()
if self.rank >= 0:
crypten.init()
crypten.set_default_cryptensor_type("mpc")
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
self.assertTrue(is_float_tensor(reference), "reference must be a float")
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
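# Pass if the relative error is within tolerance, or the absolute error is
# within tolerance / 10; the absolute fallback avoids spurious failures
# when the reference values are close to zero.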
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
test_passed = test_passed.gt(0).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result = %s;\nreference = %s" % (tensor, reference))
self.assertTrue(test_passed, msg=msg)
def test_przs_generators(self):
"""Tests that przs generators are initialized independently"""
# Check that each party has two unique generators for next and prev seeds
for device in crypten.generators["prev"].keys():
t0 = torch.randint(
-(2**63),
2**63 - 1,
(1,),
device=device,
generator=crypten.generators["prev"][device],
)
t1 = torch.randint(
-(2**63),
2**63 - 1,
(1,),
device=device,
generator=crypten.generators["next"][device],
)
self.assertNotEqual(t0.item(), t1.item())
# Check that generators are sync'd as expected
for rank in range(self.world_size):
receiver = rank
sender = (rank + 1) % self.world_size
if self.rank == receiver:
sender_value = comm.get().recv_obj(sender)
receiver_value = crypten.generators["next"][device].initial_seed()
self.assertEqual(sender_value, receiver_value)
elif self.rank == sender:
sender_value = crypten.generators["prev"][device].initial_seed()
comm.get().send_obj(sender_value, receiver)
def test_global_generator(self):
"""Tests that global generator is generated properly"""
# Check that all seeds are the same
for device in crypten.generators["global"].keys():
this_generator = crypten.generators["global"][device].initial_seed()
generator0 = comm.get().broadcast_obj(this_generator, 0)
self.assertEqual(this_generator, generator0)
def test_manual_seeds(self):
"""
Tests that user-supplied seeds replace auto-generated seeds
and tests that the seed values match the expected values
"""
# Store auto-generated seeds
orig_seeds = defaultdict(dict)
seed_names = ["prev", "next", "local", "global"]
for seed_name in seed_names:
for device in crypten.generators[seed_name].keys():
orig_seeds[seed_name][device] = crypten.generators[seed_name][
device
].initial_seed()
# User-generated seeds
next_seed = random.randint(0, 2**63 - 1)
local_seed = random.randint(0, 2**63 - 1)
global_seed = random.randint(0, 2**63 - 1)
# Store expected seeds
expected_seeds = {}
expected_seeds["next"] = next_seed
expected_seeds["local"] = local_seed
# Set user-generated seeds in crypten
cfg.debug.debug_mode = True
crypten.manual_seed(next_seed, local_seed, global_seed)
# Check that user-generated seeds are not equal to the auto-generated ones
for seed_name in seed_names:
for device in crypten.generators[seed_name].keys():
self.assertNotEqual(
crypten.generators[seed_name][device].initial_seed(),
orig_seeds[seed_name][device],
)
# Check if seeds match the expected seeds
if seed_name in expected_seeds.keys():
self.assertEqual(
crypten.generators[seed_name][device].initial_seed(),
expected_seeds[seed_name],
)
# Re-run the generator tests to validate that prev and global are initialized correctly
self.test_przs_generators()
self.test_global_generator()
def test_cat_stack(self):
"""Tests concatenation and stacking of tensors"""
tensor1 = get_random_test_tensor(size=(5, 5, 5, 5), is_float=True)
tensor2 = get_random_test_tensor(size=(5, 5, 5, 5), is_float=True)
encrypted1 = crypten.cryptensor(tensor1)
encrypted2 = crypten.cryptensor(tensor2)
for module in [crypten, torch]: # torch.cat on CrypTensor runs crypten.cat
for op in ["cat", "stack"]:
reference = getattr(torch, op)([tensor1, tensor2])
encrypted_out = getattr(module, op)([encrypted1, encrypted2])
self._check(encrypted_out, reference, "%s failed" % op)
for dim in range(4):
reference = getattr(torch, op)([tensor1, tensor2], dim=dim)
encrypted_out = getattr(module, op)(
[encrypted1, encrypted2], dim=dim
)
self._check(encrypted_out, reference, "%s failed" % op)
def test_print_log(self):
"""Tests crypten.print and crypten.log logging functions."""
crypten.print("test")
crypten.log("test")
def test_rand(self):
"""Tests uniform random variable generation on [0, 1)"""
for size in [(10,), (10, 10), (10, 10, 10)]:
randvec = crypten.rand(*size)
self.assertTrue(randvec.size() == size, "Incorrect size")
tensor = randvec.get_plain_text()
self.assertTrue(
(tensor >= 0).all() and (tensor < 1).all(), "Invalid values"
)
randvec = crypten.rand(int(1e6)).get_plain_text()
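# U[0, 1) has mean 1/2 and variance 1/12; check both on a large sample.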
mean = torch.mean(randvec)
var = torch.var(randvec)
self.assertTrue(torch.isclose(mean, torch.tensor([0.5]), rtol=1e-3, atol=1e-3))
self.assertTrue(
torch.isclose(var, torch.tensor([1.0 / 12]), rtol=1e-3, atol=1e-3)
)
def test_bernoulli(self):
for size in [(10,), (10, 10), (10, 10, 10)]:
probs = torch.rand(size)
randvec = crypten.bernoulli(probs)
self.assertTrue(randvec.size() == size, "Incorrect size")
tensor = randvec.get_plain_text()
self.assertTrue(((tensor == 0) + (tensor == 1)).all(), "Invalid values")
probs = torch.Tensor(int(1e4)).fill_(0.2)
randvec = crypten.bernoulli(probs).get_plain_text()
frac_zero = float((randvec == 0).sum()) / randvec.nelement()
self.assertTrue(math.isclose(frac_zero, 0.8, rel_tol=1e-1, abs_tol=1e-1))
def test_cryptensor_registration(self):
"""Tests the registration mechanism for custom `CrypTensor` types."""
# perform tests:
cryptensor_name = "my_cryptensor"
self.assertEqual(crypten.get_default_cryptensor_type(), "mpc")
with self.assertRaises(ValueError):
crypten.set_default_cryptensor_type(cryptensor_name)
tensor = crypten.cryptensor(torch.zeros(1, 3))
self.assertEqual(crypten.get_cryptensor_type(tensor), "mpc")
# register new tensor type:
@crypten.register_cryptensor(cryptensor_name)
class MyCrypTensor(crypten.CrypTensor):
"""Dummy `CrypTensor` type."""
def __init__(self, *args, **kwargs):
self.is_custom_type = True
# test that registration was successful:
self.assertEqual(crypten.get_default_cryptensor_type(), "mpc")
crypten.set_default_cryptensor_type(cryptensor_name)
self.assertEqual(crypten.get_default_cryptensor_type(), cryptensor_name)
tensor = crypten.cryptensor(torch.zeros(1, 3))
self.assertTrue(getattr(tensor, "is_custom_type", False))
self.assertEqual(crypten.get_cryptensor_type(tensor), cryptensor_name)
def test_cryptensor_instantiation(self):
"""Tests that CrypTensors cannot be instantiated."""
tensor = get_random_test_tensor()
with self.assertRaises(TypeError):
encrypted_tensor = crypten.CrypTensor(tensor)
encrypted_tensor = crypten.mpc.MPCTensor(tensor)
self.assertIsInstance(encrypted_tensor, crypten.CrypTensor)
def test_save_load(self):
"""Test that crypten.save and crypten.load properly save and load
shares of cryptensors"""
import io
import pickle
def custom_load_function(f):
obj = pickle.load(f)
return obj
def custom_save_function(obj, f):
pickle.dump(obj, f)
all_save_fns = [torch.save, custom_save_function]
all_load_fns = [torch.load, custom_load_function]
tensor = get_random_test_tensor()
cryptensor1 = crypten.cryptensor(tensor)
for i, save_closure in enumerate(all_save_fns):
load_closure = all_load_fns[i]
f = [
io.BytesIO() for i in range(crypten.communicator.get().get_world_size())
]
crypten.save(cryptensor1, f[self.rank], save_closure=save_closure)
f[self.rank].seek(0)
cryptensor2 = crypten.load(f[self.rank], load_closure=load_closure)
# test whether share matches
self.assertTrue(cryptensor1.share.allclose(cryptensor2.share))
# test whether tensor matches
self.assertTrue(
cryptensor1.get_plain_text().allclose(cryptensor2.get_plain_text())
)
attributes = [
a
for a in dir(cryptensor1)
if not a.startswith("__")
and not callable(getattr(cryptensor1, a))
and a not in ["share", "_tensor", "ctx"]
]
for a in attributes:
attr1, attr2 = getattr(cryptensor1, a), getattr(cryptensor2, a)
if a == "encoder":
self.assertTrue(attr1._scale == attr2._scale)
self.assertTrue(attr1._precision_bits == attr2._precision_bits)
elif torch.is_tensor(attr1):
self.assertTrue(attr1.eq(attr2).all())
else:
self.assertTrue(attr1 == attr2)
def test_plaintext_save_load_from_party(self):
"""Test that crypten.save_from_party and crypten.load_from_party
properly save and load plaintext tensors"""
import tempfile
import numpy as np
def custom_load_function(f):
np_arr = np.load(f)
tensor = torch.from_numpy(np_arr)
return tensor
def custom_save_function(obj, f):
np_arr = obj.numpy()
np.save(f, np_arr)
comm = crypten.communicator
filename = tempfile.NamedTemporaryFile(delete=True).name
all_save_fns = [torch.save, custom_save_function]
all_load_fns = [torch.load, custom_load_function]
all_file_completions = [".pth", ".npy"]
all_test_load_fns = [torch.load, np.load]
for dimensions in range(1, 5):
# Create tensors with different sizes on each rank
size = [self.rank + 1] * dimensions
size = tuple(size)
tensor = torch.randn(size=size)
for i, save_closure in enumerate(all_save_fns):
load_closure = all_load_fns[i]
test_load_fn = all_test_load_fns[i]
complete_file = filename + all_file_completions[i]
for src in range(comm.get().get_world_size()):
crypten.save_from_party(
tensor, complete_file, src=src, save_closure=save_closure
)
# the following line will throw an error if an object saved with
# torch.save is attempted to be loaded with np.load
if self.rank == src:
test_load_fn(complete_file)
encrypted_load = crypten.load_from_party(
complete_file, src=src, load_closure=load_closure
)
reference_size = tuple([src + 1] * dimensions)
self.assertEqual(encrypted_load.size(), reference_size)
size_out = [src + 1] * dimensions
reference = (
tensor if self.rank == src else torch.empty(size=size_out)
)
comm.get().broadcast(reference, src)
self._check(encrypted_load, reference, "crypten.load() failed")
# test for invalid load_closure
with self.assertRaises(TypeError):
crypten.load_from_party(
complete_file, src=src, load_closure=(lambda f: None)
)
# test pre-loaded
encrypted_preloaded = crypten.load_from_party(
src=src, preloaded=tensor
)
self._check(
encrypted_preloaded,
reference,
"crypten.load() failed using preloaded",
)
def test_plaintext_save_load_module_from_party(self):
"""Test that crypten.save_from_party and crypten.load_from_party
properly save and load plaintext modules"""
import tempfile
comm = crypten.communicator
for model_type in [TestModule, NestedTestModule]:
# Create models with different parameter values on each rank
rank = comm.get().get_rank()
test_model = model_type(200, 10)
test_model.set_all_parameters(rank)
serial.register_safe_class(model_type)
filename = tempfile.NamedTemporaryFile(delete=True).name
for src in range(comm.get().get_world_size()):
crypten.save_from_party(test_model, filename, src=src)
result = crypten.load_from_party(filename, src=src)
if src == rank:
for param in result.parameters(recurse=True):
self.assertTrue(
param.eq(rank).all().item(), "Model load failed"
)
self.assertEqual(result.src, src)
def test_where(self):
"""Test that crypten.where properly conditions"""
sizes = [(10,), (5, 10), (1, 5, 10)]
y_types = [lambda x: x, crypten.cryptensor]
for size, y_type in itertools.product(sizes, y_types):
tensor1 = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor1 = crypten.cryptensor(tensor1)
tensor2 = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor2 = y_type(tensor2)
condition_tensor = (
get_random_test_tensor(max_value=1, size=size, is_float=False) + 1
)
condition_encrypted = crypten.cryptensor(condition_tensor)
condition_bool = condition_tensor.bool()
reference_out = torch.where(condition_bool, tensor1, tensor2)
encrypted_out = crypten.where(
condition_bool, encrypted_tensor1, encrypted_tensor2
)
y_is_private = crypten.is_encrypted_tensor(encrypted_tensor2)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with public condition",
)
encrypted_out = encrypted_tensor1.where(
condition_encrypted, encrypted_tensor2
)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with private condition",
)
@unittest.skip("Test is flaky, with successes, failures and timeouts as outcomes")
def test_is_initialized(self):
"""Tests that the is_initialized flag is set properly"""
comm = crypten.communicator
self.assertTrue(crypten.is_initialized())
self.assertTrue(comm.is_initialized())
crypten.uninit()
self.assertFalse(crypten.is_initialized())
self.assertFalse(comm.is_initialized())
# note that uninit() kills the TTP process, so we need to restart it:
if self.rank == self.MAIN_PROCESS_RANK and crypten.mpc.ttp_required():
self.processes += [self._spawn_ttp()]
crypten.init()
self.assertTrue(crypten.is_initialized())
self.assertTrue(comm.is_initialized())
# Modules used for testing saving / loading of modules
class TestModule(nn.Module):
def __init__(self, input_features, output_features):
super(TestModule, self).__init__()
self.fc1 = nn.Linear(input_features, 100)
self.fc2 = nn.Linear(100, 50)
self.fc3 = nn.Linear(50, output_features)
def forward(self, input):
out = F.relu(self.fc1(input))
out = F.relu(self.fc2(out))
out = self.fc3(out)
return out
def set_all_parameters(self, value):
self.fc1.weight.data.fill_(value)
self.fc1.bias.data.fill_(value)
self.fc2.weight.data.fill_(value)
self.fc2.bias.data.fill_(value)
self.fc3.weight.data.fill_(value)
self.fc3.bias.data.fill_(value)
class NestedTestModule(nn.Module):
def __init__(self, input_features, output_features):
super(NestedTestModule, self).__init__()
self.fc1 = nn.Linear(input_features, input_features)
self.nested = TestModule(input_features, output_features)
def forward(self, input):
out = F.relu(self.fc1(input))
out = self.nested(out)
return out
def set_all_parameters(self, value):
self.fc1.weight.data.fill_(value)
self.fc1.bias.data.fill_(value)
self.nested.set_all_parameters(value)
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_crypten.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import crypten
import crypten.nn.tensorboard as tensorboard
from test.multiprocess_test_case import MultiProcessTestCase
class TestTensorboard(MultiProcessTestCase):
"""This class tests the crypten.nn.tensorboard package."""
def setUp(self):
super().setUp()
if self.rank >= 0:
crypten.init()
def test_tensorboard(self):
# create small crypten model:
model = crypten.nn.Graph("input", "output")
model.add_module("intermediate1", crypten.nn.ReLU(), ["input"])
model.add_module("intermediate2", crypten.nn.Constant(1), [])
model.add_module("output", crypten.nn.Add(), ["intermediate1", "intermediate2"])
# create tensorboard graph:
tensorboard.graph(model)
self.assertTrue(True, "creation of tensorboard graph failed")
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_tensorboard.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import itertools
import logging
import unittest
import crypten
import torch
import torch.nn as nn
import torch.nn.functional as F
from crypten.config import cfg
from crypten.cuda import CUDALongTensor
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
from test.test_mpc import TestMPC
class MLP(nn.Module):
def __init__(self, in_dim=128):
super().__init__()
self.linear1 = nn.Linear(in_dim, 128)
self.relu1 = nn.ReLU()
self.linear2 = nn.Linear(128, 128)
self.relu2 = nn.ReLU()
self.linear3 = nn.Linear(128, 1)
def forward(self, x):
x = self.linear1(x)
x = self.relu1(x)
x = self.linear2(x)
x = self.relu2(x)
x = self.linear3(x)
return x
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
class TestCUDA(TestMPC):
"""
This class tests all functions of CUDALongTensor as well as its integration with MPCTensor.
"""
def _check_int(self, result, reference, msg):
# Check sizes match
self.assertTrue(result.size() == reference.size(), msg)
is_eq = (result == reference).all().item() == 1
if not is_eq:
logging.info(msg)
logging.info("Result %s" % result)
logging.info("Reference %s" % reference)
logging.info("Result - Reference = %s" % (result - reference))
self.assertTrue(is_eq, msg=msg)
def test_mlp(self):
"""Test the forward/backward pass of MLP on GPU"""
model = MLP()
dummy_input = torch.empty((32, 128))
model = crypten.nn.from_pytorch(model, dummy_input=dummy_input)
model = model.to(self.device)
model.encrypt()
model.train()
rand_in = crypten.cryptensor(
torch.rand([32, 128], device=self.device), requires_grad=True
)
output = model(rand_in)
model.zero_grad()
output.backward()
model.update_parameters(learning_rate=1e-3)
def test_patched_matmul(self):
"""Test torch.matmul on CUDALongTensor"""
input_sizes = [
(5,),
(5, 5),
(5,),
(5, 5),
(5, 5, 5),
(5,),
(5, 5, 5, 5),
(5, 5),
# Check large interleaves for 4x16-bit process
(1, 256),
(1, 1024),
]
other_sizes = [
(5,),
(5, 5),
(5, 5),
(5,),
(5,),
(5, 5, 5),
(5, 5),
(5, 5, 5, 5),
(256, 1),
(1024, 1),
]
for x_size, y_size in zip(input_sizes, other_sizes):
x = get_random_test_tensor(size=x_size, max_value=2**62, is_float=False)
x_cuda = CUDALongTensor(x)
y = get_random_test_tensor(size=y_size, max_value=2**62, is_float=False)
y_cuda = CUDALongTensor(y)
z = torch.matmul(x_cuda, y_cuda)
self.assertTrue(
type(z) == CUDALongTensor, "result should be a CUDALongTensor"
)
reference = torch.matmul(x, y)
self._check_int(z.cpu(), reference, "matmul failed for cuda_patches")
def test_conv1d_smaller_signal_one_channel(self):
self._patched_conv1d(5, 1)
self._conv1d(5, 1)
def test_conv1d_smaller_signal_many_channels(self):
self._patched_conv1d(5, 5)
self._conv1d(5, 5)
@unittest.skipIf(not torch.cuda.is_available(), "requires CUDA")
def test_conv1d_larger_signal_one_channel(self):
self._patched_conv1d(16, 1)
self._conv1d(16, 1)
def test_conv1d_larger_signal_many_channels(self):
self._patched_conv1d(16, 5)
self._conv1d(16, 5)
def test_conv1d_large_filter(self):
self._patched_conv1d(1024, 1, kernel_sizes=[256, 512])
def _patched_conv1d(self, signal_size, in_channels, kernel_sizes=None):
"""Test convolution of torch.cuda.LongTensor with cuda_patches technique."""
nbatches = [1, 3]
ochannels = [1, 3, 6]
paddings = [0, 1]
strides = [1, 2]
if kernel_sizes is None:
kernel_sizes = [1, 2, 3]
for func_name in ["conv1d", "conv_transpose1d"]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
) in itertools.product(
nbatches, kernel_sizes, ochannels, paddings, strides
):
input_size = (batches, in_channels, signal_size)
signal = get_random_test_tensor(size=input_size, is_float=False)
if func_name == "conv1d":
k_size = (out_channels, in_channels, kernel_size)
else:
k_size = (in_channels, out_channels, kernel_size)
kernel = get_random_test_tensor(size=k_size, is_float=False)
signal_cuda = CUDALongTensor(signal)
kernel_cuda = CUDALongTensor(kernel)
reference = getattr(F, func_name)(
signal, kernel, padding=padding, stride=stride
)
result = getattr(F, func_name)(
signal_cuda, kernel_cuda, padding=padding, stride=stride
)
self.assertTrue(
type(result) == CUDALongTensor, "result should be a CUDALongTensor"
)
result = result.data.cpu()
self._check_int(result, reference, f"{func_name} failed")
def test_conv2d_square_image_one_channel(self):
self._patched_conv2d((5, 5), 1)
self._conv2d((5, 5), 1)
def test_conv2d_square_image_many_channels(self):
self._patched_conv2d((5, 5), 5)
self._conv2d((5, 5), 5)
def test_conv2d_rectangular_image_one_channel(self):
self._patched_conv2d((16, 7), 1)
self._conv2d((16, 7), 1)
def test_conv2d_rectangular_image_many_channels(self):
self._patched_conv2d((16, 7), 5)
self._conv2d((16, 7), 5)
def test_conv2d_large_kernel(self):
self.nbatches = [1]
self.ochannels = [1]
self.paddings = [0]
self.strides = [(64, 64)]
self.kernel_sizes = [(64, 64)]
self._patched_conv2d((64, 64), 1)
def _patched_conv2d(self, image_size, in_channels):
"""Test convolution of torch.cuda.LongTensor with cuda_patches technique."""
kwargs = collections.OrderedDict()
kwargs["nbatches"] = [1, 3]
kwargs["kernel_sizes"] = [(1, 1), (2, 2), (2, 3)]
kwargs["ochannels"] = [1, 3, 6]
kwargs["paddings"] = [0, 1, (0, 1)]
kwargs["strides"] = [1, 2, (1, 2)]
for attribute in [
"nbatches",
"ochannels",
"paddings",
"strides",
"kernel_sizes",
]:
if hasattr(self, attribute):
kwargs[attribute] = getattr(self, attribute)
for func_name in ["conv2d", "conv_transpose2d"]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
) in itertools.product(*[v for _, v in kwargs.items()]):
# sample input:
input_size = (batches, in_channels, *image_size)
input = get_random_test_tensor(size=input_size, is_float=False)
# sample filtering kernel:
if func_name == "conv2d":
k_size = (out_channels, in_channels, *kernel_size)
else:
k_size = (in_channels, out_channels, *kernel_size)
kernel = get_random_test_tensor(size=k_size, is_float=False)
input_cuda = CUDALongTensor(input)
kernel_cuda = CUDALongTensor(kernel)
result = getattr(F, func_name)(
input_cuda, kernel_cuda, padding=padding, stride=stride
)
self.assertTrue(
type(result) == CUDALongTensor, "result should be a CUDALongTensor"
)
result = result.data.cpu()
# check that result is correct:
reference = getattr(F, func_name)(
input, kernel, padding=padding, stride=stride
)
self._check_int(result, reference, "%s failed" % func_name)
def test_torch_arithmetic(self):
"""Test torch arithmetic on CUDALongTensor"""
funcs = ["add", "sub", "mul", "div"]
a = get_random_test_tensor(is_float=False)
b = get_random_test_tensor(min_value=1, is_float=False)
a_cuda = CUDALongTensor(a)
b_cuda = CUDALongTensor(b)
for op in funcs:
kwargs = {"rounding_mode": "trunc"} if op == "div" else {}
reference = getattr(torch, op)(a, b, **kwargs)
result = getattr(torch, op)(a_cuda, b_cuda, **kwargs)
result2 = getattr(a_cuda, op)(b_cuda, **kwargs)
self.assertTrue(
type(result) == CUDALongTensor, "result should be a CUDALongTensor"
)
self._check_int(
reference, result.cpu(), "torch.{} failed for CUDALongTensor".format(op)
)
self._check_int(
reference,
result2.cpu(),
"torch.{} failed for CUDALongTensor".format(op),
)
def test_torch_comparators(self):
"""Test torch comparators on CUDALongTensor"""
for comp in ["gt", "ge", "lt", "le", "eq", "ne"]:
tensor = get_random_test_tensor(is_float=False)
tensor2 = get_random_test_tensor(is_float=False)
t_cuda = CUDALongTensor(tensor)
t2_cuda = CUDALongTensor(tensor2)
reference = getattr(torch, comp)(tensor, tensor2).long()
result1 = getattr(t_cuda, comp)(t2_cuda)
result2 = getattr(torch, comp)(t_cuda, t2_cuda)
self.assertTrue(
type(result1) == CUDALongTensor, "result should be a CUDALongTensor"
)
self.assertTrue(
type(result2) == CUDALongTensor, "result should be a CUDALongTensor"
)
self._check_int(result1.cpu(), reference, "%s comparator failed" % comp)
self._check_int(result2.cpu(), reference, "%s comparator failed" % comp)
def test_torch_avg_pool2d(self):
"""Test avg_pool2d on CUDALongTensor"""
for width in range(2, 5):
for kernel_size in range(1, width):
matrix_size = (1, 4, 5, width)
matrix = get_random_test_tensor(size=matrix_size, is_float=False)
matrix_cuda = CUDALongTensor(matrix)
for stride in range(1, kernel_size + 1):
for padding in range(kernel_size // 2 + 1):
for divisor_override in [None, 1, 2]:
reference = F.avg_pool2d(
matrix,
kernel_size,
stride=stride,
padding=padding,
divisor_override=divisor_override,
)
result = F.avg_pool2d(
matrix_cuda,
kernel_size,
stride=stride,
padding=padding,
divisor_override=divisor_override,
)
self._check_int(
result.cpu(), reference, "avg_pool2d failed"
)
def test_torch_stack_cat(self):
"""Test torch.cat/torch.stack on CUDALongTensor"""
funcs = ["stack", "cat"]
tensors = [get_random_test_tensor(is_float=False) for _ in range(10)]
tensors_cuda = [CUDALongTensor(t) for t in tensors]
for op in funcs:
reference = getattr(torch, op)(tensors)
result = getattr(CUDALongTensor, op)(tensors_cuda)
self.assertTrue(
type(result) == CUDALongTensor, "result should be a CUDALongTensor"
)
self._check_int(
reference, result.cpu(), "torch.{} failed for CUDALongTensor".format(op)
)
def test_torch_broadcast_tensor(self):
"""Test torch.broadcast_tensor on CUDALongTensor"""
x = get_random_test_tensor(size=(1, 5), is_float=False)
y = get_random_test_tensor(size=(5, 1), is_float=False)
x_cuda = CUDALongTensor(x)
y_cuda = CUDALongTensor(y)
a, b = torch.broadcast_tensors(x, y)
a_cuda, b_cuda = torch.broadcast_tensors(x_cuda, y_cuda)
self.assertTrue(
type(a_cuda) == CUDALongTensor, "result should be a CUDALongTensor"
)
self.assertTrue(
type(b_cuda) == CUDALongTensor, "result should be a CUDALongTensor"
)
self._check_int(
a, a_cuda.cpu(), "torch.broadcast_tensor failed for CUDALongTensor"
)
self._check_int(
b, b_cuda.cpu(), "torch.broadcast_tensor failed for CUDALongTensor"
)
def test_torch_split(self):
"""Test torch.split on CUDALongTensor"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=False)
t_cuda = CUDALongTensor(tensor)
for dim in range(tensor.dim()):
# Get random split
split = get_random_test_tensor(size=(), max_value=tensor.size(dim))
split = split.abs().clamp(0, tensor.size(dim) - 1)
split = split.item()
# Test int split
int_split = 1 if split == 0 else split
reference = torch.split(tensor, int_split, dim=dim)
result = t_cuda.split(int_split, dim=dim)
result2 = torch.split(t_cuda, int_split, dim=dim)
for i in range(len(result)):
self.assertTrue(
type(result[i]) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self.assertTrue(
type(result2[i]) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self._check_int(result[i].cpu(), reference[i], "split failed")
self._check_int(result2[i].cpu(), reference[i], "split failed")
# Test list split
split = [split, tensor.size(dim) - split]
reference = torch.split(tensor, split, dim=dim)
result = t_cuda.split(split, dim=dim)
result2 = torch.split(t_cuda, split, dim=dim)
for i in range(len(result)):
self.assertTrue(
type(result[i]) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self.assertTrue(
type(result2[i]) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self._check_int(result[i].cpu(), reference[i], "split failed")
self._check_int(result2[i].cpu(), reference[i], "split failed")
def test_torch_unbind(self):
"""Test torch.unbind on CUDALongTensor"""
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 5),
(1, 1, 1),
(5, 5, 5),
(1, 1, 1, 1),
(5, 5, 5, 5),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=False)
t_cuda = CUDALongTensor(tensor)
for dim in range(tensor.dim()):
reference = tensor.unbind(dim)
result = torch.unbind(t_cuda, dim)
result2 = t_cuda.unbind(dim)
for i in range(len(result)):
self.assertTrue(
type(result[i]) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self.assertTrue(
type(result2[i]) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self._check_int(
result[i].cpu(), reference[i], "unbind failed on CUDALongTensor"
)
self._check_int(
result2[i].cpu(),
reference[i],
"unbind failed on CUDALongTensor",
)
def test_torch_gather(self):
"""Test torch.gather on CUDALongTensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
for dim in range(len(size)):
tensor = get_random_test_tensor(size=size, is_float=False)
index = get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
t_cuda = CUDALongTensor(tensor)
idx_cuda = CUDALongTensor(index)
reference = tensor.gather(dim, index)
result = t_cuda.gather(dim, idx_cuda)
result2 = torch.gather(t_cuda, dim, idx_cuda)
self._check_int(
result.cpu(), reference, f"gather failed with size {size}"
)
self._check_int(
result2.cpu(), reference, f"gather failed with size {size}"
)
@unittest.skip("torch.scatter behaves inconsistently on CUDA")
def test_torch_scatter(self):
"""Test scatter/scatter_add function of CUDALongTensor
This test is skipped for now since torch.scatter produces
inconsistent results given the same input on CUDA. This is likely
due to a bug in PyTorch's implementation of scatter.
"""
funcs = ["scatter", "scatter_add"]
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for func in funcs:
for size in sizes:
for dim in range(len(size)):
tensor1 = get_random_test_tensor(size=size, is_float=False)
tensor2 = get_random_test_tensor(size=size, is_float=False)
index = get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
t1_cuda = CUDALongTensor(tensor1)
t2_cuda = CUDALongTensor(tensor2)
idx_cuda = CUDALongTensor(index)
reference = getattr(torch, func)(tensor1, dim, index, tensor2)
result = getattr(torch, func)(t1_cuda, dim, idx_cuda, t2_cuda)
result2 = getattr(t1_cuda, func)(dim, idx_cuda, t2_cuda)
self.assertTrue(
type(result) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self.assertTrue(
type(result2) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self._check_int(result.cpu(), reference, "{} failed".format(func))
self._check_int(result2.cpu(), reference, "{} failed".format(func))
def test_torch_nonzero(self):
"""Test torch.nonzero on CUDALongTensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
t1 = get_random_test_tensor(size=size, is_float=False)
t1_cuda = CUDALongTensor(t1)
ref = t1.nonzero(as_tuple=False)
ref_tuple = t1.nonzero(as_tuple=True)
result = t1_cuda.nonzero(as_tuple=False)
result_tuple = t1_cuda.nonzero(as_tuple=True)
self.assertTrue(
type(result) == CUDALongTensor, "result should be a CUDALongTensor"
)
self._check_int(result.cpu(), ref, "nonzero failed")
for i in range(len(result_tuple)):
self.assertTrue(
type(result_tuple[i]) == CUDALongTensor,
"result should be a CUDALongTensor",
)
self._check_int(result_tuple[i].cpu(), ref_tuple[i], "nonzero failed")
@unittest.skip("torch.scatter behaves inconsistently on CUDA")
def test_scatter(self):
"""This test will be skipped for now since torch.scatter provides
inconsistent result given the same input on CUDA. This is likely
due to a potential bug on pytorch's implementation of scatter
"""
pass
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestCUDA):
def __init__(self, methodName):
super().__init__(methodName)
self.device = torch.device("cuda")
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestCUDA):
def __init__(self, methodName):
super().__init__(methodName)
self.device = torch.device("cuda")
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTTP, self).tearDown()
# This code only runs when executing the file outside the test harness (e.g.
# via the buck target of another test)
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_cuda.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import multiprocessing
import os
import sys
import tempfile
import traceback
import unittest
import warnings
from functools import wraps
import crypten.communicator as comm
import crypten.debug
import torch
import torch.distributed as dist
from crypten.config import cfg
def get_random_test_tensor(
max_value=6, min_value=None, size=(1, 5), is_float=False, ex_zero=False, device=None
):
"""Generates random tensor for testing
Args:
max_value (int): defines maximum value for int tensor
min_value (int): defines minimum value for int tensor
size (tuple): size of tensor
is_float (bool): determines float or int tensor
ex_zero (bool): excludes zero tensor
device (torch.device): device on which to create the tensor
Returns: torch.tensor
"""
if min_value is None:
min_value = -max_value
if is_float:
tensor = (
torch.rand(torch.Size(size), device=device) * (max_value - min_value)
+ min_value
)
else:
tensor = torch.randint(
min_value, max_value, torch.Size(size), dtype=torch.int64, device=device
)
if ex_zero:
# replace 0 with 1
tensor[tensor == 0] = 1
# Broadcast this tensor to the world so that the generated random tensor
# is in sync in all distributed processes. See T45688819 for more
# information.
tensor = comm.get().broadcast(tensor, 0)
return tensor
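# Example (hypothetical call): get_random_test_tensor(size=(2, 2), is_float=True)
# returns a 2x2 float tensor with entries in [-6, 6) that is identical on every
# rank, since rank 0 broadcasts its sample to all parties.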
def onehot(indices, num_targets=None):
"""
Converts index vector into one-hot matrix.
"""
assert indices.dtype == torch.long, "indices must be long integers"
assert indices.min() >= 0, "indices must be non-negative"
if num_targets is None:
num_targets = indices.max() + 1
onehot_vector = torch.zeros(indices.nelement(), num_targets, dtype=torch.long)
onehot_vector.scatter_(1, indices.view(indices.nelement(), 1), 1)
return onehot_vector
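# Example: onehot(torch.tensor([0, 2]), num_targets=3) returns
# tensor([[1, 0, 0],
#         [0, 0, 1]])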
def get_random_linear(in_channels, out_channels):
linear = torch.nn.Linear(in_channels, out_channels)
if dist.is_initialized():
# Broadcast this tensor to the world so that the generated random tensor
# is in sync in all distributed processes. See T45688819 for more
# information.
comm.get().broadcast(linear.weight, 0)
comm.get().broadcast(linear.bias, 0)
return linear
class MultiProcessTestCase(unittest.TestCase):
MAIN_PROCESS_RANK = -1
DEFAULT_DEVICE = "cpu"
DEFAULT_WORLD_SIZE = 2
@staticmethod
def join_or_run(fn):
@wraps(fn)
def wrapper(self):
if self.rank == self.MAIN_PROCESS_RANK:
self._join_processes(fn)
else:
fn(self)
return wrapper
# The main process spawns N subprocesses that run the test.
# This method overwrites every test function to either
# assume the role of the main process and join its subprocesses,
# or run the underlying test function.
@classmethod
def setUpClass(cls):
for attr in dir(cls):
if attr.startswith("test"):
fn = getattr(cls, attr)
setattr(cls, attr, cls.join_or_run(fn))
def __init__(self, methodName):
super().__init__(methodName)
self.device = torch.device(self.DEFAULT_DEVICE)
self.rank = self.MAIN_PROCESS_RANK
self.mp_context = multiprocessing.get_context("spawn")
def setUp(self, world_size=DEFAULT_WORLD_SIZE):
super(MultiProcessTestCase, self).setUp()
crypten.debug.configure_logging()
self.world_size = world_size
self.default_tolerance = 0.5
self.queue = self.mp_context.Queue()
# This gets called in the child processes as well to give subclasses a
# chance to initialize themselves in the new process
if self.rank == self.MAIN_PROCESS_RANK:
self.file = tempfile.NamedTemporaryFile(delete=True).name
self.processes = [self._spawn_process(rank) for rank in range(world_size)]
if crypten.mpc.ttp_required():
self.processes += [self._spawn_ttp()]
def tearDown(self):
super(MultiProcessTestCase, self).tearDown()
for p in self.processes:
p.terminate()
def _current_test_name(self):
# self.id() == e.g. '__main__.TestDistributed.TestAdditive.test_get_rank'
return self.id().split(".")[-1]
@staticmethod
def _spawn_ttp_process_with_config(config):
"""Runs TTPServer with config copied from parent"""
cfg.set_config(config)
crypten.mpc.provider.TTPServer()
def _spawn_ttp(self):
communicator_args = {
"WORLD_SIZE": self.world_size,
"RANK": self.world_size,
"RENDEZVOUS": "file://%s" % self.file,
"BACKEND": "gloo",
}
for key, val in communicator_args.items():
os.environ[key] = str(val)
process = self.mp_context.Process(
target=self._spawn_ttp_process_with_config, name="TTP", args=(cfg.config,)
)
process.start()
return process
def _spawn_process(self, rank):
name = "Process " + str(rank)
test_name = self._current_test_name()
process = self.mp_context.Process(
target=self.__class__._run,
name=name,
args=(test_name, cfg.config, rank, self.world_size, self.file, self.queue),
)
process.start()
return process
@classmethod
def _run(cls, test_name, config, rank, world_size, file, exception_queue):
self = cls(test_name)
self.file = file
self.rank = int(rank)
self.world_size = world_size
# Copy config to child processes.
cfg.set_config(config)
# set environment variables:
communicator_args = {
"WORLD_SIZE": self.world_size,
"RANK": self.rank,
"RENDEZVOUS": "file://%s" % self.file,
"BACKEND": "gloo",
}
for key, val in communicator_args.items():
os.environ[key] = str(val)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
try:
crypten.init()
except BaseException:
tb_string = traceback.format_exc()
exception_queue.put(tb_string)
sys.exit(0)
self.setUp()
try:
getattr(self, test_name)()
exception_queue.put(None)
except BaseException:
tb_string = traceback.format_exc()
exception_queue.put(tb_string)
crypten.uninit()
sys.exit(0)
def _join_processes(self, fn):
exceptions = {}
for p in self.processes:
p.join()
if not self.queue.empty():
tb = self.queue.get()
if tb is not None:
exceptions[p.name] = tb
test_name = str(self.__class__).split("'")[1]
test_name += f".{self._current_test_name()}"
msg = f"\n\n\n~ Test {test_name} failed ~"
msg += "\n===========\nExceptions:\n===========\n"
for name, tb in exceptions.items():
msg += f"** {name} ** :\n{tb}\n"
self.assertEqual(len(exceptions), 0, msg)
| CrypTen-main | test/multiprocess_test_case.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
# from test.multiprocess_test_case import get_random_test_tensor
import crypten
import torch
import torchvision
from test.multiprocess_test_case import MultiProcessTestCase
class TestModels(MultiProcessTestCase):
"""
This class tests the crypten.models package.
"""
__PRETRAINED_UNAVAILABLE = [
"mnasnet0_75",
"mnasnet1_3",
"shufflenet_v2_x1_5",
"shufflenet_v2_x2_0",
]
def setUp(self):
super().setUp()
crypten.init()
def _check_modules(self, crypten_model, torchvision_model, msg):
msg += " in modules."
# Check modules()
crypten_modules = [m for m in crypten_model.modules()]
torchvision_modules = [m for m in torchvision_model.modules()]
self.assertTrue(len(crypten_modules) == len(torchvision_modules), msg)
for i, module in enumerate(crypten_modules):
self.assertEqual(
type(module).__name__, type(torchvision_modules[i]).__name__, msg
)
# Check named_modules()
crypten_named_modules = dict(crypten_model.named_modules())
torchvision_named_modules = dict(torchvision_model.named_modules())
self.assertEqual(
len(crypten_named_modules), len(torchvision_named_modules), msg
)
for k in crypten_named_modules.keys():
self.assertTrue(k in torchvision_named_modules, msg)
def _check_parameters(self, crypten_model, torchvision_model, pretrained, msg):
msg += " in parameters."
if pretrained:
msg = f"Pretrained {msg}"
# Test parameters()
crypten_params = [p for p in crypten_model.parameters()]
torchvision_params = [p for p in torchvision_model.parameters()]
self.assertEqual(len(crypten_params), len(torchvision_params), msg)
for i, crypten_param in enumerate(crypten_params):
torchvision_param = torchvision_params[i]
self.assertEqual(crypten_param.size(), torchvision_param.size(), msg)
if pretrained:
if isinstance(crypten_param, crypten.CrypTensor):
crypten_param = crypten_param.get_plain_text()
self.assertTrue(
torch.allclose(crypten_param, torchvision_param, atol=1e-4)
)
# Test named_parameters()
crypten_named_params = dict(crypten_model.named_parameters())
torchvision_named_params = dict(torchvision_model.named_parameters())
self.assertEqual(len(crypten_named_params), len(torchvision_named_params))
for name, crypten_param in crypten_named_params.items():
self.assertTrue(name in torchvision_named_params, msg)
torchvision_param = torchvision_named_params[name]
self.assertEqual(
crypten_param.size(), torchvision_param.size(), f"{msg}: {name} size"
)
if pretrained:
if isinstance(crypten_param, crypten.CrypTensor):
crypten_param = crypten_param.get_plain_text()
self.assertTrue(
torch.allclose(crypten_param, torchvision_param, atol=1e-4),
f"{msg}: {name}",
)
def _check_model(self, model_name, *args, **kwargs):
crypten_model = getattr(crypten.models, model_name)(*args, **kwargs)
torchvision_model = getattr(torchvision.models, model_name)(*args, **kwargs)
self.assertTrue(
isinstance(crypten_model, crypten.nn.Module),
f"{model_name} crypten model is not a crypten.nn.Module",
)
self.assertTrue(
isinstance(torchvision_model, torch.nn.Module),
f"{model_name} torchvision model is not a torch.nn.Module",
)
msg = f"{model_name} failed"
# Check Modules
self._check_modules(crypten_model, torchvision_model, msg)
# Check Parameters
pretrained = kwargs.get("pretrained", False)
self._check_parameters(crypten_model, torchvision_model, pretrained, msg)
# Check encrypted
crypten_model.encrypt()
self._check_modules(crypten_model, torchvision_model, msg)
self._check_parameters(crypten_model, torchvision_model, pretrained, msg)
# NOTE: Removing to avoid timeout issues
# Check forward pass:
# input = get_random_test_tensor(size=(1, 3, 224, 224), is_float=True)
# output = torchvision_model(input)
# encr_input = crypten.cryptensor(input)
# encr_output = crypten_model(encr_input)
# self._check(encr_output, output, f"{model_name} model forward failed")
def _check_all_models(self, list_of_model_names):
for model_name in list_of_model_names:
for pretrained in [False, True]:
if pretrained and model_name in self.__PRETRAINED_UNAVAILABLE:
# mnasnet raises ValueError while shufflenet raises NotImplementedError
with self.assertRaises((ValueError, NotImplementedError)):
self._check_model(model_name, pretrained=pretrained)
continue
self._check_model(model_name, pretrained=pretrained)
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_alexnet(self):
"""Tests AlexNet model"""
self._check_model("alexnet")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_densenet121(self):
"""Tests DenseNet121 model"""
self._check_model("densenet121")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_densenet161(self):
"""Tests DenseNet161 model"""
self._check_model("densenet161")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_densenet169(self):
"""Tests DenseNet169 model"""
self._check_model("densenet169")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_densenet201(self):
"""Tests DenseNet201 model"""
self._check_model("densenet201")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_googlenet(self):
"""Tests GoogLeNet models"""
self._check_model("googlenet")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_inception(self):
"""Tests inception models"""
self._check_model("inception_v3")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_mnasnet(self):
"""Tests MnasNet models"""
model_names = ["mnasnet0_5", "mnasnet0_75", "mnasnet1_0", "mnasnet1_3"]
self._check_all_models(model_names)
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_mobilenet(self):
"""Tests MobileNet models"""
self._check_model("mobilenet_v2")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_resnet_small(self):
"""Tests small ResNet models"""
model_names = ["resnet18", "resnet34", "resnet50"]
self._check_all_models(model_names)
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_resnet101(self):
"""Tests ResNet101 model"""
self._check_model("resnet101")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_resnet152(self):
"""Tests ResNet152 model"""
self._check_model("resnet152")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_resnext101_32x8d(self):
"""Tests ResNeXt models"""
self._check_model("resnext101_32x8d")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_resnext50_32x4d(self):
self._check_model("resnext50_32x4d")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_shufflenet(self):
"""Tests ShuffleNet models"""
model_names = [
"shufflenet_v2_x0_5",
"shufflenet_v2_x1_0",
"shufflenet_v2_x1_5",
"shufflenet_v2_x2_0",
]
self._check_all_models(model_names)
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_squeezenet(self):
"""Tests SqueezeNet models"""
model_names = ["squeezenet1_0", "squeezenet1_1"]
self._check_all_models(model_names)
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg11(self):
"""Tests VGG11 model"""
self._check_model("vgg11")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg11_bn(self):
"""Tests VGG11 model with Batch Normalization"""
self._check_model("vgg11_bn")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg13(self):
"""Tests VGG13 model"""
self._check_model("vgg13")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg13_bn(self):
"""Tests VGG13 model with Batch Normalization"""
self._check_model("vgg13_bn")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg16(self):
"""Tests VGG16 model"""
self._check_model("vgg16")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg16_bn(self):
"""Tests VGG16 model with Batch Normalization"""
self._check_model("vgg16_bn")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg19(self):
"""Tests VGG19 model"""
self._check_model("vgg19")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_vgg19_bn(self):
"""Tests VGG19 model with Batch Normalization"""
self._check_model("vgg19_bn")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_wide_resnet101_2(self):
"""Tests wide_resnet101_2 model"""
self._check_model("wide_resnet101_2")
@unittest.skip("Skipping to resolve timeout issues in unittest framework")
def test_wide_resnet50_2(self):
"""Test wide_resnet50_2 model"""
self._check_model("wide_resnet50_2")
| CrypTen-main | test/test_models.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import crypten
import torch
from crypten.config import cfg
from test.multiprocess_test_case import MultiProcessTestCase
class TestDistributions:
"""
This class tests accuracy of distributions provided by random sampling in crypten.
"""
def _check_distribution(
self, func, expected_mean, expected_variance, lb=None, ub=None
):
"""
Checks that the function `func` returns a distribution with the expected
size, mean, and variance.
Arguments:
func - A function that takes a size and returns a random sample as a CrypTensor
expected_mean - The expected mean for the distribution returned by function `func`
expected_variance - The expected variance for the distribution returned by function `func`
lb - An expected lower bound on samples from the given distribution. Use None if -Inf.
ub - An expected upper bound on samples from the given distribution. Use None if +Inf.
"""
name = func.__name__
for size in [(10000,), (1000, 10), (101, 11, 11)]:
sample = func(size)
self.assertTrue(
sample.size() == size, "Incorrect size for %s distribution" % name
)
plain_sample = sample.get_plain_text().float()
mean = plain_sample.mean()
var = plain_sample.var()
self.assertTrue(
math.isclose(mean, expected_mean, rel_tol=1e-1, abs_tol=1e-1),
"incorrect variance for %s distribution: %f" % (name, mean),
)
self.assertTrue(
math.isclose(var, expected_variance, rel_tol=1e-1, abs_tol=1e-1),
"incorrect variance for %s distribution: %f" % (name, var),
)
if lb is not None:
self.assertTrue(
plain_sample.ge(lb).all(),
"Sample detected below lower bound for %s distribution" % name,
)
if ub is not None:
self.assertTrue(
plain_sample.le(ub).all(),
"Sample detected below lower bound for %s distribution" % name,
)
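# Usage sketch (hypothetical sampler, not part of the suite): an exponential
# sampler with rate 1 would be validated as
#   self._check_distribution(exp_sampler, 1.0, 1.0, lb=0)
# since Exp(1) has mean 1, variance 1, and support [0, inf).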
def test_uniform(self):
self._check_distribution(crypten.rand, 0.5, 0.083333, lb=0, ub=1)
def test_normal(self):
self._check_distribution(crypten.randn, 0, 1)
def test_bernoulli(self):
for p in [0.25 * i for i in range(5)]:
def bernoulli(*size):
x = crypten.cryptensor(p * torch.ones(*size))
return x.bernoulli()
self._check_distribution(bernoulli, p, p * (1 - p), lb=0, ub=1)
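# For Bernoulli(p): E[X] = p and Var[X] = E[X^2] - E[X]^2 = p - p^2 = p(1 - p).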
# Assert all values are in discrete set {0, 1}
tensor = bernoulli((1000,)).get_plain_text()
self.assertTrue(
((tensor == 0) + (tensor == 1)).all(), "Invalid Bernoulli values"
)
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestDistributions):
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestDistributions):
def setUp(self):
self._original_provider = cfg.mpc.provider
crypten.CrypTensor.set_grad_enabled(False)
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
crypten.CrypTensor.set_grad_enabled(True)
super(TestTTP, self).tearDown()
| CrypTen-main | test/test_distributions.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
import crypten
import crypten.communicator as comm
import crypten.mpc as mpc
import torch
from crypten.config import cfg
@mpc.run_multiprocess(world_size=2)
def test_rank_func():
return comm.get().get_rank()
@mpc.run_multiprocess(world_size=2)
def test_exception_func():
raise RuntimeError()
@mpc.run_multiprocess(world_size=10)
def test_worldsize_func():
return 1
@mpc.run_multiprocess(world_size=2)
def test_generator_func():
device = torch.device("cpu")
t0 = torch.randint(
-(2**63), 2**63 - 1, (1,), generator=crypten.generators["prev"][device]
).item()
t1 = torch.randint(
-(2**63), 2**63 - 1, (1,), generator=crypten.generators["next"][device]
).item()
return (t0, t1)
@mpc.run_multiprocess(world_size=2)
def test_with_args_kwargs_func(first, *args, a=None, **kwargs):
"""function that removes first arg and `a` kwarg"""
return args, kwargs
@mpc.run_multiprocess(world_size=5)
def test_rng_seeds_func():
"""Tests that rng seeds differ and coordinate where desired"""
device = torch.device("cpu")
prev_seed = crypten.generators["prev"][device].initial_seed()
next_seed = crypten.generators["next"][device].initial_seed()
local_seed = crypten.generators["local"][device].initial_seed()
global_seed = crypten.generators["global"][device].initial_seed()
return (prev_seed, next_seed, local_seed, global_seed)
class TestContext(unittest.TestCase):
def test_rank(self):
ranks = test_rank_func()
self.assertEqual(ranks, [0, 1])
def test_exception(self):
ret = test_exception_func()
self.assertIsNone(ret)
def test_world_size(self):
ones = test_worldsize_func()
self.assertEqual(ones, [1] * 10)
def test_in_first(self):
# TODO: Make this work with TTP provider
cfg.mpc.provider = "TFP"
# This will cause the parent process to init with world-size 1
crypten.init()
self.assertEqual(comm.get().get_world_size(), 1)
# This will fork 2 children which will have to init with world-size 2
self.assertEqual(test_rank_func(), [0, 1])
# Make sure everything is the same in the parent
self.assertEqual(comm.get().get_world_size(), 1)
def test_generator(self):
"""Tests that generators for PRZS RNG are setup properly with different
RNG seeds for each process.
"""
generators = test_generator_func()
# Test that generators are communicated properly across processes
for i in range(len(generators)):
j = (i + 1) % len(generators) # Next process index
self.assertEqual(generators[i][0], generators[j][1])
# Test that RNG seeds are different from each process
for i in range(len(generators)):
self.assertNotEqual(generators[i][0], generators[i][1])
def test_with_args_kwargs(self):
args = (2, 3, 5, 8)
kwargs = {"a": 1, "b": 2, "c": 3, "d": 4}
retval = test_with_args_kwargs_func(*args, **kwargs)
ret_args, ret_kwargs = retval[0]
kwargs.pop("a")
self.assertEqual(ret_args, args[1:])
self.assertEqual(ret_kwargs, kwargs)
def test_rng_seeds(self):
all_seeds = test_rng_seeds_func()
prev_seeds = [seed[0] for seed in all_seeds]
next_seeds = [seed[1] for seed in all_seeds]
local_seeds = [seed[2] for seed in all_seeds]
global_seeds = [seed[3] for seed in all_seeds]
# Test local seeds are all unique
self.assertTrue(len(set(local_seeds)) == len(local_seeds))
# Test global seeds are all the same
self.assertTrue(len(set(global_seeds)) == 1)
# Test that next seeds are equal to next party's prev_seed
for i, next_seed in enumerate(next_seeds):
next_index = (i + 1) % len(prev_seeds)
prev_seed = prev_seeds[next_index]
self.assertEqual(next_seed, prev_seed)
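# Seed-ring sketch for a hypothetical world_size=3: the assertions above
# encode P0.next == P1.prev, P1.next == P2.prev, and P2.next == P0.prev.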
| CrypTen-main | test/test_context.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import random
import unittest
import crypten
import crypten.gradients as gradients
import torch
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from crypten.gradients import AutogradContext, AutogradFunction
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
class TestAutograd:
"""
This class tests all autograd-related functionality.
"""
def setUp(self):
# we do not want main process (rank -1) initializing the communicator:
if self.rank >= 0:
crypten.init()
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# check that sizes match:
self.assertTrue(tensor.size() == reference.size(), msg)
# check that values match:
if is_float_tensor(reference):
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
test_passed = test_passed.gt(0).all().item() == 1
else:
test_passed = (tensor == reference).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def test_non_differentiable_marking(self):
"""Tests whether marking of non-differentiability works correctly."""
# generate random inputs:
inputs = [get_random_test_tensor(is_float=True) for _ in range(5)]
inputs = [crypten.cryptensor(input) for input in inputs]
ctx = AutogradContext()
# repeat test multiple times:
for _ in range(10):
# mark non-differentiable inputs as such:
differentiable = [random.random() > 0.5 for _ in range(len(inputs))]
for idx, diff in enumerate(differentiable):
if not diff:
ctx.mark_non_differentiable(inputs[idx])
# check that inputs were correctly marked:
for idx, input in enumerate(inputs):
self.assertEqual(
ctx.is_differentiable(input),
differentiable[idx],
"marking of differentiability failed",
)
ctx.reset()
# test behavior of autograd in CrypTensor:
input = inputs[0]
input.requires_grad = True
reference = [True, True, False]
for func_name in ["min", "max"]:
outputs = [None] * 3
outputs[0] = getattr(input, func_name)()
outputs[1], outputs[2] = getattr(input, func_name)(0)
for idx, output in enumerate(outputs):
self.assertEqual(
output.requires_grad,
reference[idx],
"value of requires_grad is incorrect",
)
# behavior of max_pool2d in which indices are returned:
input = get_random_test_tensor(size=(1, 3, 8, 8), is_float=True)
input = crypten.cryptensor(input, requires_grad=True)
reference = [True, True, False]
outputs = [None] * 3
outputs[0] = input.max_pool2d(2, return_indices=False)
outputs[1], outputs[2] = input.max_pool2d(2, return_indices=True)
for idx, output in enumerate(outputs):
self.assertEqual(
output.requires_grad,
reference[idx],
"value of requires_grad is incorrect",
)
def test_inplace(self):
"""
Tests that in-place functions cannot be used in autograd but return
correct results outside of autograd.
"""
value = 1.5
reference = get_random_test_tensor(size=(1, 3, 8, 8), is_float=True)
for requires_grad in [False, True]:
result = crypten.cryptensor(reference, requires_grad=requires_grad)
if requires_grad:
with self.assertRaises(RuntimeError):
result.add_(value)
else:
result.add_(value)
self._check(result, reference.add(value), "in-place addition failed")
def test_autograd_registration(self):
"""Tests registration of new autograd function."""
# check that get_grad_fn() returns correct functions:
for func_name, reference_func in gradients.FUNCTION_REGISTRY.items():
grad_fn = gradients.get_grad_fn(func_name)
self.assertEqual(grad_fn, reference_func)
self.assertEqual(grad_fn.name, func_name)
# check that non-existing functions return None:
for invalid_func_name in ["bfobofb", "djhfhr"]:
func = gradients.get_grad_fn(invalid_func_name)
self.assertIsNone(func)
# check that registering new classes works:
for func_name in ["mock_func1", "mock_func2", "mock_func3"]:
cls = type("%sName" % func_name, (AutogradFunction,), {})
gradients.register_function(func_name)(cls)
grad_fn = gradients.get_grad_fn(func_name)
self.assertEqual(grad_fn, cls)
self.assertEqual(grad_fn.name, func_name)
# check that existing functions cannot be overwritten:
for func_name in ["add", "sub", "view"]:
cls = type("%sName" % func_name, (AutogradFunction,), {})
with self.assertRaises(ValueError):
gradients.register_function(func_name)(cls)
def test_autograd_func_take(self):
"""Tests the part of autograd take that does not have a torch equivalent"""
tensor_size = [5, 5, 5, 5]
index = torch.tensor([[[1, 2], [3, 4]], [[4, 2], [1, 3]]], dtype=torch.long)
# Test when dimension!=None
for dimension in range(0, 4):
tensor = get_random_test_tensor(size=tensor_size, is_float=True)
ref_forward = torch.from_numpy(tensor.numpy().take(index, dimension))
encrypted_tensor = crypten.cryptensor(tensor)
encr_inputs = [encrypted_tensor, index, dimension]
# test forward
ctx = AutogradContext()
grad_fn_take = gradients.get_grad_fn("take")
encr_output = grad_fn_take.forward(ctx, *encr_inputs)
self._check(encr_output, ref_forward, "take forward failed: dimension set")
# test backward:
# first, recreate take forward function with only torch operations
tensor2 = get_random_test_tensor(size=tensor_size, is_float=True)
tensor2.requires_grad = True
all_indices = [slice(0, x) for x in tensor2.size()]
all_indices[dimension] = index
ref_forward_torch = tensor2[all_indices]
grad_output = torch.ones(ref_forward_torch.size())
ref_forward_torch.backward(grad_output)
# next, do backward pass on encrypted tensor
encr_grad_output = encr_output.new(grad_output)
encr_grad = grad_fn_take.backward(ctx, encr_grad_output)
# finally, compare values
self._check(encr_grad, tensor2.grad, "take backward failed: dimension set")
def test_detach(self):
"""Tests that detach() works as expected."""
for func_name in ["detach", "detach_"]:
# get test case:
input_size = (12, 5)
input1 = get_random_test_tensor(size=input_size, is_float=True)
input2 = get_random_test_tensor(size=input_size, is_float=True)
input1 = crypten.cryptensor(input1, requires_grad=True)
input2 = crypten.cryptensor(input2, requires_grad=True)
# perform forward computation with detach in the middle:
intermediate = input1.add(1.0)
intermediate = getattr(intermediate, func_name)()
output = intermediate.add(input2).sum()
# perform backward:
output.backward()
msg = "detach() function does not behave as expected"
self.assertIsNone(output.grad, msg)
self.assertIsNone(intermediate.grad, msg)
self.assertIsNone(input1.grad, msg)
self.assertIsNotNone(input2.grad, msg)
def test_forward_tracking(self):
"""Tests that requires_grad influences tracking of forward computations."""
for requires_grad in [True, False]:
# get test case:
input = get_random_test_tensor(size=(12, 5), is_float=True)
input = crypten.cryptensor(input, requires_grad=requires_grad)
# perform forward computation:
output = input.exp().sum()
# setting requires_grad post-hoc should not affect backward behavior:
input.requires_grad = True
output.requires_grad = True
output.backward()
# check results:
msg = "tracking of forward computations does not work as expected"
if requires_grad:
self.assertIsNotNone(input.grad, msg)
self.assertIsNone(output.grad, msg)
else:
self.assertIsNone(input.grad, msg)
self.assertIsNotNone(output.grad, msg)
def test_autograd_accumulation(self):
"""Tests accumulation in autograd."""
# graphs that have nodes with multiple parents, dead leafs, etc.:
def test_case1(input, encr_input):
output = input.add(1.0).add(input.exp()).sum()
encr_output = encr_input.add(1.0).add(encr_input.exp()).sum()
return output, encr_output
def test_case2(input, encr_input):
intermediate = input.pow(2.0) # PyTorch
output = intermediate.add(1.0).add(intermediate.mul(2.0)).sum()
encr_intermediate = encr_input.square() # CrypTen
encr_output = (
encr_intermediate.add(1.0).add(encr_intermediate.mul(2.0)).sum()
)
return output, encr_output
def test_case3(input, encr_input):
intermediate1 = input.pow(2.0) # PyTorch
intermediate2 = intermediate1.add(1.0).add(intermediate1.mul(2.0))
output = intermediate2.pow(2.0).sum()
encr_intermediate1 = encr_input.square() # CrypTen
encr_intermediate2 = encr_intermediate1.add(1.0).add(
encr_intermediate1.mul(2.0)
)
encr_output = encr_intermediate2.square().sum()
return output, encr_output
def test_case4(input, encr_input):
intermediate1 = input.mul(3.0).add(2.0).pow(2.0) # PyTorch
intermediate2 = intermediate1.add(1.0).add(intermediate1.mul(2.0))
output = intermediate2.pow(2.0).sum()
encr_intermediate1 = encr_input.mul(3.0).add(2.0).square() # CrypTen
encr_intermediate2 = encr_intermediate1.add(1.0).add(
encr_intermediate1.mul(2.0)
)
encr_output = encr_intermediate2.square().sum()
return output, encr_output
def test_case5(input, encr_input):
intermediate1 = input.mul(3.0) # PyTorch
intermediate2 = input.add(2.0).pow(2.0)
intermediate3 = input.pow(2.0)
output = (
torch.cat([intermediate1, intermediate2, intermediate3]).mul(0.5).sum()
)
encr_intermediate1 = encr_input.mul(3.0) # CrypTen
encr_intermediate2 = encr_input.add(2.0).square()
encr_intermediate3 = encr_input.pow(2.0)
encr_output = (
crypten.cat(
[encr_intermediate1, encr_intermediate2, encr_intermediate3]
)
.mul(0.5)
.sum()
)
return output, encr_output
def test_case6(input, encr_input):
idx1 = torch.tensor([[0, 2, 4, 3, 8]], dtype=torch.long)
idx2 = torch.tensor([[5, 1, 3, 5, 2]], dtype=torch.long)
idx3 = torch.tensor([[2, 3, 1]], dtype=torch.long)
intermediate1 = input.gather(0, idx1).gather(1, idx3).pow(2.0) # PyTorch
intermediate2 = input.gather(0, idx2).gather(1, idx3).add(-2.0)
output = torch.cat([intermediate1, intermediate2]).mul(0.5).sum()
encr_intermediate1 = (
encr_input.gather(0, idx1).gather(1, idx3).square()
) # CrypTen
encr_intermediate2 = encr_input.gather(0, idx2).gather(1, idx3).add(-2.0)
encr_output = (
crypten.cat([encr_intermediate1, encr_intermediate2], dim=0)
.mul(0.5)
.sum()
)
return output, encr_output
def test_case7(input, encr_input):
intermediate1 = input.add(3.0) # PyTorch
intermediate2 = input.add(2.0).pow(2.0)
intermediate3 = intermediate1.add(intermediate2)
intermediate4 = intermediate1.add(intermediate2)
output = intermediate3.add(intermediate4).sum()
encr_intermediate1 = encr_input.add(3.0) # CrypTen
encr_intermediate2 = encr_input.add(2.0).pow(2.0)
encr_intermediate3 = encr_intermediate1.add(encr_intermediate2)
encr_intermediate4 = encr_intermediate1.add(encr_intermediate2)
encr_output = encr_intermediate3.add(encr_intermediate4).sum()
return output, encr_output
def test_case8(input, encr_input):
intermediate1 = input.add(3.0)
intermediate2 = torch.cat([input, intermediate1])
intermediate3 = intermediate2.pow(2.0)
output = torch.cat([input, intermediate2, intermediate3]).add(-1).sum()
encr_intermediate1 = encr_input.add(3.0)
encr_intermediate2 = crypten.cat([encr_input, encr_intermediate1])
encr_intermediate3 = encr_intermediate2.pow(2.0)
encr_output = (
crypten.cat([encr_input, encr_intermediate2, encr_intermediate3])
.add(-1)
.sum()
)
return output, encr_output
def test_case9(input, encr_input):
intermediate1 = torch.cat([input, input])
intermediate2 = intermediate1.mean(0, keepdim=True)
output = torch.cat([intermediate2, intermediate1], dim=0).sum()
encr_intermediate1 = crypten.cat([encr_input, encr_input])
encr_intermediate2 = encr_intermediate1.mean(0, keepdim=True)
encr_output = crypten.cat([encr_intermediate2, encr_intermediate1]).sum()
return output, encr_output
# loop over test cases:
test_cases = [
value
for key, value in locals().items()
if callable(value) and key.startswith("test_case")
]
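# NOTE: locals() here picks up the nested test_case* closures defined above;
# since Python 3.7 dicts preserve insertion order, so the cases run in
# definition order.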
for idx, test_case in enumerate(test_cases):
# get input tensors:
input = get_random_test_tensor(size=(12, 5), is_float=True)
input.requires_grad = True
encr_input = crypten.cryptensor(input, requires_grad=True)
# perform multiple forward computations on input that get combined:
output, encr_output = test_case(input, encr_input)
self._check(
encr_output._tensor, output, "forward for test case %d failed" % idx
)
self.assertTrue(
encr_output.requires_grad,
"requires_grad incorrect for test case %d" % idx,
)
# perform backward computation:
output.backward()
encr_output.backward()
self._check(
encr_input.grad, input.grad, "backward for test case %d failed" % idx
)
# test cases in which tensor gets combined with itself:
for func_name in ["sub", "add", "mul"]:
# get input tensors:
input = get_random_test_tensor(size=(12, 5), is_float=True)
input.requires_grad = True
encr_input = crypten.cryptensor(input, requires_grad=True)
# perform forward-backward pass:
output = getattr(input, func_name)(input).sum()
encr_output = getattr(encr_input, func_name)(encr_input).sum()
self._check(encr_output._tensor, output, "forward failed")
self.assertTrue(encr_output.requires_grad, "requires_grad incorrect")
output.backward()
encr_output.backward()
self._check(encr_input.grad, input.grad, "%s backward failed" % func_name)
def test_autograd(self):
"""Tests autograd graph construction and backprop."""
# define test cases:
tests = [
(1, ["relu", "neg", "relu", "sum"]),
(2, ["t", "neg", "add", "sum"]),
(2, ["relu", "mul", "t", "sum"]),
]
binary_functions = ["add", "sub", "mul", "dot", "matmul"]
# PyTorch test case:
for test in tests:
# get test case:
number_of_inputs, ops = test
inputs = [
get_random_test_tensor(size=(12, 5), is_float=True)
for _ in range(number_of_inputs)
]
encr_inputs = [crypten.cryptensor(input) for input in inputs]
# get autograd variables:
for input in inputs:
input.requires_grad = True
for encr_input in encr_inputs:
encr_input.requires_grad = True
# perform forward pass, logging all intermediate outputs:
outputs, encr_outputs = [inputs], [encr_inputs]
for op in ops:
# get inputs for current operation:
input, output = outputs[-1], []
encr_input, encr_output = encr_outputs[-1], []
# apply current operation:
if op in binary_functions: # combine outputs via operation
output.append(getattr(input[0], op)(input[1]))
encr_output.append(getattr(encr_input[0], op)(encr_input[1]))
else:
for idx in range(len(input)):
output.append(getattr(input[idx], op)())
encr_output.append(getattr(encr_input[idx], op)())
# keep references to outputs of operation:
outputs.append(output)
encr_outputs.append(encr_output)
# check output of forward pass:
output, encr_output = outputs[-1][0], encr_outputs[-1][0]
self._check(encr_output._tensor, output, "forward failed")
self.assertTrue(encr_output.requires_grad, "requires_grad incorrect")
# perform backward pass:
output.backward()
encr_output.backward()
# test result of running backward function:
for idx in range(number_of_inputs):
self._check(encr_inputs[idx].grad, inputs[idx].grad, "backward failed")
def test_autograd_repetition(self):
"""Tests running autograd on the same input repeatedly."""
# create test case:
input = get_random_test_tensor(size=(12, 5), is_float=True)
input.requires_grad = True
encr_input = crypten.cryptensor(input, requires_grad=True)
# re-use the same input multiple times:
for _ in range(7):
# perform forward pass:
output = input.exp().sum()
encr_output = encr_input.exp().sum()
self._check(encr_output._tensor, output, "forward failed")
self.assertTrue(encr_output.requires_grad, "requires_grad incorrect")
# perform backward computation:
output.backward()
encr_output.backward()
self._check(encr_input.grad, input.grad, "backward failed")
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestAutograd):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestAutograd):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTTP, self).tearDown()
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_autograd.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import unittest
import crypten
import crypten.communicator as comm
import torch
import torch.nn.functional as F
from crypten.common.rng import generate_random_ring_element
from crypten.common.tensor_types import is_float_tensor
from crypten.common.util import count_wraps
from crypten.mpc.primitives import ArithmeticSharedTensor
from test.multiprocess_test_case import get_random_test_tensor, MultiProcessTestCase
class TestArithmetic(MultiProcessTestCase):
"""
This class tests all functions of the ArithmeticSharedTensor.
"""
def setUp(self):
super().setUp()
# We don't want the main process (rank -1) to initialize the communicator
if self.rank >= 0:
crypten.init()
def _check(self, encrypted_tensor, reference, msg, dst=None, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text(dst=dst)
if dst is not None and dst != self.rank:
self.assertIsNone(tensor)
return
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
self.assertTrue(is_float_tensor(reference), "reference must be a float")
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
test_passed = test_passed.gt(0).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
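# The elementwise pass condition above is: relative error
# |t - r| / (|t| + |r|) <= tolerance, OR absolute error
# |t - r| <= 0.1 * tolerance. The absolute fallback keeps near-zero
# references, where the relative error blows up, from failing spuriously.
# E.g. with tolerance=0.05, t=0.003, r=0.0: the relative error is 1.0,
# but |t - r| = 0.003 <= 0.005, so the check passes.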
def test_share_attr(self):
"""Tests share attribute getter and setter"""
for is_float in (True, False):
reference = get_random_test_tensor(is_float=is_float)
encrypted_tensor = ArithmeticSharedTensor(reference)
self.assertTrue(
torch.equal(encrypted_tensor.share, encrypted_tensor.share),
"share getter failed",
)
new_share = get_random_test_tensor(is_float=False)
encrypted_tensor.share = new_share
self.assertTrue(
torch.equal(encrypted_tensor.share, new_share), "share setter failed"
)
def test_encrypt_decrypt(self):
"""
Tests tensor encryption and decryption for both positive
and negative values.
"""
sizes = [
(),
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
# encryption and decryption without source:
reference = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = ArithmeticSharedTensor(reference)
self._check(encrypted_tensor, reference, "en/decryption failed")
for dst in range(self.world_size):
self._check(
encrypted_tensor, reference, "en/decryption failed", dst=dst
)
# encryption and decryption with source:
for src in range(self.world_size):
input_tensor = reference if src == self.rank else []
encrypted_tensor = ArithmeticSharedTensor(
input_tensor, src=src, broadcast_size=True
)
for dst in range(self.world_size):
self._check(
encrypted_tensor,
reference,
"en/decryption with broadcast_size failed",
dst=dst,
)
def test_arithmetic(self):
"""Tests arithmetic functions on encrypted tensor."""
arithmetic_functions = ["add", "add_", "sub", "sub_", "mul", "mul_"]
for func in arithmetic_functions:
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
tensor1 = get_random_test_tensor(is_float=True)
tensor2 = get_random_test_tensor(is_float=True)
encrypted = ArithmeticSharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted, func)(encrypted2)
private_type = tensor_type == ArithmeticSharedTensor
self._check(
encrypted_out,
reference,
"%s %s failed" % ("private" if private_type else "public", func),
)
if "_" in func:
# Check in-place op worked
self._check(
encrypted,
reference,
"%s %s failed"
% ("private" if private_type else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed"
% (
"private"
if tensor_type == ArithmeticSharedTensor
else "public",
func,
),
)
# Check encrypted vector with encrypted scalar works.
tensor1 = get_random_test_tensor(is_float=True)
tensor2 = get_random_test_tensor(is_float=True, size=(1,))
encrypted1 = ArithmeticSharedTensor(tensor1)
encrypted2 = ArithmeticSharedTensor(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted1, func)(encrypted2)
self._check(encrypted_out, reference, "private %s failed" % func)
tensor = get_random_test_tensor(is_float=True)
reference = tensor * tensor
encrypted = ArithmeticSharedTensor(tensor)
encrypted_out = encrypted.square()
self._check(encrypted_out, reference, "square failed")
# Test radd, rsub, and rmul
reference = 2 + tensor1
encrypted = ArithmeticSharedTensor(tensor1)
encrypted_out = 2 + encrypted
self._check(encrypted_out, reference, "right add failed")
reference = 2 - tensor1
encrypted_out = 2 - encrypted
self._check(encrypted_out, reference, "right sub failed")
reference = 2 * tensor1
encrypted_out = 2 * encrypted
self._check(encrypted_out, reference, "right mul failed")
def test_sum(self):
"""Tests sum reduction on encrypted tensor."""
tensor = get_random_test_tensor(size=(5, 100, 100), is_float=True)
encrypted = ArithmeticSharedTensor(tensor)
self._check(encrypted.sum(), tensor.sum(), "sum failed")
for dim in [0, 1, 2]:
reference = tensor.sum(dim)
encrypted_out = encrypted.sum(dim)
self._check(encrypted_out, reference, "sum failed")
def test_prod(self):
"""Tests prod reduction on encrypted tensor."""
tensor = get_random_test_tensor(size=(3, 3), max_value=3, is_float=False)
encrypted = ArithmeticSharedTensor(tensor)
self._check(encrypted.prod(), tensor.prod().float(), "prod failed")
# test with dim argument
tensor = get_random_test_tensor(size=(5, 5, 5), max_value=3, is_float=False)
encrypted = ArithmeticSharedTensor(tensor)
for dim in [0, 1, 2]:
reference = tensor.prod(dim).float()
encrypted_out = encrypted.prod(dim)
self._check(encrypted_out, reference, "prod failed")
def test_div(self):
"""Tests division of encrypted tensor by scalar."""
for function in ["div", "div_"]:
for scalar in [2, 2.0]:
tensor = get_random_test_tensor(is_float=True)
reference = tensor.float().div(scalar)
encrypted_tensor = ArithmeticSharedTensor(tensor)
encrypted_tensor = getattr(encrypted_tensor, function)(scalar)
self._check(encrypted_tensor, reference, "division failed")
divisor = get_random_test_tensor(is_float=True)
divisor += (divisor == 0).to(dtype=divisor.dtype) # avoid division by zero
reference = tensor.div(divisor)
encrypted_tensor = ArithmeticSharedTensor(tensor)
encrypted_tensor = getattr(encrypted_tensor, function)(divisor)
self._check(encrypted_tensor, reference, "division failed")
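# A plaintext sketch of the zero-avoidance trick used above: it adds 1
# exactly where the divisor is zero and leaves other entries untouched:
#   d = torch.tensor([2.0, 0.0, -3.0])
#   d + (d == 0).to(d.dtype) # -> tensor([ 2., 1., -3.])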
def test_mean(self):
"""Tests computing means of encrypted tensors."""
tensor = get_random_test_tensor(size=(5, 10, 15), is_float=True)
encrypted = ArithmeticSharedTensor(tensor)
self._check(encrypted.mean(), tensor.mean(), "mean failed")
for dim in [0, 1, 2]:
reference = tensor.mean(dim)
encrypted_out = encrypted.mean(dim)
self._check(encrypted_out, reference, "mean failed")
def test_matmul(self):
"""Test matrix multiplication."""
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
tensor = get_random_test_tensor(max_value=7, is_float=True)
for width in range(2, tensor.nelement()):
matrix_size = (tensor.nelement(), width)
matrix = get_random_test_tensor(
max_value=7, size=matrix_size, is_float=True
)
reference = tensor.matmul(matrix)
encrypted_tensor = ArithmeticSharedTensor(tensor)
matrix = tensor_type(matrix)
encrypted_tensor = encrypted_tensor.matmul(matrix)
private_type = tensor_type == ArithmeticSharedTensor
self._check(
encrypted_tensor,
reference,
"Private-%s matrix multiplication failed"
% ("private" if private_type else "public"),
)
def test_index_add(self):
"""Test index_add function of encrypted tensor"""
index_add_functions = ["index_add", "index_add_"]
tensor_size1 = [5, 5, 5, 5]
index = torch.tensor([1, 2, 3, 4, 4, 2, 1, 3], dtype=torch.long)
for dimension in range(0, 4):
tensor_size2 = [5, 5, 5, 5]
tensor_size2[dimension] = index.size(0)
for func in index_add_functions:
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
tensor1 = get_random_test_tensor(size=tensor_size1, is_float=True)
tensor2 = get_random_test_tensor(size=tensor_size2, is_float=True)
encrypted = ArithmeticSharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(dimension, index, tensor2)
encrypted_out = getattr(encrypted, func)(
dimension, index, encrypted2
)
private = tensor_type == ArithmeticSharedTensor
self._check(
encrypted_out,
reference,
"%s %s failed" % ("private" if private else "public", func),
)
if func.endswith("_"):
# Check in-place index_add worked
self._check(
encrypted,
reference,
"%s %s failed" % ("private" if private else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s failed" % ("private" if private else "public", func),
)
def test_scatter(self):
"""Test scatter/scatter_add function of encrypted tensor"""
funcs = ["scatter", "scatter_", "scatter_add", "scatter_add_"]
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for func in funcs:
for size in sizes:
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
for dim in range(len(size)):
tensor1 = get_random_test_tensor(size=size, is_float=True)
tensor2 = get_random_test_tensor(size=size, is_float=True)
index = get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
encrypted = ArithmeticSharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(dim, index, tensor2)
encrypted_out = getattr(encrypted, func)(dim, index, encrypted2)
private = tensor_type == ArithmeticSharedTensor
self._check(
encrypted_out,
reference,
"%s %s failed" % ("private" if private else "public", func),
)
if func.endswith("_"):
# Check in-place scatter/scatter-add modified input
self._check(
encrypted,
reference,
"%s %s failed to modify input"
% ("private" if private else "public", func),
)
else:
# Check original is not modified
self._check(
encrypted,
tensor1,
"%s %s unintendedly modified input"
% ("private" if private else "public", func),
)
def test_dot_ger(self):
"""Test dot product of vector and encrypted tensor."""
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
tensor1 = get_random_test_tensor(is_float=True).squeeze()
tensor2 = get_random_test_tensor(is_float=True).squeeze()
dot_reference = tensor1.dot(tensor2)
ger_reference = torch.outer(tensor1, tensor2)
tensor2 = tensor_type(tensor2)
# dot
encrypted_tensor = ArithmeticSharedTensor(tensor1)
encrypted_out = encrypted_tensor.dot(tensor2)
self._check(
encrypted_out,
dot_reference,
"%s dot product failed"
% ("private" if tensor_type == ArithmeticSharedTensor else "public"),
)
# ger
encrypted_tensor = ArithmeticSharedTensor(tensor1)
encrypted_out = encrypted_tensor.ger(tensor2)
self._check(
encrypted_out,
ger_reference,
"%s outer product failed"
% ("private" if tensor_type == ArithmeticSharedTensor else "public"),
)
def test_squeeze(self):
tensor = get_random_test_tensor(is_float=True)
for dim in [0, 1, 2]:
# Test unsqueeze
reference = tensor.unsqueeze(dim)
encrypted = ArithmeticSharedTensor(tensor)
encrypted_out = encrypted.unsqueeze(dim)
self._check(encrypted_out, reference, "unsqueeze failed")
# Test squeeze
encrypted = ArithmeticSharedTensor(tensor.unsqueeze(0))
encrypted_out = encrypted.squeeze()
self._check(encrypted_out, reference.squeeze(), "squeeze failed")
# Check that the encrypted_out and encrypted point to the same
# thing.
encrypted_out[0:2] = torch.tensor([0.0, 1.0], dtype=torch.float)
ref = encrypted.squeeze().get_plain_text()
self._check(encrypted_out, ref, "squeeze failed")
def test_transpose(self):
sizes = [
(1,),
(5,),
(1, 1),
(1, 5),
(5, 1),
(5, 5),
(1, 5, 5),
(5, 1, 5),
(5, 5, 1),
(5, 5, 5),
(1, 3, 32, 32),
(5, 3, 32, 32),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = ArithmeticSharedTensor(tensor)
if len(size) == 2: # t() asserts dim == 2
reference = tensor.t()
encrypted_out = encrypted_tensor.t()
self._check(encrypted_out, reference, "t() failed")
for dim0 in range(len(size)):
for dim1 in range(len(size)):
reference = tensor.transpose(dim0, dim1)
encrypted_out = encrypted_tensor.transpose(dim0, dim1)
self._check(encrypted_out, reference, "transpose failed")
def test_permute(self):
"""Test the permute operations"""
sizes = [
(1,),
(5,),
(1, 5),
(1, 5, 7),
(7, 1, 5),
(5, 7, 1),
(1, 3, 5, 7),
(5, 3, 32, 32),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = ArithmeticSharedTensor(tensor)
# test reversing the dimensions
dim_arr = [x - 1 for x in range(tensor.dim(), 0, -1)]
reference = tensor.permute(dim_arr)
encrypted_out = encrypted_tensor.permute(dim_arr)
self._check(encrypted_out, reference, "permute failed")
# test one particular non-reversed permutation
if tensor.dim() == 4:
dim_arr = [1, 3, 0, 2]
reference = tensor.permute(dim_arr)
encrypted_out = encrypted_tensor.permute(dim_arr)
self._check(encrypted_out, reference, "permute failed")
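# dim_arr above is just the reversed dimension order; e.g. a 3-D tensor
# yields dim_arr == [2, 1, 0], so the check compares tensor.permute(2, 1, 0)
# against its encrypted counterpart.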
def test_conv1d_smaller_signal_one_channel(self):
self._conv1d(5, 1)
def test_conv1d_smaller_signal_many_channels(self):
self._conv1d(5, 5)
def test_conv1d_larger_signal_one_channel(self):
self._conv1d(16, 1)
def test_conv1d_larger_signal_many_channels(self):
self._conv1d(16, 5)
def _conv1d(self, signal_size, in_channels):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
kernel_sizes = [1, 2, 3]
ochannels = [1, 3, 6]
paddings = [0, 1]
strides = [1, 2]
dilations = [1, 2]
groupings = [1, 2]
for func_name in ["conv1d", "conv_transpose1d"]:
for kernel_type in [lambda x: x, ArithmeticSharedTensor]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches,
kernel_sizes,
ochannels,
paddings,
strides,
dilations,
groupings,
):
input_size = (batches, in_channels * groups, signal_size)
signal = get_random_test_tensor(size=input_size, is_float=True)
if func_name == "conv1d":
k_size = (out_channels * groups, in_channels, kernel_size)
else:
k_size = (in_channels * groups, out_channels, kernel_size)
kernel = get_random_test_tensor(size=k_size, is_float=True)
reference = getattr(F, func_name)(
signal,
kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
encrypted_signal = ArithmeticSharedTensor(signal)
encrypted_kernel = kernel_type(kernel)
encrypted_conv = getattr(encrypted_signal, func_name)(
encrypted_kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
self._check(encrypted_conv, reference, f"{func_name} failed")
def test_conv2d_square_image_one_channel(self):
self._conv2d((5, 5), 1)
def test_conv2d_square_image_many_channels(self):
self._conv2d((5, 5), 5)
def test_conv2d_rectangular_image_one_channel(self):
self._conv2d((16, 7), 1)
def test_conv2d_rectangular_image_many_channels(self):
self._conv2d((16, 7), 5)
def _conv2d(self, image_size, in_channels):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
kernel_sizes = [(1, 1), (2, 2), (2, 3)]
ochannels = [1, 3, 6]
paddings = [0, 1, (0, 1)]
strides = [1, 2, (1, 2)]
dilations = [1, 2]
groupings = [1, 2]
for func_name in ["conv2d", "conv_transpose2d"]:
for kernel_type in [lambda x: x, ArithmeticSharedTensor]:
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches,
kernel_sizes,
ochannels,
paddings,
strides,
dilations,
groupings,
):
# sample input:
input_size = (batches, in_channels * groups, *image_size)
input = get_random_test_tensor(size=input_size, is_float=True)
# sample filtering kernel:
if func_name == "conv2d":
k_size = (out_channels * groups, in_channels, *kernel_size)
else:
k_size = (in_channels * groups, out_channels, *kernel_size)
kernel = get_random_test_tensor(size=k_size, is_float=True)
# perform filtering:
encr_matrix = ArithmeticSharedTensor(input)
encr_kernel = kernel_type(kernel)
encr_conv = getattr(encr_matrix, func_name)(
encr_kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
# check that result is correct:
reference = getattr(F, func_name)(
input,
kernel,
padding=padding,
stride=stride,
dilation=dilation,
groups=groups,
)
self._check(encr_conv, reference, "%s failed" % func_name)
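# For reference, each spatial output dim of conv2d follows the standard
# PyTorch formula:
#   out = floor((in + 2 * padding - dilation * (kernel - 1) - 1) / stride) + 1
# e.g. in=5, kernel=2, padding=1, stride=2, dilation=2:
#   floor((5 + 2 - 2 * 1 - 1) / 2) + 1 = floor(4 / 2) + 1 = 3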
def test_pooling(self):
"""Test avgPool of encrypted tensor."""
for width in range(2, 5):
for width2 in range(1, width):
matrix_size = (1, 4, 5, width)
matrix = get_random_test_tensor(size=matrix_size, is_float=True)
pool_size = width2
for stride in range(1, width2):
for padding in range(2):
reference = F.avg_pool2d(
matrix, pool_size, stride=stride, padding=padding
)
encrypted_matrix = ArithmeticSharedTensor(matrix)
encrypted_pool = encrypted_matrix.avg_pool2d(
pool_size, stride=stride, padding=padding
)
self._check(encrypted_pool, reference, "avg_pool2d failed")
def test_take(self):
"""Tests take function of encrypted tensor"""
tensor_size = [5, 5, 5, 5]
index = torch.tensor([[[1, 2], [3, 4]], [[4, 2], [1, 3]]], dtype=torch.long)
tensor = get_random_test_tensor(size=tensor_size, is_float=True)
# Test when dimension!=None
for dimension in range(0, 4):
reference = torch.from_numpy(tensor.numpy().take(index, dimension))
encrypted_tensor = ArithmeticSharedTensor(tensor)
encrypted_out = encrypted_tensor.take(index, dimension)
self._check(encrypted_out, reference, "take function failed: dimension set")
# Test when dimension is default (i.e. None)
sizes = [(15,), (5, 10), (15, 10, 5)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = ArithmeticSharedTensor(tensor)
take_indices = [[0], [10], [0, 5, 10]]
for indices in take_indices:
indices = torch.tensor(indices)
self._check(
encrypted_tensor.take(indices),
tensor.take(indices),
f"take failed with indices {indices}",
)
def test_get_set(self):
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
for size in range(1, 5):
# Test __getitem__
tensor = get_random_test_tensor(size=(size, size), is_float=True)
reference = tensor[:, 0]
encrypted_tensor = ArithmeticSharedTensor(tensor)
encrypted_out = encrypted_tensor[:, 0]
self._check(encrypted_out, reference, "getitem failed")
reference = tensor[0, :]
encrypted_out = encrypted_tensor[0, :]
self._check(encrypted_out, reference, "getitem failed")
# Test __setitem__
tensor2 = get_random_test_tensor(size=(size,), is_float=True)
reference = tensor.clone()
reference[:, 0] = tensor2
encrypted_out = ArithmeticSharedTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[:, 0] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
reference = tensor.clone()
reference[0, :] = tensor2
encrypted_out = ArithmeticSharedTensor(tensor)
encrypted2 = tensor_type(tensor2)
encrypted_out[0, :] = encrypted2
self._check(
encrypted_out, reference, "%s setitem failed" % type(encrypted2)
)
def test_pad(self):
sizes = [(1,), (5,), (1, 1), (5, 5), (5, 5, 5), (5, 3, 32, 32)]
pads = [
(0, 0, 0, 0),
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 1, 1, 1),
(2, 2, 1, 1),
(2, 2, 2, 2),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor = ArithmeticSharedTensor(tensor)
for pad in pads:
for value in [0, 1, 10]:
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
if tensor.dim() < 2:
pad = pad[:2]
reference = torch.nn.functional.pad(tensor, pad, value=value)
encrypted_value = tensor_type(value)
encrypted_out = encrypted_tensor.pad(pad, value=encrypted_value)
self._check(encrypted_out, reference, "pad failed")
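# As in torch.nn.functional.pad, the pad tuple is consumed in pairs starting
# from the last dimension: (left, right, top, bottom) for a 2-D tensor.
#   x = torch.ones(2, 2)
#   F.pad(x, (1, 0, 0, 0)).shape # -> torch.Size([2, 3])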
def test_broadcast(self):
"""Test broadcast functionality."""
arithmetic_functions = ["add", "sub", "mul", "div"]
arithmetic_sizes = [
(),
(1,),
(2,),
(1, 1),
(1, 2),
(2, 1),
(2, 2),
(1, 1, 1),
(1, 1, 2),
(1, 2, 1),
(2, 1, 1),
(2, 2, 2),
(1, 1, 1, 1),
(1, 1, 1, 2),
(1, 1, 2, 1),
(1, 2, 1, 1),
(2, 1, 1, 1),
(2, 2, 2, 2),
]
matmul_sizes = [(1, 1), (1, 5), (5, 1), (5, 5)]
batch_dims = [(), (1,), (5,), (1, 1), (1, 5), (5, 5)]
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
for func in arithmetic_functions:
for size1, size2 in itertools.combinations(arithmetic_sizes, 2):
tensor1 = get_random_test_tensor(size=size1, is_float=True)
tensor2 = get_random_test_tensor(size=size2, is_float=True)
# ArithmeticSharedTensors can't divide by negative private values;
# MPCTensor overrides division to allow negatives, so skip private
# division in this test
if func == "div" and tensor_type == ArithmeticSharedTensor:
continue
encrypted1 = ArithmeticSharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = getattr(tensor1, func)(tensor2)
encrypted_out = getattr(encrypted1, func)(encrypted2)
private = isinstance(encrypted2, ArithmeticSharedTensor)
self._check(
encrypted_out,
reference,
"%s %s broadcast failed"
% ("private" if private else "public", func),
)
for size in matmul_sizes:
for batch1, batch2 in itertools.combinations(batch_dims, 2):
size1 = (*batch1, *size)
size2 = (*batch2, *size)
tensor1 = get_random_test_tensor(size=size1, is_float=True)
tensor2 = get_random_test_tensor(size=size2, is_float=True)
tensor2 = tensor2.transpose(-2, -1)
encrypted1 = ArithmeticSharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
reference = tensor1.matmul(tensor2)
encrypted_out = encrypted1.matmul(encrypted2)
private = isinstance(encrypted2, ArithmeticSharedTensor)
self._check(
encrypted_out,
reference,
"%s matmul broadcast failed"
% ("private" if private else "public"),
)
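# Broadcasting here mirrors plain torch semantics: trailing dimensions are
# aligned and size-1 dimensions expand. A plaintext sketch:
#   a = torch.zeros(2, 1); b = torch.zeros(1, 2)
#   (a + b).shape # -> torch.Size([2, 2])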
def test_inplace(self):
"""Test inplace vs. out-of-place functions"""
for op in ["add", "sub", "mul", "div"]:
for tensor_type in [lambda x: x, ArithmeticSharedTensor]:
tensor1 = get_random_test_tensor(is_float=True)
tensor2 = get_random_test_tensor(is_float=True)
# ArithmeticSharedTensors can't divide by negative private values;
# MPCTensor overrides division to allow negatives, so skip private division
if op == "div" and tensor_type == ArithmeticSharedTensor:
continue
reference = getattr(torch, op)(tensor1, tensor2)
encrypted1 = ArithmeticSharedTensor(tensor1)
encrypted2 = tensor_type(tensor2)
input_plain_id = id(encrypted1.share)
input_encrypted_id = id(encrypted1)
# Test that out-of-place functions do not modify the input
private = isinstance(encrypted2, ArithmeticSharedTensor)
encrypted_out = getattr(encrypted1, op)(encrypted2)
self._check(
encrypted1,
tensor1,
"%s out-of-place %s modifies input"
% ("private" if private else "public", op),
)
self._check(
encrypted_out,
reference,
"%s out-of-place %s produces incorrect output"
% ("private" if private else "public", op),
)
self.assertFalse(id(encrypted_out.share) == input_plain_id)
self.assertFalse(id(encrypted_out) == input_encrypted_id)
# Test that in-place functions modify the input
encrypted_out = getattr(encrypted1, op + "_")(encrypted2)
self._check(
encrypted1,
reference,
"%s in-place %s_ does not modify input"
% ("private" if private else "public", op),
)
self._check(
encrypted_out,
reference,
"%s in-place %s_ produces incorrect output"
% ("private" if private else "public", op),
)
self.assertTrue(id(encrypted_out.share) == input_plain_id)
self.assertTrue(id(encrypted_out) == input_encrypted_id)
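# The identity checks above rely on the torch naming convention that a
# trailing underscore marks an in-place op: it must return the very same
# Python object and keep writing into the same underlying share, e.g.:
#   x = torch.ones(2); y = x.add_(1); assert y is x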
def test_control_flow_failure(self):
"""Tests that control flow fails as expected"""
tensor = get_random_test_tensor(is_float=True)
encrypted_tensor = ArithmeticSharedTensor(tensor)
with self.assertRaises(RuntimeError):
if encrypted_tensor:
pass
with self.assertRaises(RuntimeError):
tensor = 5 if encrypted_tensor else 0
with self.assertRaises(RuntimeError):
if False:
pass
elif encrypted_tensor:
pass
def test_src_failure(self):
"""Tests that out-of-bounds src fails as expected"""
tensor = get_random_test_tensor(is_float=True)
for src in [None, "abc", -2, self.world_size]:
with self.assertRaises(AssertionError):
ArithmeticSharedTensor(tensor, src=src)
def test_src_match_input_data(self):
"""Tests incorrect src in ArithmeticSharedTensor fails as expected"""
tensor = get_random_test_tensor(is_float=True)
tensor.src = 0
for testing_src in [None, "abc", -2, self.world_size]:
with self.assertRaises(AssertionError):
ArithmeticSharedTensor(tensor, src=testing_src)
def test_where(self):
"""Tests where() conditional element selection"""
sizes = [(10,), (5, 10), (1, 5, 10)]
y_types = [lambda x: x, ArithmeticSharedTensor]
for size, y_type in itertools.product(sizes, y_types):
tensor1 = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor1 = ArithmeticSharedTensor(tensor1)
tensor2 = get_random_test_tensor(size=size, is_float=True)
encrypted_tensor2 = y_type(tensor2)
condition_tensor = (
get_random_test_tensor(max_value=1, size=size, is_float=False) + 1
)
condition_encrypted = ArithmeticSharedTensor(condition_tensor)
condition_bool = condition_tensor.bool()
reference_out = tensor1.where(condition_bool, tensor2)
encrypted_out = encrypted_tensor1.where(condition_bool, encrypted_tensor2)
y_is_private = y_type == ArithmeticSharedTensor
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with public condition",
)
encrypted_out = encrypted_tensor1.where(
condition_encrypted, encrypted_tensor2
)
self._check(
encrypted_out,
reference_out,
f"{'private' if y_is_private else 'public'} y "
"where failed with private condition",
)
# test scalar y
scalar = get_random_test_tensor(max_value=0, size=[1], is_float=True)
self._check(
encrypted_tensor1.where(condition_bool, scalar),
tensor1.where(condition_bool, scalar),
"where failed against scalar y with public condition",
)
self._check(
encrypted_tensor1.where(condition_encrypted, scalar),
tensor1.where(condition_bool, scalar),
"where failed against scalar y with private condition",
)
def test_gather(self):
"""Test gather function of encrypted tensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
for dim in range(len(size)):
tensor = get_random_test_tensor(size=size, is_float=True)
index = get_random_test_tensor(size=size, is_float=False)
index = index.abs().clamp(0, 4)
encrypted = ArithmeticSharedTensor(tensor)
reference = tensor.gather(dim, index)
encrypted_out = encrypted.gather(dim, index)
self._check(encrypted_out, reference, f"gather failed with size {size}")
def test_split(self):
"""Test split function of encrypted tensor"""
sizes = [(5, 5), (5, 5, 5), (5, 5, 5, 5)]
for size in sizes:
for dim in range(len(size)):
tensor = get_random_test_tensor(size=size, is_float=True)
encrypted = ArithmeticSharedTensor(tensor)
for idx in range(6):
split = (idx, 5 - idx)
reference0, reference1 = tensor.split(split, dim=dim)
encrypted_out0, encrypted_out1 = encrypted.split(split, dim=dim)
self._check(
encrypted_out0, reference0, f"split failed with input {split}"
)
self._check(
encrypted_out1, reference1, f"split failed with input {split}"
)
split = (5,)
(reference,) = tensor.split(split, dim=dim)
(encrypted_out,) = encrypted.split(split, dim=dim)
self._check(
encrypted_out, reference, f"split failed with input {split}"
)
with self.assertRaises(RuntimeError):
encrypted_out.split((5, 1))
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_arithmetic.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import collections
import io
import logging
import os
import unittest
import crypten
import torch
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from crypten.nn import onnx_converter
from test.multiprocess_test_case import (
get_random_test_tensor,
MultiProcessTestCase,
onehot,
)
class TestOnnxConverter:
"""Tests PyTorch and Tensorflow model imports"""
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
if is_float_tensor(reference):
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.2)
test_passed = test_passed.gt(0).all().item() == 1
else:
test_passed = (tensor == reference).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def _check_reference_parameters(self, init_name, reference, model):
for name, param in model.named_parameters(recurse=False):
local_name = init_name + "_" + name
self._check(param, reference[local_name], "parameter update failed")
for name, module in model._modules.items():
local_name = init_name + "_" + name
self._check_reference_parameters(local_name, reference, module)
def _compute_reference_parameters(self, init_name, reference, model, learning_rate):
for name, param in model.named_parameters(recurse=False):
local_name = init_name + "_" + name
reference[local_name] = (
param.get_plain_text() - learning_rate * param.grad.get_plain_text()
)
for name, module in model._modules.items():
local_name = init_name + "_" + name
reference = self._compute_reference_parameters(
local_name, reference, module, learning_rate
)
return reference
def setUp(self):
super().setUp()
# We don't want the main process (rank -1) to initialize the communicator
if self.rank >= 0:
crypten.init()
"""
@unittest.skip("CrypTen no longer supports from_tensorflow")
def test_tensorflow_model_conversion(self):
import tensorflow as tf
import tf2onnx
# create simple model
model_tf1 = tf.keras.Sequential(
[
tf.keras.layers.Dense(
10,
activation=tf.nn.relu,
kernel_initializer="ones",
bias_initializer="ones",
input_shape=(4,),
),
tf.keras.layers.Dense(
10,
activation=tf.nn.relu,
kernel_initializer="ones",
bias_initializer="ones",
),
tf.keras.layers.Dense(3, kernel_initializer="ones"),
]
)
model_tf2 = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(
32,
3,
activation="relu",
strides=1,
kernel_initializer="ones",
bias_initializer="ones",
input_shape=(32, 32, 3),
),
tf.keras.layers.MaxPooling2D(3),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dropout(0.5),
]
)
model_tf3 = tf.keras.Sequential(
[
tf.keras.layers.Conv1D(
32,
1,
activation="relu",
strides=1,
kernel_initializer="ones",
bias_initializer="ones",
input_shape=(6, 128),
),
tf.keras.layers.AvgPool1D(1),
]
)
feature_sizes = [(1, 4), (1, 32, 32, 3), (1, 6, 128)]
label_sizes = [(1, 3), (1, 32), (1, 6, 32)]
for i, curr_model_tf in enumerate([model_tf1, model_tf2, model_tf3]):
# create a random feature vector
features = get_random_test_tensor(
size=feature_sizes[i], is_float=True, min_value=1, max_value=3
)
labels = get_random_test_tensor(
size=label_sizes[i], is_float=True, min_value=1
)
# convert to a TF tensor via numpy
features_tf = tf.convert_to_tensor(features.numpy())
labels_tf = tf.convert_to_tensor(labels.numpy())
# compute the tensorflow predictions
curr_model_tf.compile("sgd", loss=tf.keras.losses.MeanSquaredError())
curr_model_tf.fit(features_tf, labels_tf)
result_tf = curr_model_tf(features_tf, training=False)
# convert TF model to CrypTen model
# write as a SavedModel, then load GraphDef from it
import tempfile
saved_model_dir = tempfile.NamedTemporaryFile(delete=True).name
os.makedirs(saved_model_dir, exist_ok=True)
curr_model_tf.save(saved_model_dir)
graph_def, inputs, outputs = tf2onnx.tf_loader.from_saved_model(
saved_model_dir, None, None
)
model_enc = crypten.nn.from_tensorflow(graph_def, inputs, outputs)
# encrypt model and run it
model_enc.encrypt()
features_enc = crypten.cryptensor(features)
result_enc = model_enc(features_enc)
# compare the results
result = torch.tensor(result_tf.numpy())
self._check(result_enc, result, "nn.from_tensorflow failed")
"""
def test_from_pytorch_training_classification(self):
"""Tests from_pytorch CrypTen training for classification models"""
import torch.nn as nn
import torch.nn.functional as F
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=1)
self.fc1 = nn.Linear(16 * 13 * 13, 100)
self.fc2 = nn.Linear(100, 2)
def forward(self, x):
out = self.conv1(x)
out = F.relu(out)
out = F.max_pool2d(out, 2)
out = out.view(-1, 16 * 13 * 13)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
out = F.softmax(out, dim=1)
return out
model_plaintext = CNN()
batch_size = 5
x_orig = get_random_test_tensor(size=(batch_size, 1, 28, 28), is_float=True)
y_orig = (
get_random_test_tensor(size=(batch_size, 1), is_float=True).gt(0).long()
)
y_one_hot = onehot(y_orig, num_targets=2)
# encrypt training sample:
x_train = crypten.cryptensor(x_orig, requires_grad=True)
y_train = crypten.cryptensor(y_one_hot)
dummy_input = torch.empty((1, 1, 28, 28))
for loss_name in ["BCELoss", "CrossEntropyLoss"]:
# create encrypted model
model = crypten.nn.from_pytorch(model_plaintext, dummy_input)
model.train()
model.encrypt()
self._check_training(model, x_train, y_train, loss_name)
self._check_model_export(model, x_train)
def test_from_pytorch_training_regression(self):
"""Tests from_pytorch CrypTen training for regression models"""
import torch.nn as nn
import torch.nn.functional as F
class FeedForward(nn.Module):
def __init__(self):
super(FeedForward, self).__init__()
self.fc1 = nn.Linear(3, 10)
self.fc2 = nn.Linear(10, 1)
def forward(self, x):
out = self.fc1(x)
out = F.relu(out)
out = self.fc2(out)
return out
model_plaintext = FeedForward()
batch_size = 5
x_orig = get_random_test_tensor(size=(batch_size, 3), is_float=True)
dummy_input = torch.empty((1, 3))
# y is a linear combo of features 1 and 3
y_orig = 2 * x_orig[:, 0] + 3 * x_orig[:, 2]
x_train = crypten.cryptensor(x_orig, requires_grad=True)
y_train = crypten.cryptensor(y_orig.unsqueeze(-1))
# create encrypted model
model = crypten.nn.from_pytorch(model_plaintext, dummy_input)
model.train()
model.encrypt()
self._check_training(model, x_train, y_train, "MSELoss")
self._check_model_export(model, x_train)
def _check_training(
self, model, x_train, y_train, loss_name, num_epochs=2, learning_rate=0.001
):
"""Verifies gradient updates and loss decreases during training"""
# create loss function
loss = getattr(crypten.nn, loss_name)()
for i in range(num_epochs):
output = model(x_train)
loss_value = loss(output, y_train)
# set gradients to "zero"
model.zero_grad()
for param in model.parameters():
self.assertIsNone(param.grad, "zero_grad did not reset gradients")
# perform backward pass
loss_value.backward()
for param in model.parameters():
if param.requires_grad:
self.assertIsNotNone(
param.grad, "required parameter gradient not created"
)
# update parameters
orig_parameters, upd_parameters = {}, {}
orig_parameters = self._compute_reference_parameters(
"", orig_parameters, model, 0
)
model.update_parameters(learning_rate)
upd_parameters = self._compute_reference_parameters(
"", upd_parameters, model, learning_rate
)
# check parameter update
parameter_changed = False
for name, value in orig_parameters.items():
# note: the `param` loop variable from above would be stale here,
# so compare the recorded parameter snapshots directly
if not torch.allclose(upd_parameters[name], value):
parameter_changed = True
self.assertTrue(
parameter_changed, "no parameter changed in training step"
)
# record initial and current loss
if i == 0:
orig_loss = loss_value.get_plain_text()
curr_loss = loss_value.get_plain_text()
# check that the loss has decreased after training
self.assertTrue(
curr_loss.item() < orig_loss.item(),
f"{loss_name} has not decreased after training",
)
def _check_model_export(self, crypten_model, x_enc):
"""Checks that exported model returns the same results as crypten model"""
pytorch_model = crypten_model.decrypt().to_pytorch()
x_plain = x_enc.get_plain_text()
y_plain = pytorch_model(x_plain)
crypten_model.encrypt()
y_enc = crypten_model(x_enc)
self._check(y_enc, y_plain, msg="Model export failed.")
def test_get_operator_class(self):
"""Checks operator is a valid crypten module"""
Node = collections.namedtuple("Node", "op_type")
op_types = ["Sum", "AveragePool", "Mean"]
for op_type in op_types:
node = Node(op_type)
operator = onnx_converter._get_operator_class(node.op_type, {})
self.assertTrue(
issubclass(operator, crypten.nn.Module),
f"{op_type} operator class {operator} is not a CrypTen module.",
)
# check conv
kernel_shapes = [[1], [3, 3]]
node = Node("Conv")
for kernel_shape in kernel_shapes:
attributes = {"kernel_shape": kernel_shape}
operator = onnx_converter._get_operator_class(node.op_type, attributes)
# check invalid op_types
invalid_types = [("Convolution", {"kernel_shape": [3, 3, 3]}), ("Banana", {})]
for invalid_type, attr in invalid_types:
with self.assertRaises(ValueError):
node = Node(invalid_type)
operator = onnx_converter._get_operator_class(node.op_type, attr)
def test_export_pytorch_model(self):
"""Tests loading of onnx model from a file"""
pytorch_model = PyTorchLinear()
dummy_input = torch.empty(10, 10)
with io.BytesIO() as f:
onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)
def test_from_onnx(self):
"""Tests construction of crypten model from onnx graph"""
pytorch_model = PyTorchLinear()
dummy_input = torch.empty(10, 10)
with io.BytesIO() as f:
f = onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)
f.seek(0)
crypten_model = onnx_converter.from_onnx(f)
self.assertTrue(hasattr(crypten_model, "encrypt"))
def test_reshape_plain_text_conversion(self):
"""Verifies shape inputs in reshape are properly imported"""
class Net(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(4, 4)
def forward(self, x):
# (1, 4) is stored in a constant module
out = x.reshape(1, 4)
out = self.fc1(out)
return out
model = Net()
x = torch.ones(2, 2)
x_enc = crypten.cryptensor(x)
y = model(x)
model_crypten = onnx_converter.from_pytorch(model, torch.empty(x.shape))
model_crypten.encrypt()
y_enc = model_crypten(x_enc)
self.assertTrue(y_enc.shape == y.shape)
class PyTorchLinear(torch.nn.Module):
def __init__(self):
super().__init__()
self.fc1 = torch.nn.Linear(10, 1)
def forward(self, x):
x = self.fc1(x)
return x
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestOnnxConverter):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestOnnxConverter):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTTP, self).tearDown()
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_onnx_converter.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import logging
import unittest
from collections import namedtuple
import crypten
import torch
import torch.nn.functional as F
from crypten.common.tensor_types import is_float_tensor
from crypten.config import cfg
from crypten.gradients import AutogradContext
from test.multiprocess_test_case import (
get_random_test_tensor,
MultiProcessTestCase,
onehot,
)
# Sizes for tensor operations
SIZES = [
(),
(1,),
(3,),
(1, 1),
(1, 3),
(3, 1),
(3, 3),
(1, 1, 1),
(1, 1, 3),
(1, 3, 1),
(3, 1, 1),
(3, 3, 3),
(1, 1, 1, 1),
(1, 1, 3, 1),
(3, 3, 3, 3),
]
class TestGradients:
"""
This class tests all autograd functions implemented in gradients.py.
"""
def setUp(self):
super().setUp()
# We don't want the main process (rank -1) to initialize the communicator
if self.rank >= 0:
crypten.init()
def _check(self, encrypted_tensor, reference, msg, tolerance=None):
if tolerance is None:
tolerance = getattr(self, "default_tolerance", 0.05)
tensor = encrypted_tensor.get_plain_text()
# Check sizes match
self.assertTrue(tensor.size() == reference.size(), msg)
self.assertTrue(is_float_tensor(reference), "reference must be a float")
diff = (tensor - reference).abs_()
norm_diff = diff.div(tensor.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
test_passed = test_passed.gt(0).all().item() == 1
if not test_passed:
logging.info(msg)
logging.info("Result %s" % tensor)
logging.info("Result - Reference = %s" % (tensor - reference))
self.assertTrue(test_passed, msg=msg)
def _check_forward_backward(
self,
func_name,
input_tensor,
*args,
torch_func_name=None,
msg=None,
addl_args=None,
**kwargs,
):
"""Checks forward and backward against PyTorch
Args:
func_name (str): PyTorch/CrypTen function name
input_tensor (torch.tensor): primary input
args (list): contains arguments for function
msg (str): additional message for mismatch
kwargs (list): keyword arguments for function
"""
if msg is None:
msg = f"{func_name} grad_fn incorrect"
input = input_tensor.clone()
input.requires_grad = True
input_encr = crypten.cryptensor(input, requires_grad=True)
crypten_kwargs = copy.deepcopy(kwargs)
if addl_args is not None:
for item, val in addl_args.items():
crypten_kwargs[item] = val
for private in [False, True]:
input.grad = None
input_encr.grad = None
args = self._set_grad_to_zero(args)
args_encr = self._set_grad_to_zero(list(args), make_private=private)
# obtain torch function
if torch_func_name is not None:
torch_func = self._get_torch_func(torch_func_name)
else:
torch_func = self._get_torch_func(func_name)
reference = torch_func(input, *args, **kwargs)
encrypted_out = getattr(input_encr, func_name)(*args_encr, **crypten_kwargs)
# extract argmax output for max / min with keepdim=False
if isinstance(encrypted_out, (list, tuple)):
reference = reference[0]
encrypted_out = encrypted_out[0]
self._check(encrypted_out, reference, msg + " in forward")
# check backward pass
grad_output = get_random_test_tensor(
max_value=2, size=reference.size(), is_float=True
)
grad_output_encr = crypten.cryptensor(grad_output)
reference.backward(grad_output)
encrypted_out.backward(grad_output_encr)
self._check(input_encr.grad, input.grad, msg + " in backward")
for i, arg_encr in enumerate(args_encr):
if crypten.is_encrypted_tensor(arg_encr):
self._check(arg_encr.grad, args[i].grad, msg + " in backward args")
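# A minimal plaintext sketch of the comparison pattern driven by this helper
# (assumes crypten.init() was called; names are illustrative, not part of
# the tested API):
#   x = torch.randn(3, requires_grad=True)
#   x_enc = crypten.cryptensor(x.detach(), requires_grad=True)
#   x.mul(2.0).sum().backward() # PyTorch reference
#   out_enc = x_enc.mul(2.0).sum(); out_enc.backward() # CrypTen
#   assert torch.allclose(x_enc.grad.get_plain_text(), x.grad, atol=1e-2)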
def _set_grad_to_zero(self, args, make_private=False):
"""Sets gradients for args to zero
Args:
args (list of torch.tensors): contains arguments
make_private (bool): encrypt args using CrypTensor
"""
args_zero_grad = []
for arg in args:
if is_float_tensor(arg) and make_private:
arg = crypten.cryptensor(arg, requires_grad=True)
elif is_float_tensor(arg):
arg.requires_grad = True
arg.grad = None
args_zero_grad.append(arg)
return args_zero_grad
def _get_torch_func(self, func_name):
"""Returns PyTorch function from tensor or functional API"""
if hasattr(torch.Tensor, func_name):
return getattr(torch.Tensor, func_name)
elif hasattr(F, func_name):
return getattr(F, func_name)
else:
raise ValueError("unknown PyTorch function: %s" % func_name)
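# Resolution order matters: tensor methods win over the functional API,
# e.g. _get_torch_func("add") -> torch.Tensor.add, while
# _get_torch_func("conv1d") -> F.conv1d (conv1d is not a Tensor method).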
def test_arithmetic(self):
"""Tests arithmetic functions with broadcasting."""
arithmetic_functions = ["add", "sub", "mul"]
for func in arithmetic_functions:
# Test on operator
ofunc = "__" + func + "__"
# Test both left functions and right functions
rfunc = ofunc[:2] + "r" + ofunc[2:]
# Test on both float inputs and tensor inputs
for use_tensor in [False, True]:
for size1 in SIZES:
tensor1 = get_random_test_tensor(size=size1, is_float=True)
if use_tensor:
for size2 in SIZES:
tensor2 = get_random_test_tensor(size=size2, is_float=True)
self._check_forward_backward(func, tensor1, tensor2)
self._check_forward_backward(ofunc, tensor1, tensor2)
self._check_forward_backward(rfunc, tensor1, tensor2)
else:
scalar = 2.0
self._check_forward_backward(func, tensor1, scalar)
self._check_forward_backward(ofunc, tensor1, scalar)
self._check_forward_backward(rfunc, tensor1, scalar)
def test_div(self):
self._div_helper("div")
def test_truediv(self):
self._div_helper("__truediv__")
def test_rtruediv(self):
self._div_helper("__rtruediv__")
def _div_helper(self, func):
for size1 in SIZES:
tensor1 = get_random_test_tensor(size=size1, is_float=True)
for size2 in SIZES:
tensor2 = get_random_test_tensor(
min_value=0.5, size=size2, is_float=True
) # do not divide by value very close to zero
if func == "__rtruediv__":
# denominator is first argument for rtruediv
self._check_forward_backward(func, tensor2, tensor1)
else:
self._check_forward_backward(func, tensor1, tensor2)
if func == "__rtruediv__":
self._check_forward_backward(func, torch.tensor(2.0), tensor2)
else:
self._check_forward_backward(func, tensor1, 2.0)
def test_sum_mean_reductions(self):
reductions = ["sum", "mean"]
self._reductions_helper(reductions)
def test_max_min_reductions_pairwise(self):
reductions = ["max", "min"]
self._reductions_helper(reductions, "pairwise")
def test_max_min_reductions_log_reduction(self):
reductions = ["max", "min"]
self._reductions_helper(reductions, "log_reduction")
def test_max_min_reductions_double_log_reduction(self):
reductions = ["max", "min"]
self._reductions_helper(reductions, "double_log_reduction")
def test_max_min_reductions_accelerated_cascade(self):
reductions = ["max", "min"]
self._reductions_helper(reductions, "accelerated_cascade")
def _reductions_helper(self, input_reductions, method=None):
"""Tests input reductions on tensors of various sizes."""
for size in SIZES[: min(5, len(SIZES))]:
tensor = get_random_test_tensor(size=size, is_float=True)
for reduction in input_reductions:
if method is None:
self._check_forward_backward(reduction, tensor)
else:
with cfg.temp_override({"functions.max_method": method}):
self._check_forward_backward(reduction, tensor)
# Check dim 0 if tensor is 0-dimensional
dims = 1 if tensor.dim() == 0 else tensor.dim()
for dim in range(dims):
# check when keepdim is not provided as a kwarg
if method is None:
self._check_forward_backward(reduction, tensor, dim=dim)
else:
with cfg.temp_override({"functions.max_method": method}):
self._check_forward_backward(reduction, tensor, dim=dim)
# check when keepdim is provided as a kwarg
for keepdim in [False, True]:
if method is None:
self._check_forward_backward(
reduction, tensor, dim, keepdim=keepdim
)
self._check_forward_backward(
reduction, tensor, dim=dim, keepdim=keepdim
)
else:
with cfg.temp_override({"functions.max_method": method}):
self._check_forward_backward(
reduction, tensor, dim, keepdim=keepdim
)
self._check_forward_backward(
reduction, tensor, dim=dim, keepdim=keepdim
)
def test_matmul(self):
"""Test matmul with broadcasting."""
matmul_sizes = [(1, 1), (1, 5), (5, 1), (5, 5)]
batch_dims = [(), (1,), (5,), (1, 1), (1, 5), (5, 5)]
matched_sizes = [
((1,), (1,)),
((10,), (10,)),
((10,), (10, 5)),
((5, 10), (10,)),
]
matmul_funcs = ["matmul", "__matmul__", "__imatmul__"]
torch_funcs = ["matmul", "__matmul__", "__matmul__"]
for i, func in enumerate(matmul_funcs):
for size in matmul_sizes:
for batch1, batch2 in itertools.combinations(batch_dims, 2):
size1 = (*batch1, *size)
size2 = (*batch2, *size)
tensor1 = get_random_test_tensor(size=size1, is_float=True)
tensor2 = get_random_test_tensor(size=size2, is_float=True)
tensor2 = tensor2.transpose(-2, -1)
self._check_forward_backward(
func, tensor1, tensor2, torch_func_name=torch_funcs[i]
)
for sizes in matched_sizes:
tensor1 = get_random_test_tensor(size=sizes[0], is_float=True)
tensor2 = get_random_test_tensor(size=sizes[1], is_float=True)
self._check_forward_backward(
func, tensor1, tensor2, torch_func_name=torch_funcs[i]
)
def test_unary_functions(self):
"""Test unary functions on tensors of various sizes."""
unary_functions = [
"neg",
"__neg__",
"exp",
"reciprocal",
"abs",
"__abs__",
"sign",
"relu",
"sin",
"cos",
"sigmoid",
"tanh",
"log",
"sqrt",
]
pos_only_functions = ["log", "sqrt"]
for func in unary_functions:
for size in SIZES:
tensor = get_random_test_tensor(size=size, is_float=True)
# Make tensor positive when positive inputs are required
if func in pos_only_functions:
tensor = tensor.abs()
self._check_forward_backward(func, tensor)
def test_hardtanh(self):
tensor = torch.arange(-10, 10, dtype=torch.float32)
for minval in range(-10, 10):
for maxval in range(minval, 11):
self._check_forward_backward("hardtanh", tensor, minval, maxval)
self._check_forward_backward("relu6", tensor)
def test_inplace_warning(self):
"""Tests that a warning is thrown that indicates that the `inplace` kwarg
is ignored when a function is called with `inplace=True`
"""
tensor = get_random_test_tensor(is_float=True)
encrypted = crypten.cryptensor(tensor)
functions = ["dropout", "_feature_dropout"]
for func in functions:
warning_str = (
f"CrypTen {func} does not support inplace computation during training."
)
with self.assertLogs(logger=logging.getLogger(), level="WARNING") as cm:
getattr(encrypted, func)(inplace=True)
self.assertTrue(f"WARNING:root:{warning_str}" in cm.output)
def test_dot_ger(self):
"""Test inner and outer products of encrypted tensors."""
for length in range(1, 10):
tensor1 = get_random_test_tensor(size=(length,), is_float=True)
tensor2 = get_random_test_tensor(size=(length,), is_float=True)
self._check_forward_backward("dot", tensor1, tensor2)
self._check_forward_backward("ger", tensor1, tensor2)
def test_squeeze_unsqueeze(self):
"""Test addition and removal of tensor dimensions"""
for size in SIZES:
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("squeeze", tensor)
for dim in range(tensor.dim()):
self._check_forward_backward("squeeze", tensor, dim)
self._check_forward_backward("unsqueeze", tensor, dim)
# Check unsqueeze on last dimension
self._check_forward_backward("unsqueeze", tensor, tensor.dim())
def test_softmax(self):
"""Test softmax"""
for size in SIZES:
tensor = get_random_test_tensor(size=size, is_float=True)
# Check dim 0 if tensor is 0-dimensional
dims = 1 if tensor.dim() == 0 else tensor.dim()
for dim in range(dims):
self._check_forward_backward("softmax", tensor, dim)
def test_log_softmax(self):
"""Test log_softmax"""
for size in SIZES:
tensor = get_random_test_tensor(size=size, is_float=True)
# Check dim 0 if tensor is 0-dimensional
dims = 1 if tensor.dim() == 0 else tensor.dim()
for dim in range(dims):
self._check_forward_backward("log_softmax", tensor, dim)
def test_transpose(self):
for size in SIZES:
tensor = get_random_test_tensor(size=size, is_float=True)
if tensor.dim() == 2: # t() asserts dim == 2
self._check_forward_backward("t", tensor)
for dim0 in range(tensor.dim()):
for dim1 in range(tensor.dim()):
self._check_forward_backward("transpose", tensor, dim0, dim1)
def test_permute(self):
for ndims in range(5):
size = tuple([3] * ndims)
tensor = get_random_test_tensor(size=size, is_float=True)
for perm in itertools.permutations(list(range(ndims))):
self._check_forward_backward("permute", tensor, perm)
def test_conv1d_smaller_signal_one_channel(self):
self._conv1d(5, 1)
def test_conv1d_smaller_signal_many_channels(self):
self._conv1d(5, 5)
def test_conv1d_larger_signal_one_channel(self):
self._conv1d(16, 1)
def test_conv1d_larger_signal_many_channels(self):
self._conv1d(16, 5)
def _conv1d(self, signal_size, in_channels):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
nout_channels = [1, 5]
kernel_sizes = [1, 2, 3]
paddings = [0, 1]
strides = [1, 2]
dilations = [1, 2]
groupings = [1, 2]
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches,
kernel_sizes,
nout_channels,
paddings,
strides,
dilations,
groupings,
):
# TODO: Fix conv1d gradient in this case:
if in_channels > 1 and groups > 1:
continue
size = (batches, in_channels * groups, signal_size)
signal = get_random_test_tensor(size=size, is_float=True)
kernel_size = (out_channels * groups, in_channels, kernel_size)
kernel = get_random_test_tensor(size=kernel_size, is_float=True)
self._check_forward_backward(
"conv1d",
signal,
kernel,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
def test_conv2d_square_image_one_channel(self):
self._conv2d((5, 5), 1)
def test_conv2d_square_image_many_channels(self):
self._conv2d((5, 5), 5)
def test_conv2d_rectangular_image_one_channel(self):
self._conv2d((16, 7), 1)
def test_conv2d_rectangular_image_many_channels(self):
self._conv2d((16, 7), 5)
def _conv2d(self, image_size, in_channels):
"""Test convolution of encrypted tensor with public/private tensors."""
nbatches = [1, 3]
kernel_sizes = [(1, 1), (2, 2), (2, 3)]
ochannels = [1, 3]
paddings = [0, 1, (0, 1)]
strides = [1, 2, (1, 2)]
dilations = [1, 2, (1, 2)]
groupings = [1, 2]
for (
batches,
kernel_size,
out_channels,
padding,
stride,
dilation,
groups,
) in itertools.product(
nbatches, kernel_sizes, ochannels, paddings, strides, dilations, groupings
):
# TODO: Fix conv2d gradient in this case:
if in_channels > 1 and groups > 1:
continue
size = (batches, in_channels * groups, *image_size)
image = get_random_test_tensor(size=size, is_float=True)
kernel_size = (out_channels * groups, in_channels, *kernel_size)
kernel = get_random_test_tensor(size=kernel_size, is_float=True)
self._check_forward_backward(
"conv2d",
image,
kernel,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
)
def test_max_pool2d(self):
"""Tests max pooling gradient"""
self._check_pooling("max_pool2d")
def test_avg_pool2d(self):
"""Tests average pooling gradient"""
self._check_pooling("avg_pool2d")
def _check_pooling(self, func):
"""Helper for testing pooling gradients to avoid test timeouts"""
image_sizes = [(5, 5), (6, 7)]
nchannels = [1, 3]
nbatches = [1, 3]
kernel_sizes = [1, 2, (2, 3)]
paddings = [1, (0, 0)]
strides = [1, (2, 2)]
dilations = [1, 2]
ceil_modes = [False, True] if func == "max_pool2d" else [False]
for image_size, channels, batches, kernel_size in itertools.product(
image_sizes, nchannels, nbatches, kernel_sizes
):
size = (batches, channels, *image_size)
image = get_random_test_tensor(size=size, is_float=True)
for padding, stride, ceil_mode in itertools.product(
paddings, strides, ceil_modes
):
# Skip invalid padding sizes
if kernel_size == 1 and padding == 1:
continue
if func == "max_pool2d":
for dilation in dilations:
self._check_max_pool2d_forward_backward(
image, kernel_size, padding, stride, dilation, ceil_mode
)
else:
self._check_forward_backward(
func, image, kernel_size, padding=padding, stride=stride
)
def _check_max_pool2d_forward_backward(
self, image, kernel_size, padding, stride, dilation, ceil_mode, tol=0.1
):
"""Checks forward and backward are for max pool 2d.
Verifies gradients by checking sum of non-matching elements to account for
differences in tie resolution in max between PyTorch and CrypTen:
PyTorch returns smallest index for max entries,
whereas CrypTen returns a random index.
Args:
image (torch.tensor): input
kernel_size (tuple of ints): size of the window over which to compute max
padding (int or tuple of ints): implicit zero padding to added on both sides
stride (int or tuple of ints): the stride of the window
ceil_mode (bool): determines whether output size is rounded down or up
"""
# check forward
image = image.clone()
image.requires_grad = True
image_enc = crypten.cryptensor(image, requires_grad=True)
out = torch.nn.functional.max_pool2d(
image,
kernel_size,
padding=padding,
stride=stride,
dilation=dilation,
ceil_mode=ceil_mode,
)
out_enc = image_enc.max_pool2d(
kernel_size,
padding=padding,
stride=stride,
dilation=dilation,
ceil_mode=ceil_mode,
)
if out.isinf().any():
# PyTorch can produce improperly sized outputs with Inf values using ceil_mode in some cases
if ceil_mode:
return
self.assertTrue(
out.size() == out_enc.size(), "max_pool2d forward incorrect"
)
return # backward will break if output is -inf
else:
self._check(out_enc, out, "max_pool2d forward incorrect")
# check backward
grad_output = get_random_test_tensor(size=out.size(), is_float=True)
grad_output_enc = crypten.cryptensor(grad_output)
out.backward(grad_output)
out_enc.backward(grad_output_enc)
# check sum of non-matching gradient entries
crypten_grad = image_enc.grad.get_plain_text()
non_matching_indices = (image.grad - crypten_grad).abs() > tol
sum_is_close = (
crypten_grad[non_matching_indices].sum()
- image.grad[non_matching_indices].sum()
) < tol
if not sum_is_close:
msg = "max_pool2d backward failed"
logging.info(msg)
logging.info(f"Result: crypten image gradient {crypten_grad}")
logging.info(f"Result - Reference {image.grad - crypten_grad}")
self.assertTrue(sum_is_close, msg=msg)
def test_square(self):
"""Tests square function gradient.
Note: torch pow(2) is used to verify gradient,
since PyTorch does not implement square().
"""
for size in SIZES:
tensor = get_random_test_tensor(size=size, is_float=True)
tensor.requires_grad = True
tensor_encr = crypten.cryptensor(tensor, requires_grad=True)
out = tensor.pow(2)
out_encr = tensor_encr.square()
self._check(out_encr, out, f"square forward failed with size {size}")
grad_output = get_random_test_tensor(size=out.shape, is_float=True)
out.backward(grad_output)
out_encr.backward(crypten.cryptensor(grad_output))
self._check(
tensor_encr.grad,
tensor.grad,
f"square backward failed with size {size}",
)
def test_pow(self):
self._pow_helper("pow")
def test_magic_pow(self):
self._pow_helper("__pow__")
def _pow_helper(self, pow_fn):
for size in SIZES:
tensor = get_random_test_tensor(size=size, min_value=0.5, is_float=True)
for power in [-3, -2, -1, 0, 1, 2, 3]:
self._check_forward_backward(pow_fn, tensor, power)
self._check_forward_backward(pow_fn, tensor, float(power))
def test_norm(self):
"""Tests p-norm"""
self.default_tolerance *= 2 # Increase tolerance for norm test
for p in [1, 1.5, 2, 3, float("inf"), "fro"]:
tensor = get_random_test_tensor(max_value=2, size=(3, 3, 3), is_float=True)
self._check_forward_backward("norm", tensor, p=p)
for dim in [0, 1, 2]:
self._check_forward_backward("norm", tensor, p=p, dim=dim)
def test_pad(self):
"""Tests padding"""
sizes = [(1,), (5,), (1, 1), (5, 5), (5, 5, 5), (5, 3, 32, 32)]
pads = [
            # (0, 0, 0, 0), NOTE: PyTorch backward fails when padding is all 0s
(1, 0, 0, 0),
(0, 1, 0, 0),
(0, 0, 1, 0),
(0, 0, 0, 1),
(1, 1, 1, 1),
(2, 2, 1, 1),
(2, 2, 2, 2),
]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
for pad in pads:
if tensor.dim() < 2:
pad = pad[:2]
                # NOTE: PyTorch backward fails when padding is all 0s
if pad[0] == 0 and pad[1] == 0:
continue
for value in [0, 1, 10]:
self._check_forward_backward("pad", tensor, pad, value=value)
def test_clone(self):
"""Tests shallow_copy and clone of encrypted tensors."""
sizes = [(5,), (1, 5), (5, 10, 15)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("clone", tensor)
def test_cat_stack(self):
for module in [crypten, torch]: # torch.cat on CrypTensor runs crypten.cat
for func in ["cat", "stack"]:
for dimensions in range(1, 5):
size = [5] * dimensions
for num_tensors in range(1, 5):
for dim in range(dimensions):
tensors = [
get_random_test_tensor(size=size, is_float=True)
for _ in range(num_tensors)
]
encrypted_tensors = [
crypten.cryptensor(t, requires_grad=True)
for t in tensors
]
for i in range(len(tensors)):
tensors[i].grad = None
tensors[i].requires_grad = True
encrypted_tensors[i].grad = None
encrypted_tensors[i].requires_grad = True
# Forward
reference = getattr(torch, func)(tensors, dim=dim)
encrypted_out = getattr(module, func)(
encrypted_tensors, dim=dim
)
self._check(
encrypted_out, reference, f"{func} forward failed"
)
# Backward
grad_output = get_random_test_tensor(
size=reference.size(), is_float=True
)
encrypted_grad_output = crypten.cryptensor(grad_output)
reference.backward(grad_output)
encrypted_out.backward(encrypted_grad_output)
for i in range(len(tensors)):
self._check(
encrypted_tensors[i].grad,
tensors[i].grad,
f"{func} backward failed",
)
def test_dropout(self):
"""Tests forward for dropout"""
# Create a separate test for dropout since it cannot use the
# regular forward function
        # There is no need to check backward since PyTorch's backward fails here
all_prob_values = [x * 0.2 for x in range(0, 5)]
for dropout_fn in ["dropout", "_feature_dropout"]:
for prob in all_prob_values:
for size in [(5, 10), (5, 10, 15), (5, 10, 15, 20)]:
for use_zeros in [False, True]:
tensor = get_random_test_tensor(
size=size, ex_zero=True, min_value=1.0, is_float=True
)
if use_zeros:
# turn the first row to all zeros
index = [1] + [
slice(0, tensor.size(i)) for i in range(1, tensor.dim())
]
tensor[index] = 0.0
encr_tensor = crypten.cryptensor(tensor, requires_grad=True)
encr_tensor_out = getattr(encr_tensor, dropout_fn)(p=prob)
dropout_tensor = encr_tensor_out.get_plain_text()
# Check the scaling for non-zero elements
scaled_tensor = tensor / (1 - prob)
reference = dropout_tensor.where(
dropout_tensor == 0.0, scaled_tensor
)
self._check(
encr_tensor_out,
reference,
"dropout failed with size {}, use_zeros {}, and "
"probability {}".format(size, use_zeros, prob),
)
def test_batchnorm(self):
"""
Tests batchnorm forward and backward steps with training on / off.
"""
tolerance = 0.1
sizes = [(8, 5), (16, 3), (32, 5), (8, 6, 4), (8, 4, 3, 5)]
torch.autograd.set_detect_anomaly(True)
for size in sizes:
for is_training in (False, True):
# sample input data, weight, and bias:
tensor = get_random_test_tensor(size=size, is_float=True)
encrypted_input = crypten.cryptensor(tensor)
C = size[1]
weight = get_random_test_tensor(size=[C], max_value=1, is_float=True)
bias = get_random_test_tensor(size=[C], max_value=1, is_float=True)
weight.requires_grad = True
bias.requires_grad = True
# dimensions over which means and variances are computed:
stats_dimensions = list(range(tensor.dim()))
stats_dimensions.pop(1)
# dummy running mean and variance:
running_mean = tensor.mean(stats_dimensions).detach()
running_var = tensor.var(stats_dimensions).detach()
enc_running_mean = crypten.cryptensor(running_mean)
enc_running_var = crypten.cryptensor(running_var)
# compute reference output:
tensor.requires_grad = True
reference = torch.nn.functional.batch_norm(
tensor,
running_mean,
running_var,
weight=weight,
bias=bias,
training=is_training,
)
# compute CrypTen output:
encrypted_input.requires_grad = True
ctx = AutogradContext()
batch_norm_fn = crypten.gradients.get_grad_fn("batchnorm")
with crypten.no_grad():
encrypted_out = batch_norm_fn.forward(
ctx,
encrypted_input,
weight,
bias,
training=is_training,
running_mean=enc_running_mean,
running_var=enc_running_var,
)
# check forward
self._check(
encrypted_out,
reference,
"batchnorm forward failed with training "
f"{is_training} on {tensor.dim()}-D",
tolerance=tolerance,
)
# check backward (input, weight, and bias gradients):
grad_input = get_random_test_tensor(
size=reference.size(), is_float=True
)
reference.backward(grad_input)
with crypten.no_grad():
enc_grad_input = crypten.cryptensor(grad_input)
encrypted_grad = batch_norm_fn.backward(ctx, enc_grad_input)
TorchGrad = namedtuple("TorchGrad", ["name", "value"])
torch_gradients = [
TorchGrad("input gradient", tensor.grad),
TorchGrad("weight gradient", weight.grad),
TorchGrad("bias gradient", bias.grad),
]
for i, torch_gradient in enumerate(torch_gradients):
self._check(
encrypted_grad[i],
torch_gradient.value,
f"batchnorm backward {torch_gradient.name} failed "
f"with training {is_training} on {tensor.dim()}-D",
tolerance=tolerance,
)
def test_cross_entropy(self):
"""Tests cross_entropy and binary_cross_entropy"""
sizes = [(3, 2), (8, 4), (5, 10)]
losses = [
"binary_cross_entropy",
"binary_cross_entropy_with_logits",
"cross_entropy",
]
for size, loss in itertools.product(sizes, losses):
for skip_forward in [False, True]:
batch_size, num_targets = size
if loss in ["binary_cross_entropy", "binary_cross_entropy_with_logits"]:
if loss == "binary_cross_entropy":
tensor = get_random_test_tensor(
size=(batch_size,), max_value=0.998, is_float=True
)
tensor = tensor.abs().add_(0.001)
else:
tensor = get_random_test_tensor(
size=(batch_size,), is_float=True
)
target = get_random_test_tensor(size=(batch_size,), is_float=True)
target = target.gt(0.0).float()
target_encr = crypten.cryptensor(target)
else:
tensor = get_random_test_tensor(size=size, is_float=True)
target = get_random_test_tensor(
size=(batch_size,), max_value=num_targets - 1
)
target = onehot(target.abs(), num_targets=num_targets)
target_encr = crypten.cryptensor(target)
# CrypTen, unlike PyTorch, uses one-hot targets
target = target.argmax(1)
# forward
tensor.requires_grad = True
tensor_encr = crypten.cryptensor(tensor, requires_grad=True)
reference = getattr(torch.nn.functional, loss)(tensor, target)
out_encr = getattr(tensor_encr, loss)(
target_encr, skip_forward=skip_forward
)
if not skip_forward:
self._check(out_encr, reference, f"{loss} forward failed")
# backward
reference.backward()
out_encr.backward()
self._check(tensor_encr.grad, tensor.grad, f"{loss} backward failed")
def test_rappor_loss(self):
"""Tests RAPPOR Loss"""
sizes = [(3,), (8,), (5,)]
alphas = [0.1, 0.3, 0.4]
for size, alpha in itertools.product(sizes, alphas):
for skip_forward in [True, False]:
tensor = get_random_test_tensor(size=size, is_float=True)
target = get_random_test_tensor(size=size, is_float=True)
target = target.gt(0.0).float()
target_encr = crypten.cryptensor(target)
# forward
tensor.requires_grad = True
tensor_encr = crypten.cryptensor(tensor, requires_grad=True)
reference = tensor.sigmoid()
reference = alpha * reference + (1 - alpha) * (1 - reference)
reference = torch.nn.functional.binary_cross_entropy(reference, target)
out_encr = tensor_encr.rappor_loss(
target_encr, alpha, skip_forward=skip_forward
)
if not skip_forward:
self._check(out_encr, reference, "rappor_loss forward failed")
# backward
reference.backward()
out_encr.backward()
self._check(
tensor_encr.grad, tensor.grad, "rappor_loss backward failed"
)
def test_cosine_similarity(self):
"""Tests cosine_similarity"""
for size in SIZES:
tensor0 = get_random_test_tensor(size=size, is_float=True)
tensor1 = get_random_test_tensor(size=size, is_float=True)
# Check dim 0 if tensor is 0-dimensional
dims = 1 if len(size) == 0 else len(size)
for dim in range(dims):
self._check_forward_backward(
"cosine_similarity", tensor0, tensor1, dim=dim
)
def test_view_reshape(self):
"""Tests view and reshape gradients"""
size_to_views = {
(10,): [(5, 2), (1, 10)],
(10, 5): [(50), (2, 5, 5)],
(5, 10, 8): [(400), (50, 8), (5, 5, 2, 8)],
}
for size in size_to_views:
for view in size_to_views[size]:
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("view", tensor, view)
self._check_forward_backward("reshape", tensor, view)
def test_narrow_flatten(self):
"""Tests narrow and flatten gradients"""
sizes = [(10,), (5, 4), (10, 6, 8)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("flatten", tensor)
for dim in range(tensor.dim()):
self._check_forward_backward("narrow", tensor, dim, 0, 2)
self._check_forward_backward("narrow", tensor, dim, 1, 3)
def test_flip(self):
"""Tests flip gradient"""
sizes = [(2, 3, 7, 2), (5, 10, 15)]
flips = [(0, 2, 1), (0, 1)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
for flip in flips:
self._check_forward_backward("flip", tensor, flip)
def test_gather_scatter(self):
"""Tests gather and scatter gradients"""
sizes = [(2, 2), (3, 5), (3, 5, 10)]
indices = [[0, 1, 0, 0], [0, 1, 0, 0, 1] * 3, [0, 0, 1] * 50]
dims = [0, 1]
funcs = ["scatter", "gather"]
for dim, func in itertools.product(dims, funcs):
for size, index in zip(sizes, indices):
tensor = get_random_test_tensor(size=size, is_float=True)
index = torch.tensor(index).reshape(tensor.shape)
tensor.requires_grad = True
tensor_encr = crypten.cryptensor(tensor, requires_grad=True)
if func == "gather":
reference = getattr(tensor, func)(dim, index)
out_encr = getattr(tensor_encr, func)(dim, index)
else:
src = get_random_test_tensor(size=index.shape, is_float=True)
reference = getattr(tensor, func)(dim, index, src)
out_encr = getattr(tensor_encr, func)(dim, index, src)
self._check(
out_encr, reference, f"{func} forward failed with index {index}"
)
grad_out = get_random_test_tensor(size=reference.shape, is_float=True)
grad_out_encr = crypten.cryptensor(grad_out)
reference.backward(grad_out)
out_encr.backward(grad_out_encr)
self._check(
tensor_encr.grad,
tensor.grad,
f"{func} backward failed with index {index}",
)
def test_index_select(self):
"""Tests index_select gradients"""
sizes = [(2, 2), (3, 5), (3, 5, 10), (4, 8, 2, 5)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
for dim in range(len(size)):
for index_size in range(size[dim]):
index = get_random_test_tensor(
max_value=(size[dim] - 1),
min_value=0,
size=(index_size,),
is_float=False,
)
self._check_forward_backward("index_select", tensor, dim, index)
def test_take(self):
"""Tests take gradients"""
sizes = [(10,), (5, 10), (2, 5, 10)]
indices = [[0], [0, 5], [0, 2, 5, 8]]
for size, index in itertools.product(sizes, indices):
tensor = get_random_test_tensor(size=size, is_float=True)
index = torch.tensor(index)
self._check_forward_backward("take", tensor, index)
def test_roll(self):
"""Tests roll gradients"""
sizes = [(1, 10), (5, 10), (2, 5, 10)]
shifts = [1, 3, (1, 2)]
dims = [0, 1, (0, 1)]
for size, shift_dim in itertools.product(sizes, zip(shifts, dims)):
shift, dim = shift_dim
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("roll", tensor, shift, dim)
def test_cumsum(self):
"""Tests cumsum gradient"""
sizes = [(), (10,), (5, 10), (2, 5, 10)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
for dim in range(tensor.dim()):
self._check_forward_backward("cumsum", tensor, dim)
def test_trace(self):
"""Tests trace gradient"""
sizes = [(1, 1), (3, 3), (10, 10)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("trace", tensor)
def test_var(self):
"""Tests var gradient"""
sizes = [(10,), (1, 10), (5, 10), (2, 5, 10)]
for size in sizes:
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("var", tensor)
for unbiased in [False, True]:
self._check_forward_backward("var", tensor, unbiased=unbiased)
for dim, keepdim in itertools.product(range(len(size)), [False, True]):
# skip dimensions with 1 element
if size[dim] == 1:
continue
self._check_forward_backward(
"var", tensor, dim, unbiased=unbiased, keepdim=keepdim
)
def test_getitem(self):
"""Tests getitem gradient"""
sizes = [(10,), (10, 1), (5, 10), (5, 2, 10)]
indices = [0, 1, 3]
for size, index in itertools.product(sizes, indices):
tensor = get_random_test_tensor(size=size, is_float=True)
self._check_forward_backward("__getitem__", tensor, index)
def test_pos_pow(self):
"""Test gradient crypten pos_pow"""
for power in [3, -2, 1.75]:
# ensure base is positive for pos_pow
tensor = get_random_test_tensor(is_float=True, max_value=2) + 4
tensor.requires_grad = True
tensor_encr = crypten.cryptensor(tensor, requires_grad=True)
reference = tensor.pow(power)
out_encr = tensor_encr.pos_pow(power)
self._check(
out_encr, reference, f"pos_pow forward failed with power {power}"
)
grad_out = get_random_test_tensor(is_float=True)
grad_out_encr = crypten.cryptensor(grad_out)
reference.backward(grad_out)
out_encr.backward(grad_out_encr)
self._check(
tensor_encr.grad,
tensor.grad,
f"pos_pow backward failed with power {power}",
)
def test_polynomial(self):
for terms in range(1, 5):
for encrypt_coeffs in [False, True]:
tensor = get_random_test_tensor(is_float=True)
tensor.requires_grad = True
tensor_encr = crypten.cryptensor(tensor, requires_grad=True)
coeffs_size = (terms,)
coeffs = get_random_test_tensor(size=coeffs_size, is_float=True)
reference = (
tensor.unsqueeze(0)
.pow(torch.arange(terms).add(1).view([terms] + [1] * terms))
.mul(coeffs.view([terms] + [1] * terms))
.sum(0)
.view(tensor.size())
)
if encrypt_coeffs:
coeffs = crypten.cryptensor(coeffs)
out_encr = tensor_encr.polynomial(coeffs)
self._check(out_encr, reference, "polynomial forward failed")
grad_out = get_random_test_tensor(size=reference.size(), is_float=True)
grad_out_encr = crypten.cryptensor(grad_out)
reference.backward(grad_out)
out_encr.backward(grad_out_encr)
self._check(
tensor_encr.grad,
tensor.grad,
"polynomial backward failed",
)
# Run all unit tests with both TFP and TTP providers
class TestTFP(MultiProcessTestCase, TestGradients):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TFP"
super(TestTFP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTFP, self).tearDown()
class TestTTP(MultiProcessTestCase, TestGradients):
def setUp(self):
self._original_provider = cfg.mpc.provider
cfg.mpc.provider = "TTP"
super(TestTTP, self).setUp()
def tearDown(self):
cfg.mpc.provider = self._original_provider
super(TestTTP, self).tearDown()
class TestPTT(unittest.TestCase, TestGradients):
def setUp(self):
self.default_tolerance = 0.5
self._original_backend = crypten.get_default_cryptensor_type()
crypten.set_default_cryptensor_type("ptt")
super(TestPTT, self).setUp()
crypten.init()
def tearDown(self):
crypten.set_default_cryptensor_type(self._original_backend)
        super(TestPTT, self).tearDown()
# This code only runs when executing the file outside the test harness
if __name__ == "__main__":
unittest.main()
| CrypTen-main | test/test_gradients.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from crypten import CrypTensor, register_cryptensor
@register_cryptensor("ptt")
class PyTorchTensor(CrypTensor):
"""
    CrypTensor class that uses plaintext PyTorch tensors as the underlying backend.
    This class should only be used for testing purposes.
"""
def __init__(self, tensor, device=None, *args, **kwargs):
        # take requires_grad from kwargs, input tensor, or set to False:
default = tensor.requires_grad if torch.is_tensor(tensor) else False
requires_grad = kwargs.pop("requires_grad", default)
# call CrypTensor constructor:
super().__init__(requires_grad=requires_grad)
if device is None:
device = torch.device("cpu")
if not torch.is_tensor(tensor):
tensor = torch.tensor(tensor, device=device)
else:
tensor = tensor.detach().to(device=device)
tensor.requires_grad = False
self._tensor = tensor
def get_plain_text(self):
return self._tensor
def shallow_copy(self):
result = PyTorchTensor([])
result._tensor = self._tensor
return result
def clone(self):
result = PyTorchTensor([])
result._tensor = self._tensor.clone()
return result
def copy_(self, other):
"""Copies value of other PyTorchTensor into this PyTorchTensor."""
assert isinstance(other, PyTorchTensor), "other must be PyTorchTensor"
self._tensor = other._tensor
def add(self, tensor):
result = self.clone()
tensor = getattr(tensor, "_tensor", tensor)
result._tensor = result._tensor + tensor
return result
def neg(self):
result = self.clone()
result._tensor.neg_()
return result
def mul(self, tensor):
result = self.clone()
tensor = getattr(tensor, "_tensor", tensor)
result._tensor = result._tensor * tensor
return result
def div(self, tensor):
result = self.clone()
tensor = getattr(tensor, "_tensor", tensor)
result._tensor = result._tensor / tensor
return result
def matmul(self, tensor):
result = self.clone()
tensor = getattr(tensor, "_tensor", tensor)
result._tensor = result._tensor @ tensor
return result
def conv1d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = getattr(kernel, "_tensor", kernel)
result._tensor = torch.nn.functional.conv1d(
result._tensor, kernel, *args, **kwargs
)
return result
def conv2d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = getattr(kernel, "_tensor", kernel)
result._tensor = torch.nn.functional.conv2d(
result._tensor, kernel, *args, **kwargs
)
return result
def conv_transpose1d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = getattr(kernel, "_tensor", kernel)
result._tensor = torch.nn.functional.conv_transpose1d(
result._tensor, kernel, *args, **kwargs
)
return result
def conv_transpose2d(self, kernel, *args, **kwargs):
result = self.clone()
kernel = getattr(kernel, "_tensor", kernel)
result._tensor = torch.nn.functional.conv_transpose2d(
result._tensor, kernel, *args, **kwargs
)
return result
def avg_pool2d(self, kernel_size, stride=None, padding=0):
result = self.clone()
result._tensor = torch.nn.functional.avg_pool2d(
result._tensor, kernel_size, stride=stride, padding=padding
)
return result
@property
def dtype(self):
return self._tensor.dtype
def _ltz(self):
"""Returns 1 for elements that are < 0 and 0 otherwise"""
result = self.clone()
result._tensor = result._tensor.lt(0).to(self.dtype)
return result
@staticmethod
def rand(*sizes, device=None):
"""
        Returns a tensor with elements uniformly sampled in [0, 1). Since this
        backend wraps plaintext PyTorch tensors, the samples are drawn directly
        with torch.rand.
"""
if device is None:
device = torch.device("cpu")
return PyTorchTensor(torch.rand(*sizes, device=device))
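# Minimal usage sketch (illustrative only; assumes crypten.init() has been
# called and that importing this module has registered the "ptt" backend):
#   crypten.set_default_cryptensor_type("ptt")
#   x = crypten.cryptensor(torch.tensor([1.0, -2.0]))
#   x.add(1).get_plain_text()  # tensor([ 2., -1.])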
| CrypTen-main | test/pytorch_backend/pytorch_tensor.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .pytorch_tensor import PyTorchTensor
__all__ = ["PyTorchTensor"]
| CrypTen-main | test/pytorch_backend/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = "CrypTen"
copyright = "2019, Facebook AI Research"
author = "Facebook AI Research"
# The full version, including alpha/beta/rc tags
release = "0.1"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.napoleon",
"sphinx.ext.intersphinx",
"sphinx.ext.mathjax",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_options = {"includehidden": True}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
| CrypTen-main | docs/conf.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import torch
from .gradients import AutogradContext as _AutogradContext
class AutogradContext(_AutogradContext):
"""
DEPRECATED: Object used by AutogradFunctions for saving context information.
"""
def __init__(self):
raise DeprecationWarning(
"crypten.autograd_cryptensor.AutogradContext is deprecated. Please "
"use crypten.gradients.AutogradContext instead."
)
super().__init__(self)
def AutogradCrypTensor(tensor, requires_grad=True):
"""
DEPRECATED: CrypTensor with support for autograd, akin to the `Variable`
originally in PyTorch.
"""
raise DeprecationWarning(
"AutogradCrypTensor is deprecated. Please set the "
"requires_grad attribute on the CrypTensor instead."
)
if torch.is_tensor(tensor):
tensor = crypten.cryptensor(tensor)
tensor.requires_grad = requires_grad
return tensor
| CrypTen-main | crypten/autograd_cryptensor.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import math
from functools import reduce
import crypten
import torch
from .common.util import _grad_input_padding
# registry that maps function names to AutogradFunctions:
FUNCTION_REGISTRY = {}
def register_function(name):
"""Decorator that registers a new autograd function."""
def register_function_cls(cls):
"""Function performing the actual registration."""
if name in FUNCTION_REGISTRY:
raise ValueError("Cannot register duplicate function ({})".format(name))
if not issubclass(cls, AutogradFunction):
raise ValueError(
"Function (%s: %s) must extend AutogradFunction" % (name, cls.__name__)
)
cls.name = name
FUNCTION_REGISTRY[name] = cls
return cls
return register_function_cls
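# Illustrative registration sketch ("my_identity" is a hypothetical name used
# only for this example, not a CrypTen function):
#
#   @register_function("my_identity")
#   class AutogradIdentity(AutogradFunction):
#       @staticmethod
#       def forward(ctx, input):
#           return input.clone()
#       @staticmethod
#       def backward(ctx, grad_output):
#           return grad_output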
def get_grad_fn(name):
"""
Returns gradient function for the CrypTen function with the specified name.
"""
if name in FUNCTION_REGISTRY:
return FUNCTION_REGISTRY[name]
return None
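# Example lookup (a sketch mirroring how the unit tests drive gradient
# functions directly):
#   grad_fn = get_grad_fn("exp")
#   output = grad_fn.forward(ctx, encrypted_input)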
def _ensure_tensor(input):
"""
Converts scalars in inputs to correct tensor type.
"""
if isinstance(input, (int, float)):
input = torch.tensor(input)
return input
def _inverse_broadcast(grad_output, input_size):
"""
Performs the inverse operation of a broadcast.
"""
# special case where input was a scalar:
if input_size == torch.Size():
return grad_output.sum()
# remove leading dimensions:
while grad_output.dim() > len(input_size):
grad_output = grad_output.sum(0, keepdim=False)
assert grad_output.dim() == len(input_size), "cannot perform inverse broadcast"
# perform accumulation across broadcast dimensions:
for dim in range(grad_output.dim()):
if input_size[dim] == 1 and grad_output.size(dim) > 1:
grad_output = grad_output.sum(dim, keepdim=True)
return grad_output
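# Worked example: if input_size == (3, 1) and grad_output has shape (2, 3, 4),
# the loop first sums away the leading batch dimension (giving shape (3, 4)),
# then sums dimension 1 with keepdim=True to recover the input's shape (3, 1).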
class BaseAutogradContext:
"""
Base implementation for AutogradContext, which saves context information
for AutogradFunctions. Base implementation contains no-ops for all functions.
"""
def __init__(self):
pass
def reset(self):
pass
def save_for_backward(self, value):
pass
def save_multiple_for_backward(self, values):
pass
def mark_non_differentiable(self, non_differentiable):
pass
def is_differentiable(self, tensor):
raise RuntimeError("Cannot check differentiability in BaseAutogradContext.")
@property
def saved_tensors(self):
raise RuntimeError("Cannot check saved_tensors in BaseAutogradContext.")
class AutogradContext(BaseAutogradContext):
"""
Object that can be used by AutogradFunction for saving context information.
"""
def __init__(self):
self.reset()
def reset(self):
self.context = []
self.non_differentiable = []
def save_for_backward(self, value):
self.context.append(value)
def save_multiple_for_backward(self, values):
for value in values:
self.save_for_backward(value)
def mark_non_differentiable(self, non_differentiable):
if not isinstance(non_differentiable, list):
non_differentiable = [non_differentiable]
self.non_differentiable.extend(id(x) for x in non_differentiable)
def is_differentiable(self, tensor):
return id(tensor) not in self.non_differentiable
@property
def saved_tensors(self):
return self.context
class AutogradFunction:
"""
Base implementation of a function that supports autograd.
"""
@staticmethod
def forward(ctx, input):
raise NotImplementedError("Forward function not implemented.")
@staticmethod
def backward(ctx, grad_output):
raise NotImplementedError("Backward function not implemented.")
def __str__(self):
if hasattr(self, "name"):
return self.name
@register_function("t")
class AutogradT(AutogradFunction):
@staticmethod
def forward(ctx, input):
return input.t()
@staticmethod
def backward(ctx, grad_output):
return grad_output.t()
@register_function("transpose")
class AutogradTranspose(AutogradFunction):
@staticmethod
def forward(ctx, input, dim1, dim2):
ctx.save_multiple_for_backward((dim1, dim2))
return input.transpose(dim1, dim2)
@staticmethod
def backward(ctx, grad_output):
dim1, dim2 = ctx.saved_tensors
return grad_output.transpose(dim2, dim1)
@register_function("permute")
class AutogradPermute(AutogradFunction):
@staticmethod
def forward(ctx, input, dims):
ctx.save_for_backward(dims)
return input.permute(dims)
@staticmethod
def backward(ctx, grad_output):
(dims,) = ctx.saved_tensors
inds = [dims.index(x) for x in range(len(dims))]
return grad_output.permute(inds)
@register_function("flip")
class AutogradFlip(AutogradFunction):
@staticmethod
def forward(ctx, input, dims):
ctx.save_for_backward(dims)
return input.flip(dims)
@staticmethod
def backward(ctx, grad_output):
(dims,) = ctx.saved_tensors
return grad_output.flip(dims)
@register_function("clone")
class AutogradClone(AutogradFunction):
@staticmethod
def forward(ctx, input):
return input.clone()
@staticmethod
def backward(ctx, grad_output):
return grad_output.clone()
@register_function("cat")
class AutogradCat(AutogradFunction):
@staticmethod
def forward(ctx, input, dim=0):
split_sections = [t.size(dim) for t in input]
ctx.save_multiple_for_backward((dim, split_sections))
return crypten.cat(input, dim=dim)
@staticmethod
def backward(ctx, grad_output):
dim, split_sections = ctx.saved_tensors
return grad_output.split(split_sections, dim=dim)
@register_function("stack")
class AutogradStack(AutogradFunction):
@staticmethod
def forward(ctx, input, dim=0):
ctx.save_for_backward(dim)
return crypten.stack(input, dim=dim)
@staticmethod
def backward(ctx, grad_output):
(dim,) = ctx.saved_tensors
return grad_output.unbind(dim=dim)
@register_function("view")
class AutogradView(AutogradFunction):
@staticmethod
def forward(ctx, input, *size):
ctx.save_for_backward(input.size())
return input.view(*size)
@staticmethod
def backward(ctx, grad_output):
(input_size,) = ctx.saved_tensors
return grad_output.view(input_size)
@register_function("reshape")
class AutogradReshape(AutogradFunction):
@staticmethod
def forward(ctx, input, *shape):
ctx.save_for_backward(input.size())
return input.reshape(*shape)
@staticmethod
def backward(ctx, grad_output):
(size,) = ctx.saved_tensors
return grad_output.reshape(size)
@register_function("flatten")
class AutogradFlatten(AutogradFunction):
@staticmethod
def forward(ctx, input, start_dim=0, end_dim=-1):
ctx.save_for_backward(input.size())
return input.flatten(start_dim=start_dim, end_dim=end_dim)
@staticmethod
def backward(ctx, grad_output):
(size,) = ctx.saved_tensors
return grad_output.reshape(size)
@register_function("narrow")
class AutogradNarrow(AutogradFunction):
@staticmethod
def forward(ctx, input, dim, start, length):
ctx.save_multiple_for_backward((input.size(dim), dim, start, length))
return input.narrow(dim, start, length)
@staticmethod
def backward(ctx, grad_output):
size, dim, start, length = ctx.saved_tensors
# pad is applied to dimensions in reverse order
dim = grad_output.dim() - 1 - dim
# pad is applied in pairs that denote the pads at the beginning and end
# of the tensor along the given dimension
pad = [0] * 2 * grad_output.dim()
pad[2 * dim] = start
pad[2 * dim + 1] = size - length - start
return grad_output.pad(pad)
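# Worked example: narrow(dim, start=2, length=3) on a dimension of size 10
# keeps indices 2..4, so the backward pass pads grad_output along that
# dimension with start == 2 zeros in front and 10 - 3 - 2 == 5 zeros behind.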
@register_function("take")
class AutogradTake(AutogradFunction):
@staticmethod
def forward(ctx, input, index, dim=None):
ctx.save_multiple_for_backward((input.size(), index, dim))
return input.take(index, dim)
@staticmethod
def backward(ctx, grad_output):
size, index, dim = ctx.saved_tensors
grad = grad_output.new(torch.zeros(size))
if dim is None:
grad_flat = grad.flatten()
flat_index = index.flatten()
grad_output_flat = grad_output.flatten()
grad_flat[flat_index] = grad_output_flat
grad = grad_flat.reshape(size)
else:
flat_index = index.flatten()
grad_output_flat = grad_output.flatten(
start_dim=dim, end_dim=(dim + index.dim() - 1)
)
grad.index_add_(dim, flat_index, grad_output_flat)
return grad
@register_function("index_select")
class AutogradIndexSelect(AutogradFunction):
@staticmethod
def forward(ctx, input, dim, index):
ctx.save_multiple_for_backward([input.size(), dim, index])
return input.index_select(dim, index)
@staticmethod
def backward(ctx, grad_output):
size, dim, index = ctx.saved_tensors
index = index.unsqueeze(0) if index.dim() == 0 else index
return grad_output.new(torch.zeros(size)).index_add_(dim, index, grad_output)
@register_function("gather")
class AutogradGather(AutogradFunction):
@staticmethod
def forward(ctx, input, dim, index):
ctx.save_multiple_for_backward([input.size(), dim, index])
return input.gather(dim, index)
@staticmethod
def backward(ctx, grad_output):
size, dim, index = ctx.saved_tensors
return grad_output.new(torch.zeros(size)).scatter_add_(dim, index, grad_output)
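# Note: gather and scatter_add are adjoint linear maps, so the gradient of
# gather(dim, index) scatters grad_output back into the gathered positions.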
@register_function("scatter")
class AutogradScatter(AutogradFunction):
@staticmethod
def forward(ctx, input, dim, index, src):
output = input.scatter(dim, index, src)
ctx.save_multiple_for_backward([dim, index])
return output
@staticmethod
def backward(ctx, grad_output):
dim, index = ctx.saved_tensors
size = grad_output.size()
mask = torch.ones(size).scatter(dim, index, torch.zeros(size)).long()
input_grad = grad_output.mul(mask)
src_grad = grad_output.gather(dim, index)
return (input_grad, src_grad)
@register_function("roll")
class AutogradRoll(AutogradFunction):
@staticmethod
def forward(ctx, input, shifts, dims=None):
ctx.save_multiple_for_backward((shifts, dims))
return input.roll(shifts, dims=dims)
@staticmethod
def backward(ctx, grad_output):
shifts, dims = ctx.saved_tensors
# Reverse and negate shifts
if isinstance(shifts, (tuple, list)):
shifts = list(shifts)
for i, shift in enumerate(shifts):
shifts[i] = -shift
shifts.reverse()
else:
shifts = -shifts
# Reverse dims
if isinstance(dims, (tuple, list)):
dims = list(dims)
dims.reverse()
return grad_output.roll(shifts, dims)
@register_function("squeeze")
class AutogradSqueeze(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):
# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
(input,) = args # no dimension to squeeze in args
dim = kwargs.get("dim", None)
else:
assert len(args) == 2
assert "dim" not in kwargs
input, dim = args # dimension to squeeze in args
# perform the actual squeeze:
output = input.squeeze() if dim is None else input.squeeze(dim)
# keep correct dimensions for backward pass:
if dim is None:
dims = [idx for idx, sz in enumerate(input.size()) if sz == 1]
else:
            # Squeezing non-singleton dimensions is a no-op:
dims = [dim] if input.size(dim) == 1 else []
ctx.save_for_backward(dims)
return output
@staticmethod
def backward(ctx, grad_output):
(dims,) = ctx.saved_tensors
grad_input = grad_output
for dim in dims:
grad_input = grad_input.unsqueeze(dim)
return grad_input
@register_function("unsqueeze")
class AutogradUnsqueeze(AutogradFunction):
@staticmethod
def forward(ctx, input, dim):
ctx.save_for_backward(dim)
return input.unsqueeze(dim)
@staticmethod
def backward(ctx, grad_output):
(dim,) = ctx.saved_tensors
return grad_output.squeeze(dim)
@register_function("__getitem__")
class AutogradGetItem(AutogradFunction):
@staticmethod
def forward(ctx, input, index):
ctx.save_multiple_for_backward([input.size(), index])
return input[index]
@staticmethod
def backward(ctx, grad_output):
size, index = ctx.saved_tensors
grad = grad_output.new(torch.zeros(size))
grad[index] = grad_output
return grad
@register_function("neg")
class AutogradNeg(AutogradFunction):
@staticmethod
def forward(ctx, input):
return input.neg()
@staticmethod
def backward(ctx, grad_output):
return grad_output.neg()
@register_function("relu")
class AutogradReLU(AutogradFunction):
@staticmethod
def forward(ctx, input):
mask = input.gt(0.0)
ctx.save_for_backward(mask)
return input.mul(mask)
@staticmethod
def backward(ctx, grad_output):
(mask,) = ctx.saved_tensors
return grad_output.mul(mask)
@register_function("dropout")
class AutogradDropout(AutogradFunction):
@staticmethod
def forward(ctx, input, p=0.5, training=True, inplace=False):
if training and inplace:
logging.warning(
"CrypTen dropout does not support inplace computation during training."
)
if not training:
if inplace:
return input
else:
return input.clone()
# training mode:
generator = crypten.generators["global"][input.device]
random_tensor = torch.rand(
input.size(), generator=generator, device=input.device
)
boolean_mask = (random_tensor > p).to(input.device, dtype=torch.float)
if inplace:
result = input.mul_(boolean_mask.div(1.0 - p))
else:
result = input.mul(boolean_mask.div(1.0 - p))
ctx.save_multiple_for_backward([boolean_mask, p])
return result
@staticmethod
def backward(ctx, grad_output):
if len(ctx.saved_tensors) == 0:
return grad_output # forward pass was run in eval mode
boolean_mask, p = ctx.saved_tensors
return grad_output.mul(boolean_mask.div(1.0 - p))
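# Note: this is "inverted" dropout -- surviving activations are scaled by
# 1 / (1 - p) at training time so their expected value matches the identity
# behavior at evaluation time; backward reapplies the same scaled mask.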
@register_function("_feature_dropout")
class AutogradFeatureDropout(AutogradFunction):
@staticmethod
def forward(ctx, input, p=0.5, training=True, inplace=False):
if training and inplace:
logging.warning(
"CrypTen _feature_dropout does not support inplace computation during training."
)
# inference mode:
if not training:
if inplace:
return input
else:
return input.clone()
# training mode:
feature_dropout_size = input.size()[0:2]
generator = crypten.generators["global"][input.device]
random_tensor = torch.rand(feature_dropout_size, generator=generator)
boolean_mask = (random_tensor > p).to(dtype=torch.float)
for i in range(2, input.dim()):
boolean_mask = boolean_mask.unsqueeze(i)
boolean_mask, _ = torch.broadcast_tensors(boolean_mask, input.data)
if inplace:
result = input.mul_(boolean_mask.div(1.0 - p))
else:
result = input.mul(boolean_mask.div(1.0 - p))
ctx.save_multiple_for_backward([boolean_mask, p])
return result
@staticmethod
def backward(ctx, grad_output):
if len(ctx.saved_tensors) == 0:
return grad_output # forward pass was run in eval mode
boolean_mask, p = ctx.saved_tensors
return grad_output.mul(boolean_mask.div(1.0 - p))
@register_function("tanh")
class AutogradTanh(AutogradFunction):
@staticmethod
def forward(ctx, input):
activations = input.tanh()
ctx.save_for_backward(activations)
return activations
@staticmethod
def backward(ctx, grad_output):
(activations,) = ctx.saved_tensors
return grad_output.mul(activations.square().neg().add(1.0))
@register_function("hardtanh")
class AutogradHardtanh(AutogradFunction):
@staticmethod
def forward(ctx, input, min_val=-1, max_val=1):
assert isinstance(
min_val, (int, float)
), "hardtanh min_val must be an int or float"
assert isinstance(
max_val, (int, float)
), "hardtanh max_val must be an int or float"
if min_val == max_val:
grad = input.new(torch.zeros(input.size()))
ctx.save_for_backward(grad)
return grad + min_val
intermediate = crypten.stack([input - min_val, max_val - input]).gt(0)
grad = intermediate.sum(0).sub(1)
ctx.save_for_backward(grad)
result = grad.mul(input)
result += (1 - intermediate[0]).mul(min_val)
result += (1 - intermediate[1]).mul(max_val)
return result
@staticmethod
def backward(ctx, grad_output):
(grad,) = ctx.saved_tensors
return grad.mul(grad_output)
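# The stacked comparisons above evaluate to 1 exactly when the input lies
# strictly inside (min_val, max_val) and to 0 otherwise, so the backward pass
# reduces to masking grad_output with the saved indicator.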
@register_function("erf")
class AutogradErf(AutogradFunction):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.erf()
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
grad = input.pos_pow(2).neg().exp().mul(2.0 / math.sqrt(math.pi))
return grad_output.mul(grad)
@register_function("relu6")
class AutogradReLU6(AutogradFunction):
r"""Applies the element-wise function:
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
"""
@staticmethod
def relu6(self):
return self.hardtanh(min_value=0, max_value=6)
@staticmethod
def forward(ctx, input):
intermediate = crypten.stack([input, 6 - input]).gt(0)
grad = intermediate.sum(0).sub(1)
ctx.save_for_backward(grad)
result = grad.mul(input)
result += (1 - intermediate[1]).mul(6)
return result
@staticmethod
def backward(ctx, grad_output):
(grad,) = ctx.saved_tensors
return grad.mul(grad_output)
@register_function("add")
class AutogradAdd(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
input = _ensure_tensor(input)
other = _ensure_tensor(other)
ctx.save_multiple_for_backward([input.size(), other.size()])
return input.add(other)
@staticmethod
def backward(ctx, grad_output):
input_size1, input_size2 = ctx.saved_tensors
return (
_inverse_broadcast(grad_output.clone(), input_size1),
_inverse_broadcast(grad_output.clone(), input_size2),
)
@register_function("sub")
class AutogradSub(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
input = _ensure_tensor(input)
other = _ensure_tensor(other)
ctx.save_multiple_for_backward([input.size(), other.size()])
return input.sub(other)
@staticmethod
def backward(ctx, grad_output):
input_size1, input_size2 = ctx.saved_tensors
return (
_inverse_broadcast(grad_output.clone(), input_size1),
_inverse_broadcast(grad_output.clone(), input_size2).neg(),
)
@register_function("__rsub__")
class AutogradRSub(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
input = _ensure_tensor(input)
other = _ensure_tensor(other)
ctx.save_multiple_for_backward([input.size(), other.size()])
return (-input).add(other)
@staticmethod
def backward(ctx, grad_output):
input_size1, input_size2 = ctx.saved_tensors
return (
_inverse_broadcast(grad_output.clone(), input_size1).neg(),
_inverse_broadcast(grad_output.clone(), input_size2),
)
@register_function("mul")
class AutogradMul(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
input = _ensure_tensor(input)
other = _ensure_tensor(other)
ctx.save_multiple_for_backward([input, other])
return input.mul(other)
@staticmethod
def backward(ctx, grad_output):
self_, other = ctx.saved_tensors
return (
_inverse_broadcast(grad_output.mul(other), self_.size()),
_inverse_broadcast(grad_output.mul(self_), other.size()),
)
@register_function("matmul")
class AutogradMatMul(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
ctx.save_multiple_for_backward([input, other])
return input.matmul(other)
@staticmethod
def backward(ctx, grad_output):
self_, other = ctx.saved_tensors
# Cache sizes for inverse_broadcast
self_size = self_.size()
other_size = other.size()
        # Deal with vectors that are represented by a tensor
        # with fewer than 2 dimensions
if self_.dim() < 2:
self_ = self_.unsqueeze(0)
grad_output = grad_output.unsqueeze(0)
if other.dim() < 2:
other = other.unsqueeze(1)
grad_output = grad_output.unsqueeze(1)
# Compute gradients
self_grad = grad_output.matmul(other.transpose(-2, -1))
other_grad = self_.transpose(-2, -1).matmul(grad_output)
# Fix gradient sizes for vector inputs
if len(self_size) < 2:
self_grad = self_grad.squeeze()
if self_grad.dim() < 1:
self_grad = self_grad.unsqueeze(0)
if len(other_size) < 2:
other_grad = other_grad.squeeze()
if other_grad.dim() < 1:
other_grad = other_grad.unsqueeze(0)
return (
_inverse_broadcast(self_grad, self_size),
_inverse_broadcast(other_grad, other_size),
)
@register_function("div")
class AutogradDiv(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
if crypten.is_encrypted_tensor(other):
other_reciprocal = other.reciprocal()
ctx.save_multiple_for_backward([input, other_reciprocal])
return input.mul(other_reciprocal)
else:
ctx.save_multiple_for_backward([input.size(), other])
return input.div(other)
@staticmethod
def backward(ctx, grad_output):
saved = ctx.saved_tensors
        # if `other` was encrypted, saved is [input, other_reciprocal]
if crypten.is_encrypted_tensor(saved[1]):
input, other_reciprocal = saved
grad_input = other_reciprocal.mul(grad_output)
grad_other = other_reciprocal.square().mul(input).mul(grad_output).neg()
return (
_inverse_broadcast(grad_input, input.size()),
_inverse_broadcast(grad_other, other_reciprocal.size()),
)
        # otherwise, `other` is a public tensor or scalar
else:
input_size, other = saved
grad_input = grad_output.div(other)
if torch.is_tensor(other):
return _inverse_broadcast(grad_input, input_size)
else:
return grad_input
@register_function("__rtruediv__")
class AutogradRDiv(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
reciprocal = input.reciprocal()
ctx.save_multiple_for_backward([reciprocal, other])
return reciprocal.mul(other)
@staticmethod
def backward(ctx, grad_output):
reciprocal, other = ctx.saved_tensors
grad_input = reciprocal.square().mul(other).mul(grad_output).neg()
grad_input = _inverse_broadcast(grad_input, reciprocal.size())
if torch.is_tensor(other) or crypten.is_encrypted_tensor(other):
grad_other = reciprocal.mul(grad_output)
grad_other = _inverse_broadcast(grad_other, other.size())
return (grad_input, grad_other)
else:
return grad_input
@register_function("polynomial")
class AutogradPolynomial(AutogradFunction):
@staticmethod
def forward(ctx, input, coeffs, func="mul"):
ctx.mark_non_differentiable(coeffs)
if isinstance(coeffs, (list, tuple)):
coeffs = torch.tensor(coeffs)
ctx.save_multiple_for_backward([input, coeffs, func])
return input.polynomial(coeffs, func)
@staticmethod
def backward(ctx, grad_output):
input, coeffs, func = ctx.saved_tensors
coeffs *= torch.arange(coeffs.size(0)).add(1)
return input.polynomial(coeffs[1:], func).add(coeffs[0]).mul_(grad_output)
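# Backward uses d/dx sum_k c_k x^k = c_1 + sum_{k >= 2} k c_k x^(k - 1):
# each coefficient is scaled by its power k, the constant term (k == 1) is
# added separately, and the rest is evaluated as a polynomial of lower degree.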
@register_function("pow")
class AutogradPow(AutogradFunction):
@staticmethod
def forward(ctx, input, power):
grad_pow = input.pow(power - 1)
grad = grad_pow.mul(power)
ctx.save_multiple_for_backward([input, grad])
return grad_pow.mul(input)
@staticmethod
def backward(ctx, grad_output):
input, grad = ctx.saved_tensors
return grad.mul_(grad_output)
@register_function("pos_pow")
class AutogradPosPow(AutogradFunction):
@staticmethod
def forward(ctx, input, power):
if isinstance(power, int) or (isinstance(power, float) and int(power) == power):
ctx.save_multiple_for_backward([input, power])
return input.pow(power)
else:
log_input = input.log()
ctx.save_multiple_for_backward([log_input, power])
return log_input.mul(power).exp()
@staticmethod
def backward(ctx, grad_output):
input, power = ctx.saved_tensors
if isinstance(power, int) or (isinstance(power, float) and int(power) == power):
return input.pow(power - 1.0).mul_(power).mul_(grad_output)
else:
return input.mul(power - 1.0).mul_(power).exp().mul(grad_output)
@register_function("square")
class AutogradSquare(AutogradFunction):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.square()
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
return grad_output.mul(input.mul(2.0))
@register_function("sqrt")
class AutogradSqrt(AutogradFunction):
@staticmethod
def forward(ctx, input):
inv_sqrt = input.inv_sqrt()
ctx.save_for_backward(inv_sqrt)
return inv_sqrt.mul(input)
@staticmethod
def backward(ctx, grad_output):
(inv_sqrt,) = ctx.saved_tensors
return inv_sqrt.div_(2).mul_(grad_output)
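# d sqrt(x) / dx = 1 / (2 sqrt(x)); reusing the inv_sqrt saved in the forward
# pass avoids recomputing an expensive inverse square root in backward.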
@register_function("exp")
class AutogradExp(AutogradFunction):
@staticmethod
def forward(ctx, input):
output = input.exp()
ctx.save_for_backward(output)
return output
@staticmethod
def backward(ctx, grad_output):
(output,) = ctx.saved_tensors
return output.mul(grad_output)
@register_function("log")
class AutogradLog(AutogradFunction):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input)
return input.log()
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
return grad_output.div(input)
@register_function("reciprocal")
class AutogradReciprocal(AutogradFunction):
@staticmethod
def forward(ctx, input):
reciprocal = input.reciprocal()
ctx.save_for_backward(reciprocal)
return reciprocal
@staticmethod
def backward(ctx, grad_output):
(reciprocal,) = ctx.saved_tensors
return grad_output.neg().mul_(reciprocal.square())
@register_function("dot")
class AutogradDot(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
ctx.save_multiple_for_backward([input, other])
return input.dot(other)
@staticmethod
def backward(ctx, grad_output):
self_, other = ctx.saved_tensors
return (grad_output.mul(other), grad_output.mul(self_))
@register_function("ger")
class AutogradGer(AutogradFunction):
@staticmethod
def forward(ctx, input, other):
ctx.save_multiple_for_backward([input, other])
return input.ger(other)
@staticmethod
def backward(ctx, grad_output):
input, other = ctx.saved_tensors
return (grad_output.matmul(other), input.matmul(grad_output))
@register_function("sin")
class AutogradSin(AutogradFunction):
@staticmethod
def forward(ctx, input):
cossin = input.cossin()
ctx.save_for_backward(cossin[0])
return cossin[1]
@staticmethod
def backward(ctx, grad_output):
(cos,) = ctx.saved_tensors
return grad_output.mul(cos)
@register_function("cos")
class AutogradCos(AutogradFunction):
@staticmethod
def forward(ctx, input):
cossin = input.cossin()
ctx.save_for_backward(cossin[1])
return cossin[0]
@staticmethod
def backward(ctx, grad_output):
(sin,) = ctx.saved_tensors
return grad_output.mul(sin.neg_())
@register_function("cosine_similarity")
class AutogradCosineSimilarity(AutogradFunction):
@staticmethod
def forward(ctx, x1, x2, dim=1, eps=None):
assert x1.size() == x2.size(), "cosine_similarity sizes must match"
# Handle 0-d case
zero_dim = x1.dim() == 0
if zero_dim:
x1 = x1.unsqueeze(0)
x2 = x2.unsqueeze(0)
# Handle 1-d vectors
if x1.size(dim) == 1:
ctx.save_multiple_for_backward([dim, zero_dim])
return x1.mul(x2).sign().squeeze(dim)
if not isinstance(x2, crypten.CrypTensor):
x2 = x1.new(x2)
xy = crypten.stack([x1, x2], dim=0) # [x, y]
norm_sq = xy.square().sum(dim=(dim + 1)) # [||x||^2, ||y||^2]
inv_norms = norm_sq.inv_sqrt() # [1 / ||x||, 1 / ||y||]
ctx.save_multiple_for_backward((xy, inv_norms, dim))
inv_norm = inv_norms.prod(0) # 1 / ||x||||y||
dot = xy.prod(0).sum(dim) # x . y
return dot.mul(inv_norm)
@staticmethod
def backward(ctx, grad_output):
# Handle 1-d vectors
if len(ctx.saved_tensors) == 2:
(dim, zero_dim) = ctx.saved_tensors
zeros = torch.zeros(grad_output.size()).unsqueeze(dim)
result = grad_output.new(zeros, device=grad_output.device)
if zero_dim:
result = result.squeeze()
return result, result.clone()
xy, inv_norms, dim = ctx.saved_tensors
dot = xy.prod(0).sum(dim, keepdim=True)
inv_norms = inv_norms.unsqueeze(dim + 1)
sq_inv_norms = inv_norms.square()
xy_normalized = xy.mul(sq_inv_norms)
yx = xy.roll(1, 0)
grads = yx.sub(dot.mul(xy_normalized)).mul(inv_norms.prod(0))
grads = grads.mul(grad_output.unsqueeze(dim))
x_grad, y_grad = grads
return x_grad, y_grad
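# For reference (a sketch of the plaintext math, not extra functionality):
#   d/dx cos(x, y) = y / (||x|| ||y||) - cos(x, y) * x / ||x||^2
# and symmetrically for y; `yx = xy.roll(1, 0)` swaps the roles of x and y
# so that both gradients come out of a single stacked computation.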
@register_function("abs")
class AutogradAbs(AutogradFunction):
@staticmethod
def forward(ctx, input):
sign = input.sign()
ctx.save_for_backward(sign)
return input.mul(sign)
@staticmethod
def backward(ctx, grad_output):
(sign,) = ctx.saved_tensors
return grad_output.mul(sign)
@register_function("sign")
class AutogradSign(AutogradFunction):
@staticmethod
def forward(ctx, input):
return input.sign()
@staticmethod
def backward(ctx, grad_output):
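# sign is piecewise constant, so its gradient is zero almost everywhere;
# subtracting grad_output from itself yields an encrypted zero of matching shape.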
return grad_output.sub(grad_output)
@register_function("norm")
class AutogradNorm(AutogradFunction):
@staticmethod
def forward(ctx, input, p="fro", dim=None, keepdim=False):
if p == float("inf"):
sign = input.sign()
if dim is None:
input = input.mul(sign)
argmax = input.argmax(one_hot=True)
max = input.mul(argmax).sum()
else:
max, argmax = input.mul(sign).max(dim, keepdim=keepdim, one_hot=True)
ctx.save_multiple_for_backward((sign, argmax, p, dim, keepdim))
return max
else:
if dim is None:
norm = input.norm(p=p)
else:
norm = input.norm(p=p, dim=dim, keepdim=keepdim)
ctx.save_multiple_for_backward((input, norm, p, dim, keepdim))
return norm
@staticmethod
def backward(ctx, grad_output):
input, norm, p, dim, keepdim = ctx.saved_tensors
if not keepdim and dim is not None:
grad_output = grad_output.unsqueeze(dim)
if p == 2 or p == "fro":
return grad_output.mul(input.div(norm))
elif p == float("inf"):
sign, argmax = input, norm
return grad_output.mul(argmax).mul(sign)
else:
sign = input.sign()
abs = input.mul(sign)
return grad_output.mul(abs.div(norm).pos_pow(p - 1).mul(sign))
@register_function("sum")
class AutogradSum(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):
# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
(input,) = args # no dimension to sum over in args
dim = kwargs.get("dim", None)
else:
assert len(args) == 2
assert "dim" not in kwargs
input, dim = args # dimension to sum over in args
keepdim = kwargs.get("keepdim", False)
# compute sum:
ctx.save_multiple_for_backward((input.size(), dim, keepdim))
return input.sum(dim, keepdim=keepdim) if dim is not None else input.sum()
@staticmethod
def backward(ctx, grad_output):
input_size, dim, keepdim = ctx.saved_tensors
# Handle special case where input is 0-dimensional
if len(input_size) == 0:
return grad_output
if not keepdim and dim is not None:
grad_output = grad_output.unsqueeze(dim)
return grad_output.mul(torch.ones(input_size))
@register_function("cumsum")
class AutogradCumsum(AutogradFunction):
@staticmethod
def forward(ctx, input, dim):
ctx.save_for_backward(dim)
return input.cumsum(dim)
@staticmethod
def backward(ctx, grad_output):
(dim,) = ctx.saved_tensors
return grad_output.flip(dim).cumsum(dim).flip(dim)
@register_function("trace")
class AutogradTrace(AutogradFunction):
@staticmethod
def forward(ctx, input):
ctx.save_for_backward(input.size()[0])
return input.trace()
@staticmethod
def backward(ctx, grad_output):
(size,) = ctx.saved_tensors
return grad_output.new(torch.eye(size)).mul_(grad_output)
@register_function("mean")
class AutogradMean(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):
# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
(input,) = args # no dimension to average over in args
dim = kwargs.get("dim", None)
else:
assert len(args) == 2
assert "dim" not in kwargs
input, dim = args # dimension to average over in args
keepdim = kwargs.get("keepdim", False)
# compute mean:
ctx.save_multiple_for_backward((input.size(), dim, keepdim))
return input.mean(dim, keepdim=keepdim) if dim is not None else input.mean()
@staticmethod
def backward(ctx, grad_output):
input_size, dim, keepdim = ctx.saved_tensors
# Handle special case where input is 0-dimensional
if len(input_size) == 0:
return grad_output
nelement = float(
reduce(lambda x, y: x * y, input_size) if dim is None else input_size[dim]
)
if not keepdim and dim is not None:
grad_output = grad_output.unsqueeze(dim)
return grad_output.mul(
torch.ones(input_size, device=grad_output.device).div_(nelement)
)
@register_function("var")
class AutogradVariance(AutogradFunction):
@staticmethod
def forward(ctx, self, *args, **kwargs):
# preprocess inputs:
if len(args) == 0:
dim = None
unbiased = kwargs.get("unbiased", False)
keepdim = False
mean = self.mean()
elif len(args) == 1:
dim = args[0]
unbiased = kwargs.get("unbiased", False)
keepdim = kwargs.get("keepdim", False)
elif len(args) == 2:
dim, unbiased = args[0], args[1]
keepdim = kwargs.get("keepdim", False)
else:
dim, unbiased, keepdim = args[0], args[1], args[2]
if dim is not None: # dimension is specified
mean = self.mean(dim, keepdim=True)
# Compute square error
result = (self - mean).square()
if dim is None:
result = result.sum()
else:
result = result.sum(dim, keepdim=keepdim)
# Determine divisor
divisor = self.nelement() // result.nelement()
if unbiased:
divisor -= 1
# compute variance:
ctx.save_multiple_for_backward((self, mean, divisor, dim, keepdim))
return (
self.var(dim, unbiased=unbiased, keepdim=keepdim)
if dim is not None
else self.var(unbiased=unbiased)
)
@staticmethod
def backward(ctx, grad_output):
input, mean, divisor, dim, keepdim = ctx.saved_tensors
if not keepdim and dim is not None:
grad_output = grad_output.unsqueeze(dim)
numerator = input.sub(mean).mul(2).mul(grad_output)
if divisor == 0:
return numerator
return numerator.div(divisor)
@register_function("min")
class AutogradMin(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):
# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
(input,) = args # no dimension to min over in args
dim = kwargs.pop("dim", None) # remove dim from kwargs after obtaining it
else:
assert len(args) == 2
assert "dim" not in kwargs
input, dim = args # dimension to min over in args
keepdim = kwargs.get("keepdim", False)
one_hot = kwargs.get("one_hot", True)
# find minimum value (and corresponding argmin):
if dim is None:
argmin = input.argmin(one_hot=one_hot)
min = input.mul(argmin).sum()
else:
min, argmin = input.min(dim, **kwargs)
# save context and return:
ctx.save_multiple_for_backward((dim, keepdim, argmin, one_hot))
if dim is None:
return min
else:
ctx.mark_non_differentiable(argmin)
return min, argmin
@staticmethod
def backward(ctx, grad_output):
dim, keepdim, argmin, one_hot = ctx.saved_tensors
assert one_hot, (
"cannot backpropagate through min layer that does not "
"use one-hot representation because private indexing is unsupported"
)
# Handle special case where input is 0-dimensional
if len(argmin.size()) == 0:
return grad_output
if not keepdim and dim is not None:
grad_output = grad_output.unsqueeze(dim)
return grad_output.mul(argmin)
@register_function("max")
class AutogradMax(AutogradFunction):
@staticmethod
def forward(ctx, *args, **kwargs):
# preprocess inputs:
assert len(args) >= 1
if len(args) == 1:
(input,) = args # no dimension to max over in args
dim = kwargs.pop("dim", None) # remove dim from kwargs after obtaining it
else:
assert len(args) == 2
assert "dim" not in kwargs
input, dim = args # dimension to max over in args
keepdim = kwargs.get("keepdim", False)
one_hot = kwargs.get("one_hot", True)
# find maximum value (and corresponding argmax):
if dim is None:
shape = input.size()
input_flat = input.flatten()
max, argmax = input_flat.max(0, **kwargs)
argmax = argmax.reshape(shape)
else:
max, argmax = input.max(dim, **kwargs)
# save context and return:
ctx.save_multiple_for_backward((dim, keepdim, argmax, one_hot))
if dim is None:
return max
else:
ctx.mark_non_differentiable(argmax)
return max, argmax
@staticmethod
def backward(ctx, grad_output):
dim, keepdim, argmax, one_hot = ctx.saved_tensors
assert one_hot, (
"cannot backpropagate through max layer that does not "
"use one-hot representation because private indexing is unsupported"
)
# Handle special case where input is 0-dimensional
if len(argmax.size()) == 0:
return grad_output
if not keepdim and dim is not None:
grad_output = grad_output.unsqueeze(dim)
return grad_output.mul(argmax)
@register_function("sigmoid")
class AutogradSigmoid(AutogradFunction):
@staticmethod
def forward(ctx, input):
probs = input.sigmoid()
ctx.save_for_backward(probs)
return probs
@staticmethod
def backward(ctx, grad_output):
(probs,) = ctx.saved_tensors
return grad_output.mul(probs).mul_(probs.neg().add_(1.0))
@register_function("softmax")
class AutogradSoftmax(AutogradFunction):
@staticmethod
def forward(ctx, input, dim):
probs = input.softmax(dim)
ctx.save_multiple_for_backward([probs, dim])
return probs
@staticmethod
def backward(ctx, grad_output):
probs, dim = ctx.saved_tensors
if grad_output.dim() == 0 or grad_output.size(dim) == 1:
return grad_output.new(torch.zeros(grad_output.size()))
return grad_output.add(-probs.mul(grad_output).sum(dim, keepdim=True)).mul_(
probs
)
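# For reference (a sketch of the plaintext math, not extra functionality):
# the expression above is the softmax Jacobian-vector product with
# probabilities p and incoming gradient g:
#   dL/dx = (g - sum(g * p, dim, keepdim=True)) * p
# which follows from d p_i / d x_j = p_i * (delta_ij - p_j).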
@register_function("log_softmax")
class AutogradLogSoftmax(AutogradFunction):
@staticmethod
def forward(ctx, input, dim):
probs = input.log_softmax(dim)
ctx.save_multiple_for_backward([probs, dim])
return probs
@staticmethod
def backward(ctx, grad_output):
probs, dim = ctx.saved_tensors
if grad_output.dim() == 0 or grad_output.size(dim) == 1:
return grad_output.new(torch.zeros(grad_output.size()))
z = probs.exp()
result = grad_output - z * grad_output.sum(dim, keepdim=True)
return result
@register_function("pad")
class AutogradPad(AutogradFunction):
@staticmethod
def forward(ctx, input, padding, value=0.0, mode="constant"):
ctx.save_for_backward(padding)
output = input.pad(padding, value=value, mode=mode)
return output
@staticmethod
def backward(ctx, grad_output):
(padding,) = ctx.saved_tensors
for idx in range(0, len(padding), 2):
dim = grad_output.dim() - (idx // 2) - 1
start = padding[idx]
end = grad_output.size(dim) - padding[idx + 1] - padding[idx]
grad_output = grad_output.narrow(dim, start, end)
return grad_output
@register_function("avg_pool2d")
class AutogradAvgPool2D(AutogradFunction):
@staticmethod
def forward(ctx, input, kernel_size, stride=None, padding=0, ceil_mode=False):
# preprocess inputs:
if stride is None:
stride = kernel_size
if isinstance(stride, (int, float)):
stride = (stride, stride)
if isinstance(padding, (int, float)):
padding = (padding, padding)
if isinstance(kernel_size, (int, float)):
kernel_size = (kernel_size, kernel_size)
# perform average pooling:
output = input.avg_pool2d(kernel_size, padding=padding, stride=stride)
# store information for backward pass:
ctx.save_multiple_for_backward(
(input.shape, output, kernel_size, padding, stride)
)
return output
@staticmethod
def backward(ctx, grad_output):
"""Computes the gradient with respect to the input"""
input_shape, output, kernel_size, padding, stride = ctx.saved_tensors
in_channels = input_shape[-3]
# compute as d conv2d / d input with kernel as average filter
kernel = torch.ones(
in_channels, 1, kernel_size[0], kernel_size[1], device=grad_output.device
) / (kernel_size[0] * kernel_size[1])
grad_input_padding = _grad_input_padding(
grad_output,
input_shape,
stride,
padding,
kernel_size,
dilation=([1] * len(stride)),
)
# set groups=in_channels so input gradient is computed per channel
if isinstance(grad_output, crypten.CrypTensor):
return grad_output.conv_transpose2d(
kernel,
bias=None,
stride=stride,
padding=padding,
output_padding=grad_input_padding,
groups=in_channels,
)
return torch.conv_transpose2d(
grad_output,
kernel,
bias=None,
stride=stride,
padding=padding,
output_padding=grad_input_padding,
groups=in_channels,
)
@register_function("max_pool2d")
class AutogradMaxPool2D(AutogradFunction):
@staticmethod
def forward(
ctx,
input,
kernel_size,
padding=0,
stride=None,
dilation=1,
ceil_mode=False,
return_indices=False,
):
# preprocess inputs:
if stride is None:
stride = kernel_size
if isinstance(stride, (int, float)):
stride = (stride, stride)
if isinstance(padding, (int, float)):
padding = (padding, padding)
if isinstance(dilation, (int, float)):
dilation = (dilation, dilation)
# perform max pooling:
# Note: return_indices must be True to compute the backward pass.
output, indices = input.max_pool2d(
kernel_size,
padding=padding,
stride=stride,
dilation=dilation,
ceil_mode=ceil_mode,
return_indices=True,
)
# store information for backward pass and return:
ctx.save_multiple_for_backward(
(input.size(), indices, kernel_size, padding, stride, dilation, ceil_mode)
)
if return_indices:
ctx.mark_non_differentiable(indices)
return output, indices
else:
return output
@staticmethod
def backward(ctx, grad_output):
(
output_size,
indices,
kernel_size,
padding,
stride,
dilation,
ceil_mode,
) = ctx.saved_tensors
assert padding[0] == padding[1], "padding must be same in all axes"
return grad_output._max_pool2d_backward(
indices,
kernel_size,
padding=padding,
stride=stride,
dilation=dilation,
ceil_mode=ceil_mode,
output_size=output_size,
)
@register_function("conv1d")
class AutogradConv1D(AutogradFunction):
@staticmethod
def forward(ctx, input, kernel, padding=0, stride=1, dilation=1, groups=1):
if isinstance(stride, (int, float)):
stride = (stride,)
if isinstance(padding, (int, float)):
padding = (padding,)
if isinstance(dilation, (int, float)):
dilation = (dilation,)
ctx.save_multiple_for_backward(
(input, kernel, padding, stride, dilation, groups)
)
return input.conv1d(
kernel, padding=padding, stride=stride, dilation=dilation, groups=groups
)
@staticmethod
def backward(ctx, grad_output):
# Gradient function adapts code from:
# https://github.com/pytorch/pytorch/blob/master/torch/nn/grad.py
# get input, kernel, and sizes:
input, kernel, padding, stride, dilation, groups = ctx.saved_tensors
batch_size = input.size(0)
out_channels, in_channels, kernel_size = kernel.size()
in_channels *= groups
assert input.size(1) == in_channels, "wrong number of input channels"
assert grad_output.size(1) == out_channels, "wrong number of output channels"
assert grad_output.size(0) == batch_size, "wrong batch size"
# TODO: Implement conv1d gradient under following condition:
if groups > 1 and input.size(1) > groups:
raise NotImplementedError(
"conv1d backward with groups > 1 and in_channels > groups not implemented"
)
# compute gradient with respect to input:
output_padding = _grad_input_padding(
grad_output,
input.size(),
stride,
padding,
(kernel_size,),
dilation=dilation,
)
grad_input = grad_output.conv_transpose1d(
kernel,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
dilation=dilation,
)
# compute gradient with respect to kernel:
grad_output = grad_output.repeat(1, in_channels // groups, 1)
grad_output = grad_output.view(
grad_output.size(0) * grad_output.size(1), 1, grad_output.size(2)
)
input = input.view(1, input.size(0) * input.size(1), input.size(2))
grad_kernel = input.conv1d(
grad_output,
stride=dilation,
padding=padding,
dilation=stride,
groups=in_channels * batch_size,
)
grad_kernel = grad_kernel.view(
batch_size, grad_kernel.size(1) // batch_size, grad_kernel.size(2)
)
grad_kernel = grad_kernel.sum(dim=0)
grad_kernel = grad_kernel.view(
in_channels // groups, out_channels, grad_kernel.size(1)
)
grad_kernel = grad_kernel.transpose(0, 1).narrow(2, 0, kernel_size)
return (grad_input, grad_kernel)
@register_function("conv2d")
class AutogradConv2D(AutogradFunction):
@staticmethod
def forward(ctx, input, kernel, stride=1, padding=0, dilation=1, groups=1):
if isinstance(stride, (int, float)):
stride = (stride, stride)
if isinstance(padding, (int, float)):
padding = (padding, padding)
if isinstance(dilation, (int, float)):
dilation = (dilation, dilation)
ctx.save_multiple_for_backward(
(input, kernel, padding, stride, dilation, groups)
)
return input.conv2d(
kernel, stride=stride, padding=padding, dilation=dilation, groups=groups
)
@staticmethod
def backward(ctx, grad_output):
# Gradient function adapts code from:
# https://github.com/pytorch/pytorch/blob/master/torch/nn/grad.py
# get input, kernel, and sizes:
input, kernel, padding, stride, dilation, groups = ctx.saved_tensors
batch_size = input.size(0)
out_channels, in_channels, kernel_size_y, kernel_size_x = kernel.size()
in_channels *= groups
assert input.size(1) == in_channels, "wrong number of input channels"
assert grad_output.size(1) == out_channels, "wrong number of output channels"
assert grad_output.size(0) == batch_size, "wrong batch size"
# TODO: Implement conv2d gradient under following condition:
if groups > 1 and input.size(1) > groups:
raise NotImplementedError(
"conv2d backward with groups > 1 and in_channels > groups not implemented"
)
# compute gradient with respect to input:
output_padding = _grad_input_padding(
grad_output,
input.size(),
stride,
padding,
(kernel_size_y, kernel_size_x),
dilation=dilation,
)
grad_input = grad_output.conv_transpose2d(
kernel,
stride=stride,
padding=padding,
output_padding=output_padding,
groups=groups,
dilation=dilation,
)
# compute gradient with respect to kernel:
grad_output = grad_output.repeat(1, in_channels // groups, 1, 1)
grad_output = grad_output.view(
grad_output.size(0) * grad_output.size(1),
1,
grad_output.size(2),
grad_output.size(3),
)
input = input.view(
1, input.size(0) * input.size(1), input.size(2), input.size(3)
)
# dilation and stride are swapped based on PyTorch's conv2d_weight implementation
grad_kernel = input.conv2d(
grad_output,
stride=dilation,
padding=padding,
dilation=stride,
groups=in_channels * batch_size,
)
grad_kernel = grad_kernel.view(
batch_size,
grad_kernel.size(1) // batch_size,
grad_kernel.size(2),
grad_kernel.size(3),
)
grad_kernel = (
grad_kernel.sum(0)
.view(
in_channels // groups,
out_channels,
grad_kernel.size(2),
grad_kernel.size(3),
)
.transpose(0, 1)
)
grad_kernel = grad_kernel.narrow(2, 0, kernel_size_y)
grad_kernel = grad_kernel.narrow(3, 0, kernel_size_x)
return (grad_input, grad_kernel)
@register_function("batchnorm")
class AutogradBatchNorm(AutogradFunction):
@staticmethod
def forward(
ctx,
x,
weight,
bias,
running_mean=None,
running_var=None,
training=False,
eps=1e-05,
momentum=0.1,
inv_var=None,
):
"""
Computes forward step of batch norm by normalizing x
and returning weight * x_norm + bias.
Running mean and var are computed per channel (the `C` dimension) for an
input of size `(N, C, +)`.
Note: inv_var can introduce precision errors due to sqrt and division
particularly when the number of samples in a batch is small.
Args:
ctx (autograd_cryptensor.AutogradContext): context which
stores parameters such as weight and bias for backward step.
input (tuple of torch.tensors or cryptensor):
containing (x, weight, bias) with shapes `(N, C, +)`, `C`, and `C`
respectively.
training (bool): if training is True, running mean and var are
updated with the momentum factor and stored in module. Forward
is performed using batch statistics. If training is False,
running statistics are used and therefore cannot be None.
running_mean (torch.tensor or cryptensor): with shape `C`
running_var (torch.tensor or cryptensor): with shape `C`
eps (float): specifies epsilon used for numerical precision in inv_var
momentum (float): moment factor used in updating running mean and var.
Returns: (weight * normalized input + bias) of shape `(N, C, +)`.
"""
# determine dimensions over which means and variances are computed:
stats_dimensions = list(range(x.dim()))
stats_dimensions.pop(1)
# shape for broadcasting statistics with input:
broadcast_shape = [1] * x.dim()
broadcast_shape[1] = x.shape[1]
# compute mean and variance, track batch statistics:
if training:
mean = x.mean(stats_dimensions)
variance = x.var(stats_dimensions, unbiased=True)
if running_mean is not None and running_var is not None:
running_var.set(running_var * (1.0 - momentum) + variance * momentum)
running_mean.set(running_mean * (1.0 - momentum) + mean * momentum)
else:
if running_mean is None or running_var is None:
raise ValueError(
"Must provide running_mean and running_var when training is False"
)
mean = running_mean
variance = running_var
if training or inv_var is None:
# compute inverse variance:
if torch.is_tensor(variance):
inv_var = 1.0 / torch.sqrt(variance + eps)
else:
inv_var = (variance + eps).inv_sqrt()
# reshape shape (C) to broadcastable (1, C, 1, +):
mean = mean.reshape(broadcast_shape)
inv_var = inv_var.reshape(broadcast_shape)
weight = weight.reshape(broadcast_shape)
bias = bias.reshape(broadcast_shape)
# compute z-scores:
x_norm = (x - mean) * inv_var
# save context and return:
ctx.save_multiple_for_backward((x_norm, weight, inv_var, training))
return x_norm * weight + bias
@staticmethod
def backward(ctx, grad_output):
"""
Computes the gradient with respect to x, weight, and bias.
Statistics are assumed to be computed per channel (dimension C)
for an input of shape (N, C, ...). Note: partials with respect to
the input treat mean and variance as constants, as in torch.
Args:
ctx (autograd_cryptensor.AutogradContext): context containing
x_norm, weight, and inv_var. Note weight
and inv_var must be broadcastable with grad_output.
grad_output (cryptensor): gradient w.r.t. the batchnorm output, of shape (N, C, +).
Returns:
x_grad (cryptensor): gradient with respect to x, of shape (N, C, +).
weight_grad (cryptensor): gradient with respect to weight, of shape (C).
bias_grad (cryptensor): gradient with respect to bias, of shape (C).
"""
# retrieve context:
x_norm, weight, inv_var, training = ctx.saved_tensors
# determine dimensions over which means and variances are computed:
stats_dimensions = list(range(len(grad_output.shape)))
stats_dimensions.pop(1)
# shape for broadcasting statistics with output gradient:
broadcast_shape = [1] * grad_output.dim()
broadcast_shape[1] = grad_output.shape[1]
# compute gradient w.r.t. weight:
grad_weight = grad_output.mul(x_norm)
grad_weight = grad_weight.sum(stats_dimensions)
# compute gradient w.r.t. bias:
grad_bias = grad_output.sum(stats_dimensions)
# compute gradient with respect to the input:
grad_output = grad_output.mul(weight)
grad_input = grad_output.mul(inv_var)
if training:
# compute gradient term that is due to the mean:
num_element = reduce(
lambda x, y: x * y, [grad_output.size(d) for d in stats_dimensions]
)
grad_mean = grad_output.sum(stats_dimensions)
grad_mean = grad_mean.reshape(broadcast_shape)
grad_mean = grad_mean.mul(inv_var.div(-num_element))
# compute gradient term that is due to the standard deviation:
grad_std = x_norm.mul(grad_output).sum(stats_dimensions)
grad_std = grad_std.reshape(broadcast_shape)
grad_std = x_norm.mul(grad_std).mul(inv_var.div(-num_element))
# put all the terms together:
grad_input = grad_input.add(grad_mean).add(grad_std)
# return gradients:
return (grad_input, grad_weight, grad_bias)
@register_function("binary_cross_entropy")
class AutogradBinaryCrossEntropy(AutogradFunction):
@staticmethod
def forward(ctx, pred, target, skip_forward=False):
ctx.mark_non_differentiable(target)
ctx.save_multiple_for_backward([pred, target])
if skip_forward:
return pred.new(0)
# Compute full forward pass
log_pos, log_neg = (
crypten.stack([pred, 1.0 - pred]).log(input_in_01=True).unbind(dim=0)
)
loss_values = target * log_pos + ((1.0 - target) * log_neg)
return -(loss_values.mean())
@staticmethod
def backward(ctx, grad_output):
pred, target = ctx.saved_tensors
rec_pos, rec_neg = (
crypten.stack([pred, 1.0 - pred]).reciprocal(input_in_01=True).unbind(dim=0)
)
grad = (rec_neg * (1.0 - target)) - rec_pos * target
return grad.div_(target.nelement()).mul_(grad_output)
@register_function("binary_cross_entropy_with_logits")
class AutogradBinaryCrossEntropyWithLogits(AutogradFunction):
@staticmethod
def forward(ctx, logit, target, skip_forward=False):
sigmoid_out = logit.sigmoid()
assert (
sigmoid_out.size() == target.size()
), "Incorrect input sizes for binary_cross_entropy_with_logits"
ctx.mark_non_differentiable(target)
ctx.save_multiple_for_backward([target, sigmoid_out])
if skip_forward:
return sigmoid_out.new(0)
# Compute full forward pass
log_pos, log_neg = (
crypten.stack([sigmoid_out, 1.0 - sigmoid_out])
.log(input_in_01=True)
.unbind(dim=0)
)
loss_values = target * log_pos + ((1.0 - target) * log_neg)
return -(loss_values.mean())
@staticmethod
def backward(ctx, grad_output):
target, sigmoid_out = ctx.saved_tensors
return (sigmoid_out - target).div(target.nelement()).mul_(grad_output)
@register_function("rappor_loss")
class AutogradRAPPORLoss(AutogradFunction):
@staticmethod
def forward(ctx, logit, target, alpha, skip_forward=False):
assert (
logit.size() == target.size()
), "Logit and target sizes must match for rappor loss"
pred = logit.sigmoid()
ctx.mark_non_differentiable(target)
if alpha == 0.0:
ctx.save_multiple_for_backward([target, pred, None, alpha])
pred_normalized = pred
else:
pred_normalized = alpha * pred + (1 - alpha) * (1 - pred)
grad_correction = pred * (1 - pred)
grad_correction *= (pred_normalized * (1 - pred_normalized)).reciprocal(
input_in_01=True
)
ctx.save_multiple_for_backward(
[target, pred_normalized, grad_correction, alpha]
)
if skip_forward:
return pred.new(0)
log_pos, log_neg = (
crypten.stack([pred_normalized, 1.0 - pred_normalized])
.log(input_in_01=True)
.unbind(dim=0)
)
loss_values = target * log_pos + (1.0 - target) * log_neg
return -(loss_values.mean())
@staticmethod
def backward(ctx, grad_output):
target, pred_normalized, grad_correction, alpha = ctx.saved_tensors
if alpha == 0.0:
return (pred_normalized - target).div(target.nelement()).mul_(grad_output)
grad = (pred_normalized - target).div(target.nelement())
grad *= 2 * alpha - 1
grad *= grad_correction
return grad.mul_(grad_output)
@register_function("cross_entropy")
class AutogradCrossEntropy(AutogradFunction):
@staticmethod
def forward(ctx, pred, target, skip_forward=False):
# NOTE: target is assumed to be one-hot vector.
assert pred.size() == target.size()
# Ignore batch dimension
dim = 1 if pred.dim() > 1 else 0
softmax = pred.softmax(dim)
ctx.save_multiple_for_backward([softmax, target])
ctx.mark_non_differentiable(target)
if skip_forward:
return softmax.new(0)
# Compute full forward pass
loss_values = softmax.log(input_in_01=True).mul_(target).neg_()
return loss_values.sum().div_(target.size(0))
@staticmethod
def backward(ctx, grad_output):
softmax, target = ctx.saved_tensors
loss_grad = softmax.sub(target)
return loss_grad.div_(target.size(0)).mul_(grad_output)
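# Gradient-check sketch (illustrative only, not part of this module): the
# autograd functions above can be validated against PyTorch by comparing
# decrypted CrypTen gradients with plaintext ones, e.g.:
#
#   import crypten, torch
#   crypten.init()
#   x = torch.randn(3, 4, requires_grad=True)
#   x_enc = crypten.cryptensor(x.detach())
#   x_enc.requires_grad = True
#   x.softmax(1).sum().backward()
#   x_enc.softmax(1).sum().backward()
#   assert torch.allclose(x_enc.grad.get_plain_text(), x.grad, atol=1e-2)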
| CrypTen-main | crypten/gradients.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.4.0"
import builtins
import copy
import logging
import os
import warnings
import crypten.common # noqa: F401
import crypten.communicator as comm
import crypten.config # noqa: F401
import crypten.mpc # noqa: F401
import crypten.nn # noqa: F401
import crypten.optim # noqa: F401
import torch
# other imports:
from . import debug
from .config import cfg
from .cryptensor import CrypTensor
# functions controlling autograd:
no_grad = CrypTensor.no_grad
enable_grad = CrypTensor.enable_grad
set_grad_enabled = CrypTensor.set_grad_enabled
# Setup RNG generators
generators = {
"prev": {},
"next": {},
"local": {},
"global": {},
}
def init(config_file=None, party_name=None, device=None):
"""
Initialize CrypTen. It will initialize the communicator, set up the party
name for file save / load, and set up seeds for random number generation.
By default the function will initialize a set of RNG generators on CPU.
If torch.cuda.is_available() returns True, it will initialize an additional
set of RNG generators on GPU. Users can specify the GPU device the generators are
initialized with device.
Args:
config_file (str): configuration file to load via cfg.load_config, default is None
party_name (str): party_name for file save and load, default is None
device (int, str, torch.device): Specify device for RNG generators on
GPU. Must be a GPU device.
"""
# Load config file
if config_file is not None:
cfg.load_config(config_file)
# Return and raise warning if initialized
if comm.is_initialized():
warnings.warn("CrypTen is already initialized.", RuntimeWarning)
return
# Initialize communicator
# os.environ["GLOO_SOCKET_IFNAME"] = "en0"
comm._init(use_threads=False, init_ttp=crypten.mpc.ttp_required())
# Setup party name for file save / load
if party_name is not None:
comm.get().set_name(party_name)
# Setup seeds for Random Number Generation
if comm.get().get_rank() < comm.get().get_world_size():
_setup_prng()
if crypten.mpc.ttp_required():
crypten.mpc.provider.ttp_provider.TTPClient._init()
def init_thread(rank, world_size):
comm._init(use_threads=True, rank=rank, world_size=world_size)
_setup_prng()
def uninit():
return comm.uninit()
def is_initialized():
return comm.is_initialized()
def print_communication_stats():
comm.get().print_communication_stats()
def reset_communication_stats():
comm.get().reset_communication_stats()
# set tensor type to be used for CrypTensors:
def register_cryptensor(name):
"""Registers a custom :class:`CrypTensor` subclass.
This decorator allows the user to instantiate a subclass of `CrypTensor`
from Python code, even if the class itself is not part of CrypTen. To use
it, apply this decorator to a `CrypTensor` subclass, like this:
.. code-block:: python
@CrypTensor.register_cryptensor('my_cryptensor')
class MyCrypTensor(CrypTensor):
...
"""
return CrypTensor.register_cryptensor(name)
def set_default_cryptensor_type(cryptensor_type):
"""Sets the default type used to create `CrypTensor`s."""
if cryptensor_type not in CrypTensor.__CRYPTENSOR_TYPES__.keys():
raise ValueError("CrypTensor type %s does not exist." % cryptensor_type)
CrypTensor.__DEFAULT_CRYPTENSOR_TYPE__ = cryptensor_type
def get_default_cryptensor_type():
"""Gets the default type used to create `CrypTensor`s."""
return CrypTensor.__DEFAULT_CRYPTENSOR_TYPE__
def get_cryptensor_type(tensor):
"""Gets the type name of the specified `tensor` `CrypTensor`."""
if not isinstance(tensor, CrypTensor):
raise ValueError(
"Specified tensor is not a CrypTensor: {}".format(type(tensor))
)
for name, cls in CrypTensor.__CRYPTENSOR_TYPES__.items():
if isinstance(tensor, cls):
return name
raise ValueError("Unregistered CrypTensor type: {}".format(type(tensor)))
def cryptensor(*args, cryptensor_type=None, **kwargs):
"""
Factory function to return encrypted tensor of given `cryptensor_type`. If no
`cryptensor_type` is specified, the default type is used.
"""
# determine CrypTensor type to use:
if cryptensor_type is None:
cryptensor_type = get_default_cryptensor_type()
if cryptensor_type not in CrypTensor.__CRYPTENSOR_TYPES__:
raise ValueError("CrypTensor type %s does not exist." % cryptensor_type)
# create CrypTensor:
return CrypTensor.__CRYPTENSOR_TYPES__[cryptensor_type](*args, **kwargs)
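# Usage sketch (illustrative only; assumes the default "mpc" backend):
#
#   import torch, crypten
#   crypten.init()
#   x_enc = crypten.cryptensor(torch.tensor([1.0, 2.0, 3.0]))
#   y_enc = x_enc + 1
#   y_enc.get_plain_text()  # tensor([2., 3., 4.])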
def is_encrypted_tensor(obj):
"""
Returns True if obj is an encrypted tensor.
"""
return isinstance(obj, CrypTensor)
def _setup_prng():
"""
Generate shared random seeds to generate pseudo-random sharings of
zero. For each device, we generate four random seeds:
"prev" - shared seed with the previous party
"next" - shared seed with the next party
"local" - seed known only to the local party (separate from torch's default seed to prevent interference from torch.manual_seed)
"global"- seed shared by all parties
The "prev" and "next" random seeds are shared such that each process shares
one seed with the previous rank process and one with the next rank.
This allows for the generation of `n` random values, each known to
exactly two of the `n` parties.
For arithmetic sharing, one of these parties will add the number
while the other subtracts it, allowing for the generation of a
pseudo-random sharing of zero. (This can be done for binary
sharing using bitwise-xor rather than addition / subtraction)
"""
global generators
# Initialize RNG Generators
for key in generators.keys():
generators[key][torch.device("cpu")] = torch.Generator(
device=torch.device("cpu")
)
if torch.cuda.is_available():
cuda_device_names = ["cuda"]
for i in range(torch.cuda.device_count()):
cuda_device_names.append(f"cuda:{i}")
cuda_devices = [torch.device(name) for name in cuda_device_names]
for device in cuda_devices:
for key in generators.keys():
generators[key][device] = torch.Generator(device=device)
# Generate random seeds for Generators
# NOTE: Chosen seed can be any number, but we choose as a random 64-bit
# integer here so other parties cannot guess its value. We use os.urandom(8)
# here to generate seeds so that forked processes do not generate the same seed.
# Generate next / prev seeds.
seed = int.from_bytes(os.urandom(8), "big") - 2**63
next_seed = torch.tensor(seed)
# Create local seed - Each party has a separate local generator
local_seed = int.from_bytes(os.urandom(8), "big") - 2**63
# Create global generator - All parties share one global generator for sync'd rng
global_seed = int.from_bytes(os.urandom(8), "big") - 2**63
global_seed = torch.tensor(global_seed)
_sync_seeds(next_seed, local_seed, global_seed)
def _sync_seeds(next_seed, local_seed, global_seed):
"""
Sends a random seed to the next party, receives a seed from the previous
party, and broadcasts the global seed. After seeds are distributed, one
seed is created for each party to coordinate seeds across CUDA devices.
"""
global generators
# Populated by receiving the previous party's next_seed (irecv)
prev_seed = torch.tensor([0], dtype=torch.long)
# Send random seed to next party, receive random seed from prev party
world_size = comm.get().get_world_size()
rank = comm.get().get_rank()
if world_size >= 2: # Guard against segfaults when world_size == 1.
next_rank = (rank + 1) % world_size
prev_rank = (next_rank - 2) % world_size
req0 = comm.get().isend(next_seed, next_rank)
req1 = comm.get().irecv(prev_seed, src=prev_rank)
req0.wait()
req1.wait()
else:
prev_seed = next_seed
prev_seed = prev_seed.item()
next_seed = next_seed.item()
# Broadcast global seed - All parties share one global generator for sync'd rng
global_seed = comm.get().broadcast(global_seed, 0).item()
# Create one of each seed per party
# Note: This is configured to coordinate seeds across cuda devices
# so that we can run one party per GPU. If we want to support configurations
# where each party runs on multiple gpu's across machines, we will
# need to modify this.
for device in generators["prev"].keys():
generators["prev"][device].manual_seed(prev_seed)
generators["next"][device].manual_seed(next_seed)
generators["local"][device].manual_seed(local_seed)
generators["global"][device].manual_seed(global_seed)
def manual_seed(next_seed, local_seed, global_seed):
"""
Allow users to set their random seed for testing purposes. For each device, we set three random seeds.
Note that prev_seed is populated using next_seed
Args:
next_seed - shared seed with the next party
local_seed - seed known only to the local party (separate from torch's default seed to prevent interference from torch.manual_seed)
global_seed - seed shared by all parties
"""
if cfg.debug.debug_mode:
next_seed = torch.tensor(next_seed)
global_seed = torch.tensor(global_seed)
_sync_seeds(next_seed, local_seed, global_seed)
else:
raise ValueError("User-supplied random seeds are only allowed in debug mode")
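# Usage sketch (illustrative only; requires cfg.debug.debug_mode = True):
#
#   from crypten.config import cfg
#   cfg.debug.debug_mode = True
#   crypten.manual_seed(next_seed=1, local_seed=2, global_seed=3)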
def load_from_party(
f=None,
preloaded=None,
encrypted=False,
model_class=None,
src=0,
load_closure=torch.load,
**kwargs,
):
"""
Loads an object saved with `torch.save()` or `crypten.save_from_party()`.
Args:
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
preloaded: Use the preloaded value instead of loading a tensor/model from f.
encrypted: Determines whether crypten should load an encrypted tensor
or a plaintext torch tensor.
model_class: Takes a model architecture class that is being communicated. This
class will be considered safe for deserialization so non-source
parties will be able to receive a model of this type from the
source party.
src: Determines the source of the tensor. If `src` is None, each
party will attempt to read in the specified file. If `src` is
specified, the source party will read the tensor from `f` and it
will broadcast it to the other parties
load_closure: Custom load function that matches the interface of `torch.load`,
to be used when the tensor is saved with a custom save function in
`crypten.save_from_party`. Additional kwargs are passed on to the closure.
"""
if encrypted:
raise NotImplementedError("Loading encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Load failed: src argument must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Load failed: src must be in [0, world_size)"
# source party
if comm.get().get_rank() == src:
assert (f is None and (preloaded is not None)) or (
(f is not None) and preloaded is None
), "Exactly one of f and preloaded must not be None"
if f is None:
result = preloaded
if preloaded is None:
result = load_closure(f, **kwargs)
# Zero out the tensors / modules to hide loaded data from broadcast
if torch.is_tensor(result):
result_zeros = result.new_zeros(result.size())
elif isinstance(result, torch.nn.Module):
result_zeros = copy.deepcopy(result)
for p in result_zeros.parameters():
p.data.fill_(0)
else:
result = comm.get().broadcast_obj(-1, src)
raise TypeError("Unrecognized load type %s" % type(result))
comm.get().broadcast_obj(result_zeros, src)
# Non-source party
else:
if model_class is not None:
crypten.common.serial.register_safe_class(model_class)
result = comm.get().broadcast_obj(None, src)
if isinstance(result, int) and result == -1:
raise TypeError("Unrecognized load type from src party")
if torch.is_tensor(result):
result = crypten.cryptensor(result, src=src)
# TODO: Encrypt modules before returning them
# if isinstance(result, torch.nn.Module):
# result = crypten.nn.from_pytorch(result, src=src)
result.src = src
return result
def load(f, load_closure=torch.load, **kwargs):
"""
Loads shares from an encrypted object saved with `crypten.save()`
Args:
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
load_closure: Custom load function that matches the interface of
`torch.load`, to be used when the tensor is saved with a custom
save function in `crypten.save`. Additional kwargs are passed on
to the closure.
"""
if "src" in kwargs:
raise SyntaxError(
"crypten.load() should not be used with `src` argument. Use load_from_party() instead."
)
# TODO: Add support for loading from correct device (kwarg: map_location=device)
if load_closure == torch.load:
obj = load_closure(f)
else:
obj = load_closure(f, **kwargs)
return obj
def save_from_party(obj, f, src=0, save_closure=torch.save, **kwargs):
"""
Saves a CrypTensor or PyTorch tensor to a file.
Args:
obj: The CrypTensor or PyTorch tensor to be saved
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
src: The source party that writes data to the specified file.
save_closure: Custom save function that matches the interface of `torch.save`,
to be used when the tensor is saved with a custom load function in
`crypten.load_from_party`. Additional kwargs are passed on to the closure.
"""
if is_encrypted_tensor(obj):
raise NotImplementedError("Saving encrypted tensors is not yet supported")
else:
assert isinstance(src, int), "Save failed: src must be an integer"
assert (
src >= 0 and src < comm.get().get_world_size()
), "Save failed: src must be an integer in [0, world_size)"
if comm.get().get_rank() == src:
save_closure(obj, f, **kwargs)
# Implement barrier to avoid race conditions that require file to exist
comm.get().barrier()
def save(obj, f, save_closure=torch.save, **kwargs):
"""
Saves the shares of CrypTensor or an encrypted model to a file.
Args:
obj: The CrypTensor or PyTorch tensor to be saved
f: a file-like object (has to implement `read()`, `readline()`,
`tell()`, and `seek()`), or a string containing a file name
save_closure: Custom save function that matches the interface of `torch.save`,
to be used when the tensor is saved with a custom load function in
`crypten.load`. Additional kwargs are passed on to the closure.
"""
# TODO: Add support for saving to correct device (kwarg: map_location=device)
save_closure(obj, f, **kwargs)
comm.get().barrier()
def where(condition, input, other):
"""
Return a tensor of elements selected from either `input` or `other`, depending
on `condition`.
"""
if is_encrypted_tensor(condition):
return condition * input + (1 - condition) * other
elif torch.is_tensor(condition):
condition = condition.float()
return input * condition + other * (1 - condition)
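# Example (illustrative only): `where` can express an oblivious selection,
# e.g. an encrypted ReLU in which the selection pattern stays private:
#
#   relu_enc = crypten.where(x_enc > 0, x_enc, 0.0)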
def cat(tensors, dim=0):
"""
Concatenates the specified CrypTen `tensors` along dimension `dim`.
"""
assert isinstance(tensors, list), "input to cat must be a list"
if all(torch.is_tensor(t) for t in tensors):
return torch.cat(tensors, dim=dim)
assert all(isinstance(t, CrypTensor) for t in tensors), "inputs must be CrypTensors"
tensor_types = [get_cryptensor_type(t) for t in tensors]
assert all(
ttype == tensor_types[0] for ttype in tensor_types
), "cannot concatenate CrypTensors with different underlying types"
if len(tensors) == 1:
return tensors[0]
return type(tensors[0]).cat(tensors, dim=dim)
def stack(tensors, dim=0):
"""
Stacks the specified CrypTen `tensors` along dimension `dim`. In contrast to
`crypten.cat`, this adds a dimension to the result tensor.
"""
assert isinstance(tensors, list), "input to stack must be a list"
assert all(isinstance(t, CrypTensor) for t in tensors), "inputs must be CrypTensors"
tensor_types = [get_cryptensor_type(t) for t in tensors]
assert all(
ttype == tensor_types[0] for ttype in tensor_types
), "cannot stack CrypTensors with different underlying types"
if len(tensors) == 1:
return tensors[0].unsqueeze(dim)
return type(tensors[0]).stack(tensors, dim=dim)
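# Shape example (illustrative only): for two CrypTensors a_enc and b_enc of
# shape (2, 3),
#
#   crypten.cat([a_enc, b_enc], dim=0)    # -> shape (4, 3)
#   crypten.stack([a_enc, b_enc], dim=0)  # -> shape (2, 2, 3)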
def rand(*sizes, device=None, cryptensor_type=None):
"""
Returns a tensor with elements uniformly sampled in [0, 1).
"""
with no_grad():
if cryptensor_type is None:
cryptensor_type = get_default_cryptensor_type()
return CrypTensor.__CRYPTENSOR_TYPES__[cryptensor_type].rand(
*sizes, device=device
)
def randn(*sizes, cryptensor_type=None):
"""
Returns a tensor with normally distributed elements.
"""
with no_grad():
if cryptensor_type is None:
cryptensor_type = get_default_cryptensor_type()
return CrypTensor.__CRYPTENSOR_TYPES__[cryptensor_type].randn(*sizes)
def bernoulli(tensor, cryptensor_type=None):
"""
Returns a tensor with elements in {0, 1}. The i-th element of the
output will be 1 with probability equal to the i-th value of the
input tensor.
"""
return rand(tensor.size(), cryptensor_type=cryptensor_type) < tensor
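# Example (illustrative only): sampling encrypted Bernoulli variables with
# per-element success probabilities:
#
#   probs = torch.tensor([0.1, 0.5, 0.9])
#   samples_enc = crypten.bernoulli(probs)  # encrypted values in {0, 1}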
def __multiprocess_print_helper(print_func, *args, in_order=False, dst=0, **kwargs):
"""
Helper for print / log functions to reduce copy-pasted code
"""
# in_order : True
if in_order:
for i in range(comm.get().get_world_size()):
if comm.get().get_rank() == i:
print_func(*args, **kwargs)
comm.get().barrier()
return
# in_order : False
if isinstance(dst, int):
dst = [dst]
assert isinstance(
dst, (list, tuple)
), "print destination must be a list or tuple of party ranks"
if comm.get().get_rank() in dst:
print_func(*args, **kwargs)
def print(*args, in_order=False, dst=0, **kwargs):
"""
Prints with formatting options that account for multiprocessing. This
function prints with the output of:
print(*args, **kwargs)
Args:
in_order: A boolean that determines whether to print from one-party only
or all parties, in order. If True, this function will output from
party 0 first, then print in order through party N. If False, this
function will only output from a single party, given by `dst`.
dst: The destination party rank(s) to output from if `in_order` is False.
This can be an integer or list of integers denoting a single rank or
multiple ranks to print from.
"""
__multiprocess_print_helper(
builtins.print, *args, in_order=in_order, dst=dst, **kwargs
)
def log(*args, in_order=False, dst=0, **kwargs):
"""
Logs with formatting options that account for multiprocessing. This
function logs with the output of:
logging.log(*args, **kwargs)
Args:
in_order: A boolean that determines whether to log from one-party only
or all parties, in order. If True, this function will output from
party 0 first, then log in order through party N. If False, this
function will only output from a single party, given by `dst`.
dst: The destination party rank(s) to output from if `in_order` is False.
This can be an integer or list of integers denoting a single rank or
multiple ranks to log from.
"""
__multiprocess_print_helper(
logging.info, *args, in_order=in_order, dst=dst, **kwargs
)
# TupleProvider tracing functions
def trace(tracing=True):
crypten.mpc.get_default_provider().trace(tracing=tracing)
def trace_once():
crypten.mpc.get_default_provider().trace_once()
def fill_cache():
crypten.mpc.get_default_provider().fill_cache()
# expose classes and functions in package:
__all__ = [
"CrypTensor",
"no_grad",
"enable_grad",
"set_grad_enabled",
"debug",
"fill_cache",
"generators",
"init",
"init_thread",
"log",
"mpc",
"nn",
"print",
"trace",
"trace_once",
"uninit",
]
| CrypTen-main | crypten/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import numpy as np
import torch
from .common.tensor_types import is_float_tensor, is_int_tensor
from .config import cfg
from .cryptensor import CrypTensor
def nearest_integer_division(tensor, integer):
"""Performs division of integer tensor, rounding to nearest integer."""
assert integer > 0, "only supports positive divisors"
assert is_int_tensor(tensor), "unsupported type: %s" % type(tensor)
lez = (tensor < 0).long()
pos_remainder = (1 - lez) * tensor % integer
neg_remainder = lez * ((integer - tensor) % integer)
remainder = pos_remainder + neg_remainder
quotient = tensor.div(integer, rounding_mode="trunc")
correction = (2 * remainder > integer).long()
return quotient + tensor.sign() * correction
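# Example (illustrative only): rounds to the nearest integer instead of
# truncating toward zero:
#
#   t = torch.tensor([7, -7, 5])
#   nearest_integer_division(t, 4)  # -> tensor([ 2, -2,  1])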
class FixedPointEncoder:
"""Encoder that encodes long or float tensors into scaled integer tensors."""
def __init__(self, precision_bits=None):
if precision_bits is None:
precision_bits = cfg.encoder.precision_bits
self._precision_bits = precision_bits
self._scale = int(2**precision_bits)
def encode(self, x, device=None):
"""Helper function to wrap data if needed"""
if isinstance(x, CrypTensor):
return x
elif isinstance(x, int) or isinstance(x, float):
# Squeeze in order to get a 0-dim tensor with value `x`
return torch.tensor(
[self._scale * x], dtype=torch.long, device=device
).squeeze()
elif isinstance(x, list):
return (
torch.tensor(x, dtype=torch.float, device=device)
.mul_(self._scale)
.long()
)
elif is_float_tensor(x):
return (self._scale * x).long()
# For integer types cast to long prior to scaling to avoid overflow.
elif is_int_tensor(x):
return self._scale * x.long()
elif isinstance(x, np.ndarray):
return self._scale * torch.from_numpy(x).long().to(device)
elif torch.is_tensor(x):
raise TypeError("Cannot encode input with dtype %s" % x.dtype)
else:
raise TypeError("Unknown tensor type: %s." % type(x))
def decode(self, tensor):
"""Helper function that decodes from scaled tensor"""
if tensor is None:
return None
assert is_int_tensor(tensor), "input must be a LongTensor"
if self._scale > 1:
correction = (tensor < 0).long()
dividend = tensor.div(self._scale - correction, rounding_mode="floor")
remainder = tensor % self._scale
remainder += (remainder == 0).long() * self._scale * correction
tensor = dividend.float() + remainder.float() / self._scale
else:
tensor = nearest_integer_division(tensor, self._scale)
return tensor.data
def __setattr__(self, name, value):
if name == "_precision_bits":
dict.__setattr__(self, "_scale", int(2**value))
elif name == "_scale":
dict.__setattr__(self, "_precision_bits", int(math.log2(value)))
dict.__setattr__(self, name, value)
@property
def scale(self):
return self._scale
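# Roundtrip sketch (illustrative only): with the default 16 fractional bits,
# floats are scaled by 2**16 on encode and recovered on decode up to
# fixed-point precision:
#
#   encoder = FixedPointEncoder(precision_bits=16)
#   encoded = encoder.encode(torch.tensor([1.5, -2.25]))  # tensor([  98304, -147456])
#   encoder.decode(encoded)                               # tensor([ 1.5000, -2.2500])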
| CrypTen-main | crypten/encoder.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from contextlib import contextmanager
import torch
from .debug import register_validation
from .gradients import AutogradContext, BaseAutogradContext, get_grad_fn
# list of all static functions that CrypTensors support:
STATIC_FUNCTIONS = ["cat", "stack"]
STATIC_FUNCTION_MAPPING = {getattr(torch, name): name for name in STATIC_FUNCTIONS}
def _find_all_cryptensors(inputs):
"""
Recursively find all CrypTensors in an input list, tuple, set, or dict.
"""
cryptensors = []
for input in inputs:
if isinstance(input, CrypTensor):
cryptensors.append(input)
elif isinstance(input, (list, tuple, set)):
cryptensors.extend(_find_all_cryptensors(input))
elif isinstance(input, dict):
for value in input.values():
cryptensors.extend(_find_all_cryptensors(value))
return cryptensors
class CrypTensorMetaclass(type):
"""
Metaclass for CrypTensor that ensures autograd is invoked for calls to
static methods such as `crypten.cat` and `crypten.stack`.
"""
def __getattribute__(cls, name):
if name in STATIC_FUNCTIONS:
dummy = cls([]) # this creates an empty CrypTensor
dummy.__IS_DUMMY__ = True
return cls.__getattribute__(dummy, name)
return type.__getattribute__(cls, name)
class CrypTensor(object, metaclass=CrypTensorMetaclass):
"""
Abstract implementation of encrypted tensor type. Every subclass of `CrypTensor`
must implement the methods defined here. The actual tensor data should live in
an instance attribute called `_tensor`. When implemented, the `CrypTensor`
provides a full autograd implementation to the user.
"""
__CRYPTENSOR_TYPES__ = {}
__DEFAULT_CRYPTENSOR_TYPE__ = "mpc"
@staticmethod
def register_cryptensor(name):
"""Registers a custom :class:`CrypTensor` subclass.
This decorator allows the user to instantiate a subclass of `CrypTensor`
from Python code, even if the class itself is not part of CrypTen. To use
it, apply this decorator to a `CrypTensor` subclass, like this:
.. code-block:: python
@CrypTensor.register_cryptensor('my_cryptensor')
class MyCrypTensor(CrypTensor):
...
"""
def register_cryptensor_cls(cls):
if name in CrypTensor.__CRYPTENSOR_TYPES__:
raise ValueError(
"Cannot register duplicate CrypTensor type: \
tensor type {} already exists.".format(
name
)
)
if not issubclass(cls, CrypTensor):
raise ValueError(
"Registered tensor ({}: {}) must extend \
CrypTensor".format(
name, cls.__name__
)
)
CrypTensor.__CRYPTENSOR_TYPES__[name] = cls
return cls
return register_cryptensor_cls
# attributes that should be dispatched to underlying tensor:
PROTECTED_ATTRIBUTES = [
"__dict__",
"__class__",
"requires_grad",
"grad",
"grad_fn",
"grad_expected",
"grad_received",
"children",
"ctx",
"backward",
"detach",
"detach_",
"_reset_gradients",
]
# functions that should be implemented by CrypTensor subclass:
REQUIRED_FUNCTIONS = [
"_ltz",
"add",
"avg_pool1d",
"avg_pool2d",
"clone",
"conv1d",
"conv2d",
"copy_",
"div_",
"matmul",
"neg",
]
# dict for storing functional overrides from subclasses:
FUNCTION_OVERRIDES = {}
# mapping of Python built-in methods to CrypTensor methods:
PYTHON_BUILTIN = {
"__abs__": "abs",
"__neg__": "neg",
"__pow__": "pow",
"__add__": "add",
"__radd__": "add",
"__sub__": "sub",
"__rsub__": "__rsub__",
"__mul__": "mul",
"__rmul__": "mul",
"__div__": "div",
"__truediv__": "div",
"__rtruediv__": "__rtruediv__",
"__matmul__": "matmul",
"__imatmul__": "matmul", # not in-place, matching PyTorch
}
# TODO: Automatically register all these functions in CrypTensor?
AUTOGRAD_ENABLED = True
@staticmethod
@contextmanager
def no_grad():
"""
Context manager that disables CrypTen's autograd.
"""
prior_value = CrypTensor.AUTOGRAD_ENABLED
CrypTensor.set_grad_enabled(False)
try:
yield
finally:
CrypTensor.set_grad_enabled(prior_value)
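# Usage sketch (illustrative only):
#
#   with CrypTensor.no_grad():
#       y_enc = x_enc.mul(2)  # not recorded on the autograd tape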
@staticmethod
@contextmanager
def enable_grad():
"""
Context manager that enables CrypTen's autograd.
"""
prior_value = CrypTensor.AUTOGRAD_ENABLED
CrypTensor.set_grad_enabled(True)
try:
yield
finally:
CrypTensor.set_grad_enabled(prior_value)
@staticmethod
def set_grad_enabled(mode):
"""
Enables (`mode = True`) or disables (`mode = False`) CrypTen's autograd.
"""
CrypTensor.AUTOGRAD_ENABLED = mode
def __init__(self, requires_grad=False):
"""
Creates a new `CrypTensor` object. The `requires_grad` flag determines
if computations on the created tensor are logged on the autograd tape.
NOTE: This constructor cannot be called directly. It can only be called
via `super()` from classes that implement the `CrypTensor` abstraction.
"""
self.requires_grad = requires_grad # whether tensors needs gradient
self._reset_gradients()
def __new__(cls, *args, **kwargs):
if cls is CrypTensor:
raise TypeError("CrypTensor class cannot be instantiated directly.")
return object.__new__(cls)
def _reset_gradients(self):
"""Resets gradient information in tensor."""
self.grad = None # gradient itself
self.grad_fn = None # functions to call for gradient
self.grad_expected = 0 # number of gradients expected from parents
self.grad_received = 0 # number of gradients received from parents
self.children = [] # children of node in graph
self.ctx = AutogradContext() # contexts for AutogradFunctions
def _identify_required_grads(self):
"""Flag all nodes for which gradient needs to be evaluated."""
self.grad_expected += 1
if self.grad_expected == 1: # only backpropagate once from each node
for child in self.children:
child._identify_required_grads()
def backward(self, grad_input=None, top_node=True):
"""
Backpropagates gradient through the computation graph. The function
only maintains the gradients in leaf nodes of the graph.
"""
if self.requires_grad:
with CrypTensor.no_grad(): # disable autograd for backward pass
# in initial backward call, identify all required nodes:
if top_node:
self._identify_required_grads()
# if undefined, set gradient input to one:
if grad_input is None:
if self.nelement() == 1:
grad_input = self.new(torch.ones_like(self.data))
else:
raise RuntimeError(
"grad can be implicitly created only for scalar outputs"
)
# process gradient input:
self.grad_received += 1
if self.grad is None:
self.grad = grad_input # store gradient...
else:
self.grad.add_(grad_input) # ... or accumulate gradient
# if we are in a leaf or if not all parents have backpropagated:
if len(self.children) == 0 or self.grad_received < self.grad_expected:
return # ... do not proceed.
# check that we can actually backpropagate:
if self.grad_fn is None:
raise ValueError("Cannot call backward() before forward().")
# perform backpropagation:
grad = self.grad_fn.backward(self.ctx, self.grad)
differentiable_children = [
x for x in self.children if self.ctx.is_differentiable(x)
]
self.ctx.reset() # free up memory used for context
# call backward function on children:
if not isinstance(grad, (list, tuple)):
grad = (grad,)
assert len(differentiable_children) <= len(
grad
), "number of gradients does not match number of children"
for idx, child in enumerate(differentiable_children):
child.backward(grad_input=grad[idx], top_node=False)
# clean up gradients except in leaf nodes:
if len(differentiable_children) > 0:
self.grad = None
# remove node from graph:
self.children = []
self.grad_expected = 0
self.grad_received = 0
def detach_(self):
"""Detaches tensor from the autograd graph (in-place), making it a leaf."""
self.requires_grad = False
return self
def detach(self):
"""Detaches tensor from the autograd graph, making it a leaf."""
clone = self.clone()
clone.requires_grad = False
return clone
def __torch_function__(self, func, types, args=(), kwargs=None):
"""Allows torch static functions to work on CrypTensors."""
if kwargs is None:
kwargs = {}
if func in STATIC_FUNCTION_MAPPING:
import crypten
# dispatch torch.{cat,stack} call on CrypTensor to CrypTen:
return getattr(crypten, STATIC_FUNCTION_MAPPING[func])(*args, **kwargs)
else:
raise NotImplementedError(
f"CrypTen does not support torch function {func}."
)
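    # Example of the dispatch above: calling a mapped torch static function
    # on CrypTensors routes to the CrypTen equivalent (illustrative sketch):
    #
    #     a = crypten.cryptensor(torch.ones(2, 3))
    #     b = crypten.cryptensor(torch.zeros(2, 3))
    #     c = torch.cat([a, b])  # dispatched to crypten.cat via the mapping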
def _get_forward_function_no_ctx(self, grad_fn):
# determine if self is a dummy object (the case for staticmethods):
is_dummy = getattr(self, "__IS_DUMMY__", False)
def autograd_forward_no_ctx(*args, **kwargs):
if not is_dummy:
args = [self] + list(args)
# Create dummy AutogradContext that stores no data
ctx = BaseAutogradContext()
with CrypTensor.no_grad():
result = grad_fn.forward(ctx, *args, **kwargs)
return result
return autograd_forward_no_ctx
def _get_autograd_forward_function(self, name, grad_fn, in_place):
# determine if self is a dummy object (the case for staticmethods):
is_dummy = getattr(self, "__IS_DUMMY__", False)
def autograd_forward(*args, **kwargs):
"""Forward function that stores data for autograd in result."""
with CrypTensor.no_grad():
# only CrypTensors can be children:
tensor_args = _find_all_cryptensors(args)
children = tensor_args if is_dummy else [self, *tensor_args]
# identify whether result requires gradient:
requires_grad = any(child.requires_grad for child in children)
if not requires_grad:
return self.__getattribute__(name)(*args, **kwargs)
# in-place functions are not supported when requires_grad:
if in_place:
raise RuntimeError("Cannot use in-place functions with autograd.")
# prepare inputs and context for forward call:
ctx = AutogradContext()
if not is_dummy:
args = [self] + list(args)
# apply correct autograd function:
result = grad_fn.forward(ctx, *args, **kwargs)
# output may be tensor or tuple
if not isinstance(result, tuple):
result = (result,)
remove_tuple = True
else:
remove_tuple = False
# maintain references to children and context in result:
for res in result:
res.requires_grad = ctx.is_differentiable(res)
if res.requires_grad:
res.children = children
res.grad_fn = grad_fn
res.ctx = ctx
# return result:
if remove_tuple:
result = result[0]
return result
return autograd_forward
@register_validation
def __getattribute__(self, name):
"""
Makes sure that any function call on the tensor gets recorded in order
to facilitate gradient computation using autograd.
For clarity, this function attempts to fetch functions with the following priority:
1. If name is in PROTECTED_ATTRIBUTES, fetch from the CrypTensor object.
2. If requires_grad:
a. Fetch from grad_fn.forward; if none exists
b. raise NotImplementedError telling user to use `detach()`
3. If no_grad or not requires_grad:
a. Try to fetch function from CrypTensor object
- If this fails and function is REQUIRED, raise error
b. Fetch from grad_fn.forward, ignoring AutogradContext
"""
# 1. If name is in PROTECTED_ATTRIBUTES, fetch from the CrypTensor object.
if name in CrypTensor.PROTECTED_ATTRIBUTES:
return object.__getattribute__(self, name)
# Special case for copy_ inplace.
if name == "copy_":
return object.__getattribute__(self, "copy_")
# replace Python built-in methods with corresponding method name:
name = CrypTensor.PYTHON_BUILTIN.get(name, name)
# determine inplace and modify name accordingly
inplace = name.endswith("_") and not name.endswith("__")
if inplace:
if CrypTensor.AUTOGRAD_ENABLED and self.requires_grad:
raise RuntimeError("Autograd is not supported for in-place functions.")
# Note: native in-place support is now deprecated
# Instead, CrypTensors now compute out-of-place and
# copy_ in-place
name = name[:-1]
func = self.__getattribute__(name)
def oop_and_copy(*args, **kwargs):
result = func(*args, **kwargs)
self.copy_(result)
return self
return oop_and_copy
# identify the AutogradFunction corresponding to the function name:
grad_fn = get_grad_fn(name)
# dispatch calls to size(), etc. without going through AutogradFunction:
if grad_fn is None:
return object.__getattribute__(self, name)
# 2. If requires_grad:
# a. Fetch from grad_fn.forward; if none exists
# b. raise NotImplementedError telling user to use `detach()`
if CrypTensor.AUTOGRAD_ENABLED:
if not hasattr(grad_fn, "forward"):
raise NotImplementedError(
f"Autograd forward not implemented for {name}. Please use detach()."
)
return self._get_autograd_forward_function(name, grad_fn, inplace)
# TODO: Add validation_mode / validate_correctness
# 3. If no_grad or not requires_grad:
# a. Try to fetch function from CrypTensor object
# - If this fails and function is REQUIRED, raise error
# b. Fetch from grad_fn.forward, ignoring AutogradContext
try:
return object.__getattribute__(self, name)
except AttributeError as e:
if name in CrypTensor.REQUIRED_FUNCTIONS:
raise e
assert hasattr(grad_fn, "forward")
return self._get_forward_function_no_ctx(grad_fn)
# Common functions:
@classmethod
def new(cls, *args, **kwargs):
return cls(*args, **kwargs)
def __abs__(self):
return self.abs()
def __add__(self, tensor):
"""Adds tensor to this tensor."""
return self.add(tensor)
__radd__ = __add__
def __iadd__(self, tensor):
"""Adds tensor to this tensor (in-place)."""
return self.add_(tensor)
def sub(self, tensor):
"""Subtracts a :attr:`tensor` from :attr:`self` tensor.
The shape of :attr:`tensor` must be
`broadcastable`_ with the shape of :attr:`self`.
.. _broadcastable:
https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics
"""
return self.add(-tensor)
def __sub__(self, tensor):
"""Subtracts tensor from this tensor."""
return self.sub(tensor)
def __rsub__(self, tensor):
"""Subtracts self from tensor."""
return -self + tensor
def __isub__(self, tensor):
"""Subtracts tensor from this tensor (in-place)."""
return self.sub_(tensor)
def __mul__(self, tensor):
"""Element-wise multiply with a tensor."""
return self.mul(tensor)
__rmul__ = __mul__
def __imul__(self, tensor):
"""Element-wise multiply with a tensor."""
return self.mul_(tensor)
def __div__(self, tensor):
"""Element-wise divide by a tensor."""
return self.div(tensor)
def __truediv__(self, scalar):
"""Element-wise divide by a tensor."""
return self.div(scalar)
def __itruediv__(self, scalar):
"""Element-wise divide by a tensor."""
return self.div_(scalar)
def __neg__(self):
return self.neg()
def __matmul__(self, tensor):
"""Perform matrix multiplication using some tensor"""
return self.matmul(tensor)
def __imatmul__(self, tensor):
"""Perform matrix multiplication using some tensor"""
# Note: Matching PyTorch convention, which is not in-place here.
return self.matmul(tensor)
def square(self):
"""
Computes the square of :attr:`self`
"""
return self * self
def set(self, enc_tensor):
"""Sets self encrypted to enc_tensor in place"""
if not isinstance(enc_tensor, CrypTensor):
enc_tensor = self.new(enc_tensor)
return self.copy_(enc_tensor)
@property
def shape(self):
return self.size()
@property
def device(self):
return self._tensor.device
@property
def data(self):
return self._tensor.data
@data.setter
def data(self, value):
self._tensor.data = value
def __repr__(self):
return f"{self.__class__.__name__}({self._tensor})"
def __bool__(self):
"""Override bool operator since encrypted tensors cannot evaluate"""
raise RuntimeError("Cannot evaluate CrypTensors to boolean values")
def __nonzero__(self):
"""__bool__ for backwards compatibility with Python 2"""
raise RuntimeError("Cannot evaluate CrypTensors to boolean values")
##############################################################
# All CrypTensor subclasses should implement the following: #
##############################################################
def get_plain_text(self):
"""Decrypts the encrypted tensor."""
raise NotImplementedError("get_plain_text is not implemented")
def shallow_copy(self):
"""Creates a shallow copy of the CrypTensor."""
# TODO: Rename this to __copy__()?
raise NotImplementedError("shallow_copy is not implemented")
def copy_(self, other):
"""Copies value of other CrypTensor into this CrypTensor."""
raise NotImplementedError("copy_ is not implemented")
def clone(self):
"""
Returns a copy of the :attr:`self` tensor.
The copy has the same size and data type as :attr:`self`.
.. note::
This function is recorded in the computation graph. Gradients
propagating to the cloned tensor will propagate to the original tensor.
"""
raise NotImplementedError("clone is not implemented")
def add(self, tensor):
r"""Adds :attr:`tensor` to this :attr:`self`.
Args:
tensor: can be a torch tensor or a CrypTensor.
The shapes of :attr:`self` and :attr:`tensor` must be
`broadcastable`_.
For a scalar `tensor`,
.. math::
            \text{out}_i = \text{input}_i + \text{tensor}
.. _broadcastable:
https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics
"""
raise NotImplementedError("add is not implemented")
def mul(self, tensor):
r"""Element-wise multiply with a :attr:`tensor`.
.. math::
\text{out}_i = \text{tensor}_i \times \text{self}_i
Args:
tensor (Tensor or float): the tensor or value to multiply.
The shapes of :attr:`self` and :attr:`tensor` must be
`broadcastable`_.
.. _broadcastable:
https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics
"""
raise NotImplementedError("mul is not implemented")
def div(self, tensor):
r"""
Divides each element of :attr:`self` with the :attr:`tensor`
and returns a new resulting tensor.
.. math::
\text{out}_i = \frac{\text{input}_i}{\text{tensor}_i}
The shapes of :attr:`self` and :attr:`tensor` must be
`broadcastable`_.
Args:
tensor (Tensor or float): the tensor or value in the denominator.
.. _broadcastable:
https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics
"""
raise NotImplementedError("div is not implemented")
def neg(self):
r"""
Returns a new tensor with the negative of the elements of :attr:`self`.
.. math::
\text{out} = -1 \times \text{input}
"""
raise NotImplementedError("neg is not implemented")
def matmul(self, tensor):
r"""Performs matrix multiplication of :attr:`self` with :attr:`tensor`
The behavior depends on the dimensionality of the tensors as follows:
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is
2-dimensional, a 1 is prepended to its dimension for the purpose of
the matrix multiply. After the matrix multiply, the
prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is
1-dimensional, the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument
is N-dimensional (where N > 2), then a batched matrix multiply is returned.
If the first argument is 1-dimensional, a 1 is prepended to its dimension
for the purpose of the batched matrix multiply and removed after.
If the second argument is 1-dimensional, a 1 is appended to its dimension
          for the purpose of the batched matrix multiply and removed after.
The non-matrix (i.e. batch) dimensions are broadcasted (and thus
must be `broadcastable`_). For example, if :attr:`self` is a
:math:`(j \times 1 \times n \times m)` tensor and :attr:`tensor` is a
:math:`(k \times m \times p)` tensor, :attr:`out` will be an
:math:`(j \times k \times n \times p)` tensor.
Arguments:
tensor (Tensor): the tensor to be multiplied
.. _broadcastable:
https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics
"""
raise NotImplementedError("matmul is not implemented")
def conv1d(self, kernel, *args, **kwargs):
"""1D convolution."""
raise NotImplementedError("conv1d is not implemented")
def conv2d(self, kernel, *args, **kwargs):
"""2D convolution."""
raise NotImplementedError("conv2d is not implemented")
def conv_transpose1d(self, kernel, **kwargs):
"""Perform a 1D transpose convolution (deconvolution) using the given kernel"""
raise NotImplementedError("conv_transpose1d is not implemented")
def conv_transpose2d(self, kernel, **kwargs):
"""Perform a 2D transpose convolution (deconvolution) using the given kernel"""
raise NotImplementedError("conv_transpose2d is not implemented")
def avg_pool2d(self, kernel_size, stride=None, padding=0):
"""Perform an average pooling on each 2D matrix of the given tensor
Args:
kernel_size (int or tuple): pooling kernel size.
"""
raise NotImplementedError("avg_pool2d is not implemented")
def _ltz(self):
"""Returns 1 for elements that are < 0 and 0 otherwise"""
raise NotImplementedError("_ltz is not implemented")
@staticmethod
def rand(*sizes, device=None):
"""
Returns a tensor with elements uniformly sampled in [0, 1). The uniform
        random samples are produced by generating random bits in fixed-point
        encoding and converting the result to an ArithmeticSharedTensor.
"""
raise NotImplementedError("rand is not implemented")
from .common import functions
# Register common functions
for module_name in functions.__all__:
module = getattr(functions, module_name)
for func in module.__all__:
setattr(CrypTensor, func, getattr(module, func))
| CrypTen-main | crypten/cryptensor.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import copy
import io
import onnx
import torch
import torch.onnx.symbolic_helper as sym_help
import torch.onnx.utils
from onnx import numpy_helper
from torch.onnx import OperatorExportTypes
from . import module
try:
import tensorflow as tf # noqa
import tf2onnx
TF_AND_TF2ONNX = True
except ImportError:
TF_AND_TF2ONNX = False
try:
import torch.onnx.symbolic_registry as sym_registry # noqa
SYM_REGISTRY = True
except ImportError:
from torch.onnx._internal.registration import registry # noqa
SYM_REGISTRY = False
_OPSET_VERSION = 17
def from_onnx(onnx_string_or_file):
"""
Converts an ONNX model serialized in an `onnx_string_or_file` to a CrypTen model.
"""
onnx_model = _load_onnx_model(onnx_string_or_file)
return _to_crypten(onnx_model)
def from_pytorch(pytorch_model, dummy_input):
"""
Converts a PyTorch model `pytorch_model` into a CrypTen model by tracing it
using the input `dummy_input`.
"""
# construct CrypTen model:
f = _from_pytorch_to_bytes(pytorch_model, dummy_input)
crypten_model = from_onnx(f)
f.close()
# set model architecture to export model back to pytorch model
crypten_model.pytorch_model = copy.deepcopy(pytorch_model)
# make sure training / eval setting is copied:
crypten_model.train(mode=pytorch_model.training)
return crypten_model
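# A minimal end-to-end sketch of the conversion path above (model, shapes,
# and variable names are illustrative; assumes crypten.init() was called):
#
#     pytorch_model = torch.nn.Linear(10, 2)
#     dummy_input = torch.empty(1, 10)
#     crypten_model = from_pytorch(pytorch_model, dummy_input)
#     crypten_model.encrypt()
#     output = crypten_model(crypten.cryptensor(dummy_input))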
def from_tensorflow(tensorflow_graph_def, inputs, outputs):
"""
Function that converts Tensorflow model into CrypTen model based on
https://github.com/onnx/tensorflow-onnx/blob/master/tf2onnx/convert.py
The model is returned in evaluation mode.
Args:
`tensorflow_graph_def`: Input Tensorflow GraphDef to be converted
`inputs`: input nodes
`outputs`: output nodes
"""
    raise DeprecationWarning(
        "crypten.nn.from_tensorflow is deprecated. "
        "CrypTen will no longer support model conversion from TensorFlow."
    )
# Exporting model to ONNX graph
if not TF_AND_TF2ONNX:
raise ImportError("Please install both tensorflow and tf2onnx packages")
with tf.Graph().as_default() as tf_graph:
tf.import_graph_def(tensorflow_graph_def, name="")
with tf2onnx.tf_loader.tf_session(graph=tf_graph):
g = tf2onnx.tfonnx.process_tf_graph(
tf_graph,
opset=10,
continue_on_error=False,
input_names=inputs,
output_names=outputs,
)
onnx_graph = tf2onnx.optimizer.optimize_graph(g)
model_proto = onnx_graph.make_model(
"converted from {}".format(tensorflow_graph_def)
)
f = io.BytesIO()
f.write(model_proto.SerializeToString())
# construct CrypTen model
# Note: We don't convert crypten model to training mode, as Tensorflow
# models are used for both training and evaluation without the specific
# conversion of one mode to another
f.seek(0)
crypten_model = from_onnx(f)
return crypten_model
def _from_pytorch_to_bytes(pytorch_model, dummy_input):
"""
Returns I/O stream containing ONNX graph for `pytorch_model` traced with
input `dummy_input`.
"""
# first export is only used to obtain the PyTorch-to-ONNX symbolic registry:
with io.BytesIO() as f:
_export_pytorch_model(f, pytorch_model, dummy_input)
# update ONNX symbolic registry with CrypTen-specific functions:
_update_onnx_symbolic_registry()
# export again so the graph is created with CrypTen-specific registry:
f = io.BytesIO()
f = _export_pytorch_model(f, pytorch_model, dummy_input)
f.seek(0)
return f
def _export_pytorch_model(f, pytorch_model, dummy_input):
"""
Returns a binary I/O stream containing ONNX-exported pytorch_model that was
traced with input `dummy_input`.
"""
kwargs = {
"do_constant_folding": False,
"export_params": True,
"input_names": ["input"],
"operator_export_type": OperatorExportTypes.ONNX,
"output_names": ["output"],
"opset_version": _OPSET_VERSION,
}
torch.onnx.export(pytorch_model, dummy_input, f, **kwargs)
return f
# mapping from ONNX to crypten.nn for modules with different names:
ONNX_TO_CRYPTEN = {
"adaptive_avg_pool2d": module.AdaptiveAvgPool2d,
"adaptive_max_pool2d": module.AdaptiveMaxPool2d,
"AveragePool": module.AvgPool2d,
"Clip": module.Hardtanh,
"MaxPool": module.MaxPool2d,
"Pad": module._ConstantPad,
"Relu": module.ReLU,
"ReduceMean": module.Mean,
"ReduceSum": module.Sum,
}
def _to_crypten(onnx_model):
"""
Function that converts an `onnx_model` to a CrypTen model.
"""
# create graph:
input_names, output_names = _get_input_output_names(onnx_model)
assert len(output_names) == 1, "Only one output per model supported."
crypten_model = module.Graph(input_names, output_names[0])
# create nodes for the parameters:
for node in onnx_model.graph.initializer:
param = torch.from_numpy(numpy_helper.to_array(node))
crypten_model.add_module(node.name, module.Parameter(param), [])
# loop over all nodes:
for node in onnx_model.graph.node:
# get attributes and node type:
attributes = {attr.name: _get_attribute_value(attr) for attr in node.attribute}
crypten_class = _get_operator_class(node.op_type, attributes)
# add CrypTen module to graph:
crypten_module = crypten_class.from_onnx(attributes=attributes)
input_names = list(node.input)
output_names = list(node.output)
if node.op_type == "Dropout":
output_names = [output_names[0]] # do not output Dropout mask
crypten_model.add_module(
output_names[0], crypten_module, input_names, output_names=output_names
)
# return final model:
crypten_model = _get_model_or_module(crypten_model)
return crypten_model
def _load_onnx_model(onnx_string_or_file):
"""
Loads ONNX model from file or string.
"""
if hasattr(onnx_string_or_file, "seek"):
onnx_string_or_file.seek(0)
return onnx.load(onnx_string_or_file)
return onnx.load_model_from_string(onnx_string_or_file)
def _get_input_output_names(onnx_model):
"""
Return input and output names of the ONNX graph.
"""
input_names = [input.name for input in onnx_model.graph.input]
output_names = [output.name for output in onnx_model.graph.output]
assert len(input_names) >= 1, "number of inputs should be at least 1"
assert len(output_names) == 1, "number of outputs should be 1"
return input_names, output_names
def _get_model_or_module(crypten_model):
"""
Returns `Module` if model contains only one module. Otherwise returns model.
"""
num_modules = len(list(crypten_model.modules()))
if num_modules == 1:
for crypten_module in crypten_model.modules():
return crypten_module
return crypten_model
def _get_attribute_value(attr):
"""
Retrieves value from an ONNX attribute.
"""
if attr.HasField("f"): # floating-point attribute
return attr.f
elif attr.HasField("i"): # integer attribute
return attr.i
elif attr.HasField("s"): # string attribute
return attr.s # TODO: Sanitize string.
elif attr.HasField("t"): # tensor attribute
return torch.from_numpy(numpy_helper.to_array(attr.t))
elif len(attr.ints) > 0:
return list(attr.ints)
elif len(attr.floats) > 0:
return list(attr.floats)
raise ValueError("Unknown attribute type for attribute %s." % attr.name)
def _get_operator_class(node_op_type, attributes):
"""
Returns the `crypten.nn.Module` type corresponding to an ONNX node.
"""
crypten_class = getattr(
module, node_op_type, ONNX_TO_CRYPTEN.get(node_op_type, None)
)
if crypten_class is None:
raise ValueError(f"CrypTen does not support ONNX op {node_op_type}.")
return crypten_class
def _update_onnx_symbolic_registry():
"""
Updates the ONNX symbolic registry for operators that need a CrypTen-specific
implementation and custom operators.
"""
if SYM_REGISTRY:
# update PyTorch's symbolic ONNX registry to output different functions:
for version_key, version_val in sym_registry._registry.items():
for function_key in version_val.keys():
if function_key == "softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_softmax
if function_key == "log_softmax":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_logsoftmax
if function_key == "dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_dropout
if function_key == "feature_dropout":
sym_registry._registry[version_key][
function_key
] = _onnx_crypten_feature_dropout
else:
# Update ONNX symbolic registry using torch.onnx.register_custom_op_symbolic
torch.onnx.register_custom_op_symbolic(
"aten::softmax", _onnx_crypten_softmax, _OPSET_VERSION
)
torch.onnx.register_custom_op_symbolic(
"aten::log_softmax", _onnx_crypten_logsoftmax, _OPSET_VERSION
)
torch.onnx.register_custom_op_symbolic(
"aten::dropout", _onnx_crypten_dropout, _OPSET_VERSION
)
torch.onnx.register_custom_op_symbolic(
"aten::feature_dropout", _onnx_crypten_feature_dropout, _OPSET_VERSION
)
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_softmax(g, input, dim, dtype=None):
"""
This function converts PyTorch's Softmax module to a Softmax module in
the ONNX model. It overrides PyTorch's default conversion of Softmax module
to a sequence of Exp, ReduceSum and Div modules, since this default
conversion can cause numerical overflow when applied to CrypTensors.
"""
result = g.op("Softmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = sym_help._get_const(dtype, "i", "dtype")
result = g.op("Cast", result, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
return result
@sym_help.parse_args("v", "i", "none")
def _onnx_crypten_logsoftmax(g, input, dim, dtype=None):
"""
This function converts PyTorch's LogSoftmax module to a LogSoftmax module in
the ONNX model. It overrides PyTorch's default conversion of LogSoftmax module
to avoid potentially creating Transpose operators.
"""
result = g.op("LogSoftmax", input, axis_i=dim)
if dtype and dtype.node().kind() != "prim::Constant":
parsed_dtype = sym_help._get_const(dtype, "i", "dtype")
result = g.op("Cast", result, to_i=sym_help.scalar_type_to_onnx[parsed_dtype])
return result
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_dropout(g, input, p, train):
"""
This function converts PyTorch's Dropout module to a Dropout module in the ONNX
model. It overrides PyTorch's default implementation to ignore the Dropout module
during the conversion. PyTorch assumes that ONNX models are only used for
inference and therefore Dropout modules are not required in the ONNX model.
However, CrypTen needs to convert ONNX models to trainable
CrypTen models, and so the Dropout module needs to be included in the
CrypTen-specific conversion.
"""
r, _ = g.op("Dropout", input, ratio_f=p, outputs=2)
return r
@sym_help.parse_args("v", "f", "i")
def _onnx_crypten_feature_dropout(g, input, p, train):
"""
This function converts PyTorch's DropoutNd module to a DropoutNd module in the ONNX
model. It overrides PyTorch's default implementation to ignore the DropoutNd module
during the conversion. PyTorch assumes that ONNX models are only used for
inference and therefore DropoutNd modules are not required in the ONNX model.
However, CrypTen needs to convert ONNX models to trainable
CrypTen models, and so the DropoutNd module needs to be included in the
CrypTen-specific conversion.
"""
r, _ = g.op("DropoutNd", input, ratio_f=p, outputs=2)
return r
| CrypTen-main | crypten/nn/onnx_converter.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .loss import _Loss
from .module import Module
class CosineSimilarity(Module):
r"""Returns cosine similarity between :math:`x_1` and :math:`x_2`, computed along dim.
.. math ::
\text{similarity} = \dfrac{x_1 \cdot x_2}{\max(\Vert x_1 \Vert _2 \cdot \Vert x_2 \Vert _2, \epsilon)}.
Args:
dim (int, optional): Dimension where cosine similarity is computed. Default: 1
eps (float, optional): Not used in CrypTen
Shape:
- Input1: :math:`(\ast_1, D, \ast_2)` where D is at position `dim`
- Input2: :math:`(\ast_1, D, \ast_2)`, same shape as the Input1
- Output: :math:`(\ast_1, \ast_2)`
Examples::
>>> input1 = crypten.randn(100, 128)
>>> input2 = crypten.randn(100, 128)
>>> cos = nn.CosineSimilarity(dim=1, eps=1e-6)
>>> output = cos(input1, input2)
"""
def __init__(self, dim=1, eps=1e-8):
super(CosineSimilarity, self).__init__()
self.dim = dim
def forward(self, x1, x2):
return x1.cosine_similarity(x2, self.dim)
# Remove need to call module.encrypt()
__getattribute__ = _Loss.__getattribute__
| CrypTen-main | crypten/nn/distances.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .init import * # noqa: F403
from .distances import CosineSimilarity
from .loss import _Loss, BCELoss, BCEWithLogitsLoss, CrossEntropyLoss, L1Loss, MSELoss
from .module import (
AdaptiveAvgPool2d,
AdaptiveMaxPool2d,
Add,
AvgPool2d,
BatchNorm1d,
BatchNorm2d,
BatchNorm3d,
Cast,
Concat,
Constant,
ConstantOfShape,
ConstantPad1d,
ConstantPad2d,
ConstantPad3d,
Conv,
Conv1d,
Conv2d,
Div,
Dropout,
Dropout2d,
Dropout3d,
DropoutNd,
Equal,
Erf,
Exp,
Expand,
Flatten,
Gather,
Gemm,
GlobalAveragePool,
Graph,
GroupNorm,
Hardtanh,
Linear,
LogSoftmax,
MatMul,
MaxPool2d,
Mean,
Module,
ModuleDict,
ModuleList,
Mul,
Parameter,
Pow,
Range,
ReLU,
ReLU6,
Reshape,
Sequential,
Shape,
Sigmoid,
Slice,
Softmax,
Sqrt,
Squeeze,
Sub,
Sum,
Transpose,
Unsqueeze,
Where,
)
from .onnx_converter import from_onnx, from_pytorch, from_tensorflow, TF_AND_TF2ONNX
# expose contents of package
__all__ = [ # noqa: F405
"_Loss",
"AdaptiveAvgPool2d",
"AdaptiveMaxPool2d",
"Add",
"AvgPool2d",
"BatchNorm1d",
"BatchNorm2d",
"BatchNorm3d",
"BCELoss",
"BCEWithLogitsLoss",
"Cast",
"Concat",
"Constant",
"ConstantOfShape",
"ConstantPad1d",
"ConstantPad2d",
"ConstantPad3d",
"Conv",
"Conv1d",
"Conv2d",
"CosineSimilarity",
"CrossEntropyLoss",
"Div",
"Dropout",
"Dropout2d",
"Dropout3d",
"DropoutNd",
"Erf",
"Equal",
"Exp",
"Expand",
"Flatten",
"from_pytorch",
"from_onnx",
"from_tensorflow",
"Gather",
"Gemm",
"GlobalAveragePool",
"Graph",
"GroupNorm",
"Hardtanh",
"L1Loss",
"Linear",
"LogSoftmax",
"MatMul",
"MaxPool2d",
"Mean",
"Module",
"ModuleDict",
"ModuleList",
"MSELoss",
"Mul",
"Parameter",
"Pow",
"Range",
"ReLU",
"ReLU6",
"Reshape",
"Sequential",
"Shape",
"Sigmoid",
"Slice",
"Softmax",
"Sqrt",
"Squeeze",
"Sub",
"Sum",
"TF_AND_TF2ONNX",
"Transpose",
"Unsqueeze",
"Where",
"init",
]
| CrypTen-main | crypten/nn/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import torch
from .module import Module
class _Loss(Module):
"""
Base criterion class that mimics Pytorch's Loss.
"""
def __init__(self, reduction="mean", skip_forward=False):
super(_Loss, self).__init__()
if reduction != "mean":
raise NotImplementedError("reduction %s not supported")
self.reduction = reduction
self.skip_forward = skip_forward
def forward(self, *args, **kwargs):
raise NotImplementedError("forward not implemented")
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def __getattribute__(self, name):
if name != "forward":
return object.__getattribute__(self, name)
def forward_function(*args, **kwargs):
"""Silently encrypt Torch tensors if needed."""
if self.encrypted or any(
isinstance(arg, crypten.CrypTensor) for arg in args
):
args = list(args)
for idx, arg in enumerate(args):
if torch.is_tensor(arg):
args[idx] = crypten.cryptensor(arg)
return object.__getattribute__(self, name)(*tuple(args), **kwargs)
return forward_function
class MSELoss(_Loss):
r"""
Creates a criterion that measures the mean squared error (squared L2 norm) between
each element in the prediction :math:`x` and target :math:`y`.
The loss can be described as:
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = (x_n - y_n)^2,
where :math:`N` is the batch size, :math:`x` and :math:`y` are tensors of
arbitrary shapes with a total of :math:`n` elements each.
""" # noqa: W605
def forward(self, x, y):
assert x.size() == y.size(), "input and target must have the same size"
return (x - y).square().mean()
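# Minimal usage sketch for the criterion above (values are illustrative; as
# described in _Loss, plaintext torch inputs are encrypted on the fly when
# mixed with CrypTensors):
#
#     criterion = MSELoss()
#     x = crypten.cryptensor(torch.tensor([0.0, 2.0]))
#     y = torch.tensor([1.0, 1.0])
#     loss = criterion(x, y)             # ((0-1)^2 + (2-1)^2) / 2 = 1.0
#     print(loss.get_plain_text())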
class L1Loss(_Loss):
r"""
Creates a criterion that measures the mean absolute error between each element in
the prediction :math:`x` and target :math:`y`.
The loss can be described as:
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = \left | x_n - y_n \right |,
where :math:`N` is the batch size, :math:`x` and :math:`y` are tensors of
arbitrary shapes with a total of :math:`n` elements each.
""" # noqa: W605
def forward(self, x, y):
assert x.size() == y.size(), "input and target must have the same size"
return (x - y).abs().mean()
class BCELoss(_Loss):
r"""
Creates a criterion that measures the Binary Cross Entropy
between the prediction :math:`x` and the target :math:`y`.
The loss can be described as:
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = - \left [ y_n \cdot \log x_n + (1 - y_n) \cdot \log (1 - x_n) \right ],
where :math:`N` is the batch size, :math:`x` and :math:`y` are tensors of
arbitrary shapes with a total of :math:`n` elements each.
    This is used for measuring the error of a reconstruction in, for example,
    an auto-encoder. Note that the targets :math:`y` should be numbers
between 0 and 1.
""" # noqa: W605
def forward(self, x, y):
assert x.size() == y.size(), "input and target must have the same size"
return x.binary_cross_entropy(y, skip_forward=self.skip_forward)
class CrossEntropyLoss(_Loss):
r"""
Creates a criterion that measures cross-entropy loss between the
prediction :math:`x` and the target :math:`y`. It is useful when
training a classification problem with `C` classes.
The prediction `x` is expected to contain raw, unnormalized scores for each class.
The prediction `x` has to be a Tensor of size either :math:`(N, C)` or
:math:`(N, C, d_1, d_2, ..., d_K)`, where :math:`N` is the size of the minibatch,
and with :math:`K \geq 1` for the `K`-dimensional case (described later).
This criterion expects a class index in the range :math:`[0, C-1]` as the
target `y` for each value of a 1D tensor of size `N`.
The loss can be described as:
.. math::
\text{loss}(x, class) = -\log \left(
\frac{\exp(x[class])}{\sum_j \exp(x[j])} \right )
= -x[class] + \log \left (\sum_j \exp(x[j]) \right)
The losses are averaged across observations for each batch
Can also be used for higher dimension inputs, such as 2D images, by providing
an input of size :math:`(N, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`,
where :math:`K` is the number of dimensions, and a target of appropriate shape.
""" # noqa: W605
def forward(self, x, y):
x = x.squeeze()
y = y.squeeze()
assert x.size() == y.size(), "input and target must have the same size"
return x.cross_entropy(y, skip_forward=self.skip_forward)
class BCEWithLogitsLoss(_Loss):
r"""
This loss combines a Sigmoid layer and the BCELoss in one single class.
The loss can be described as:
.. math::
p = \sigma(x)
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = - \left [ y_n \cdot \log p_n + (1 - y_n) \cdot \log (1 - p_n) \right ],
    This is used for measuring the error of a reconstruction in, for example,
    an auto-encoder. Note that the targets t[i] should be numbers between 0 and 1.
""" # noqa: W605
def forward(self, x, y):
assert x.size() == y.size(), "input and target must have the same size"
return x.binary_cross_entropy_with_logits(y, skip_forward=self.skip_forward)
class RAPPORLoss(_Loss):
r"""
This loss computes the BCEWithLogitsLoss with corrections applied to account
for randomized response, where the input `alpha` represents the probability
of flipping a label.
The loss can be described as:
.. math::
p = \sigma(x)
.. math::
r = \alpha * p + (1 - \alpha) * (1 - p)
.. math::
\ell(x, y) = mean(L) = mean(\{l_1,\dots,l_N\}^\top), \quad
l_n = - \left [ y_n \cdot \log r_n + (1 - y_n) \cdot \log (1 - r_n) \right ],
    This is used for measuring the error of a reconstruction in, for example,
    an auto-encoder. Note that the targets t[i] should be numbers between 0 and 1.
"""
def __init__(self, alpha, reduction="mean", skip_forward=False):
super(RAPPORLoss, self).__init__(reduction=reduction, skip_forward=skip_forward)
self.alpha = alpha
def forward(self, x, y):
assert x.size() == y.size(), "input and target must have the same size"
return x.rappor_loss(y, self.alpha, skip_forward=self.skip_forward)
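# Worked sketch of the randomized-response correction above (values are
# illustrative): a prediction p = sigma(x) is remapped to
# r = alpha * p + (1 - alpha) * (1 - p) before the binary cross-entropy is
# taken. For alpha = 0.75 and p = 0.8, r = 0.75 * 0.8 + 0.25 * 0.2 = 0.65.
#
#     criterion = RAPPORLoss(alpha=0.75)
#     loss = criterion(logits_enc, reported_labels_enc)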
| CrypTen-main | crypten/nn/loss.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import warnings
from collections import OrderedDict
import crypten
import torch
import torch.onnx.symbolic_helper as sym_help
from crypten.common.functions.pooling import _adaptive_pool2d_helper
class Module:
"""
Base Module class that mimics the torch.nn.Module class.
"""
# allow for versioning of modules:
_version = 1
SUPPORTS_PLAINTEXT_INPUTS = False
def __init__(self):
self._parameters = OrderedDict()
self._buffers = OrderedDict()
self._modules = OrderedDict()
self.encrypted = False
self.train()
def __repr__(self):
encrypted_str = "encrypted" if self.encrypted else "unencrypted"
return f"{type(self).__name__} {encrypted_str} module"
@staticmethod
def from_onnx(attributes=None):
"""
Constructs a CrypTen module from an ONNX Protobuf string or file.
"""
raise NotImplementedError("Call this function on a Module type.")
def forward(self, *args, **kwargs):
"""Perform forward pass on model."""
raise NotImplementedError("forward not implemented")
def __call__(self, *args, **kwargs):
return self.forward(*args, **kwargs)
def train(self, mode=True):
"""Sets the module in the specified training mode."""
for param in self.parameters():
param.requires_grad = mode
self.training = mode
# Recursively set train mode
for module in self.children():
module.train(mode=mode)
return self
def eval(self):
"""Sets the module in evaluation mode."""
return self.train(False)
def children(self):
"""Returns an iterator over immediate children modules.
Yields:
Module: a child module
"""
for _, module in self.named_children():
yield module
def named_children(self):
"""Returns an iterator over immediate children modules, yielding both
the name of the module as well as the module itself.
Yields:
(string, Module): Tuple containing a name and child module
Example::
>>> for name, module in model.named_children():
>>> if name in ['conv4', 'conv5']:
>>> print(module)
"""
memo = set()
for name, module in self._modules.items():
if module is not None and module not in memo:
memo.add(module)
yield name, module
def register_module(self, name, module):
"""Registers child module in the module."""
self._modules[name] = module
def modules(self):
"""Returns an iterator over all modules in the network.
Yields:
Module: a module in the network
Note:
Duplicate modules are returned only once.
"""
for _, module in self.named_modules():
yield module
def named_modules(self, memo=None, prefix=""):
"""Returns iterator over named modules (non-recursively)."""
if memo is None:
memo = set()
if self not in memo:
memo.add(self)
yield prefix, self
for name, module in self.named_children():
if module is None:
continue
submodule_prefix = prefix + ("." if prefix else "") + name
for m in module.named_modules(memo, submodule_prefix):
yield m
def register_parameter(self, name, param, requires_grad=True):
"""
Register parameter in the module. This function cannot register
parameters in child modules.
"""
if name in self._parameters or hasattr(self, name):
raise ValueError("Parameter or field %s already exists." % name)
param.requires_grad = requires_grad
self._parameters[name] = param
setattr(self, name, param)
def set_parameter(self, name, param):
"""
Sets value of parameter in the module. This function cannot set
parameters in child modules.
"""
if name not in self._parameters or not hasattr(self, name):
raise ValueError("Parameter %s does not exist." % name)
self._parameters[name] = param
setattr(self, name, param)
def set_parameter_from_shares(self, name, share, **kwargs):
"""
Sets value of parameter in the module from shares. This functionality is
only for MPC-encrypted models.
Supported named arguments for `MPCTensor` parameters include the `precision`
of the encoder (default = `None`), the rank of the `src` (default = 0),
and the `ptype` of the shares (default = `crypten.mpc.arithmetic`).
This function cannot set the parameters in child modules.
"""
# functionality is only supported when parameters are MPCTensors:
assert self.encrypted, "can only set parameters from shares in encrypted models"
if name not in self._parameters or not hasattr(self, name):
raise ValueError("Parameter %s does not exist." % name)
cls = type(self._parameters[name])
        assert hasattr(
            self._parameters[name], "from_shares"
        ), "parameter type {} does not support setting from shares".format(cls)
# load parameters from shares:
self._parameters[name] = cls.from_shares(share, **kwargs)
setattr(self, name, self._parameters[name])
def parameters(self, recurse=True):
"""Returns an iterator over module parameters.
This is typically passed to an optimizer.
Args:
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
CrypTensor or torch.Tensor: module parameter
"""
for _, param in self.named_parameters(recurse=recurse):
yield param
def named_parameters(self, recurse=True, prefix=None):
"""
Returns an iterator over module parameters, yielding both the
name of the parameter as well as the parameter itself.
Args:
prefix (str): prefix to prepend to all parameter names.
recurse (bool): if True, then yields parameters of this module
and all submodules. Otherwise, yields only parameters that
are direct members of this module.
Yields:
(string, CrypTensor or torch.Tensor): Tuple containing the name and parameter
"""
for name, param in self._parameters.items():
param_name = name if prefix is None else prefix + "." + name
yield param_name, param
if recurse:
for module_name, module in self.named_children():
pre = module_name if prefix is None else prefix + "." + module_name
yield from module.named_parameters(recurse=recurse, prefix=pre)
def _save_to_state_dict(self, destination, prefix, keep_vars):
"""
Saves module state to `destination` dictionary, containing a state
of the module, but not its descendants. The specified `prefix` will be
used in names of parameters and buffers in this module. The `keep_vars`
boolean determines if parameters and buffers are kept on the autograd tape.
"""
for name, param in self.named_parameters(recurse=False):
if param is not None:
destination[prefix + name] = param if keep_vars else param.detach()
for name, buffer in self.named_buffers(recurse=False):
if buffer is not None:
destination[prefix + name] = buffer if keep_vars else buffer.detach()
def state_dict(self, destination=None, prefix="", keep_vars=False):
"""
Returns a dictionary containing the state of the module. Both parameters
and persistent buffers (e.g., running averages) are included. Keys are
corresponding parameter and buffer names.
"""
# save parameters and buffers of current module:
if destination is None:
destination = OrderedDict()
destination._metadata = {"version": Module._version}
self._save_to_state_dict(destination, prefix, keep_vars)
# recurse over modules:
for name, module in self.named_children():
if module is not None:
module.state_dict(destination, prefix + name + ".", keep_vars=keep_vars)
return destination
def _load_from_state_dict_crypten(self, state_dict, prefix, strict):
"""
Copies parameters and buffers from `state_dict` into only this module
but not its children. This is called on every submodule in the
`load_state_dict` function.
"""
# get state dict for just the current module (without children)
local_state = {
key: val for key, val in self.named_parameters() if val is not None
}
# in strict mode, check for missing keys in the state_dict:
if strict:
for name in local_state.keys():
key = prefix + name
if key not in state_dict:
raise ValueError("Key {} not found in state dict.".format(key))
# loop over parameters / buffers in module:
for name, param in local_state.items():
key = prefix + name
input_param = state_dict[key]
# size in state_dict should match size of parameters:
            if input_param.size() != param.size():
                raise ValueError(
                    "Size mismatch for {}: copying a param with "
                    "shape {} from checkpoint; the shape in the "
                    "current model is {}.".format(
                        key, input_param.size(), param.size()
                    )
                )
# cannot copy encrypted tensors into unencrypted models and vice versa:
param_encrypted = isinstance(input_param, crypten.CrypTensor)
if param_encrypted:
assert (
self.encrypted
), "cannot copy encrypted parameters into unencrypted model"
else:
assert (
not self.encrypted
), "cannot copy unencrypted parameters into encrypted model"
# copy parameters from state_dict:
with crypten.no_grad(), torch.no_grad():
param.copy_(input_param)
def load_state_dict(self, state_dict, strict=True):
"""
Copies parameters and buffers from `state_dict` into this module and its
children. If `strict` is `True`, then the keys of `state_dict` must
exactly match the keys returned by this module's `state_dict` function.
"""
# check version of state_dict:
if strict:
metadata = getattr(state_dict, "_metadata", None)
if metadata is None:
raise ValueError("Specified state_dict does not have metadata.")
version = metadata.get("version", -1)
if version != Module._version:
raise ValueError(
"Specified state_dict has incorrect version: {}".format(version)
)
        # make a copy of state_dict so that load() can modify it:
state_dict = state_dict.copy()
def load(module, prefix=""):
"""
Closure that performs the loading of a module recursively.
"""
module._load_from_state_dict_crypten(state_dict, prefix, strict)
for name, child in module.named_children():
if child is not None:
load(child, prefix + name + ".")
# perform the actual loading:
load(self)
load = None # break load->load reference cycle
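    # Round-trip sketch for the two methods above (illustrative; source and
    # target models must be in the same encryption state for the copy):
    #
    #     sd = model_a.state_dict()      # parameters and persistent buffers
    #     model_b.load_state_dict(sd)    # strict key / size / version checks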
def zero_grad(self):
"""Sets gradients of all parameters to zero."""
for param in self.parameters():
param.grad = None
def update_parameters(self, learning_rate, grad_threshold=100):
"""Performs gradient step on parameters.
Parameters:
            grad_threshold - Because arithmetic operations can, in extremely
                rare cases, return large incorrect results, we zero out all
                elements with magnitude larger than this given threshold. To
                turn off thresholding, set to `None`.
"""
assert self.training, "module not in training mode"
with crypten.no_grad(), torch.no_grad():
for param in self.parameters():
if param.grad is None:
continue
if not self.encrypted and isinstance(param.grad, crypten.CrypTensor):
raise RuntimeError(
"Cannot update parameters of unencrypted "
"model using encrypted gradients. Encrypt "
"model before updating parameters."
)
# Threshold gradients to prevent gradient explosion from wrap overflow
if self.encrypted and grad_threshold is not None:
# Compute based on square value since abs is more expensive
square_threshold = grad_threshold * grad_threshold
grad = param.grad.mul(param.grad.square().lt(square_threshold))
else:
grad = param.grad
param.sub_(grad.mul_(learning_rate))
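    # Minimal training-step sketch using the update above (illustrative;
    # assumes an encrypted model, an encrypted criterion, and CrypTensor data):
    #
    #     model.train()
    #     model.zero_grad()
    #     loss = criterion(model(x_enc), y_enc)
    #     loss.backward()
    #     model.update_parameters(learning_rate=0.1)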
def register_buffer(self, name, buffer):
"""
Register buffer in the module. Buffers are encrypted like parameters but
they are not updated by parameter updates.
"""
if name in self._buffers or hasattr(self, name):
raise ValueError("Buffer or field %s already exists." % name)
buffer.requires_grad = False
self._buffers[name] = buffer
setattr(self, name, buffer)
def set_buffer(self, name, buffer):
"""
Sets value of buffer in the module. This function cannot set the
parameters in child modules.
"""
if name not in self._buffers or not hasattr(self, name):
raise ValueError("Buffer %s does not exist." % name)
self._buffers[name] = buffer
setattr(self, name, buffer)
def buffers(self, recurse=True):
"""Returns an iterator over module buffers.
Args:
recurse (bool): if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module.
Yields:
CrypTensor or torch.Tensor: module buffer
"""
for _, buffer in self.named_buffers(recurse=recurse):
yield buffer
def named_buffers(self, recurse=True, prefix=None):
"""Returns an iterator over module buffers, yielding both the
name of the buffer as well as the buffer itself.
Args:
prefix (str): prefix to prepend to all buffer names.
recurse (bool): if True, then yields buffers of this module
and all submodules. Otherwise, yields only buffers that
are direct members of this module.
Yields:
(string, CrypTensor or torch.Tensor): Tuple containing the name and buffer
Example::
>>> for name, buf in self.named_buffers():
>>> if name in ['running_var']:
>>> print(buf.size())
"""
for name, buffer in self._buffers.items():
buffer_name = name if prefix is None else prefix + "." + name
yield buffer_name, buffer
if recurse:
for module_name, module in self.named_children():
pre = module_name if prefix is None else prefix + "." + module_name
yield from module.named_buffers(recurse=recurse, prefix=pre)
def to(self, *args, **kwargs):
"""
Moves and/or casts the parameters and buffers.
This can be called as
`to(device=None, dtype=None, non_blocking=False)`
`to(dtype, non_blocking=False)`
`to(tensor, non_blocking=False)`
`to(memory_format=torch.channels_last)`
Args:
device (torch.device) – the desired device of the parameters
and buffers in this module
dtype (torch.dtype) – the desired floating point type of the
floating point parameters and buffers in this module
tensor (torch.Tensor) – Tensor whose dtype and device are the
desired dtype and device for all parameters and buffers in this module
memory_format (torch.memory_format) – the desired memory format
for 4D parameters and buffers in this module (keyword only argument)
"""
for name, param in self._parameters.items():
self.set_parameter(name, param.to(*args, **kwargs))
for name, buffer in self._buffers.items():
self.set_buffer(name, buffer.to(*args, **kwargs))
for module in self.children():
module.to(*args, **kwargs)
return self
def cuda(self, device=None):
"""
Moves all model parameters and buffers to the GPU.
Args:
device (int, optional) – if specified, all parameters will be copied
to that device
"""
for name, param in self._parameters.items():
self.set_parameter(name, param.cuda(device=device))
for name, buffer in self._buffers.items():
self.set_buffer(name, buffer.cuda(device=device))
for module in self.children():
module.cuda(device=device)
return self
def cpu(self):
"""Moves all model parameters and buffers to the CPU."""
for name, param in self._parameters.items():
self.set_parameter(name, param.cpu())
for name, buffer in self._buffers.items():
self.set_buffer(name, buffer.cpu())
for module in self.children():
module.cpu()
return self
def _apply(self, fn):
"""Applies a function recursively on all modules."""
for module in self.children():
module._apply(fn)
fn(self)
return self
def encrypt(self, mode=True, src=0):
"""Encrypts the model."""
if mode != self.encrypted:
# encrypt / decrypt parameters:
self.encrypted = mode
for name, param in self.named_parameters(recurse=False):
requires_grad = param.requires_grad
if mode: # encrypt parameter
self.set_parameter(
name,
crypten.cryptensor(
param, **{"src": src}, requires_grad=requires_grad
),
)
else: # decrypt parameter
self.set_parameter(name, param.get_plain_text())
self._parameters[name].requires_grad = requires_grad
# encrypt / decrypt buffers:
for name, buffer in self.named_buffers(recurse=False):
# encrypt buffer only if it's a torch tensor (not shapes)
if mode and torch.is_tensor(buffer):
self.set_buffer(
name,
crypten.cryptensor(buffer, **{"src": src}, requires_grad=False),
)
# decrypt buffer if it's a cryptensor
elif isinstance(buffer, crypten.CrypTensor):
self.set_buffer(name, buffer.get_plain_text())
self._buffers[name].requires_grad = False
# apply encryption recursively:
return self._apply(lambda m: m.encrypt(mode=mode, src=src))
return self
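    # Encryption round-trip sketch (illustrative; `src` is the rank of the
    # party that holds the plaintext parameters):
    #
    #     model = crypten.nn.from_pytorch(pytorch_model, dummy_input)
    #     model.encrypt(src=0)   # parameters / buffers become CrypTensors
    #     model.decrypt()        # restores plaintext parameters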
def decrypt(self):
"""Decrypts model."""
return self.encrypt(mode=False)
def __getattribute__(self, name):
if name != "forward":
return object.__getattribute__(self, name)
def forward_function(*args, **kwargs):
"""
            """Silently encrypt Torch input tensors (deprecated)."""
"""
if self.encrypted and not self.SUPPORTS_PLAINTEXT_INPUTS:
if any(torch.is_tensor(arg) for arg in args):
warnings.warn(
"Earlier versions of CrypTen silently encrypted Torch tensors. "
"That behavior is now deprecated because it is dangerous. "
"Please make sure you feed your model CrypTensors when needed.",
DeprecationWarning,
)
elif not self.encrypted:
if any(isinstance(arg, crypten.CrypTensor) for arg in args):
raise RuntimeError(
"Cannot input CrypTensors into unencrypted model. "
"Encrypt the model before feeding it CrypTensors."
)
return object.__getattribute__(self, name)(*tuple(args), **kwargs)
return forward_function
def __getattr__(self, name):
"""Redefine __getattr__ so that any parameters, modules or buffers
inside the Module object can be accessed as attributes
"""
if "_parameters" in self.__dict__:
parameters = self.__dict__["_parameters"]
if name in parameters:
return parameters[name]
if "_modules" in self.__dict__:
modules = self.__dict__["_modules"]
if name in modules:
return modules[name]
if "_buffers" in self.__dict__:
buffers = self.__dict__["_buffers"]
if name in buffers:
return buffers[name]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, name)
)
def __setattr__(self, name, value):
"""Redefine __setattr__ so that any submodules created
inside the Module object are registered with _modules
OrderedDict.
"""
def remove_from(*dicts):
for d in dicts:
if name in d:
del d[name]
modules = self.__dict__.get("_modules")
if isinstance(value, Module):
if modules is None:
raise AttributeError(
"cannot assign module before Module.__init__() call"
)
remove_from(self.__dict__, self._parameters, self._buffers)
modules[name] = value
elif modules is not None and name in modules:
if value is not None:
raise TypeError(
"cannot assign '{}' as child module '{}' "
"(torch.nn.Module or None expected)".format(
torch.typename(value), name
)
)
modules[name] = value
else:
for key in ["_parameters", "_modules", "_buffers"]:
if key in self.__dict__ and name in self.__dict__[key]:
values = self.__dict__[key]
values[name] = value
return
object.__setattr__(self, name, value)
def add_module(self, name, module):
"""Adds and registers a submodule with a given name"""
assert name not in self._modules.keys(), "Module %s already exists." % name
self.register_module(name, module)
class Container(Module):
"""
Container allows distinguishing between individual modules and containers.
"""
pass
class Graph(Container):
"""
Acyclic graph of modules.
The module maintains a dict of named modules and a graph structure stored in
a dict where each key is a module name, and the associated value is a list
of module names that provide the input into the module. Each module may have
an optional list of output names as well, that is stored as an attribute in
the module by the `add_module` function.
"""
def __init__(self, input_names, output_names, modules=None, graph=None):
"""
Initializes a graph module with inputs named by `input_names`, that
produces outputs named by `output_names`.
Optionally, `modules` and the `graph` structure can be specified as well.
Alternatively, the graph can be built up using the `add_module` function.
"""
super().__init__()
if not isinstance(input_names, (list, tuple)):
input_names = [input_names]
if not isinstance(output_names, (list, tuple)):
output_names = [output_names]
self.input_names = input_names
self.output_names = output_names
self._graph = {}
if modules is not None:
self._modules = modules
if graph is not None:
self._graph = graph
def add_module(self, name, module, input_names=None, output_names=None):
"""
Adds a `module` with the specified `name` to the graph. If the `module`
expects inputs, their names should be specified via `input_names`.
The module is expected to produce an output with `name`. However, if the
module produces multiple outputs, these must be named in the
`output_names` list.
Both `input_names` and `output_names` are expected to be ordered.
"""
assert name not in self._graph, "Module %s already exists." % name
self.register_module(name, module)
if input_names is not None:
self._graph[name] = input_names
if output_names is not None:
module._output_names = output_names
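    # Sketch of building a two-node graph with the method above (modules and
    # names are illustrative):
    #
    #     graph = Graph("input", "output")
    #     graph.add_module("hidden", Linear(10, 5), ["input"])
    #     graph.add_module("output", ReLU(), ["hidden"])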
def forward(self, *args):
assert len(args) == len(
self.input_names
), f"Expected {len(self.input_names)} inputs but received {len(args)}."
# keep track of all values that have been computed:
values = {self.input_names[idx]: args[idx] for idx in range(len(args))}
computed = {key: False for key in self._graph.keys()}
inputs_available = {
key: [False for _ in range(len(value_list))]
for key, value_list in self._graph.items()
}
def _mark_as_computed(name):
"""Marks a value as having been computed."""
computed[name] = True
for key, value_list in self._graph.items():
if name in value_list:
inputs_available[key][value_list.index(name)] = True
def _find_computable_node():
"""Find a node for which all inputs are available."""
for key, inputs_available_list in inputs_available.items():
if all(inputs_available_list) and not computed[key]:
return key
return None
def _clear_unused_values():
"""Clear values that are no longer needed (to save memory)."""
remove_keys = []
for remove_key in values.keys():
can_be_removed = True
# we cannot remove a value if it is still needed:
for key, value_list in self._graph.items():
if not computed[key] and remove_key in value_list:
can_be_removed = False
break
if can_be_removed:
remove_keys.append(remove_key)
# remove all values we no longer need:
for remove_key in remove_keys:
del values[remove_key]
# NOTE: We maintain inputs_available[remove_key] as True to
# prevent re-computation of the node.
# perform forward pass:
for input_name in self.input_names:
_mark_as_computed(input_name)
node_to_compute = _find_computable_node()
while node_to_compute is not None:
# compute output of module:
input = [values[name] for name in self._graph[node_to_compute]]
if len(input) == 1:
input = input[0] # unpack iterable if possible
module = self._modules[node_to_compute]
output = module(input)
# we may get one output:
output_names = getattr(module, "_output_names", None)
if output_names is None or len(output_names) == 1:
if output_names is not None:
assert output_names[0] == node_to_compute, "invalid graph"
values[node_to_compute] = output
_mark_as_computed(node_to_compute)
# or multiple outputs:
else:
assert isinstance(
output, tuple
), f"expected outputs {output_names} of {module} to be tuple, not {type(output)}"
assert len(output_names) == len(
output
), f"expected {len(output_names)} outputs from {module}, received {len(output)}"
for node, value in zip(output_names, output):
values[node] = value
_mark_as_computed(node)
# return output if it is available:
if all(computed[output_name] for output_name in self.output_names):
result = [values[output_name] for output_name in self.output_names]
return result[0] if len(result) == 1 else tuple(result)
# find next node to compute:
node_to_compute = _find_computable_node()
# clean up values we no longer need:
_clear_unused_values()
# this should never happen:
raise ValueError("nn.Graph.forward() failed. Is graph unconnected?")
def to_pytorch(self):
if not hasattr(self, "pytorch_model"):
raise AttributeError("CrypTen Graph detached from PyTorch model.")
if self.encrypted:
raise ValueError(
"CrypTen model must be decrypted before calling to_pytorch()"
)
with torch.no_grad():
for name, param in self.pytorch_model.named_parameters():
param.set_(self._modules[name].data)
return self.pytorch_model
class Sequential(Graph):
"""
Sequence of modules.
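    Example (an illustrative sketch; shapes are hypothetical)::
        >>> model = Sequential(Linear(10, 5), ReLU(), Linear(5, 2))
        >>> model.encrypt()
        >>> y = model(x)  # x is an encrypted tensor of shape (N, 10)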
"""
def __init__(self, *module_list, input_names=None):
if input_names is None:
input_names = ["input"]
super().__init__(input_names, "output")
if len(module_list) == 1 and isinstance(module_list[0], list):
raise DeprecationWarning(
"passing crypten.nn.Sequential a list is deprecated. Please "
"pass unpacked arguments (e.g. Sequential(*my_modules))."
)
for idx, module in enumerate(module_list):
if isinstance(module, OrderedDict):
for key, val in module.items():
self.add_module(key, val, input_names)
                    input_names = [key]
self.output_names = [key]
else:
module_name = str(idx)
if idx > 0:
input_names = [str(idx - 1)]
self.add_module(module_name, module, input_names)
self.output_names = [module_name]
class ModuleList(Module):
r"""Holds submodules in a list.
:class:`~crypten.nn.ModuleList` can be indexed like a regular Python list, but
modules it contains are properly registered, and will be visible by all
:class:`~crypten.nn.Module` methods.
Args:
modules (iterable, optional): an iterable of modules to add
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.linears = nn.ModuleList([nn.Linear(10, 10) for i in range(10)])
def forward(self, x):
# ModuleList can act as an iterable, or be indexed using ints
for i, l in enumerate(self.linears):
x = self.linears[i // 2](x) + l(x)
return x
"""
def __init__(self, modules=None):
super(ModuleList, self).__init__()
if modules is not None:
self += modules
def __dir__(self):
keys = super(ModuleList, self).__dir__()
keys = [key for key in keys if not key.isdigit()]
return keys
def __delitem__(self, idx):
if isinstance(idx, slice):
for k in range(len(self._modules))[idx]:
del self._modules[str(k)]
else:
del self._modules[self._get_abs_string_index(idx)]
# To preserve numbering, self._modules is being reconstructed with modules after deletion
str_indices = [str(i) for i in range(len(self._modules))]
self._modules = OrderedDict(list(zip(str_indices, self._modules.values())))
__module_list_func_names = [
"_get_abs_string_index",
"__getitem__",
"__setitem__",
"__len__",
"__iter__",
"__iadd__",
"insert",
"append",
"extend",
]
for func_name in __module_list_func_names:
func = getattr(torch.nn.ModuleList, func_name)
setattr(ModuleList, func_name, func)
class ModuleDict(Module):
r"""Holds submodules in a dictionary.
:class:`~crypten.nn.ModuleDict` can be indexed like a regular Python dictionary,
but modules it contains are properly registered, and will be visible by all
:class:`~crypten.nn.Module` methods.
:class:`~crypten.nn.ModuleDict` is an **ordered** dictionary that respects
* the order of insertion, and
* in :meth:`~crypten.nn.ModuleDict.update`, the order of the merged ``OrderedDict``
or another :class:`~crypten.nn.ModuleDict` (the argument to :meth:`~crypten.nn.ModuleDict.update`).
Note that :meth:`~crypten.nn.ModuleDict.update` with other unordered mapping
types (e.g., Python's plain ``dict``) does not preserve the order of the
merged mapping.
Arguments:
modules (iterable, optional): a mapping (dictionary) of (string: module)
or an iterable of key-value pairs of type (string, module)
Example::
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.choices = nn.ModuleDict({
'conv': nn.Conv2d(10, 10, 3),
'pool': nn.MaxPool2d(3)
})
self.activations = nn.ModuleDict([
['lrelu', nn.LeakyReLU()],
['prelu', nn.PReLU()]
])
def forward(self, x, choice, act):
x = self.choices[choice](x)
x = self.activations[act](x)
return x
"""
def __init__(self, modules=None):
super(ModuleDict, self).__init__()
if modules is not None:
self.update(modules)
__module_dict_func_names = [
"__getitem__",
"__setitem__",
"__delitem__",
"__len__",
"__iter__",
"__contains__",
"clear",
"pop",
"keys",
"items",
"values",
"update",
"forward",
]
for func_name in __module_dict_func_names:
func = getattr(torch.nn.ModuleDict, func_name)
setattr(ModuleDict, func_name, func)
class Parameter(Module):
"""
Module that holds a parameter tensor. If `trainable` is set to `False`, the
parameter will be treated as a buffer: it will not be trained. Hence,
parameters do not inherit `requires_grad` from the tensor they are initialized
with.
Parameters are encrypted when the `encrypt()` function is called on a module.
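    Example (illustrative)::
        >>> param = Parameter(torch.randn(5, 5))                    # trainable
        >>> buffer = Parameter(torch.randn(5, 5), trainable=False)  # buffer
        >>> param.encrypt()  # encrypts the underlying tensor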
"""
def __init__(self, param, trainable=True):
super().__init__()
# ensure that input is a PyTorch or CrypTen tensor:
assert torch.is_tensor(param) or crypten.is_encrypted_tensor(
param
), f"param must be PyTorch of CrypTen tensor, not {type(param)}"
if isinstance(param, torch.nn.parameter.Parameter):
param = param.data
# register parameter or buffer:
if trainable:
self.register_parameter("data", param)
else:
self.register_buffer("data", param)
# register whether or not module is encrypted:
self.encrypted = crypten.is_encrypted_tensor(param)
def forward(self, input):
return self.data
@property
def requires_grad(self):
return self.data.requires_grad
class Constant(Module):
"""
Module that holds a constant tensor. If `trainable` is set to `False`, the
parameter will be treated as a buffer: it will not be trained.
Constants are not encrypted when the `encrypt()` function is called on a module.
"""
SUPPORTS_PLAINTEXT_INPUTS = True
def __init__(self, value):
super().__init__()
if not torch.is_tensor(value):
value = torch.tensor(value)
assert torch.is_tensor(
value
), f"value must be PyTorch tensor, not {type(value)}"
self.value = value.to(dtype=torch.float)
def forward(self, input):
return self.value
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
assert "value" in attributes, "No value for Constant specified."
return Constant(attributes["value"])
def encrypt(self, mode=True, src=0):
self.encrypted = mode
return self
class ConstantOfShape(Module):
"""
    Module that returns a matrix of the specified size filled with a constant.
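    Example (illustrative)::
        >>> module = ConstantOfShape(1.0)
        >>> module(torch.tensor([2, 3]))  # returns a 2 x 3 tensor of ones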
"""
SUPPORTS_PLAINTEXT_INPUTS = True
def __init__(self, value):
super().__init__()
if not torch.is_tensor(value):
value = torch.tensor(value)
assert torch.is_tensor(
value
), f"value must be PyTorch tensor, not {type(value)}"
self.value = value.to(dtype=torch.float)
def forward(self, size):
if torch.is_tensor(size):
size = size.int().tolist()
assert isinstance(
size, (list, tuple)
), f"size must be list or tuple, not {type(size)}"
return self.value.expand(*size)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
assert "value" in attributes, "No value for ConstantOfShape specified."
return ConstantOfShape(attributes["value"])
def encrypt(self, mode=True, src=0):
self.encrypted = mode
return self
class Add(Module):
"""
Module that sums two values.
"""
def forward(self, input):
assert isinstance(input, (list, tuple)), "input must be list or tuple"
assert len(input) == 2, "input must contain two tensors"
return input[0].add(input[1])
@staticmethod
def from_onnx(attributes=None):
return Add()
class Sub(Module):
"""
Module that subtracts two values.
"""
def forward(self, input):
assert isinstance(input, (list, tuple)), "input must be list or tuple"
assert len(input) == 2, "input must contain two tensors"
return input[0].sub(input[1])
@staticmethod
def from_onnx(attributes=None):
return Sub()
class Mul(Module):
"""
Module that multiplies two values.
"""
def forward(self, input):
assert isinstance(input, (list, tuple)), "input must be list or tuple"
assert len(input) == 2, "input must contain two tensors"
return input[0].mul(input[1])
@staticmethod
def from_onnx(attributes=None):
return Mul()
class Div(Module):
"""
Module that divides two values.
"""
def forward(self, input):
assert isinstance(input, (list, tuple)), "input must be list or tuple"
assert len(input) == 2, "input must contain two tensors"
return input[0].div(input[1])
@staticmethod
def from_onnx(attributes=None):
return Div()
class Pow(Module):
"""
Module that takes input to some power, where the power is an integer.
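    Example (illustrative)::
        >>> x = torch.tensor([1.0, 2.0, 3.0])
        >>> Pow()([x, torch.tensor(2.0)])  # tensor([1., 4., 9.])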
"""
def forward(self, input):
base, power = input
if torch.is_tensor(power) and power.nelement() == 1:
power = power.item()
if int(power) == power: # try to convert power to integer if possible
power = int(power)
return base.pow(power)
@staticmethod
def from_onnx(attributes=None):
return Pow()
class Sqrt(Module):
"""
Module that takes square-root of the input.
"""
def forward(self, input):
return input.sqrt()
@staticmethod
def from_onnx(attributes=None):
return Sqrt()
class Exp(Module):
"""
Module that calculates the exponential of the given input tensor, element-wise.
"""
def forward(self, input):
return input.exp()
@staticmethod
def from_onnx(attributes=None):
return Exp()
class Erf(Module):
"""
Module that calculates the error function of the given input tensor, element-wise.
"""
def forward(self, input):
return input.erf()
@staticmethod
def from_onnx(attributes=None):
return Erf()
class _Reduce(Module):
"""
Base class for the functionality of ONNX ReduceMean (defined here as Mean),
and ONNX ReduceSum (defined here as Sum).
"""
def __init__(self, dim, keepdim=False, reduction_fn="mean"):
super().__init__()
self.dim = dim
self.keepdim = keepdim
self.reduction_fn = reduction_fn
def forward(self, input):
return getattr(input, self.reduction_fn)(self.dim, keepdim=self.keepdim)
class Mean(_Reduce):
"""
    Module that computes the mean of the input tensor's elements along the provided axes.
If `keepdim` is True, the output tensor is of the same size as input
except in the dimension(s) `dim` where it is of size 1.
Otherwise, `dim` is squeezed, resulting in the output tensor having 1
(or `len(dim)`) fewer dimension(s).
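    Example (illustrative; `x` is a hypothetical tensor of shape (4, 5))::
        >>> Mean([1], keepdim=True)(x).size()   # torch.Size([4, 1])
        >>> Mean([1], keepdim=False)(x).size()  # torch.Size([4])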
"""
def __init__(self, dim, keepdim=False):
super().__init__(dim, keepdim, "mean")
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
keepdim = _identify_bool_attributes_with_defaults(attributes, "keepdims", 1)
return Mean(attributes["axes"], keepdim)
class Sum(_Reduce):
"""
    Module that computes the sum of the input tensor's elements along the provided axes.
If `keepdim` is True, the output tensor is of the same size as input
except in the dimension(s) `dim` where it is of size 1.
Otherwise, `dim` is squeezed, resulting in the output tensor having 1
(or `len(dim)`) fewer dimension(s).
"""
def __init__(self, dim, keepdim=False):
super().__init__(dim, keepdim, "sum")
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
keepdim = _identify_bool_attributes_with_defaults(attributes, "keepdims", 1)
return Sum(attributes["axes"], keepdim)
class Transpose(Module):
"""
Module that transposes the input tensor similar to
`numpy.transpose`. For example, when perm=(1, 0, 2), given an input
tensor of shape (1, 2, 3), the output shape will be (2, 1, 3). Note
that the signature of this module matches the ONNX specification
and differs from `torch.transpose`
Args:
`perm`: list of ints
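    Example (illustrative)::
        >>> x = torch.randn(1, 2, 3)
        >>> Transpose([1, 0, 2])(x).size()  # torch.Size([2, 1, 3])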
"""
def __init__(self, perm):
super().__init__()
self.perm = perm
def forward(self, input):
# New Linear jit tracer causes Transpose module to have a weight
if hasattr(self, "weight"):
input = self.weight
assert input.dim() == len(self.perm)
return input.permute(self.perm)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
# TODO: ONNX specification says the permutation should be
# reversed if not provided in the attributes. Because we
# don't have the input size here, we need figure out a
# different way of supporting this, if we want to do that.
return Transpose(attributes["perm"])
class Squeeze(Module):
r"""
Returns a tensor with all the dimensions of :attr:`input` of size `1` removed.
For example, if `input` is of shape:
:math:`(A \times 1 \times B \times C \times 1 \times D)` then the `out` tensor
will be of shape: :math:`(A \times B \times C \times D)`.
When :attr:`dimension` is given, a squeeze operation is done only in the given
dimension. If `input` is of shape: :math:`(A \times 1 \times B)`,
``squeeze(input, 0)`` leaves the tensor unchanged, but ``squeeze(input, 1)``
will squeeze the tensor to the shape :math:`(A \times B)`.
.. note:: The returned tensor shares the storage with the input tensor,
so changing the contents of one will change the contents of the other.
Args:
dimension (int, optional): if given, the input will be squeezed only in
this dimension
"""
def __init__(self, dimension):
super().__init__()
if isinstance(dimension, (list, tuple)):
assert len(dimension) == 1, "can only squeeze one dimension at a time"
dimension = dimension[0]
self.dimension = dimension
def forward(self, input):
return input.squeeze(self.dimension)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
dimension = attributes["axes"]
assert len(dimension) == 1, "can only squeeze one dimension at a time"
return Squeeze(dimension[0])
class Unsqueeze(Module):
"""
Module that unsqueezes a tensor.
Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A :attr:`dimension` value within the range ``[-input.dim() - 1, input.dim() + 1)``
can be used. Negative :attr:`dimension` will correspond to :meth:`unsqueeze`
applied at :attr:`dimension` = ``dim + input.dim() + 1``.
Args:
dimension (int): the index at which to insert the singleton dimension
"""
SUPPORTS_PLAINTEXT_INPUTS = True
def __init__(self, dimension):
super().__init__()
if isinstance(dimension, (list, tuple)):
            assert len(dimension) == 1, "can only unsqueeze one dimension at a time"
dimension = dimension[0]
self.dimension = dimension
def forward(self, input):
if isinstance(input, list):
assert len(input) == 2, "list input must be [x, dimension]"
input, dimension = input
assert len(dimension) == 1, "can only unsqueeze one dimension at a time"
dimension = int(dimension.item())
else:
dimension = self.dimension
return input.unsqueeze(dimension)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
dimension = attributes.get("axes", [None])
assert len(dimension) == 1, "can only unsqueeze one dimension at a time"
return Unsqueeze(dimension[0])
class Slice(Module):
"""
Module that slices the input along the specified `axes` (list of `int`s) from
the indices in `start`s to the indices in `end`s.
This module definition matches ONNX opset version 11.
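    Example (illustrative)::
        >>> x = torch.arange(10.0).reshape(2, 5)
        >>> Slice([1], [3], axes=[1])(x)  # keeps columns 1 and 2 of each row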
"""
def __init__(self, starts, ends, axes=None):
super().__init__()
self.starts = starts
self.ends = ends
self.axes = axes
def forward(self, x):
# Process inputs:
axes = None
if isinstance(x, list):
if len(x) == 3:
x, starts, ends = x
axes, steps = self.axes, 1
elif len(x) == 4:
x, starts, ends, axes = x
steps = 1
elif len(x) == 5:
x, starts, ends, axes, steps = x
if not torch.eq(steps.int(), 1).all():
raise ValueError("Only steps value of 1 currently supported.")
else:
raise ValueError("list input x must have 3, 4, or 5, values")
starts, ends = starts.int().tolist(), ends.int().tolist()
else:
starts, ends, axes = self.starts, self.ends, self.axes
steps = 1
if axes is None:
axes = list(range(len(starts)))
# Perform slicing:
output = x
for idx, axis in enumerate(axes):
start, end = int(starts[idx]), int(ends[idx])
length = min(end, output.size(int(axis))) - start
output = output.narrow(int(axis), start, length)
return output
@staticmethod
def from_onnx(attributes=None):
return Slice(
attributes.get("starts", None),
attributes.get("ends", None),
axes=attributes.get("axes", None),
)
class Expand(Module):
"""
Module that expands a tensor to the specified size.
"""
def forward(self, x):
# unpack inputs:
input, shape = tuple(x)
if torch.is_tensor(shape):
shape = shape.long().tolist()
# broadcasting in ONNX is different from PyTorch when shape is 1:
for idx in range(len(shape)):
if shape[idx] == 1 and input.size(idx) > 1:
shape[idx] = input.size(idx)
# perform the expansion:
return input.expand(shape)
@staticmethod
def from_onnx(attributes=None):
return Expand()
class Cast(Module):
"""
Module that casts the input tensor to the specified type.
"""
def __init__(self, dtype):
super().__init__()
self.dtype = dtype
def forward(self, x):
if torch.is_tensor(x):
return x.to(dtype=self.dtype)
return x # this is a no-op as MPCTensors do not know their dtype
@staticmethod
def from_onnx(attributes=None):
dtype = sym_help._get_const(attributes["to"], "i", "dtype")
return Cast(dtype=sym_help.scalar_type_to_pytorch_type[dtype])
class Range(Module):
"""
Module that returns a tensor with the specified range.
"""
SUPPORTS_PLAINTEXT_INPUTS = True
def forward(self, x):
if len(x) == 2:
start, end = tuple(x)
step = 1
elif len(x) == 3:
start, end, step = tuple(x)
else:
raise ValueError(f"Expected 2 or 3 inputs, but received {len(x)}.")
return torch.arange(start, end, step)
@staticmethod
def from_onnx(attributes=None):
return Range()
class Equal(Module):
"""
Module that compares two tensors to determine which elements are equal.
"""
def forward(self, x):
x1, x2 = tuple(x)
if x1.size() != x2.size():
return False
return x1.eq(x2)
@staticmethod
def from_onnx(attributes=None):
return Equal()
class Where(Module):
"""
Module that returns elements from one tensor or the other depending on the
value of the specified condition.
"""
def forward(self, x):
condition, x1, x2 = tuple(x)
return crypten.where(condition, x1, x2)
@staticmethod
def from_onnx(attributes=None):
return Where()
class Flatten(Module):
"""
Module that flattens the input tensor into a 2D matrix.
Args:
axis (int, optional): must not be larger than dimension
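    Example (illustrative)::
        >>> x = torch.randn(4, 3, 2, 2)
        >>> Flatten(axis=1)(x).size()  # torch.Size([4, 12])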
"""
def __init__(self, axis=1):
super().__init__()
self.axis = axis
def forward(self, x):
if self.axis == 0:
return x.view(1, -1)
else:
assert self.axis <= x.dim(), "axis must not be larger than dimension"
prod = 1
for i in range(self.axis):
prod *= x.size(i)
return x.view(prod, -1)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
# axis : int (default is 1)
axis = 1
if "axis" in attributes:
axis = int(attributes["axis"])
assert axis >= 0, "axis must not be negative"
return Flatten(axis)
class Shape(Module):
"""
    Module that returns the shape of a tensor as a plaintext `torch` tensor,
    even when the input tensor is encrypted.
"""
SUPPORTS_PLAINTEXT_INPUTS = True
def __init__(self, dim=None):
super().__init__()
self.dim = dim
def forward(self, x, dim=None):
dim = dim if dim is not None else self.dim
if dim is None:
size = torch.tensor(x.size())
else:
size = torch.tensor(x.size(dim))
return size
@staticmethod
def from_onnx(attributes=None):
return Shape()
class Concat(Module):
"""
Module that concatenates tensors along a dimension.
Args:
        dimension (int): the dimension over which to concatenate
"""
def __init__(self, dimension):
super().__init__()
self.dimension = dimension
def forward(self, input):
assert isinstance(input, (list, tuple)), "input needs to be a list or tuple"
assert len(input) >= 1, "need at least one tensor to concatenate"
return crypten.cat(input, self.dimension)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
dimension = attributes["axis"]
return Concat(dimension)
class Reshape(Module):
"""
Module that reshapes tensors to new dimensions.
Returns a tensor with same data and number of elements as :attr:`self`,
but with the specified shape.
When possible, the returned tensor will be a view
of :attr:`self`. Otherwise, it will be a copy. Contiguous inputs and inputs
with compatible strides can be reshaped without copying, but you should not
depend on the copying vs. viewing behavior.
See :meth:`torch.Tensor.view` on when it is possible to return a view.
A single dimension may be -1, in which case it's inferred from the remaining
dimensions and the number of elements in :attr:`self`.
Args:
input (tuple): contains input tensor and shape (torch.Size)
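    Example (illustrative)::
        >>> x = torch.randn(2, 3)
        >>> Reshape()([x, torch.tensor([3, 2])]).size()  # torch.Size([3, 2])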
"""
def __init__(self, shape=None):
super(Reshape, self).__init__()
self.shape = shape
def forward(self, tensor, shape=None):
if isinstance(tensor, list) and len(tensor) == 2:
tensor, shape = tensor
shape = shape if shape is not None else self.shape
assert (
shape is not None
), "Reshape requires a shape in forward if not supplied in initialization"
if torch.is_tensor(shape):
shape = torch.Size(shape.long())
return tensor.reshape(shape)
@staticmethod
def from_onnx(attributes=None):
if "shape" in attributes:
return Reshape(shape=attributes["shape"])
return Reshape()
class Dropout(Module):
r"""During training, randomly zeroes some of the elements of the input
tensor with probability :attr:`p` using samples from a Bernoulli
distribution. Furthermore, the outputs are scaled by a factor of
:math:`\frac{1}{1-p}` during training. This means that during evaluation
the module simply computes an identity function.
Args:
p: probability of an element to be zeroed. Default: 0.5
Shape:
- Input: :math:`(*)`. Input can be of any shape
- Output: :math:`(*)`. Output is of the same shape as input
"""
def __init__(self, p=0.5, inplace=False):
super().__init__()
if inplace:
logging.warning(
"CrypTen Dropout module does not support inplace computation."
)
self.p = p
def forward(self, input):
return input.dropout(p=self.p, training=self.training)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
return Dropout(attributes["ratio"])
class DropoutNd(Module):
"""Randomly zero out entire channels (a channel is a nD feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a nD tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Args:
p (float, optional): probability of an element to be zero-ed.
"""
def __init__(self, p=0.5, inplace=False):
super().__init__()
if inplace:
logging.warning(
"CrypTen DropoutNd module does not support inplace computation."
)
self.p = p
def forward(self, input):
return input._feature_dropout(p=self.p, training=self.training)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
return DropoutNd(attributes["ratio"])
class Dropout2d(DropoutNd):
r"""
Randomly zero out entire channels (a channel is a 2D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 2D tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv2d` modules.
Args:
p (float, optional): probability of an element to be zero-ed.
Shape:
- Input: :math:`(N, C, H, W)`
- Output: :math:`(N, C, H, W)` (same shape as input)
"""
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
return Dropout2d(attributes["ratio"])
class Dropout3d(DropoutNd):
r"""Randomly zero out entire channels (a channel is a 3D feature map,
e.g., the :math:`j`-th channel of the :math:`i`-th sample in the
batched input is a 3D tensor :math:`\text{input}[i, j]`).
Each channel will be zeroed out independently on every forward call with
probability :attr:`p` using samples from a Bernoulli distribution.
Usually the input comes from :class:`nn.Conv3d` modules.
Args:
p (float, optional): probability of an element to be zeroed.
Shape:
- Input: :math:`(N, C, D, H, W)`
- Output: :math:`(N, C, D, H, W)` (same shape as input)
"""
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
return Dropout3d(attributes["ratio"])
class Gather(Module):
r"""
Module that gathers elements from tensor according to indices. Given data tensor
of rank :math:`r >= 1`, and indices tensor of rank :math:`q`, gather entries of
the axis dimension of data (by default outer-most one as `axis = 0`)
indexed by indices, and concatenates them in an output tensor of rank
:math:`q + (r - 1)`. For example, for `axis = 0`: Let :math:`k =
indices[i_{0}, ..., i_{q-1}]`. Then :math:`output[i_{0}, ..., i_{q-1}, j_{0},
..., j_{r-2}] = input[k, j_{0}, ..., j_{r-2}]`. This is an operation from the
ONNX specification.
Args:
dimension (int): the axis along which to index
        indices (tensor): the indices to select along the `dimension`
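    Example (illustrative)::
        >>> x = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
        >>> Gather(0, indices=torch.tensor([1]))(x)  # tensor([[3., 4.]])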
"""
SUPPORTS_PLAINTEXT_INPUTS = True
def __init__(self, dimension, indices=None):
super().__init__()
self.dimension = dimension
self.indices = indices
def forward(self, input):
if not isinstance(input, (list, tuple)):
tensor = input
indices = self.indices
elif len(input) == 1:
tensor = input[0]
indices = self.indices
else:
tensor, indices = input
# indices need to be a PyTorch tensor:
if crypten.is_encrypted_tensor(indices):
raise ValueError("Cannot perform Gather operation using encrypted indices.")
elif isinstance(indices, (int, list, tuple)):
indices = torch.tensor(indices)
indices = indices.long()
# CrypTensor input
if crypten.is_encrypted_tensor(tensor):
result = tensor.take(indices, self.dimension)
# Torch tensor input
elif self.dimension is None or tensor.dim() == 0:
result = torch.take(tensor, indices)
else:
all_indices = [slice(0, x) for x in tensor.size()]
all_indices[self.dimension] = indices
result = tensor[all_indices]
return result
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
if "axis" not in attributes:
attributes["axis"] = None
if "shape" not in attributes:
attributes["shape"] = None
return Gather(attributes["axis"], indices=attributes["shape"])
class _ConstantPad(Module):
"""
Module that pads a tensor.
"""
def __init__(self, padding, value, ndims, mode="constant"):
super().__init__()
if isinstance(padding, (int)):
padding = [padding, padding] * ndims
self.padding = padding
self.value = value
self.mode = mode
def forward(self, input):
if isinstance(input, list):
assert len(input) == 2, "input should be [tensor, pads] list"
padding = tuple(input[1].int().tolist())
input = input[0]
else:
padding = self.padding
return input.pad(padding, value=self.value, mode=self.mode)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
assert attributes["mode"] == b"constant", "only constant padding supported"
return _ConstantPad(None, 0, 0, mode="constant")
class ConstantPad1d(_ConstantPad):
"""
Module that pads a 1D tensor.
"""
def __init__(self, padding, value, mode="constant"):
super(ConstantPad1d, self).__init__(padding, value, 1, mode=mode)
class ConstantPad2d(_ConstantPad):
"""
Module that pads a 2D tensor.
"""
def __init__(self, padding, value, mode="constant"):
super(ConstantPad2d, self).__init__(padding, value, 2, mode=mode)
class ConstantPad3d(_ConstantPad):
"""
Module that pads a 3D tensor.
"""
def __init__(self, padding, value, mode="constant"):
super(ConstantPad3d, self).__init__(padding, value, 3, mode=mode)
class Gemm(Module):
"""
Module that performs a general matrix multiplication.
Unlike the `Linear` module, this module is stateless.
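    Example (an illustrative sketch; computes `alpha * a.matmul(b) + beta * c`,
    where `a`, `b`, and `c` are hypothetical tensors)::
        >>> gemm = Gemm(alpha=2.0, beta=1.0)
        >>> out = gemm([a, b, c])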
"""
def __init__(self, alpha=1.0, beta=1.0, trans_a=False, trans_b=False):
super().__init__()
self.alpha = alpha
self.beta = beta
self.trans_a = trans_a
self.trans_b = trans_b
def forward(self, x):
a, b, c = tuple(x)
if self.trans_a:
a = a.t()
if self.trans_b:
b = b.t()
output = a.matmul(b).mul(self.alpha)
output = output.add(c.mul(self.beta))
return output
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
assert "alpha" in attributes, "attribute alpha missing"
assert "beta" in attributes, "attribute beta missing"
return Gemm(
alpha=attributes["alpha"],
beta=attributes["beta"],
trans_a=attributes.get("transA", False),
trans_b=attributes.get("transB", False),
)
class Linear(Module):
"""
Module that performs linear transformation.
Applies a linear transformation to the incoming data: :math:`y = xA^T + b`
Args:
in_features: size of each input sample
out_features: size of each output sample
bias: If set to ``False``, the layer will not learn an additive bias.
Default: ``True``
Shape:
- Input: :math:`(N, *, H_{in})` where :math:`*` means any number of
additional dimensions and :math:`H_{in} = \text{in\_features}`
- Output: :math:`(N, *, H_{out})` where all but the last dimension
are the same shape as the input and :math:`H_{out} = \text{out\_features}`.
""" # noqa: W605
def __init__(self, in_features, out_features, bias=True):
super().__init__()
# initialize model parameters:
pytorch_module = torch.nn.Linear(in_features, out_features, bias=bias)
self.register_parameter("weight", pytorch_module.weight)
if bias:
self.register_parameter("bias", pytorch_module.bias)
def forward(self, x):
output = x.matmul(self.weight.t())
if hasattr(self, "bias"):
output = output.add(self.bias)
return output
class MatMul(Module):
"""
Matrix product of two tensors.
    The behavior depends on the dimensionality of the tensors as follows:
- If both tensors are 1-dimensional, the dot product (scalar) is returned.
- If both arguments are 2-dimensional, the matrix-matrix product is returned.
- If the first argument is 1-dimensional and the second argument is
2-dimensional, a 1 is prepended to its dimension for the purpose of the
matrix multiply. After the matrix multiply, the prepended dimension is removed.
- If the first argument is 2-dimensional and the second argument is
1-dimensional, the matrix-vector product is returned.
- If both arguments are at least 1-dimensional and at least one argument is
N-dimensional (where N > 2), then a batched matrix multiply is returned.
If the first argument is 1-dimensional, a 1 is prepended to its dimension
for the purpose of the batched matrix multiply and removed after. If the
second argument is 1-dimensional, a 1 is appended to its dimension for the
    purpose of the batched matrix multiply and removed after.
The non-matrix (i.e. batch) dimensions are broadcasted (and thus
must be broadcastable). For example, if :attr:`input` is a
:math:`(j \times 1 \times n \times m)` tensor and :attr:`other` is
a :math:`(k \times m \times p)` tensor, :attr:`out` will be an
:math:`(j \times k \times n \times p)` tensor.
Arguments:
Option 1: [input1, input2]
input1: first input matrix to be multiplied
input2: second input matrix to be multiplied.
Option 2: input1
input1: first input matrix to be multiplied, if module
is already initialized with the second (i.e. multiplier) matrix.
"""
def __init__(self, weight=None):
super().__init__()
if weight is not None:
self.register_parameter("weight", weight)
def forward(self, x):
if hasattr(self, "weight"):
output = x.matmul(self.weight)
else:
assert isinstance(x, (list, tuple)), "input must be list or tuple"
assert len(x) == 2, "input must contain two tensors"
output = x[0].matmul(x[1])
return output
@staticmethod
def from_onnx(attributes=None):
return MatMul()
class Conv(Module):
"""
Module that performs convolution, following the ONNX specification of that
operation. Unlike other Conv modules, this module is stateless.
"""
def __init__(self, stride, padding, dilation, groups=1):
super().__init__()
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
# unpack inputs:
if len(x) == 2:
x, weight = x
bias = None
elif len(x) == 3:
x, weight, bias = x
else:
raise ValueError(f"Conv module must have 2 or 3 inputs, not {len(x)}")
# prepare inputs into convolution function:
dim = weight.dim() - 2
if dim < 1 or dim > 2:
raise ValueError(
f"Convolution on {dim}-dimensional input is not supported."
)
args = [weight]
kwargs = {
"stride": self.stride,
"padding": self.padding,
"dilation": self.dilation,
"groups": self.groups,
}
# identify correct convolution function to use:
if torch.is_tensor(x):
func = getattr(torch.nn.functional, f"conv{dim}d", None)
            args = [x] + args + [bias]  # torch functional conv takes (input, weight, bias, ...)
else:
func = getattr(x, f"conv{dim}d", None)
# perform the convolution:
x = func(*args, **kwargs)
        # add the bias term if it is specified, and wasn't already added:
if not torch.is_tensor(x) and bias is not None:
bias = bias.unsqueeze(0)
while bias.dim() < x.dim():
bias = bias.unsqueeze(-1)
x = x.add(bias)
return x
@staticmethod
def from_onnx(attributes=None):
# check attribute inputs:
if attributes is None:
attributes = {}
for attr in ["strides", "pads", "dilations"]:
assert attr in attributes, f"missing attribute {attr}"
# CrypTen and Torch use a single padding number per dimension:
padding = attributes["pads"]
padding = [padding[idx] for idx in range(0, len(padding), 2)]
# return module:
return Conv(
attributes["strides"],
padding,
attributes["dilations"],
groups=attributes.get("group", 1),
)
# TODO: Eliminate copy-pasta by implementing _Conv parent class
class Conv1d(Module):
r"""
Module that performs 1D convolution.
Applies a 1D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{\text{in}}, L)` and output :math:`(N, C_{\text{out}}, L_{\text{out}})`
can be precisely described as:
.. math::
\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
\sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k)
\star \text{input}(N_i, k)
where :math:`\star` is the valid `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
and :math:`L` is a length of signal sequence.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a one-element tuple.
* :attr:`padding` controls the amount of implicit zero-paddings on both
sides for :attr:`padding` number of points for each dimension.
* :attr:`dilation` controls the spacing between the kernel points; also
known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
* :attr:`groups` controls the connections between inputs and outputs.
:attr:`in_channels` and :attr:`out_channels` must both be divisible by
:attr:`groups`. For example,
* At groups=1, all inputs are convolved to all outputs.
* At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels,
and producing half the output channels, and both subsequently
concatenated.
* At groups= :attr:`in_channels`, each input channel is convolved with
its own set of filters, of size:
:math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
:attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the
height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
        padding_mode (string, optional): Accepted values `zeros` and `circular`
Default: `zeros`
dilation (int or tuple, optional): Spacing between kernel elements.
Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output.
Default: ``True``
Shape:
- Input: :math:`(N, C_{in}, L_{in})`
- Output: :math:`(N, C_{out}, L_{out})` where
.. math::
L_{out} = \left\lfloor\frac{L_{in} +
2 \times \text{padding} - \text{dilation} \times
(\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
""" # noqa: W605
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
# check inputs:
super().__init__()
assert isinstance(stride, int), "stride must be an integer"
assert isinstance(padding, int), "padding must be an integer"
# initialize model parameters:
pytorch_module = torch.nn.Conv1d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
self.register_parameter("weight", pytorch_module.weight)
if bias:
self.register_parameter("bias", pytorch_module.bias)
# set other instance fields:
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
x = x.conv1d(
self.weight,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
if hasattr(self, "bias"):
x = x.add(self.bias.unsqueeze(-1))
return x
class Conv2d(Module):
r"""
Module that performs 2D convolution.
Applies a 2D convolution over an input signal composed of several input
planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C_{\text{in}}, H, W)` and output :math:`(N, C_{\text{out}},
H_{\text{out}}, W_{\text{out}})` can be precisely described as:
.. math::
\text{out}(N_i, C_{\text{out}_j}) = \text{bias}(C_{\text{out}_j}) +
\sum_{k = 0}^{C_{\text{in}} - 1} \text{weight}(C_{\text{out}_j}, k)
\star \text{input}(N_i, k)
where :math:`\star` is the valid 2D `cross-correlation`_ operator,
:math:`N` is a batch size, :math:`C` denotes a number of channels,
:math:`H` is a height of input planes in pixels, and :math:`W` is
width in pixels.
* :attr:`stride` controls the stride for the cross-correlation, a single
number or a tuple.
* :attr:`padding` controls the amount of implicit zero-paddings on both
sides for :attr:`padding` number of points for each dimension.
* :attr:`dilation` controls the spacing between the kernel points; also
known as the à trous algorithm. It is harder to describe, but this `link`_
has a nice visualization of what :attr:`dilation` does.
* :attr:`groups` controls the connections between inputs and outputs.
:attr:`in_channels` and :attr:`out_channels` must both be divisible by
:attr:`groups`. For example,
* At groups=1, all inputs are convolved to all outputs.
* At groups=2, the operation becomes equivalent to having two conv
layers side by side, each seeing half the input channels,
and producing half the output channels, and both subsequently
concatenated.
* At groups= :attr:`in_channels`, each input channel is convolved with
its own set of filters, of size:
:math:`\left\lfloor\frac{out\_channels}{in\_channels}\right\rfloor`.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
:attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the
height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension
Args:
in_channels (int): Number of channels in the input image
out_channels (int): Number of channels produced by the convolution
kernel_size (int or tuple): Size of the convolving kernel
stride (int or tuple, optional): Stride of the convolution. Default: 1
padding (int or tuple, optional): Zero-padding added to both sides of
the input. Default: 0
        padding_mode (string, optional): Accepted values `zeros` and `circular`
Default: `zeros`
dilation (int or tuple, optional): Spacing between kernel elements.
Default: 1
bias (bool, optional): If ``True``, adds a learnable bias to the output.
Default: ``True``
Shape:
- Input: :math:`(N, C_{in}, H_{in}, W_{in})`
- Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
.. math::
H_{out} = \left\lfloor\frac{H_{in} +
2 \times \text{padding}[0] - \text{dilation}[0] \times
(\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} +
2 \times \text{padding}[1] - \text{dilation}[1] \times
(\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
.. _cross-correlation:
https://en.wikipedia.org/wiki/Cross-correlation
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
# check inputs:
super().__init__()
# initialize model parameters:
pytorch_module = torch.nn.Conv2d(
in_channels,
out_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
)
self.register_parameter("weight", pytorch_module.weight)
if bias:
self.register_parameter("bias", pytorch_module.bias)
else:
self.bias = pytorch_module.bias
# set other instance fields:
self.stride = stride
self.padding = padding
self.dilation = dilation
self.groups = groups
def forward(self, x):
x = x.conv2d(
self.weight,
stride=self.stride,
padding=self.padding,
dilation=self.dilation,
groups=self.groups,
)
if hasattr(self, "bias") and self.bias is not None:
x = x.add(self.bias.unsqueeze(-1).unsqueeze(-1))
return x
class ReLU(Module):
r"""
Module that computes rectified linear unit (ReLU) activations element-wise.
:math:`\text{ReLU}(x)= \max(0, x)`
"""
def __init__(self, inplace=False):
super().__init__()
if inplace:
logging.warning("CrypTen ReLU module does not support inplace computation.")
def forward(self, x):
return x.relu()
@staticmethod
def from_onnx(attributes=None):
return ReLU()
class Hardtanh(Module):
r"""Applies the Hardtanh function element-wise
    Hardtanh is defined as:
.. math::
\text{Hardtanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
The range of the linear region :math:`[-1, 1]` can be adjusted using
:attr:`min_val` and :attr:`max_val`.
Args:
min_val: minimum value of the linear region range. Default: -1
max_val: maximum value of the linear region range. Default: 1
inplace: can optionally do the operation in-place. Default: ``False``
Keyword arguments :attr:`min_value` and :attr:`max_value`
have been deprecated in favor of :attr:`min_val` and :attr:`max_val`.
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: ../scripts/activation_images/Hardtanh.png
Examples::
>>> m = nn.Hardtanh(-2, 2)
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, min_val=-1.0, max_val=1.0, inplace=False):
super().__init__()
self.min_val = min_val
self.max_val = max_val
if inplace:
logging.warning(
"CrypTen Hardtanh module does not support inplace computation."
)
def forward(self, input):
if isinstance(input, list):
input, min_val, max_val = input
min_val, max_val = min_val.item(), max_val.item()
else:
min_val, max_val = self.min_val, self.max_val
return input.hardtanh(min_val, max_val)
@staticmethod
def from_onnx(attributes=None):
return Hardtanh(
min_val=attributes.get("min", -1.0),
max_val=attributes.get("max", 1.0),
)
class ReLU6(Hardtanh):
r"""Applies the element-wise function:
.. math::
\text{ReLU6}(x) = \min(\max(0,x), 6)
Args:
inplace: can optionally do the operation in-place. Default: ``False``
Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
dimensions
- Output: :math:`(N, *)`, same shape as the input
.. image:: ../scripts/activation_images/ReLU6.png
Examples::
>>> m = nn.ReLU6()
>>> input = torch.randn(2)
>>> output = m(input)
"""
def __init__(self, inplace=False):
if inplace:
logging.warning(
"CrypTen ReLU6 module does not support inplace computation."
)
super(ReLU6, self).__init__(min_val=0, max_val=6, inplace=False)
class Sigmoid(Module):
r"""Applies the element-wise function:
.. math::
\text{Sigmoid}(x) = \sigma(x) = \frac{1}{1 + \exp(-x)}
"""
def forward(self, x):
return x.sigmoid()
@staticmethod
def from_onnx(attributes=None):
return Sigmoid()
class Softmax(Module):
r"""Applies the Softmax function to an n-dimensional input Tensor
rescaling them so that the elements of the n-dimensional output Tensor
lie in the range [0,1] and sum to 1.
Softmax is defined as:
.. math::
\text{Softmax}(x_{i}) = \frac{\exp(x_i)}{\sum_j \exp(x_j)}
Shape:
        - Input: :math:`(*)` where `*` means any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Returns:
a Tensor of the same dimension and shape as the input with
values in the range [0, 1]
Arguments:
dim (int): A dimension along which Softmax will be computed (so every slice
along dim will sum to 1).
"""
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input):
return input.softmax(self.dim)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
assert "axis" in attributes, "axis attribute missing"
return Softmax(attributes["axis"])
class LogSoftmax(Module):
r"""Applies the :math:`\log(\text{Softmax}(x))` function to an n-dimensional
input Tensor. The LogSoftmax formulation can be simplified as:
.. math::
\text{LogSoftmax}(x_{i}) =
\log\left(\frac{\exp(x_i) }{ \sum_j \exp(x_j)} \right)
Shape:
        - Input: :math:`(*)` where `*` means any number of additional
dimensions
- Output: :math:`(*)`, same shape as the input
Arguments:
dim (int): A dimension along which LogSoftmax will be computed.
Returns:
a Tensor of the same dimension and shape as the input with
        values in the range [-inf, 0)
"""
def __init__(self, dim):
super().__init__()
self.dim = dim
def forward(self, input):
return input.log_softmax(self.dim)
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
assert "axis" in attributes, "axis attribute missing"
return LogSoftmax(attributes["axis"])
class _Pool2d(Module):
"""
Module that performs 2D pooling.
Applies a 2D max or average pooling over an input signal composed of several input
planes.
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on
both sides for :attr:`padding` number of points. :attr:`dilation` controls
the spacing between the kernel points. It is harder to describe, but this
`link`_ has a nice visualization of what :attr:`dilation` does.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`,
:attr:`dilation` can either be:
- a single ``int`` -- in which case the same value is used for the
height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension
Args:
pool_type (str): specifies "average" or "max" pooling
kernel_size: the size of the window to take a max over
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
.. _link:
https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
"""
def __init__(self, pool_type, kernel_size, stride=None, padding=0, ceil_mode=False):
super().__init__()
self.pool_type = pool_type
self.kernel_size = kernel_size
self.padding = padding
self.stride = stride
self.ceil_mode = ceil_mode
def forward(self, x):
args = [self.kernel_size]
kwargs = {
"stride": self.stride,
"padding": self.padding,
"ceil_mode": self.ceil_mode,
}
if self.pool_type == "average":
return x.avg_pool2d(*args, **kwargs)
elif self.pool_type == "max":
return x.max_pool2d(*args, **kwargs)
else:
raise ValueError("Unknown pooling type: %s" % self.pool_type)
@staticmethod
def from_onnx(pool_type, attributes=None):
# check attributes:
if attributes is None:
attributes = {}
if "pads" not in attributes:
attributes["pads"] = [0]
assert _all_the_same(["kernel_shape"]), "only square kernels are supported"
assert _all_the_same(
attributes["strides"]
), "stride must be the same in each dimension"
attributes["ceil_mode"] = attributes.get("ceil_mode", 0)
attributes["ceil_mode"] = attributes["ceil_mode"] > 0
# initialize module
args = [attributes["kernel_shape"][0]]
kwargs = {
"stride": attributes["strides"][0],
"padding": attributes["pads"][0],
"ceil_mode": attributes["ceil_mode"],
}
if pool_type == "average":
return AvgPool2d(*args, **kwargs)
elif pool_type == "max":
return MaxPool2d(*args, **kwargs)
else:
raise ValueError("Unknown pooling type: %s" % pool_type)
class AvgPool2d(_Pool2d):
r"""
    Module that applies a 2D average pooling
over an input signal composed of several input planes.
In the simplest case, the output value of the layer with input size
:math:`(N, C, H, W)`, output :math:`(N, C, H_{out}, W_{out})` and
:attr:`kernel_size` :math:`(kH, kW)` can be precisely described as:
.. math::
out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1}
\sum_{n=0}^{kW-1} input(N_i, C_j, stride[0] \times h + m, stride[1]
\times w + n)
If :attr:`padding` is non-zero, then the input is implicitly zero-padded on
both sides for :attr:`padding` number of points.
The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
- a single ``int`` -- in which case the same value is used for the
height and width dimension
- a ``tuple`` of two ints -- in which case, the first `int` is used for
the height dimension, and the second `int` for the width dimension
Args:
kernel_size: the size of the window
stride: the stride of the window. Default value is :attr:`kernel_size`
padding: implicit zero padding to be added on both sides
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})`, where
.. math::
H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
\text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
.. math::
W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
\text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False):
super().__init__(
"average", kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode
)
@staticmethod
def from_onnx(attributes=None):
return super(AvgPool2d, AvgPool2d).from_onnx("average", attributes=attributes)
class MaxPool2d(_Pool2d):
"""
Module that performs 2D max pooling (see :meth:`AvgPool2d`)
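    Example (illustrative)::
        >>> pool = MaxPool2d(2, stride=2)
        >>> x = crypten.cryptensor(torch.randn(1, 3, 4, 4))
        >>> pool(x).size()  # torch.Size([1, 3, 2, 2])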
"""
def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False):
super().__init__(
"max", kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode
)
@staticmethod
def from_onnx(attributes=None):
return super(MaxPool2d, MaxPool2d).from_onnx("max", attributes=attributes)
class AdaptiveAvgPool2d(Module):
r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H.
        H and W can be either an ``int``, or ``None``, which means the size will
be the same as that of the input.
Examples:
>>> # target output size of 5x7
>>> m = nn.AdaptiveAvgPool2d((5,7))
>>> input = crypten.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveAvgPool2d(7)
>>> input = crypten.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveAvgPool2d((None, 7))
>>> input = crypten.randn(1, 64, 10, 9)
>>> output = m(input)
"""
def __init__(self, output_size=None):
super(AdaptiveAvgPool2d, self).__init__()
self.output_size = output_size
def extra_repr(self) -> str:
return "output_size={}".format(self.output_size)
def forward(self, input_tensor, output_size=None):
if output_size is None:
output_size = self.output_size
assert (
output_size is not None
), "AdaptiveAvgPool2d requires an output_size in forward if not supplied in initialization"
resized_input, args, kwargs = _adaptive_pool2d_helper(
input_tensor, output_size, reduction="mean"
)
return resized_input.avg_pool2d(*args, **kwargs)
@staticmethod
def from_onnx(attributes=None):
if "shape" in attributes:
return AdaptiveAvgPool2d(output_size=attributes["shape"])
return AdaptiveAvgPool2d()
class AdaptiveMaxPool2d(Module):
r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H.
        H and W can be either an ``int``, or ``None``, which means the size will
be the same as that of the input.
Examples:
>>> # target output size of 5x7
>>> m = nn.AdaptiveMaxPool2d((5,7))
>>> input = crypten.randn(1, 64, 8, 9)
>>> output = m(input)
>>> # target output size of 7x7 (square)
>>> m = nn.AdaptiveMaxPool2d(7)
>>> input = crypten.randn(1, 64, 10, 9)
>>> output = m(input)
>>> # target output size of 10x7
>>> m = nn.AdaptiveMaxPool2d((None, 7))
>>> input = crypten.randn(1, 64, 10, 9)
>>> output = m(input)
"""
def __init__(self, output_size=None):
super(AdaptiveMaxPool2d, self).__init__()
self.output_size = output_size
def extra_repr(self) -> str:
return "output_size={}".format(self.output_size)
def forward(self, input_tensor, output_size=None):
if output_size is None:
output_size = self.output_size
assert (
output_size is not None
), "AdaptiveMaxPool2d requires an output_size in forward if not supplied in initialization"
resized_input, args, kwargs = _adaptive_pool2d_helper(
input_tensor, output_size, reduction="max"
)
return resized_input.max_pool2d(*args, **kwargs)
@staticmethod
def from_onnx(attributes=None):
if "shape" in attributes:
return AdaptiveMaxPool2d(output_size=attributes["shape"])
return AdaptiveMaxPool2d()
class GlobalAveragePool(Module):
"""
GlobalAveragePool consumes an input tensor and applies average pooling
across the values in the same channel. This is equivalent to AveragePool
with kernel size equal to the spatial dimension of input tensor. This is an
operation from the ONNX specification.
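    Example (illustrative)::
        >>> pool = GlobalAveragePool()
        >>> x = crypten.cryptensor(torch.randn(8, 16, 4, 4))
        >>> pool(x).size()  # torch.Size([8, 16, 1, 1])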
"""
def forward(self, input):
assert input.dim() > 2, "input needs to have more than two dimensions"
# sum over all but batch dimension:
result = input.shallow_copy()
for dim in range(2, input.dim()):
result = result.sum(dim, keepdim=True)
# return average value:
first_two_dims = input.size(0) * input.size(1)
return result.div(input.nelement() / float(first_two_dims))
@staticmethod
def from_onnx(attributes=None):
return GlobalAveragePool()
class BatchNormalization(Module):
"""
Module that performs batch normalization following the ONNX specification.
Unlike the `BatchNorm1d`, `BatchNorm2d`, and `BatchNorm3d` classes, this
module is stateless. It takes `input`, `weight`, `bias`, `running_mean`, and
`running_var` tensors as input into `forward()`.
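    Example (an illustrative sketch; the five inputs are hypothetical tensors
    passed as a single list)::
        >>> bn = BatchNormalization()
        >>> out = bn([input, weight, bias, running_mean, running_var])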
"""
def __init__(self, eps=1e-05, momentum=0.1):
super().__init__()
self.eps = eps
self.momentum = momentum
self.inv_var = None
self._running_var_id = None
def forward(self, x):
assert len(x), f"BatchNormalization expects 5 inputs, not {len(x)}"
input, weight, bias, running_mean, running_var = x
# in inference mode, we may be able to re-use inverse variance:
        if not self.training:
if id(running_var) != self._running_var_id:
self.inv_var = self._compute_inv_var(running_var)
self._running_var_id = id(running_var)
else:
self.inv_var = None
self._running_var_id = None
# perform batch normalization:
output = input.batchnorm(
weight,
bias,
running_mean=running_mean,
running_var=running_var,
training=self.training,
eps=self.eps,
momentum=self.momentum,
inv_var=self.inv_var,
)
if self.training: # NOTE: Training graph is different from evaluation graph.
return output, running_mean, running_var, None, None
else:
return output
def _compute_inv_var(self, running_var):
"""Computes inverse variance."""
if isinstance(running_var, crypten.CrypTensor):
inv_var = running_var.add(self.eps).inv_sqrt()
else:
inv_var = running_var.add(self.eps).sqrt().reciprocal()
return inv_var
@staticmethod
def from_onnx(attributes=None):
if attributes is None:
attributes = {}
return BatchNormalization(
eps=attributes.get("epsilon", 1e-05),
momentum=1.0 - attributes.get("momentum", 0.9),
) # NOTE: Role of momentum is reversed in ONNX specification.
class _BatchNorm(Module):
"""
Module that performs batch normalization on 1D tensors. It is used as the
base implementation for the `BatchNorm1d`, `BatchNorm2d`, and `BatchNorm3d`
classes.
Unlike the `BatchNormalization` class, this module is stateful.
"""
def __init__(self, num_features, eps=1e-05, momentum=0.1):
super().__init__()
# initialize model parameters and buffers:
pytorch_module = torch.nn.BatchNorm1d(num_features)
for param in ["weight", "bias"]:
self.register_parameter(param, getattr(pytorch_module, param))
for buffer in ["running_mean", "running_var"]:
self.register_buffer(buffer, getattr(pytorch_module, buffer))
# set model attributes:
self.eps = eps
self.momentum = momentum
# do not precompute inverse variance during training
self.inv_var = None
def forward(self, input):
return input.batchnorm(
self.weight,
self.bias,
running_mean=self.running_mean,
running_var=self.running_var,
training=self.training,
eps=self.eps,
momentum=self.momentum,
inv_var=self.inv_var,
)
@staticmethod
def from_onnx(parameters=None, attributes=None):
# preprocess all attributes:
if parameters is None:
parameters = {}
if attributes is None:
attributes = {}
num_features = len(parameters["running_mean"])
# create module:
if "momentum" not in attributes:
attributes["momentum"] = 0.1
kwargs = {"eps": attributes["epsilon"], "momentum": attributes["momentum"]}
module = _BatchNorm(num_features, **kwargs)
# set parameters:
for key, value in parameters.items():
if key in ["running_mean", "running_var"]:
module.set_buffer(key, value)
else:
module.set_parameter(key, value)
return module
def train(self, mode=True):
"""Freezes the inverse variance during inference to save computation"""
super().train(mode=mode)
if self.training:
self.inv_var = None
elif isinstance(self.running_var, crypten.CrypTensor):
self.inv_var = self.running_var.add(self.eps).inv_sqrt()
else:
self.inv_var = self.running_var.add(self.eps).sqrt().reciprocal()
return self
class BatchNorm1d(_BatchNorm):
"""
Module that performs batch normalization on 1D tensors.
"""
pass
class BatchNorm2d(_BatchNorm):
"""
Module that performs batch normalization on 2D tensors.
"""
pass
class BatchNorm3d(_BatchNorm):
"""
Module that performs batch normalization on 3D tensors.
"""
pass
class GroupNorm(Module):
"""
Module that performs group normalization on tensors.
"""
def __init__(self, num_groups, num_channels, eps=1e-5, affine=True):
raise NotImplementedError("GroupNorm is not implemented.")
def _all_the_same(items):
"""
Checks whether all values in a list are the same.
"""
return all(items[0] == item for item in items)
def _identify_bool_attributes_with_defaults(
attributes, attr_name, attr_value, default=True
):
"""For boolean attributes that have default values in the ONNX specification
checks to see if they are present in `attributes`, and assigns the
default if not present and appropriate value if present. Note `attr_value`
must be the value `attributes[attr_name]` if the default is to be kept.
"""
output = default
if attr_name in attributes and attributes[attr_name] != attr_value:
output = not default
return output
| CrypTen-main | crypten/nn/module.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
# Makes torch.nn.init functions work on CrypTen tensors as well as torch tensors.
def make_crypten_compatible(initialization_function):
def wrapper_func(tensor, *args, **kwargs):
if not torch.is_tensor(tensor):
result = torch.empty(tensor.size())
result = initialization_function(result, *args, **kwargs)
tensor.set(result)
return tensor
return initialization_function(tensor, *args, **kwargs)
return wrapper_func
__all__ = [ # noqa: F822
"constant_",
"dirac_",
"kaiming_normal_",
"kaiming_uniform_",
"normal_",
"ones_",
"orthogonal_",
"sparse_",
"trunc_normal_",
"uniform_",
"xavier_normal_",
"xavier_uniform_",
"zeros_",
]
for func_name in __all__:
globals()[func_name] = make_crypten_compatible(getattr(torch.nn.init, func_name))
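# Illustrative usage sketch (a minimal sketch, assuming an initialized CrypTen
# runtime):
#
#   import torch, crypten
#   import crypten.nn.init as init
#   crypten.init()
#   w_plain = torch.empty(3, 4)
#   init.xavier_uniform_(w_plain)      # plain tensors pass straight through
#   w_enc = crypten.cryptensor(torch.zeros(3, 4))
#   init.xavier_uniform_(w_enc)        # encrypted tensors are filled via set()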
| CrypTen-main | crypten/nn/init.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten.nn as nn
from tensorboard.compat.proto.attr_value_pb2 import AttrValue
from tensorboard.compat.proto.graph_pb2 import GraphDef
from tensorboard.compat.proto.node_def_pb2 import NodeDef
from tensorboard.compat.proto.versions_pb2 import VersionDef
from torch.utils.tensorboard import SummaryWriter as _SummaryWriter
def graph(model):
"""Converts a crypten.nn graph for consumption by TensorBoard."""
# convert individual module to graph:
assert isinstance(model, nn.Module), "model must be crypten.nn.Module"
if not isinstance(model, nn.Graph):
graph = nn.Graph("input", "output")
graph.add_module("output", model, ["input"])
model = graph
# create mapping to more interpretable node naming:
mapping = {input_name: input_name for input_name in model.input_names}
modules = {name: module for name, module in model.named_modules()}
for name, module in modules.items():
op = str(type(module))[26:-2]
mapping[name] = "%s_%s" % (op, name)
# create input variables:
nodes = [
NodeDef(
name=mapping[input_name].encode(encoding="utf_8"),
op="Variable",
input=[],
)
for input_name in model.input_names
]
# loop all graph connections:
for output_name, input_names in model._graph.items():
# get parameters and type of module:
module = modules[output_name]
op = str(type(module))
input_names = [mapping[name] for name in input_names]
parameters = [
"%s: %s" % (name, parameter.size())
for name, parameter in module.named_parameters()
]
parameter_string = "; ".join(parameters).encode(encoding="utf_8")
# add to graph:
nodes.append(
NodeDef(
name=mapping[output_name].encode(encoding="utf_8"),
op=op,
input=input_names,
attr={"attr": AttrValue(s=parameter_string)},
)
)
# return graph definition:
return GraphDef(node=nodes, versions=VersionDef(producer=22))
class SummaryWriter(_SummaryWriter):
"""
Adapts the PyTorch SummaryWriter to output crypten graphs.
"""
def add_graph(self, model, input_to_model=None, verbose=False):
self._get_file_writer().add_onnx_graph(graph(model))
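# Illustrative usage sketch (`model` is any crypten.nn.Module; the log_dir
# value is hypothetical):
#
#   writer = SummaryWriter(log_dir="runs/crypten-demo")
#   writer.add_graph(model)
#   writer.close()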
| CrypTen-main | crypten/nn/tensorboard.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .dp_split import DPSplitModel, SkippedLoss
__all__ = ["DPSplitModel", "SkippedLoss"]
| CrypTen-main | crypten/nn/privacy/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import crypten.communicator as comm
import torch
import torch.nn as nn
from crypten.config import cfg
from crypten.gradients import _inverse_broadcast
# TODO: Move SkippedLoss elsewhere
class SkippedLoss:
"""Placeholder for output of a skipped loss function"""
def __init__(self, msg=""):
self.msg = msg
def __repr__(self):
return f"SkippedLoss({self.msg})"
def _matmul_backward(input, weight, grad_output):
"""Implements matmul backward from crypten.gradients
This is necessary here because the forward in DPSplitModel is performed in plaintext
and does not appear on the CrypTen autograd tape, so we do not have a saved ctx.
Only returns gradient w.r.t. weight since that is all we need in this context.
"""
# Cache sizes for inverse_broadcast
weight_size = weight.t().size()
# Deal with vectors that are represented by a
# < 2 dimensional tensor
if input.dim() < 2:
input = input.unsqueeze(0)
grad_output = grad_output.unsqueeze(0)
if weight.dim() < 2:
weight = weight.unsqueeze(1)
grad_output = grad_output.unsqueeze(1)
# Compute gradients
weight_grad = input.transpose(-2, -1).matmul(grad_output)
# Fix gradient sizes for vector inputs
if len(weight_size) < 2:
weight_grad = weight_grad.squeeze()
if weight_grad.dim() < 1:
weight_grad = weight_grad.unsqueeze(0)
# Return weight grad
weight_grad = _inverse_broadcast(weight_grad, weight_size).t()
return weight_grad / weight_grad.size(0)
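# Illustrative derivation for the helper above: for Z = X @ W with loss L,
# the chain rule gives
#
#   dL/dW = X^T @ (dL/dZ)
#
# which is the `input.transpose(-2, -1).matmul(grad_output)` term. The
# `_inverse_broadcast` call sums this over any broadcast batch dimensions so
# the gradient has the same shape as W, and the final line rescales by the
# gradient's leading dimension.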
class DPSplitModel(nn.Module):
"""
Differentially Private Split-MPC module that provides label-DP. Models will
run in 6 steps:
(1) Run forward pass in plaintext using PyTorch to get logits
(2) Apply logistic function (sigmoid or softmax) to get predictions
    (3) Compute loss function in CrypTen
    (4) Compute dL/dZ (gradient w.r.t. logits) in CrypTen
(5) Compute aggregated parameter gradients with differential privacy
(6) Decrypt noisy gradients
Step (5) is computed using different methods depending on protocol configuration
(See Config Options > protocol for descriptions)
Args:
pytorch_model (torch.nn.Module) : The input model to be trained
using DP-Split-MPC algorithm. Remains in plaintext throughout.
noise_magnitude (float) : The magnitude of DP noise to be applied to
gradients prior to decryption for each batch of training.
feature_src (int) : Source for input features to the model (also owns
the plaintext model throughout training)
label_src (int) : Source for training labels. Labels can either be input
as plaintext values from the label_src party or as CrypTensors.
Config Options:
skip_loss_forward (bool) : Determines whether to compute the
value of the loss during training (see crypten.nn._Loss definition
of skip_forward). If True, this model will output zeros for the value
of the loss function. However, correct gradients will still be computed
when calling backward(). Default: True
cache_pred_size (bool) : Determines whether the size of the predictions should
be cached. If True, DPSplitModel instances will remember the tensor and
batch sizes input. This saves one communication round per batch, but
the user will be responsible for using correct batch sizes to avoid
crashing.
protocol (string): Name of protocol to use to compute gradients:
"full_jacobian": Computes the full jacobian to compute all parameter gradients from dL/dP.
This jacobian will be encrypted and gradients are computed by an encrypted matrix multiplication.
"layer_estimation": Computes the jacobian only with respect to the last linear layer (dL/dW)
of the forward network. DP and aggregation are applied before decrypting dL/dW. This gradient
is then used to estimate dL/dZ (gradient w.r.t. logits). Backpropagation is
then computed normally in plaintext.
Example:
```
preds = dp_split_model(x)
loss = dp_split_model.compute_loss(targets)
dp_split_model.backward()
```
"""
def __init__(
self,
pytorch_model,
feature_src,
label_src,
noise_magnitude=None,
noise_src=None,
randomized_response_prob=None,
rappor_prob=None,
):
super().__init__()
# TODO: Compute noise magnitude based on jacobian.
self.noise_magnitude = noise_magnitude
self.feature_src = feature_src
self.label_src = label_src
self.noise_src = noise_src
# Model must be defined for model owning party
if self.is_feature_src():
assert isinstance(
pytorch_model, torch.nn.Module
), "pytorch_model must be a torch Module"
self.model = pytorch_model
self.train()
# Process Randomized Response parameters
if randomized_response_prob is not None:
assert (
0 < randomized_response_prob < 0.5
), "randomized_response_prob must be in the interval [0, 0.5)"
self.rr_prob = randomized_response_prob
# Apply RAPPOR correction:
if rappor_prob is not None:
assert 0 <= rappor_prob <= 1, "rappor_prob must be in [0, 1]"
self.alpha = rappor_prob
# TODO: Add support for multi-class predictions
self.multiclass = False
# Cache for tensor sizes
self.cache = {}
def eval(self):
self.train(mode=False)
@property
def training(self):
if hasattr(self, "model") and self.model is not None:
return self.model.training
return self._training
@training.setter
def training(self, mode):
self.train(mode)
def train(self, mode=True):
if hasattr(self, "model") and self.model is not None:
self.model.train(mode=mode)
else:
self._training = mode
def zero_grad(self):
if self.is_feature_src():
self.model.zero_grad()
def forward(self, input):
# During eval mode, just conduct forward pass.
if not self.training:
if self.is_feature_src():
return self.model(input)
# Parties without model should return None
return None
if self.is_feature_src():
self.logits = self.model(input)
self.preds = self.logits.sigmoid()
# Extract saved input to last layer from autograd tape if we need it
if cfg.nn.dpsmpc.protocol == "layer_estimation":
self.last_input = self.logits.grad_fn._saved_mat1
# Check that prediction size matches cached size
preds_size = self.preds.size()
if "preds_size" in self.cache:
cache_size = self.cache["preds_size"]
if preds_size != cache_size:
raise ValueError(
f"Logit size does not match cached size: {preds_size} vs. {cache_size}"
)
# Cache predictions size - Note batch size must match here
# TODO: Handle batch dimension here
if self.cache_pred_size:
preds_size = self._communicate_and_cache("preds_size", preds_size)
else:
preds_size = comm.get().broadcast_obj(preds_size, src=self.feature_src)
else:
# Cache predictions size - Note batch size must match here
# TODO: Handle batch dimension here
if self.cache_pred_size:
preds_size = self._communicate_and_cache("preds_size", None)
else:
preds_size = comm.get().broadcast_obj(None, src=self.feature_src)
self.logits = torch.empty(preds_size)
self.preds = torch.empty(preds_size)
return self.logits
def _communicate_and_cache(self, name, value):
"""If the requested name is in the size_cache, return the cached size.
On cache miss, the size will be communicated from feature_src party
"""
# Cache hit
if name in self.cache:
return self.cache[name]
# Cache miss
value = comm.get().broadcast_obj(value, src=self.feature_src)
self.cache[name] = value
return value
def is_feature_src(self):
return self.rank == self.feature_src
def is_label_src(self):
return self.rank == self.label_src
@property
def skip_loss_forward(self):
"""Determines whether to skip the forward computation for the loss function (Default: True)"""
return cfg.nn.dpsmpc.skip_loss_forward
@property
def rank(self):
"""Communicator rank in torch.distributed"""
return comm.get().get_rank()
@property
def cache_pred_size(self):
"""Bool that determines whether to cache the prediction size"""
return cfg.nn.dpsmpc.cache_pred_size
def _process_targets(self, targets):
"""Encrypts targets and RR to targets if necessary"""
if self.rr_prob is not None:
flip_probs = torch.tensor(self.rr_prob).expand(targets.size())
# Apply appropriate RR-protocol and encrypt targets if necessary
if self.rr_prob is not None:
flip_probs = torch.tensor(self.rr_prob).expand(targets.size())
if crypten.is_encrypted_tensor(targets):
if self.rr_prob is not None:
flip_mask = crypten.bernoulli(flip_probs)
                targets = targets + flip_mask - 2 * flip_mask * targets
targets_enc = targets
else:
# Label provider adds RR label flips if they are plaintext
if self.rr_prob is not None and self.is_label_src():
flip_mask = flip_probs.bernoulli()
targets += flip_mask - 2 * targets * flip_mask
# Encrypt targets:
targets_enc = crypten.cryptensor(targets, src=self.label_src)
return targets_enc
def compute_loss(self, targets):
# Process predictions and targets
# Apply RAPPOR correction
if self.alpha is not None:
self.preds_rappor = self.alpha * self.preds
self.preds_rappor += (1 - self.alpha) * (1 - self.preds)
self.preds_enc = crypten.cryptensor(
self.preds_rappor, src=self.feature_src, requires_grad=True
)
else:
self.preds_enc = crypten.cryptensor(
self.preds, src=self.feature_src, requires_grad=True
)
self.targets_enc = self._process_targets(targets)
# Compute BCE loss or CrossEntropy loss depending on single or multiclass
if self.skip_loss_forward:
self.loss = SkippedLoss("Skipped CrossEntropy function")
else:
logits_enc = crypten.cryptensor(self.logits, src=self.feature_src)
# BCEWithLogitsLoss
if not self.multiclass:
if self.alpha is None:
self.loss = logits_enc.binary_cross_entropy_with_logits(
self.targets_enc
)
else:
self.loss = logits_enc.rappor_loss(self.targets_enc, self.alpha)
# CrossEntropyLoss
# TODO: Support Multi-class DPS-MPC
else:
raise NotImplementedError("Multi-class DPS-MPC is not supported")
"""
if self.alpha is not None:
raise NotImplementedError("Multi-class RAPPOR Loss not supported")
if self.is_feature_src:
logits_enc = crypten.cryptensor(self.logits, src=self.feature_src)
else:
logits_enc = crypten.cryptensor(self.preds, src=self.features_src)
self.loss = logits_enc.cross_entropy(self.targets_enc)
"""
# Overwrite loss backward to call model's backward function:
def backward_(self_, grad_output=None):
self.backward(grad_output=grad_output)
self.loss.backward = backward_
return self.loss
# TODO: Implement DP properly to make correct DP guarantees
# TODO: Implement custom DP mechanism (split noise / magnitude)
def _generate_noise_no_src(self, size):
return crypten.randn(size) * self.noise_magnitude
def _generate_noise_from_src(self, size):
noise = torch.randn(size) * self.noise_magnitude
noise = crypten.cryptensor(noise, src=self.noise_src)
return noise
def _add_dp_if_necessary(self, grad):
if self.noise_magnitude is None or self.noise_magnitude == 0.0:
return grad
# Determine noise generation function
generate_noise = (
self._generate_noise_from_src
if self.noise_src
else self._generate_noise_no_src
)
noise = generate_noise(grad.size())
with crypten.no_grad():
grad += noise
return grad
def _get_last_linear_layer(self):
layers = list(self.model.modules())
for last_layer in reversed(layers):
if isinstance(last_layer, torch.nn.Linear):
break
return last_layer
def _compute_model_jacobians(self):
"""Compute Jacobians with respect to each model parameter
If last_layer_only is True, this computes the jacobian only with respect
to the parameters of the last layer of the model.
"""
Z = self.logits.split(1, dim=-1)
# Store partial Jacobian for each parameter
jacobians = {}
# dL/dW_i = sum_j (dL/dP_j * dP_j/dW_i)
with crypten.no_grad():
# TODO: Async / parallelize this
for z in Z:
z.backward(torch.ones(z.size()), retain_graph=True)
params = self.model.parameters()
for param in params:
grad = param.grad.flatten().unsqueeze(-1)
# Accumulate partial gradients: dL/dZ_j * dP_j/dW_i
if param in jacobians.keys():
jacobians[param] = torch.cat([jacobians[param], grad], dim=-1)
else:
jacobians[param] = grad
param.grad = None # Reset grad for next p_j.backward()
return jacobians
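    # Illustrative shape note for the Jacobians above: for a parameter W with
    # n elements and logits Z with m entries per example, jacobians[W] is an
    # (n, m) matrix whose column j holds dZ_j/dW flattened (accumulated over
    # the batch by z.backward). _compute_param_grads later computes
    # jacobian.matmul(dLdZ), i.e. dL/dW = sum_j (dL/dZ_j) * (dZ_j/dW).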
def _compute_param_grads(self, dLdZ, jacobians):
"""Compute dLdW for all model parameters W"""
# Populate parameter grad fields using Jacobians
if self.is_feature_src():
# Cache / communicate number of parameters
params = torch.nn.utils.parameters_to_vector(self.model.parameters())
num_params = params.numel()
self._communicate_and_cache("num_params", num_params)
# Process jacobian
jacobian = torch.cat(
[jacobians[param] for param in self.model.parameters()], dim=0
)
else:
num_params = self._communicate_and_cache("num_params", None)
jacobian_size = (num_params, dLdZ.size(-2))
jacobian = torch.empty(jacobian_size)
jacobian = crypten.cryptensor(jacobian, src=self.feature_src)
        # Compute gradients w.r.t. each param
while jacobian.dim() < dLdZ.dim():
jacobian = jacobian.unsqueeze(0)
grad = jacobian.matmul(dLdZ)
grad = grad.view(-1, num_params)
grad = self._add_dp_if_necessary(grad)
# Sum over batch dimension
while grad.numel() != num_params:
grad = grad.sum(0)
# Decrypt dL/dZ_j * dZ_j/dW_i with Differential Privacy
grads = grad.flatten().get_plain_text(dst=self.feature_src)
return grads
def _backward_full_jacobian(self, grad_output=None):
"""Computes backward for non-RR variant.
To add DP noise at the aggregated gradient level,
we compute the jacobians for dZ/dW in plaintext
so we can matrix multiply by dL/dZ to compute our
gradients without performing a full backward pass in
crypten.
"""
# Compute dL/dP_j
dLdZ = self.preds_enc.sub(self.targets_enc).div(self.preds_enc.nelement())
# Correct for RAPPOR Loss
if self.alpha is not None:
            if self.is_feature_src():
correction = 2 * self.alpha - 1
correction *= self.preds * (1 - self.preds)
correction /= self.preds_rappor * (1 - self.preds_rappor)
else:
correction = torch.empty(self.preds.size())
correction_enc = crypten.cryptensor(correction, src=self.feature_src)
dLdZ *= correction_enc
# Turn batched vector into batched matrix for matmul
dLdZ = dLdZ.unsqueeze(-1)
# Compute Jacobians dP/dW wrt model weights
jacobians = self._compute_model_jacobians() if self.is_feature_src() else None
# Compute gradients dL/dW wrt model parameters
grads = self._compute_param_grads(dLdZ, jacobians)
# Populate grad fields of parameters:
if self.is_feature_src():
ind = 0
for param in self.model.parameters():
numel = param.numel()
param.grad = grads[ind : ind + numel].view(param.size())
ind += numel
def _solve_dLdZ(self, dLdW):
"""Generates noisy dLdP using de-aggregation trick"""
A = self.last_input
B = dLdW
# Apply pseudoinverse
dLdZ = torch.linalg.lstsq(A.t(), B.t()).solution
# dLdZ = B.matmul(A.pinverse()).t()
return dLdZ
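    # Illustrative note on the de-aggregation above: the last linear layer
    # satisfies dLdW = dLdZ^T @ A, so solving the least-squares system
    # A^T X = dLdW^T yields an estimate X of dLdZ (exact only when the
    # system is well-determined).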
def _compute_last_layer_grad(self, grad_output=None):
# Compute dL/dP_j
dLdZ_enc = self.preds_enc.sub(self.targets_enc).div(self.preds_enc.nelement())
# Correct for RAPPOR Loss
if self.alpha is not None:
            if self.is_feature_src():
correction = 2 * self.alpha - 1
correction *= self.preds * (1 - self.preds)
correction /= self.preds_rappor * (1 - self.preds_rappor)
else:
correction = torch.empty(self.preds.size())
correction_enc = crypten.cryptensor(correction, src=self.feature_src)
dLdZ_enc *= correction_enc
# Communicate / cache last layer input / weight sizes
if self.is_feature_src():
last_weight = self._get_last_linear_layer().weight
self._communicate_and_cache("last_in_size", self.last_input.size())
self._communicate_and_cache("last_weight_size", last_weight.size())
else:
last_in_size = self._communicate_and_cache("last_in_size", None)
last_weight_size = self._communicate_and_cache("last_weight_size", None)
self.last_input = torch.empty(last_in_size)
last_weight = torch.empty(last_weight_size)
# Encrypt last layer values
# TODO: make this optional?
last_input_enc = crypten.cryptensor(self.last_input, src=self.feature_src)
# Compute last layer gradients (dLdW) and add DP if necessary
dLdW_enc = dLdZ_enc.t().matmul(last_input_enc)
dLdW_enc = self._add_dp_if_necessary(dLdW_enc)
# return dLdW
return dLdW_enc.get_plain_text(dst=self.feature_src)
def _backward_layer_estimation(self, grad_output=None):
with crypten.no_grad():
# Find dLdW for last layer weights
dLdW = self._compute_last_layer_grad(grad_output=grad_output)
# Run backprop in plaintext
if self.is_feature_src():
dLdZ = self._solve_dLdZ(dLdW)
self.logits.backward(dLdZ)
def backward(self, grad_output=None):
protocol = cfg.nn.dpsmpc.protocol
with crypten.no_grad():
if protocol == "full_jacobian":
self._backward_full_jacobian(grad_output=grad_output)
raise NotImplementedError(
"DPS protocol full_jacobian must be fixed before use."
)
elif protocol == "layer_estimation":
with torch.no_grad():
self._backward_layer_estimation(grad_output=grad_output)
else:
raise ValueError(
f"Unrecognized DPSplitMPC backward protocol: {protocol}"
)
| CrypTen-main | crypten/nn/privacy/dp_split.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from contextlib import contextmanager
import yaml
from omegaconf import OmegaConf
class CrypTenConfig:
"""
Configuration object used to store configurable parameters for CrypTen.
    This object acts as a nested dictionary, but can be queried using dot
    notation (e.g. querying or setting `cfg.a.b` is equivalent to `cfg['a']['b']`).
Users can load a CrypTen config from a file using `cfg.load_config(filepath)`.
Users can temporarily override a config parameter using the contextmanager temp_override:
.. code-block:: python
cfg.a.b = outer # sets cfg["a"]["b"] to outer value
        with cfg.temp_override({"a.b": inner}):
print(cfg.a.b) # prints inner value
print(cfg.a.b) # prints outer value
"""
__DEFAULT_CONFIG_PATH = os.path.normpath(
os.path.join(__file__, "../../../configs/default.yaml")
)
def __init__(self, config_file=None):
self.load_config(config_file)
def load_config(self, config_file):
"""Loads config from a yaml file"""
if config_file is None:
config_file = CrypTenConfig.__DEFAULT_CONFIG_PATH
# Use yaml to open stream for safe load
with open(config_file) as stream:
config_dict = yaml.safe_load(stream)
self.config = OmegaConf.create(config_dict)
def set_config(self, config):
if isinstance(config, CrypTenConfig):
self.config = config.config
else:
self.config = config
def __getattribute__(self, name):
try:
return object.__getattribute__(self, name)
except AttributeError:
keys = name.split(".")
result = getattr(self.config, keys[0])
for key in keys[1:]:
result = getattr(result, key)
return result
def __getitem__(self, name):
return self.__getattribute__(name)
def __setattr__(self, name, value):
if name == "config":
object.__setattr__(self, name, value)
try:
# Can only set attribute if already exists
object.__getattribute__(self, name)
object.__setattr__(self, name, value)
except AttributeError:
dotlist = [f"{name}={value}"]
update = OmegaConf.from_dotlist(dotlist)
self.config = OmegaConf.merge(self.config, update)
def __setitem__(self, name, value):
self.__setattr__(name, value)
@contextmanager
def temp_override(self, override_dict):
old_config = self.config
try:
dotlist = [f"{k}={v}" for k, v in override_dict.items()]
update = OmegaConf.from_dotlist(dotlist)
self.config = OmegaConf.merge(self.config, update)
yield
finally:
self.config = old_config
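# Illustrative usage sketch for temp_override (the key below is assumed to
# exist in the default config):
#
#   from crypten.config import cfg
#   with cfg.temp_override({"encoder.precision_bits": 20}):
#       ...  # code in this block sees cfg.encoder.precision_bits == 20
#   # outside the block the previous value is restored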
| CrypTen-main | crypten/config/config.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .config import CrypTenConfig
cfg = CrypTenConfig()
__all__ = ["cfg"]
| CrypTen-main | crypten/config/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import math
import operator
import torch
def implements(torch_function):
"""Register a torch function override for CUDALongTensor"""
@functools.wraps(torch_function)
def decorator(func):
HANDLED_FUNCTIONS[torch_function] = func
return func
return decorator
HANDLED_FUNCTIONS = {}
class CUDALongTensor:
"""
A wrapper class for `torch.cuda.LongTensor`. When performing operations that are
    currently not supported for `torch.cuda.LongTensor` (e.g., `matmul`, `conv2d`),
    it will convert the underlying LongTensor into an encoding of DoubleTensors,
    perform the computation on those, and convert the result back to a LongTensor.
    The result matches what the operation would produce directly on a LongTensor.
"""
__BITS = torch.iinfo(torch.long).bits
__DEFAULT_NBLOCKS = 3
__BLOCK_SIZE = {3: None, 4: None} # Number of bits per block
__INDICES = {3: [], 4: []}
__SHIFTS = {3: [], 4: []}
for nblocks in [3, 4]:
__BLOCK_SIZE[nblocks] = math.ceil(__BITS / nblocks)
for i in range(nblocks):
for j in range(nblocks):
if (i + j) * __BLOCK_SIZE[nblocks] >= __BITS:
continue
idx = i * nblocks + j
__INDICES[nblocks].append(idx)
__SHIFTS[nblocks].append((i + j) * __BLOCK_SIZE[nblocks])
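    # Illustrative arithmetic behind the block encoding: with nblocks = 3,
    # each block holds ceil(64 / 3) = 22 bits, so a product of two blocks
    # fits in 44 bits. A float64 has a 53-bit significand, leaving 9 bits of
    # headroom, which is why matmul/conv switch to 4 blocks once a dot
    # product accumulates 256 or more terms. Block pairs whose combined
    # shift reaches 64 bits are skipped above, since their contribution
    # vanishes modulo 2**64.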
def __init__(self, data=None, device=None):
r"""
Construct a CUDALongTensor with `data` on the specified `device`.
`data` can either be a torch tensor, a CUDALongTensor, or an array-like
object that can be converted to a torch tensor via torch.as_tensor(data)
`dtype` of the torch tensor will be automatically converted to torch.long
regardless of `dtype` of `data`. `device` must be a cuda device.
Args:
data (Tensor, array_like, or CUDALongTensor): Initial data for CUDALongTensor.
device (torch.device): The desired device of CUDALongTensor. Must be a cuda device.
"""
if device is None:
device = "cuda" if (data is None or not data.is_cuda) else data.device
else:
assert device.startswith(
"cuda"
), "cannot specify a non-cuda device for CUDALongTensor"
self._tensor = None
if data is None:
return
if isinstance(data, CUDALongTensor):
self._tensor = data._tensor
elif torch.is_tensor(data):
self._tensor = data.long().to(device)
else:
self._tensor = torch.as_tensor(data, dtype=torch.long, device=device)
def __torch_function__(self, func, types, args=(), kwargs=None):
if kwargs is None:
kwargs = {}
if func not in HANDLED_FUNCTIONS or not all(
issubclass(t, (torch.Tensor, CUDALongTensor)) for t in types
):
args = [t.tensor() if hasattr(t, "tensor") else t for t in args]
result = func(*args, **kwargs)
if torch.is_tensor(result):
return CUDALongTensor(result)
if isinstance(result, list):
return [CUDALongTensor(t) if torch.is_tensor(t) else t for t in result]
if isinstance(result, tuple):
return tuple(
CUDALongTensor(t) if torch.is_tensor(t) else t for t in result
)
return result
return HANDLED_FUNCTIONS[func](*args, **kwargs)
def __repr__(self):
return "CUDALongTensor({})".format(self._tensor)
def __setitem__(self, index, value):
self._tensor[index] = value.data
@property
def device(self):
return self._tensor.device
@property
def is_cuda(self):
return self._tensor.is_cuda
@property
def shape(self):
return self._tensor.shape
@property
def data(self):
return self._tensor.data
@property
def dtype(self):
return self._tensor.dtype
def tensor(self):
return self._tensor
def to(self, *args, **kwargs):
self._tensor = self._tensor.to(*args, **kwargs)
if not self._tensor.is_cuda:
return self._tensor
return self
def cuda(self, *args, **kwargs):
self._tensor = self._tensor.cuda(*args, **kwargs)
return self
def cpu(self, *args, **kwargs):
return self._tensor.cpu(*args, **kwargs)
def shallow_copy(self):
"""Create a shallow copy of the input tensor."""
# TODO: Rename this to __copy__()?
result = CUDALongTensor(self._tensor)
return result
def clone(self):
"""Create a deep copy of the input tensor."""
# TODO: Rename this to __deepcopy__()?
result = CUDALongTensor()
result._tensor = self._tensor.clone()
return result
@staticmethod
def __encode_as_fp64(x, num_blocks=3):
"""Converts a CUDALongTensor to an encoding of
torch.cuda.DoubleTensor that represent the same data.
"""
nb = num_blocks
bks = CUDALongTensor.__BLOCK_SIZE[num_blocks]
x_block = CUDALongTensor.stack(
[(x >> (bks * i)) & (2**bks - 1) for i in range(nb)]
)
return x_block.double()
@staticmethod
def __decode_as_int64(x, num_blocks=3):
"""Converts a CUDALongTensor encoded as torch.cuda.DoubleTensor
back to the CUDALongTensor it encodes
"""
x = x.long()
indices = CUDALongTensor.__INDICES[num_blocks]
shifts = CUDALongTensor.__SHIFTS[num_blocks]
indices = torch.tensor(indices, device=x.device)
shifts = torch.tensor(shifts, device=x.device)
shifts = shifts.view(-1, *([1] * (x.ndim - 1)))
result = torch.index_select(x, 0, indices)
result <<= shifts
return CUDALongTensor(result.sum(0))
@staticmethod
def __patched_conv_ops(op, x, y, *args, **kwargs):
if "groups" in kwargs:
groups = kwargs["groups"]
assert (
groups == 1
), f"more than one group is unsupported on GPU (groups = {groups})"
del kwargs["groups"]
bs, c, *img = x.size()
c_out, c_in, *ks = y.size()
kernel_elements = functools.reduce(operator.mul, ks)
nb = 3 if kernel_elements < 256 else 4
nb2 = nb**2
x_encoded = CUDALongTensor.__encode_as_fp64(x, nb).data
y_encoded = CUDALongTensor.__encode_as_fp64(y, nb).data
repeat_idx = [1] * (x_encoded.dim() - 1)
x_enc_span = x_encoded.repeat(nb, *repeat_idx)
y_enc_span = torch.repeat_interleave(y_encoded, repeats=nb, dim=0)
x_enc_span = x_enc_span.transpose_(0, 1).reshape(bs, nb2 * c, *img)
y_enc_span = y_enc_span.reshape(nb2 * c_out, c_in, *ks)
c_z = c_out if op in ["conv1d", "conv2d"] else c_in
z_encoded = getattr(torch, op)(
x_enc_span, y_enc_span, *args, **kwargs, groups=nb2
)
z_encoded = z_encoded.reshape(bs, nb2, c_z, *z_encoded.size()[2:]).transpose_(
0, 1
)
return CUDALongTensor.__decode_as_int64(z_encoded, nb)
@staticmethod
def stack(tensors, *args, **kwargs):
is_cuda_long = any(hasattr(t, "tensor") for t in tensors)
tensors = [t.tensor() if hasattr(t, "tensor") else t for t in tensors]
if is_cuda_long:
return CUDALongTensor(torch.stack(tensors, *args, **kwargs))
return torch.stack(tensors, *args, **kwargs)
@staticmethod
def cat(tensors, *args, **kwargs):
is_cuda_long = any(hasattr(t, "tensor") for t in tensors)
tensors = [t.tensor() if hasattr(t, "tensor") else t for t in tensors]
if is_cuda_long:
return CUDALongTensor(torch.cat(tensors, *args, **kwargs))
return torch.cat(tensors, *args, **kwargs)
@staticmethod
@implements(torch.matmul)
def matmul(x, y, *args, **kwargs):
# Use 4 blocks if each dot product is 256 elements or larger to prevent overflow in the sum
nb = 3 if x.size(-1) < 256 else 4
# Prepend 1 to the dimension of x or y if it is 1-dimensional
remove_x, remove_y = False, False
if x.dim() == 1:
x = x.view(1, x.shape[0])
remove_x = True
if y.dim() == 1:
y = y.view(y.shape[0], 1)
remove_y = True
x_encoded = CUDALongTensor.__encode_as_fp64(x, nb).data
y_encoded = CUDALongTensor.__encode_as_fp64(y, nb).data
# Span x and y for cross multiplication
repeat_idx = [1] * (x_encoded.dim() - 1)
x_enc_span = x_encoded.repeat(nb, *repeat_idx)
y_enc_span = torch.repeat_interleave(y_encoded, repeats=nb, dim=0)
# Broadcasting
for _ in range(abs(x_enc_span.ndim - y_enc_span.ndim)):
if x_enc_span.ndim > y_enc_span.ndim:
y_enc_span.unsqueeze_(1)
else:
x_enc_span.unsqueeze_(1)
z_encoded = torch.matmul(x_enc_span, y_enc_span, *args, **kwargs)
if remove_x:
z_encoded.squeeze_(-2)
if remove_y:
z_encoded.squeeze_(-1)
return CUDALongTensor.__decode_as_int64(z_encoded, nb)
@staticmethod
@implements(torch.conv1d)
def conv1d(input, weight, *args, **kwargs):
return CUDALongTensor.__patched_conv_ops(
"conv1d", input, weight, *args, **kwargs
)
@staticmethod
@implements(torch.conv_transpose1d)
def conv_transpose1d(input, weight, *args, **kwargs):
return CUDALongTensor.__patched_conv_ops(
"conv_transpose1d", input, weight, *args, **kwargs
)
@staticmethod
@implements(torch.conv2d)
def conv2d(input, weight, *args, **kwargs):
return CUDALongTensor.__patched_conv_ops(
"conv2d", input, weight, *args, **kwargs
)
@staticmethod
@implements(torch.conv_transpose2d)
def conv_transpose2d(input, weight, *args, **kwargs):
return CUDALongTensor.__patched_conv_ops(
"conv_transpose2d", input, weight, *args, **kwargs
)
@staticmethod
@implements(torch.nn.functional.avg_pool2d)
def avg_pool2d(x, kernel_size, divisor_override=None, *args, **kwargs):
nb = CUDALongTensor.__DEFAULT_NBLOCKS
bks = CUDALongTensor.__BLOCK_SIZE[nb]
x_encoded = CUDALongTensor.__encode_as_fp64(x, nb).data
bs, c, h, w = x.shape
x_encoded = x_encoded.reshape(nb * bs, c, h, w)
z_encoded = torch.nn.functional.avg_pool2d(
x_encoded, kernel_size, divisor_override=1, *args, **kwargs
)
z_enc = z_encoded.reshape(nb, bs, *z_encoded.shape[1:]).long()
z = torch.zeros(
(nb, bs, *z_encoded.shape[1:]), device=x.device, dtype=torch.long
)
z += z_enc << torch.tensor([bks * i for i in range(nb)], device=x.device).view(
nb, 1, 1, 1, 1
)
z = z.sum(0)
if isinstance(kernel_size, (int, float)):
pool_size = kernel_size**2
else:
pool_size = kernel_size[0] * kernel_size[1]
if divisor_override is not None:
z = torch.div(z, divisor_override, rounding_mode="trunc")
else:
z = torch.div(z, pool_size, rounding_mode="trunc")
return CUDALongTensor(z)
@staticmethod
@implements(torch.broadcast_tensors)
def broadcast_tensors(*tensors):
tensor_list = [t.data for t in tensors]
results = torch.broadcast_tensors(*tensor_list)
results = [CUDALongTensor(t) for t in results]
return results
def split(self, y, *args, **kwargs):
splits = self._tensor.split(y, *args, **kwargs)
splits = [CUDALongTensor(split) for split in splits]
return splits
def unbind(self, dim=0):
results = torch.unbind(self._tensor, dim)
results = tuple(CUDALongTensor(t) for t in results)
return results
def nonzero(self, *args, **kwargs):
result = self._tensor.nonzero(*args, **kwargs)
if isinstance(result, tuple):
return tuple(CUDALongTensor(t) for t in result)
return CUDALongTensor(result)
def all(self, *args, **kwargs):
return self._tensor.bool().all(*args, **kwargs)
def set_(self, source, *args, **kwargs):
"""CUDALongTensor currently does not support inplace set_"""
self._tensor = source.data
return self
def __iadd__(self, y):
if isinstance(y, CUDALongTensor):
y = y._tensor
self._tensor += y
return self
def __isub__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor -= y
return self
def __imul__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor *= y
return self
def __ifloordiv__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor = torch.div(self._tensor, y, rounding_mode="trunc")
return self
def __idiv__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor /= y
return self
def __imod__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor %= y
return self
def __iand__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor &= y
return self
def __ixor__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor ^= y
return self
def __ipow__(self, y):
if isinstance(y, CUDALongTensor):
y = y.tensor()
self._tensor **= y
return self
def __and__(self, y):
result = self.clone()
return result.__iand__(y)
def __xor__(self, y):
result = self.clone()
return result.__ixor__(y)
def __add__(self, y):
result = self.clone()
return result.__iadd__(y)
def __sub__(self, y):
result = self.clone()
return result.__isub__(y)
def __rsub__(self, y):
result = self.clone()
result._tensor = y - result._tensor
return result
def __mul__(self, y):
result = self.clone()
return result.__imul__(y)
def __floordiv__(self, y):
result = self.clone()
return result.__ifloordiv__(y)
def __truediv__(self, y):
result = self.clone()
return result.__idiv__(y)
def __mod__(self, y):
result = self.clone()
return result.__imod__(y)
def __pow__(self, y):
result = self.clone()
return result.__ipow__(y)
def __neg__(self):
result = self.clone()
result._tensor = -result._tensor
return result
def __eq__(self, y):
return CUDALongTensor(self._tensor == y)
def __ne__(self, y):
return CUDALongTensor(self._tensor != y)
def __lt__(self, y):
return CUDALongTensor(self._tensor < y)
def __gt__(self, y):
return CUDALongTensor(self._tensor > y)
def __le__(self, y):
return CUDALongTensor(self._tensor <= y)
def __ge__(self, y):
return CUDALongTensor(self._tensor >= y)
def __hash__(self):
return hash(self._tensor)
def lshift_(self, value):
"""Right shift elements by `value` bits"""
assert isinstance(value, int), "lshift must take an integer argument."
self._tensor <<= value
return self
def lshift(self, value):
"""Left shift elements by `value` bits"""
return self.clone().lshift_(value)
def rshift_(self, value):
"""Right shift elements by `value` bits"""
assert isinstance(value, int), "rshift must take an integer argument."
self._tensor >>= value
return self
def rshift(self, value):
"""Right shift elements by `value` bits"""
return self.clone().rshift_(value)
__lshift__ = lshift
__rshift__ = rshift
# In-place bitwise operators
__ilshift__ = lshift_
__irshift__ = rshift_
__radd__ = __add__
__rmul__ = __mul__
__rpow__ = __pow__
REGULAR_FUNCTIONS = [
"__getitem__",
"index_select",
"view",
"flatten",
"t",
"transpose",
"unsqueeze",
"repeat",
"squeeze",
"narrow",
"expand",
"roll",
"unfold",
"flip",
"trace",
"prod",
"sum",
"cumsum",
"reshape",
"permute",
"pow",
"float",
"long",
"double",
"scatter",
"scatter_add",
"index_fill",
"index_add",
"take",
"gather",
"where",
"add",
"sub",
"mul",
"div",
"le",
"ge",
"gt",
"lt",
"eq",
"ne",
"neg",
"abs",
"sign",
]
PROPERTY_FUNCTIONS = ["__len__", "nelement", "dim", "size", "numel", "item"]
INPLACE_FUNCTIONS = [
"add_",
"sub_",
"mul_",
"div_",
"copy_",
"abs_",
"neg_",
"index_fill_",
"index_add_",
"scatter_",
"scatter_add_",
"le_",
"ge_",
"gt_",
"lt_",
"eq_",
"ne_",
"neg_",
"abs_",
"sign_",
]
def _add_regular_function(func_name):
"""
Adds function to `CUDALongTensor` that is applied directly on the underlying
`_tensor` attribute, and stores the result in the same attribute.
"""
def regular_func(self, *args, **kwargs):
result = self.shallow_copy()
args = [t.tensor() if hasattr(t, "tensor") else t for t in args]
for key, value in kwargs.items():
if hasattr(value, "tensor"):
kwargs[key] = value.tensor()
result._tensor = getattr(result._tensor, func_name)(*args, **kwargs)
return result
setattr(CUDALongTensor, func_name, regular_func)
def _add_property_function(func_name):
"""
Adds function to `CUDALongTensor` that is applied directly on the underlying
`_tensor` attribute, and returns the result of that function.
"""
def property_func(self, *args, **kwargs):
result = getattr(self._tensor, func_name)(*args, **kwargs)
return result
setattr(CUDALongTensor, func_name, property_func)
def _add_inplace_function(func_name):
"""
Adds function to `CUDALongTensor` that is applied in place on the underlying
`_tensor` attribute, and returns the result of that function.
"""
def inplace_func(self, *args, **kwargs):
args = [t.tensor() if hasattr(t, "tensor") else t for t in args]
for key, value in kwargs.items():
if hasattr(value, "tensor"):
kwargs[key] = value.tensor()
result = getattr(self._tensor, func_name)(*args, **kwargs)
self._tensor.set_(result)
return self
setattr(CUDALongTensor, func_name, inplace_func)
for func_name in REGULAR_FUNCTIONS:
_add_regular_function(func_name)
for func_name in PROPERTY_FUNCTIONS:
_add_property_function(func_name)
for func_name in INPLACE_FUNCTIONS:
_add_inplace_function(func_name)
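# Illustrative effect of the generated methods (requires a CUDA device):
#
#   x = CUDALongTensor(torch.arange(6))
#   y = x.view(2, 3)      # REGULAR_FUNCTIONS return a new CUDALongTensor
#   n = x.numel()         # PROPERTY_FUNCTIONS return the plain result (6)
#   x.add_(1)             # INPLACE_FUNCTIONS mutate x and return it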
| CrypTen-main | crypten/cuda/cuda_tensor.py |
from .cuda_tensor import CUDALongTensor
__all__ = ["CUDALongTensor"]
| CrypTen-main | crypten/cuda/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import threading
from operator import itemgetter
from queue import Queue
import torch
from torch.distributed import ReduceOp
from .communicator import Communicator
class InProcessCommunicator(Communicator):
BYTES_PER_ELEMENT = 8
tls = threading.local()
mailbox = None
barrier = None
lock = threading.Lock()
@classmethod
def initialize(cls, rank, world_size, init_ttp=False):
cls.tls.instance = cls(rank, world_size)
def __init__(self, rank, world_size, init_ttp=False):
self.world_size = world_size
self.rank = rank
self.reset_communication_stats()
self._name = f"rank{rank}"
with InProcessCommunicator.lock:
if InProcessCommunicator.mailbox is None:
InProcessCommunicator.mailbox = [
Queue() for _ in range(self.world_size)
]
# This prevents one thread from running ahead of the others and doing
# multiple puts that would show up in the get calls below
InProcessCommunicator.barrier = threading.Barrier(self.world_size)
# logging:
level = logging.getLogger().level
logging.getLogger().setLevel(logging.INFO)
logging.info("==================")
logging.info("InProcessCommunicator with rank %d" % self.rank)
logging.info("==================")
logging.info("World size = %d" % self.get_world_size())
logging.getLogger().setLevel(level)
@classmethod
def get(cls):
if not hasattr(cls.tls, "instance"):
return None
return cls.tls.instance
@classmethod
def is_initialized(cls):
return hasattr(cls.tls, "instance")
def send(self, tensor, dst):
"""Sends the specified tensor to the destination dst."""
self.mailbox[dst].put((self.rank, tensor.clone()))
def recv(self, tensor, src=None):
"""Receives a tensor from an (optional) source src."""
rank, result = self.mailbox[self.rank].get()
if src is not None and rank != src:
raise NotImplementedError("Can't receive messages out of order yet")
return result
def isend(self, tensor, dst):
"""Sends the specified tensor to the destination dst."""
self.send(tensor, dst)
class Result:
def is_completed(self):
return True
def wait(self):
pass
return Result()
def irecv(self, tensor, src=None):
"""Receives a tensor from an (optional) source src."""
class Result:
def __init__(self, mailbox, rank):
self.completed = False
self.mailbox = mailbox
self.rank = rank
def is_completed(self):
return self.completed
def wait(self):
rank, result = self.mailbox[self.rank].get()
if src is not None and rank != src:
raise NotImplementedError("Can't receive messages out of order yet")
tensor.copy_(result)
return Result(self.mailbox, self.rank)
def scatter(self, scatter_list, src, size=None, async_op=False):
"""Scatters a list of tensors to all parties."""
if async_op:
raise NotImplementedError()
if src == self.rank:
for i in range(self.world_size):
self.mailbox[i].put(scatter_list[i].clone())
self.barrier.wait()
return self.mailbox[self.rank].get()
def reduce(self, tensor, dst, op=ReduceOp.SUM, async_op=False):
"""Reduces the tensor data across all parties."""
tensors = self.gather(tensor, dst)
if self.rank == dst:
reduce_fn = self._reduce_op_to_function(op)
return reduce_fn(torch.stack(tensors), dim=0)
@classmethod
def shutdown(cls):
# Destroy all thread-local instances
cls.tls = threading.local()
cls.mailbox = None
cls.barrier = None
def _reduce_op_to_function(self, op):
if op == ReduceOp.SUM:
return torch.sum
raise NotImplementedError()
def all_reduce(self, tensor, op=ReduceOp.SUM, async_op=False):
"""Reduces the tensor data across all parties; all get the final result."""
if async_op:
raise NotImplementedError()
ag = self.all_gather(tensor)
reduce_fn = self._reduce_op_to_function(op)
return reduce_fn(torch.stack(ag), dim=0)
def gather(self, tensor, dst, async_op=False):
"""Gathers a list of tensors in a single party."""
if async_op:
raise NotImplementedError()
self.mailbox[dst].put((self.rank, tensor.clone()))
self.barrier.wait()
if self.rank == dst:
result = [self.mailbox[dst].get() for _ in range(self.world_size)]
return [tensor for rank, tensor in sorted(result, key=itemgetter(0))]
def all_gather(self, tensor, async_op=False):
"""Gathers tensors from all parties in a list."""
if async_op:
raise NotImplementedError()
for i in range(self.world_size):
self.mailbox[i].put((self.rank, tensor.clone()))
self.barrier.wait()
result = sorted(
(self.mailbox[self.rank].get() for _ in range(self.world_size)),
key=itemgetter(0),
)
return [tensor for (rank, tensor) in result]
def broadcast(self, tensor, src, async_op=False):
"""Broadcasts the tensor to all parties."""
if async_op:
raise NotImplementedError()
if self.rank == src:
for i in range(self.get_world_size()):
self.mailbox[i].put(tensor.clone())
# No need for a barrier here.
return self.mailbox[self.rank].get()
def get_world_size(self):
"""Returns the size of the world."""
return self.world_size
def get_rank(self):
"""Returns the rank of the current process."""
return self.rank
def set_name(self, name):
"""Sets the party name of the current rank."""
assert isinstance(
name, str
), f"Improper name provided to process on rank {self.get_rank()}"
self._name = name
def get_name(self):
"""Returns the party name of the current rank."""
return self._name
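# Illustrative thread-based launch sketch (crypten.init_thread is assumed to
# install an InProcessCommunicator for the calling thread):
#
#   import threading
#   import crypten
#
#   def party(rank, world_size=2):
#       crypten.init_thread(rank, world_size)
#       ...  # party code runs here
#
#   threads = [threading.Thread(target=party, args=(r,)) for r in range(2)]
#   for t in threads:
#       t.start()
#   for t in threads:
#       t.join()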
| CrypTen-main | crypten/communicator/in_process_communicator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import pickle
import random
import string
import numpy
import torch
import torch.distributed as dist
from crypten.common import serial
from torch.distributed import ReduceOp
from .communicator import _logging, Communicator
class DistributedCommunicator(Communicator):
"""
Implementation of the Communicator class via torch.distributed. Use this
communicator to communicate between different processes, potentially,
running on different nodes.
"""
BYTES_PER_ELEMENT = 8
instance = None
def __init__(self, init_ttp=False):
# no need to do anything if we already initialized the communicator:
if not dist.is_initialized():
            # get configuration variables from environment:
for key in ["distributed_backend", "rendezvous", "world_size", "rank"]:
if key.upper() not in os.environ:
raise ValueError("Environment variable %s must be set." % key)
setattr(self, key.lower(), os.environ[key.upper()])
# make sure world size and rank are integers; comms stats are reset:
self.world_size = int(self.world_size)
self.rank = int(self.rank)
self.reset_communication_stats()
self._name = f"rank{self.rank}"
# logging:
logging.info("==================")
logging.info("DistributedCommunicator with rank %d" % self.rank)
logging.info("==================")
# initialize process group:
total_ws = self.world_size + 1 if init_ttp else self.world_size
dist.init_process_group(
backend=self.distributed_backend,
init_method=self.rendezvous,
world_size=total_ws,
rank=self.rank,
)
self.ttp_group = dist.new_group(list(range(total_ws)))
if total_ws > 1:
self.ttp_comm_group = dist.new_group([0, total_ws - 1])
self.main_group = dist.new_group(list(range(self.world_size)))
self.ttp_initialized = init_ttp
logging.info("World size = %d" % self.world_size)
@classmethod
def is_initialized(cls):
if cls.instance is None:
return False
return dist.is_initialized()
@classmethod
def initialize(cls, rank, world_size, init_ttp=False):
        if os.name == "nt":
raise OSError(
"Multiprocessing is not supported on Windows. "
+ "Please initialize CrypTen via crypten.init_thread() instead."
)
# set default arguments for communicator:
randomized_path = "crypten-".join(
random.choice(string.ascii_letters) for i in range(10)
)
default_args = {
"DISTRIBUTED_BACKEND": "gloo",
"RENDEZVOUS": f"file:///tmp/{randomized_path}",
"WORLD_SIZE": world_size,
"RANK": rank,
}
for key, val in default_args.items():
if key not in os.environ:
os.environ[key] = str(val)
cls.instance = DistributedCommunicator(init_ttp=init_ttp)
@classmethod
def get(cls):
return cls.instance
@classmethod
def shutdown(cls):
if dist.get_rank() == 0 and cls.instance.ttp_initialized:
cls.instance.send_obj(
"terminate", cls.instance.get_ttp_rank(), cls.instance.ttp_group
)
dist.destroy_process_group(cls.instance.main_group)
dist.destroy_process_group(cls.instance.ttp_group)
dist.destroy_process_group()
cls.instance = None
@_logging
def send(self, tensor, dst):
"""Sends the specified tensor to the destination dst."""
assert dist.is_initialized(), "initialize the communicator first"
dist.send(tensor.data, dst, group=self.main_group)
@_logging
def recv(self, tensor, src=None):
"""Receives a tensor from an (optional) source src."""
assert dist.is_initialized(), "initialize the communicator first"
result = tensor.clone()
dist.recv(result.data, src=src, group=self.main_group)
return result
@_logging
def isend(self, tensor, dst):
"""Sends the specified tensor to the destination dst."""
assert dist.is_initialized(), "initialize the communicator first"
return dist.isend(tensor.data, dst, group=self.main_group)
@_logging
def irecv(self, tensor, src=None):
"""Receives a tensor from an (optional) source src."""
assert dist.is_initialized(), "initialize the communicator first"
return dist.irecv(tensor.data, src=src, group=self.main_group)
@_logging
def scatter(self, scatter_list, src, size=None, device=None):
"""Scatters a list of tensors to all parties."""
assert dist.is_initialized(), "initialize the communicator first"
if src != self.get_rank():
if size is None:
size = scatter_list[self.get_rank()].size()
if device is None:
try:
device = scatter_list[self.get_rank()].device
except Exception:
pass
tensor = torch.empty(size=size, dtype=torch.long, device=device)
dist.scatter(tensor.data, [], src, group=self.main_group)
else:
scatter_list = [s.data for s in scatter_list]
tensor = scatter_list[self.get_rank()]
dist.scatter(tensor.data, scatter_list, src, group=self.main_group)
return tensor
@_logging
def reduce(self, input, dst, op=ReduceOp.SUM, batched=False):
"""Reduces the input data across all parties."""
assert dist.is_initialized(), "initialize the communicator first"
if batched:
assert isinstance(input, list), "batched reduce input must be a list"
reqs = []
result = [x.clone().data for x in input]
for tensor in result:
reqs.append(
dist.reduce(
tensor.data, dst, op=op, group=self.main_group, async_op=True
)
)
for req in reqs:
req.wait()
else:
assert torch.is_tensor(
input.data
), "unbatched input for reduce must be a torch tensor"
result = input.clone()
dist.reduce(result.data, dst, op=op, group=self.main_group)
return result if dst == self.get_rank() else None
@_logging
def all_reduce(self, input, op=ReduceOp.SUM, batched=False):
"""Reduces the input data across all parties; all get the final result."""
assert dist.is_initialized(), "initialize the communicator first"
if batched:
assert isinstance(input, list), "batched reduce input must be a list"
reqs = []
result = [x.clone() for x in input]
for tensor in result:
reqs.append(
dist.all_reduce(
tensor.data, op=op, group=self.main_group, async_op=True
)
)
for req in reqs:
req.wait()
else:
assert torch.is_tensor(
input.data
), "unbatched input for reduce must be a torch tensor"
result = input.clone()
dist.all_reduce(result.data, op=op, group=self.main_group)
return result
@_logging
def gather(self, tensor, dst):
"""Gathers a list of tensors in a single party."""
assert dist.is_initialized(), "initialize the communicator first"
if self.get_rank() == dst:
result = []
device = tensor.data.device
for _ in range(self.get_world_size()):
result.append(
torch.empty(size=tensor.size(), dtype=torch.long, device=device)
)
dist.gather(tensor.data, result, dst, group=self.main_group)
return result
dist.gather(tensor.data, [], dst, group=self.main_group)
return [None]
@_logging
def all_gather(self, tensor):
"""Gathers tensors from all parties in a list."""
assert dist.is_initialized(), "initialize the communicator first"
result = []
device = tensor.data.device
for _ in range(self.get_world_size()):
result.append(
torch.empty(size=tensor.size(), dtype=torch.long, device=device)
)
dist.all_gather(result, tensor.data, group=self.main_group)
return result
@_logging
def broadcast(self, input, src, group=None, batched=False):
"""Broadcasts the tensor to all parties."""
assert dist.is_initialized(), "initialize the communicator first"
group = self.main_group if group is None else group
if batched:
assert isinstance(input, list), "batched reduce input must be a list"
reqs = []
for tensor in input:
reqs.append(
dist.broadcast(tensor.data, src, group=group, async_op=True)
)
for req in reqs:
req.wait()
else:
assert torch.is_tensor(
input.data
), "unbatched input for reduce must be a torch tensor"
dist.broadcast(input.data, src, group=group)
return input
@_logging
def barrier(self):
"""Synchronizes all processes.
This collective blocks processes until the whole group enters this
function.
"""
assert dist.is_initialized(), "initialize the communicator first"
dist.barrier(group=self.main_group)
@_logging
def send_obj(self, obj, dst, group=None):
"""Sends the specified object to the destination `dst`."""
if group is None:
group = self.main_group
buf = pickle.dumps(obj)
size = torch.tensor(len(buf), dtype=torch.int32)
arr = torch.from_numpy(numpy.copy(numpy.frombuffer(buf, dtype=numpy.int8)))
r0 = dist.isend(size, dst=dst, group=group)
r1 = dist.isend(arr, dst=dst, group=group)
r0.wait()
r1.wait()
@_logging
def recv_obj(self, src, group=None):
"""Receives an object from a source `src`."""
if group is None:
group = self.main_group
size = torch.tensor(1, dtype=torch.int32)
dist.irecv(size, src=src, group=group).wait()
data = torch.empty(size=(size,), dtype=torch.int8)
dist.irecv(data, src=src, group=group).wait()
buf = data.numpy().tobytes()
return serial.restricted_loads(buf)
@_logging
def broadcast_obj(self, obj, src, group=None):
"""Broadcasts a given object to all parties."""
if group is None:
group = self.main_group
if self.rank == src:
assert obj is not None, "src party must provide obj for broadcast"
buf = pickle.dumps(obj)
size = torch.tensor(len(buf), dtype=torch.int32)
arr = torch.from_numpy(numpy.copy(numpy.frombuffer(buf, dtype=numpy.int8)))
dist.broadcast(size, src, group=group)
dist.broadcast(arr, src, group=group)
else:
size = torch.tensor(1, dtype=torch.int32)
dist.broadcast(size, src, group=group)
data = torch.empty(size=(size,), dtype=torch.int8)
dist.broadcast(data, src, group=group)
buf = data.numpy().tobytes()
obj = serial.restricted_loads(buf)
return obj
def get_world_size(self):
"""Returns the size of the world."""
assert dist.is_initialized(), "initialize the communicator first"
return self.world_size
def get_rank(self):
"""Returns the rank of the current process."""
assert dist.is_initialized(), "initialize the communicator first"
return dist.get_rank()
def get_ttp_rank(self):
"""Returns the rank of the Trusted Third Party"""
return self.get_world_size()
def set_name(self, name):
"""Sets the party name of the current process."""
assert isinstance(
name, str
), f"Improper name provided to process on rank {self.get_rank()}"
self._name = name
def get_name(self):
"""Returns the party name of the current process."""
return self._name
def get_distributed_backend(self):
"""Returns name of torch.distributed backend used."""
assert dist.is_initialized(), "initialize the communicator first"
return dist.get_backend()
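# Illustrative manual-launch sketch: DistributedCommunicator reads these
# environment variables when the process group is first created (shell for
# party 0 of 2; party 1 is identical with RANK=1):
#
#   export DISTRIBUTED_BACKEND=gloo
#   export RENDEZVOUS=file:///tmp/crypten-demo
#   export WORLD_SIZE=2
#   export RANK=0
#   python my_party_script.py   # the script calls crypten.init()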
| CrypTen-main | crypten/communicator/distributed_communicator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .communicator import Communicator
from .distributed_communicator import DistributedCommunicator
from .in_process_communicator import InProcessCommunicator
__use_threads = False
def get():
cls = InProcessCommunicator if __use_threads else DistributedCommunicator
if not cls.is_initialized():
raise RuntimeError("Crypten not initialized. Please call crypten.init() first.")
return cls.get()
def _init(use_threads, rank=0, world_size=1, init_ttp=False):
    global __use_threads
__use_threads = use_threads
cls = InProcessCommunicator if __use_threads else DistributedCommunicator
if cls.is_initialized():
return
cls.initialize(rank, world_size, init_ttp=init_ttp)
def uninit():
global __use_threads
cls = InProcessCommunicator if __use_threads else DistributedCommunicator
cls.shutdown()
__use_threads = False
def is_initialized():
cls = InProcessCommunicator if __use_threads else DistributedCommunicator
return cls.is_initialized()
# expose classes and functions in package:
__all__ = ["Communicator", "DistributedCommunicator", "get", "uninit", "is_initialized"]
| CrypTen-main | crypten/communicator/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import sys
import timeit
from crypten.config import cfg
class Communicator:
"""
Abstract class defining the functions that a Communicator should implement.
"""
@classmethod
def is_initialized(cls):
"""Returns whether the communicator has been initialized"""
raise NotImplementedError("is_initialized is not implemented")
@classmethod
def get(cls):
"""Returns an instance of the communicator"""
raise NotImplementedError("get is not implemented")
@classmethod
def initialize(cls, **kwargs):
"""Initializes the communicator. Call this function before using it."""
raise NotImplementedError("initialize is not implemented")
@classmethod
def shutdown(cls):
raise NotImplementedError("shutdown is not implemented")
def send(self, tensor, dst):
"""Sends the specified tensor to the destination dst."""
raise NotImplementedError("send is not implemented")
def recv(self, tensor, src=None):
"""Receives a tensor from an (optional) source src."""
raise NotImplementedError("recv is not implemented")
def scatter(self, scatter_list, src, size=None, async_op=False):
"""Scatters a list of tensors to all parties."""
raise NotImplementedError("scatter is not implemented")
def reduce(self, tensor, op=None, async_op=False):
"""Reduces the tensor data across all parties."""
raise NotImplementedError("tensor is not implemented")
def all_reduce(self, tensor, op=None, async_op=False):
"""Reduces the tensor data across all parties; all get the final result."""
raise NotImplementedError("tensor is not implemented")
def gather(self, tensor, dst, async_op=False):
"""Gathers a list of tensors in a single party."""
raise NotImplementedError("gather is not implemented")
def all_gather(self, tensor, async_op=False):
"""Gathers tensors from all parties in a list."""
raise NotImplementedError("all_gather is not implemented")
def broadcast(self, tensor, src, async_op=False):
"""Broadcasts the tensor to all parties."""
raise NotImplementedError("broadcast is not implemented")
def barrier(self):
"""Synchronizes all processes.
This collective blocks processes until the whole group enters this
function.
"""
raise NotImplementedError("barrier is not implemented")
def send_obj(self, obj, dst):
"""Sends the specified object to the destination `dst`."""
raise NotImplementedError("send_obj is not implemented")
def recv_obj(self, src):
"""Receives a tensor from a source src."""
raise NotImplementedError("recv_obj is not implemented")
def broadcast_obj(self, obj, src):
"""Broadcasts a given object to all parties."""
raise NotImplementedError("broadcast_obj is not implemented")
def get_world_size(self):
"""Returns the size of the world."""
raise NotImplementedError("get_world_size is not implemented")
def get_rank(self):
"""Returns the rank of the current process."""
raise NotImplementedError("get_rank is not implemented")
    def set_name(self, name):
"""Sets the party name of the current process."""
raise NotImplementedError("set_name is not implemented")
def get_name(self):
"""Returns the party name of the current process."""
raise NotImplementedError("get_name is not implemented")
def reset_communication_stats(self):
"""Resets communication statistics."""
self.comm_rounds = 0
self.comm_bytes = 0
self.comm_time = 0
def print_communication_stats(self):
"""
Prints communication statistics.
NOTE: Each party performs its own logging of communication, so one needs
to sum the number of bytes communicated over all parties and divide by
two (to prevent double-counting) to obtain the number of bytes
communicated in the overall system.
"""
import crypten
crypten.log("====Communication Stats====")
crypten.log("Rounds: {}".format(self.comm_rounds))
crypten.log("Bytes: {}".format(self.comm_bytes))
crypten.log("Communication time: {}".format(self.comm_time))
def get_communication_stats(self):
"""
Returns communication statistics in a Python dict.
NOTE: Each party performs its own logging of communication, so one needs
to sum the number of bytes communicated over all parties and divide by
two (to prevent double-counting) to obtain the number of bytes
communicated in the overall system.
"""
return {
"rounds": self.comm_rounds,
"bytes": self.comm_bytes,
"time": self.comm_time,
}
def _log_communication(self, nelement):
"""Updates log of communication statistics."""
self.comm_rounds += 1
self.comm_bytes += nelement * self.BYTES_PER_ELEMENT
def _log_communication_time(self, comm_time):
self.comm_time += comm_time
def _logging(func):
"""
Decorator that performs logging of communication statistics.
NOTE: Each party performs its own logging of communication, so one needs to
sum the number of bytes communicated over all parties and divide by two
(to prevent double-counting) to obtain the number of bytes communicated in
the overall system.
"""
from functools import wraps
@wraps(func)
def logging_wrapper(self, *args, **kwargs):
# TODO: Replace this
# - hacks the inputs into some of the functions for world_size 1:
world_size = self.get_world_size()
if world_size < 2:
if func.__name__ in ["gather", "all_gather"]:
return [args[0]]
elif len(args) > 0:
return args[0]
# only log communication if needed:
if cfg.communicator.verbose:
rank = self.get_rank()
_log = self._log_communication
            # count the number of bytes communicated for each MPI collective:
if func.__name__ == "barrier":
_log(0)
elif func.__name__ in ["send", "recv", "isend", "irecv"]:
_log(args[0].nelement()) # party sends or receives tensor
elif func.__name__ == "scatter":
if args[1] == rank: # party scatters P - 1 tensors
nelements = sum(
x.nelement() for idx, x in enumerate(args[0]) if idx != rank
)
_log(nelements) # NOTE: We deal with other parties later
elif func.__name__ == "all_gather":
_log(2 * (world_size - 1) * args[0].nelement())
# party sends and receives P - 1 tensors
elif func.__name__ == "send_obj":
nbytes = sys.getsizeof(args[0])
_log(nbytes / self.BYTES_PER_ELEMENT) # party sends object
elif func.__name__ == "broadcast_obj":
nbytes = sys.getsizeof(args[0])
_log(nbytes / self.BYTES_PER_ELEMENT * (world_size - 1))
# party sends object to P - 1 parties
elif func.__name__ in ["broadcast", "gather", "reduce"]:
multiplier = world_size - 1 if args[1] == rank else 1
# broadcast: party sends tensor to P - 1 parties, or receives 1 tensor
# gather: party receives P - 1 tensors, or sends 1 tensor
# reduce: party receives P - 1 tensors, or sends 1 tensor
if "batched" in kwargs and kwargs["batched"]:
nelements = sum(x.nelement() for x in args[0])
_log(nelements * multiplier)
else:
_log(args[0].nelement() * multiplier)
elif func.__name__ == "all_reduce":
# each party sends and receives one tensor in ring implementation
if "batched" in kwargs and kwargs["batched"]:
nelements = sum(2 * x.nelement() for x in args[0])
_log(nelements)
else:
_log(2 * args[0].nelement())
# execute and time the MPI collective:
tic = timeit.default_timer()
result = func(self, *args, **kwargs)
toc = timeit.default_timer()
self._log_communication_time(toc - tic)
            # for some functions, we only know the object size now:
if func.__name__ == "scatter" and args[1] != rank:
_log(result.nelement()) # party receives 1 tensor
if func.__name__ == "recv_obj":
_log(sys.getsizeof(result) / self.BYTES_PER_ELEMENT)
# party receives 1 object
return result
return func(self, *args, **kwargs)
return logging_wrapper
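# Example (sketch): aggregating per-party stats into a system-wide byte count.
# The helper is illustrative, not part of the CrypTen API; it halves the sum
# because sender and receiver each log the same transfer.
def _total_bytes_example(per_party_stats):
    return sum(stats["bytes"] for stats in per_party_stats) // 2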
| CrypTen-main | crypten/communicator/communicator.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
from .optimizer import Optimizer
class SGD(Optimizer):
r"""Implements stochastic gradient descent (optionally with momentum).
Nesterov momentum is based on the formula from
`On the importance of initialization and momentum in deep learning`__.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float): learning rate
momentum (float, optional): momentum factor (default: 0)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
dampening (float, optional): dampening for momentum (default: 0)
nesterov (bool, optional): enables Nesterov momentum (default: False)
grad_threshold (float, optional): imposes a threshold on the magnitude of gradient values.
Gradient values with magnitude above the threshold will be replaced with 0.
Example:
        >>> optimizer = crypten.optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
>>> optimizer.zero_grad()
>>> loss_fn(model(input), target).backward()
>>> optimizer.step()
__ http://www.cs.toronto.edu/%7Ehinton/absps/momentum.pdf
.. note::
The implementation of SGD with Momentum/Nesterov subtly differs from
Sutskever et. al. and implementations in some other frameworks.
Considering the specific case of Momentum, the update can be written as
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + g_{t+1}, \\
p_{t+1} & = p_{t} - \text{lr} * v_{t+1},
\end{aligned}
where :math:`p`, :math:`g`, :math:`v` and :math:`\mu` denote the
parameters, gradient, velocity, and momentum respectively.
This is in contrast to Sutskever et. al. and
other frameworks which employ an update of the form
.. math::
\begin{aligned}
v_{t+1} & = \mu * v_{t} + \text{lr} * g_{t+1}, \\
p_{t+1} & = p_{t} - v_{t+1}.
\end{aligned}
The Nesterov version is analogously modified.
"""
def __init__(
self,
params,
lr,
momentum=0,
dampening=0,
weight_decay=0,
nesterov=False,
grad_threshold=None,
):
if not isinstance(lr, (int, float)) or lr < 0.0:
raise ValueError("Invalid learning rate: {}".format(lr))
if not isinstance(momentum, (int, float)) or momentum < 0.0:
raise ValueError("Invalid momentum value: {}".format(momentum))
if not isinstance(dampening, (int, float)):
raise ValueError("Invalid dampening value {}".format(dampening))
if not isinstance(weight_decay, (int, float)) or weight_decay < 0.0:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = {
"lr": lr,
"momentum": momentum,
"dampening": dampening,
"weight_decay": weight_decay,
"nesterov": nesterov,
}
if nesterov and (momentum <= 0 or dampening != 0):
raise ValueError("Nesterov momentum requires a momentum and zero dampening")
# Compute thresholding based on square value since abs is more expensive
self.square_threshold = grad_threshold
if self.square_threshold is not None:
self.square_threshold *= self.square_threshold
super(SGD, self).__init__(params, defaults)
def __setstate__(self, state):
super(SGD, self).__setstate__(state)
for group in self.param_groups:
group.setdefault("nesterov", False)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
with crypten.no_grad():
loss = None
if closure is not None:
with crypten.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
# Threshold gradients to prevent gradient explosion
if self.square_threshold is not None:
d_p = p.grad.mul(p.grad.square().lt(self.square_threshold))
else:
d_p = p.grad
if weight_decay != 0:
d_p = d_p.add(p.mul(weight_decay))
if momentum != 0:
param_state = self.state[id(p)]
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = d_p.clone().detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(d_p.mul(1 - dampening))
if nesterov:
d_p = d_p.add(buf.mul(momentum))
else:
d_p = buf
p.sub_(d_p.mul(group["lr"]))
return loss
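# Example (sketch): a numeric check of the plain-momentum update documented
# above, using float tensors in the clear (no encryption involved).
def _momentum_update_example():
    import torch

    lr, mu = 0.1, 0.9
    p = torch.tensor([1.0])  # illustrative parameter
    g = torch.tensor([0.5])  # illustrative gradient
    v = torch.zeros(1)  # velocity buffer
    v = mu * v + g  # v_{t+1} = mu * v_t + g_{t+1}
    p = p - lr * v  # p_{t+1} = p_t - lr * v_{t+1}
    assert torch.allclose(p, torch.tensor([0.95]))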
| CrypTen-main | crypten/optim/sgd.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .optimizer import Optimizer
from .sgd import SGD
__all__ = ["Optimizer", "SGD"]
| CrypTen-main | crypten/optim/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import torch
from torch.optim.optimizer import required
class Optimizer(torch.optim.Optimizer):
r"""Base class for all optimizers.
.. warning::
Parameters need to be specified as collections that have a deterministic
ordering that is consistent between runs. Examples of objects that don't
satisfy those properties are sets and iterators over values of dictionaries.
Arguments:
params (iterable): an iterable of :class:`torch.Tensor` s,
:class:`dict` s, or :class:`crypten.CrypTensor`s. Specifies what Tensors
should be optimized.
defaults: (dict): a dict containing default values of optimization
options (used when a parameter group doesn't specify them).
Note: This optimizer is adapted from torch.optim.Optimizer to work with CrypTensors
"""
def add_param_group(self, param_group):
r"""Add a param group to the :class:`Optimizer` s `param_groups`.
This can be useful when fine tuning a pre-trained network as frozen layers can be made
trainable and added to the :class:`Optimizer` as training progresses.
Arguments:
param_group (dict): Specifies what Tensors should be optimized along with group
specific optimization options.
"""
assert isinstance(param_group, dict), "param group must be a dict"
params = param_group["params"]
if isinstance(params, (torch.Tensor, crypten.CrypTensor)):
param_group["params"] = [params]
elif isinstance(params, set):
raise TypeError(
"optimizer parameters need to be organized in ordered collections, but "
"the ordering of tensors in sets will change between runs. Please use a list instead."
)
else:
param_group["params"] = list(params)
for param in param_group["params"]:
if not isinstance(param, (torch.Tensor, crypten.CrypTensor)):
raise TypeError(
"optimizer can only optimize Tensors, "
"but one of the params is " + torch.typename(param)
)
for name, default in self.defaults.items():
if default is required and name not in param_group:
raise ValueError(
"parameter group didn't specify a value of required optimization parameter "
+ name
)
else:
param_group.setdefault(name, default)
self.param_groups.append(param_group)
def zero_grad(self, set_to_none=True):
r"""Sets the gradients of all optimized parameters to zero or None.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
This will in general have lower memory footprint, and can modestly improve performance.
However, it changes certain behaviors. For example:
1. When the user tries to access a gradient and perform manual ops on it,
a None attribute or a Tensor full of 0s will behave differently.
2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
are guaranteed to be None for params that did not receive a gradient.
3. ``crypten.optim`` optimizers have a different behavior if the gradient is 0 or None
(in one case it does the step with a gradient of 0 and in the other it skips
the step altogether).
Note that CrypTen differs from PyTorch by setting the default value of `set_to_none` to True.
This is because in CrypTen, it is often advantageous to set to None rather than to a zero-valued
CrypTensor.
"""
if set_to_none:
for group in self.param_groups:
for param in group["params"]:
param.grad = None
else:
for group in self.param_groups:
for param in group["params"]:
if param.grad is not None:
param.grad -= param.grad
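# Example (sketch): param groups are plain dicts, and `defaults` fills in any
# option a group omits. The parameter tensor below is illustrative.
def _add_param_group_example(optimizer):
    new_layer = torch.nn.Parameter(torch.zeros(2))
    optimizer.add_param_group({"params": [new_layer], "lr": 0.01})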
| CrypTen-main | crypten/optim/optimizer.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib.util
import logging
import sys
import crypten.nn as cnn
import torch
# List of modules to import and additional classes to update from them
__import_list = {
"alexnet": [],
"densenet": ["_DenseLayer", "_DenseBlock", "_Transition"],
"googlenet": ["Inception", "InceptionAux", "BasicConv2d"],
"inception": [
"BasicConv2d",
"InceptionA",
"InceptionB",
"InceptionC",
"InceptionD",
"InceptionE",
"InceptionAux",
],
"mnasnet": ["_InvertedResidual"],
"mobilenet": [],
"resnet": ["BasicBlock", "Bottleneck"],
"shufflenetv2": ["InvertedResidual"],
"squeezenet": ["Fire"],
"vgg": [],
}
__all__ = []
def __import_module_copy(module_name):
"""
Returns a copy of an imported module so it can be modified
without modifying future imports of the given module
"""
starting_modules = sys.modules.copy()
module_spec = importlib.util.find_spec(module_name)
module = importlib.util.module_from_spec(module_spec)
module_spec.loader.exec_module(module)
new_modules = set(sys.modules) - set(starting_modules)
del module_spec
for m in new_modules:
del sys.modules[m]
return module
def __import_model_package_copy(import_name):
"""
Returns a copy of an imported model whose package contains
a function of the same name.
"""
starting_modules = sys.modules.copy()
model_type = importlib.import_module(f"torchvision.models.{import_name}")
new_modules = set(sys.modules) - set(starting_modules)
for m in new_modules:
del sys.modules[m]
return model_type
def __update_model_class_inheritance(cls):
"""
Updates the class inheritance of a torch.nn.Module to instead use
crypten.nn.Module
"""
bases = []
for m in cls.__bases__:
if m == torch.nn.Module:
bases.append(cnn.Module)
elif m == torch.nn.Sequential:
bases.append(cnn.Sequential)
elif m == torch.nn.ModuleDict:
bases.append(cnn.ModuleDict)
else:
bases.append(m)
cls.__bases__ = tuple(bases)
class FunctionalReplacement:
"""Replacement for `torch.nn.functional` that overwrites torch functionals to be crypten compatible"""
@staticmethod
def dropout(x, **kwargs):
return x.dropout(**kwargs)
@staticmethod
def relu(x, **kwargs):
return x.relu()
@staticmethod
def adaptive_avg_pool2d(x, *args):
return cnn.AdaptiveAvgPool2d(*args)(x)
@staticmethod
def avg_pool2d(x, *args, **kwargs):
return x.avg_pool2d(*args, **kwargs)
@staticmethod
def max_pool2d(x, *args, **kwargs):
return x.max_pool2d(*args, **kwargs)
def __update_torch_functions(module):
if hasattr(module, "nn"):
module.nn = cnn
# TODO: fix replacement in global `torch` module - perhaps use __torch_function__
if hasattr(module, "torch"):
module.torch.flatten = lambda x, *args: x.flatten(*args)
module.torch.transpose = lambda x, *args: x.transpose(*args)
# module.torch.cat = lambda *args, **kwargs: args[0].cat(*args, **kwargs)
if hasattr(module, "F"):
module.F = FunctionalReplacement()
def __get_module_list(model_name, model_type):
return __import_list[model_name] + model_type.__all__
try:
models = __import_module_copy("torchvision").models
except ModuleNotFoundError:
models = None
logging.warning("Unable to load torchvision models.")
if models is not None:
for import_name in __import_list.keys():
try:
model_type = getattr(models, import_name)
except AttributeError:
logging.warning(f"Could not load {import_name} from torchvision.modules")
continue
try:
# If function imported rather than package, replace with package
if not hasattr(model_type, "__all__"):
model_type = __import_model_package_copy(import_name)
__update_torch_functions(model_type)
module_list = __get_module_list(import_name, model_type)
for module_name in module_list:
module = getattr(model_type, module_name)
# Replace class inheritance from torch.nn.Module to crypten.nn.Module
if isinstance(module, type):
__update_model_class_inheritance(module)
module.load_state_dict = (
lambda *args, **kwargs: cnn.Module.load_state_dict(
*args, strict=False, **kwargs
)
)
if module_name in model_type.__all__:
globals()[module_name] = module
__all__.append(module_name)
except (RuntimeError, AttributeError) as e:
# Log that module produced an error
logging.warning(e)
raise DeprecationWarning(
    "crypten.models is being deprecated. To import models from torchvision, "
    "please import them directly and use crypten.nn.from_pytorch() to convert "
    "to CrypTen models."
)
| CrypTen-main | crypten/models/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import torch
from crypten.cuda import CUDALongTensor
def generate_random_ring_element(size, ring_size=(2**64), generator=None, **kwargs):
"""Helper function to generate a random number from a signed ring"""
if generator is None:
device = kwargs.get("device", torch.device("cpu"))
device = torch.device("cpu") if device is None else device
device = torch.device(device) if isinstance(device, str) else device
generator = crypten.generators["local"][device]
# TODO (brianknott): Check whether this RNG contains the full range we want.
rand_element = torch.randint(
-(ring_size // 2),
(ring_size - 1) // 2,
size,
generator=generator,
dtype=torch.long,
**kwargs,
)
if rand_element.is_cuda:
return CUDALongTensor(rand_element)
return rand_element
def generate_kbit_random_tensor(size, bitlength=None, generator=None, **kwargs):
"""Helper function to generate a random k-bit number"""
if bitlength is None:
bitlength = torch.iinfo(torch.long).bits
if bitlength == 64:
return generate_random_ring_element(size, generator=generator, **kwargs)
if generator is None:
device = kwargs.get("device", torch.device("cpu"))
device = torch.device("cpu") if device is None else device
device = torch.device(device) if isinstance(device, str) else device
generator = crypten.generators["local"][device]
rand_tensor = torch.randint(
0, 2**bitlength, size, generator=generator, dtype=torch.long, **kwargs
)
if rand_tensor.is_cuda:
return CUDALongTensor(rand_tensor)
return rand_tensor
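# Example (sketch): drawing a share with an explicit torch.Generator, which
# bypasses the crypten.generators lookup above.
def _rng_example():
    g = torch.Generator().manual_seed(0)
    share = generate_random_ring_element((2, 2), generator=g)
    assert share.dtype == torch.long and share.shape == (2, 2)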
| CrypTen-main | crypten/common/rng.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import abc
import functools
import numpy as np
import torch
from crypten.cuda import CUDALongTensor
def count_wraps(share_list):
"""Computes the number of overflows or underflows in a set of shares
We compute this by counting the number of overflows and underflows as we
traverse the list of shares.
"""
result = torch.zeros_like(share_list[0], dtype=torch.long)
prev = share_list[0]
for cur in share_list[1:]:
next = cur + prev
result -= ((prev < 0) & (cur < 0) & (next > 0)).long() # underflow
result += ((prev > 0) & (cur > 0) & (next < 0)).long() # overflow
prev = next
return result
@functools.lru_cache(maxsize=10)
def chebyshev_series(func, width, terms):
r"""Computes Chebyshev coefficients
For n = terms, the ith Chebyshev series coefficient is
.. math::
        c_i = \frac{2}{n} \sum_{k=0}^{n-1} f\left(w \cos\left(\frac{(k + 1/2)\pi}{n}\right)\right) \cos\left(\frac{i(k + 1/2)\pi}{n}\right)
Args:
func (function): function to be approximated
width (int): approximation will support inputs in range [-width, width]
terms (int): number of Chebyshev terms used in approximation
Returns:
        Chebyshev coefficients with shape equal to the number of terms.
"""
n_range = torch.arange(start=0, end=terms).float()
x = width * torch.cos((n_range + 0.5) * np.pi / terms)
y = func(x)
cos_term = torch.cos(torch.outer(n_range, n_range + 0.5) * np.pi / terms)
coeffs = (2 / terms) * torch.sum(y * cos_term, axis=1)
return coeffs
# FIXME: pytorch currently does not register `torch.cat` and
# `torch.stack` in __torch_function__. We therefore can not call
# torch.stack/torch.cat with CUDALongTensor as parameters. This is
# a temporary solution before pytorch fix their issue.
# See https://github.com/pytorch/pytorch/issues/34294 for details
def torch_cat(tensors, dim=0, out=None):
is_cuda = any(t.is_cuda for t in tensors)
if is_cuda:
return CUDALongTensor.cat(tensors, dim=dim, out=out)
return torch.cat(tensors, dim=dim, out=out)
def torch_stack(tensors, dim=0, out=None):
is_cuda = any(t.is_cuda for t in tensors)
if is_cuda:
return CUDALongTensor.stack(tensors, dim=dim, out=out)
return torch.stack(tensors, dim=dim, out=out)
# TODO: Remove this function and change the calling locations accordingly.
# See https://github.com/pytorch/pytorch/commit/445ee5620ec203cfccefd6f3dca4f0962a83b03e
def _grad_input_padding(
grad_output, input_size, stride, padding, kernel_size, dilation=None
):
if dilation is None:
# For backward compatibility
dilation = [1] * len(stride)
input_size = list(input_size)
k = grad_output.dim() - 2
if len(input_size) == k + 2:
input_size = input_size[-k:]
if len(input_size) != k:
raise ValueError(
"input_size must have {} elements (got {})".format(k + 2, len(input_size))
)
def dim_size(d):
return (
(grad_output.size(d + 2) - 1) * stride[d]
- 2 * padding[d]
+ 1
+ dilation[d] * (kernel_size[d] - 1)
)
min_sizes = [dim_size(d) for d in range(k)]
max_sizes = [min_sizes[d] + stride[d] - 1 for d in range(k)]
for size, min_size, max_size in zip(input_size, min_sizes, max_sizes):
if size < min_size or size > max_size:
raise ValueError(
(
"requested an input grad size of {}, but valid sizes range "
"from {} to {} (for a grad_output of {})"
).format(input_size, min_sizes, max_sizes, grad_output.size()[2:])
)
return tuple(input_size[d] - min_sizes[d] for d in range(k))
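# Example (sketch): count_wraps flags the single overflow that occurs when two
# large positive shares are added in 64-bit integer arithmetic.
def _count_wraps_example():
    shares = [torch.tensor([2**62]), torch.tensor([2**62])]
    assert count_wraps(shares).item() == 1  # 2**63 wraps around to negative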
| CrypTen-main | crypten/common/util.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
__all__ = ["functions", "rng", "tensor_types", "util", "serial"]
| CrypTen-main | crypten/common/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from crypten.cuda import CUDALongTensor
# helper functions that determine if input is float, int, or base tensor:
def _is_type_tensor(tensor, types):
"""Checks whether the elements of the input tensor are of a given type"""
if is_tensor(tensor):
if any(tensor.dtype == type_ for type_ in types):
return True
return False
def is_tensor(tensor):
"""Checks if the input tensor is a Torch tensor or a CUDALongTensor"""
return torch.is_tensor(tensor) or isinstance(tensor, CUDALongTensor)
def is_float_tensor(tensor):
"""Checks if the input tensor is a Torch tensor of a float type."""
return _is_type_tensor(tensor, [torch.float16, torch.float32, torch.float64])
def is_int_tensor(tensor):
"""Checks if the input tensor is a Torch tensor of an int type."""
return _is_type_tensor(
tensor, [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
)
| CrypTen-main | crypten/common/tensor_types.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import builtins # noqa
import collections
import inspect
import io
import logging
import pickle
import torch
def _safe_load_from_bytes(b):
return _safe_legacy_load(io.BytesIO(b))
# Legacy code from torch._utils_internal
def get_source_lines_and_file(obj, error_msg=None):
"""
Wrapper around inspect.getsourcelines and inspect.getsourcefile.
    Returns: (sourcelines, file_lineno, filename)
"""
filename = None # in case getsourcefile throws
try:
filename = inspect.getsourcefile(obj)
sourcelines, file_lineno = inspect.getsourcelines(obj)
except OSError as e:
msg = f"Can't get source for {obj}."
if error_msg:
msg += "\n" + error_msg
raise OSError(msg) from e
return sourcelines, file_lineno, filename
class RestrictedUnpickler(pickle.Unpickler):
__SAFE_CLASSES = {
"torch.storage._load_from_bytes": _safe_load_from_bytes,
}
__ALLOWLIST = [
"builtins.set",
"collections.OrderedDict",
"torch.nn.modules.activation.LogSigmoid",
"torch.nn.modules.activation.LogSoftmax",
"torch.nn.modules.activation.ReLU",
"torch.nn.modules.activation.Sigmoid",
"torch.nn.modules.activation.Softmax",
"torch.nn.modules.batchnorm.BatchNorm1d",
"torch.nn.modules.batchnorm.BatchNorm2d",
"torch.nn.modules.batchnorm.BatchNorm3d",
"torch.nn.modules.conv.Conv1d",
"torch.nn.modules.conv.Conv2d",
"torch.nn.modules.conv.ConvTranspose1d",
"torch.nn.modules.conv.ConvTranspose2d",
"torch.nn.modules.dropout.Dropout2d",
"torch.nn.modules.dropout.Dropout3d",
"torch.nn.modules.flatten.Flatten",
"torch.nn.modules.linear.Linear",
"torch.nn.modules.loss.BCELoss",
"torch.nn.modules.loss.BCEWithLogitsLoss",
"torch.nn.modules.loss.CrossEntropyLoss",
"torch.nn.modules.loss.L1Loss",
"torch.nn.modules.loss.MSELoss",
"torch.nn.modules.pooling.AvgPool2d",
"torch.nn.modules.pooling.MaxPool2d",
"torch._utils._rebuild_parameter",
"torch._utils._rebuild_tensor_v2",
"torch.Size",
"torch.BFloat16Storage",
"torch.BoolStorage",
"torch.CharStorage",
"torch.ComplexDoubleStorage",
"torch.ComplexFloatStorage",
"torch.HalfStorage",
"torch.IntStorage",
"torch.LongStorage",
"torch.QInt32Storage",
"torch.QInt8Storage",
"torch.QUInt8Storage",
"torch.ShortStorage",
"torch.storage._StorageBase",
"torch.ByteStorage",
"torch.DoubleStorage",
"torch.FloatStorage",
"torch._C.HalfStorageBase",
"torch._C.QInt32StorageBase",
"torch._C.QInt8StorageBase",
"torch.storage._TypedStorage",
]
for item in __ALLOWLIST:
try:
attrs = item.split(".")
g = globals()[attrs[0]]
for attr in attrs[1:]:
g = getattr(g, attr)
__SAFE_CLASSES[item] = g
except (KeyError, AttributeError):
logging.info(f"Could not find {item} to register as a SAFE_CLASS")
@classmethod
def register_safe_class(cls, input_class):
assert isinstance(input_class, type), "Cannot register %s type as safe" % type(
input_class
)
classname = str(input_class).split("'")[1]
logging.info(f"Registering {classname} class as safe for deserialization.")
cls.__SAFE_CLASSES[classname] = input_class
def find_class(self, module, name):
classname = f"{module}.{name}"
if classname not in self.__SAFE_CLASSES.keys():
raise ValueError(
f"Deserialization is restricted for pickled module {classname}"
)
return self.__SAFE_CLASSES[classname]
def register_safe_class(input_class):
RestrictedUnpickler.register_safe_class(input_class)
def _assert_empty_ordered_dict(x):
assert isinstance(x, collections.OrderedDict)
assert len(x) == 0
def _check_hooks_are_valid(result, hook_name):
if hasattr(result, hook_name):
_assert_empty_ordered_dict(getattr(result, hook_name))
if hasattr(result, "parameters"):
for param in result.parameters():
_assert_empty_ordered_dict(getattr(param, hook_name))
if hasattr(result, "modules"):
for module in result.modules():
_assert_empty_ordered_dict(getattr(module, hook_name))
def restricted_loads(s):
result = RestrictedUnpickler(io.BytesIO(s)).load()
if torch.is_tensor(result) or isinstance(result, torch.nn.Module):
_check_hooks_are_valid(result, "_backward_hooks")
return result
class safe_pickle:
Unpickler = RestrictedUnpickler
@staticmethod
def load(f):
return RestrictedUnpickler(f).load()
def _safe_legacy_load(f):
return torch.serialization._legacy_load(
f, map_location=None, pickle_module=safe_pickle
)
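class _DemoConfig:
    """Illustrative class (not part of CrypTen) used in the sketch below."""


# Example (sketch): allowlisting a custom class so RestrictedUnpickler will
# deserialize instances of it.
def _restricted_loads_example():
    register_safe_class(_DemoConfig)
    blob = pickle.dumps(_DemoConfig())
    assert isinstance(restricted_loads(blob), _DemoConfig)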
| CrypTen-main | crypten/common/serial.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from ..tensor_types import is_tensor
from ..util import torch_cat, torch_stack
__all__ = [ # noqa: F822
"__getitem__",
"__len__",
"__setitem__",
"cat",
"cumsum",
"dim",
"dot",
"expand",
"flatten",
"flip",
"gather",
"ger",
"index_add",
"index_select",
"mean",
"narrow",
"nelement",
"numel",
"pad",
"permute",
"prod",
"repeat",
"reshape",
"roll",
"scatter",
"scatter_add",
"size",
"split",
"squeeze",
"stack",
"sum",
"t",
"take",
"trace",
"transpose",
"unbind",
"unfold",
"unsqueeze",
"var",
"view",
]
PROPERTY_FUNCTIONS = ["__len__", "nelement", "dim", "size", "numel"]
def __setitem__(self, index, value):
"""Set tensor values by index"""
if not isinstance(value, type(self)):
kwargs = {"device": self.device}
if hasattr(self, "ptype"):
kwargs["ptype"] = self.ptype
value = self.new(value, **kwargs)
self._tensor.__setitem__(index, value._tensor)
def pad(self, pad, mode="constant", value=0):
result = self.shallow_copy()
if hasattr(value, "_tensor"):
value = value._tensor
if hasattr(result._tensor, "pad"):
result._tensor = self._tensor.pad(pad, mode=mode, value=value)
else:
result._tensor = torch.nn.functional.pad(
self._tensor, pad, mode=mode, value=value
)
return result
def index_add(self, dim, index, tensor):
"""Performs out-of-place index_add: Accumulate the elements of tensor into the
self tensor by adding to the indices in the order given in index.
"""
result = self.clone()
assert index.dim() == 1, "index needs to be a vector"
tensor = getattr(tensor, "_tensor", tensor)
result._tensor.index_add_(dim, index, tensor)
return result
def scatter_add(self, dim, index, other):
"""Adds all values from the tensor other into self at the indices
specified in the index tensor in a similar fashion as scatter_(). For
each value in other, it is added to an index in self which is specified
by its index in other for dimension != dim and by the corresponding
value in index for dimension = dim.
"""
result = self.clone()
other = getattr(other, "_tensor", other)
result._tensor.scatter_add_(dim, index, other)
return result
def scatter(self, dim, index, src):
"""Out-of-place version of :meth:`CrypTensor.scatter_`"""
result = self.clone()
if is_tensor(src):
src = self.new(src)
assert isinstance(src, type(self)), "Unrecognized scatter src type: %s" % type(src)
result._tensor.scatter_(dim, index, src._tensor)
return result
def unbind(self, dim=0):
tensors = self._tensor.unbind(dim=dim)
results = tuple(self.shallow_copy() for _ in range(len(tensors)))
for i in range(len(tensors)):
results[i]._tensor = tensors[i]
return results
def split(self, split_size, dim=0):
tensors = self._tensor.split(split_size, dim=dim)
results = tuple(self.shallow_copy() for _ in range(len(tensors)))
for i in range(len(tensors)):
results[i]._tensor = tensors[i]
return results
def take(self, index, dimension=None):
"""Take entries of tensor along a dimension according to the index.
This function is identical to torch.take() when dimension=None,
    otherwise, it is identical to the ONNX gather() function.
"""
result = self.shallow_copy()
index = index.long()
if dimension is None or self.dim() == 0:
result._tensor = self._tensor.take(index)
else:
all_indices = [slice(0, x) for x in self.size()]
all_indices[dimension] = index
result._tensor = self._tensor[all_indices]
return result
def mean(self, *args, **kwargs):
"""Computes mean of given tensor"""
result = self.sum(*args, **kwargs)
# Handle special case where input has 0 dimensions
if self.dim() == 0:
return result
# Compute divisor to use to compute mean
divisor = self.nelement() // result.nelement()
return result.div(divisor)
def var(self, *args, **kwargs):
"""Computes variance of tensor along specified dimensions."""
# preprocess inputs:
if len(args) == 0:
dim = None
unbiased = kwargs.get("unbiased", False)
mean = self.mean()
elif len(args) == 1:
dim = args[0]
unbiased = kwargs.get("unbiased", False)
keepdim = kwargs.get("keepdim", False)
elif len(args) == 2:
dim, unbiased = args[0], args[1]
keepdim = kwargs.get("keepdim", False)
else:
dim, unbiased, keepdim = args[0], args[1], args[2]
if dim is not None: # dimension is specified
mean = self.mean(dim, keepdim=True)
# Compute square error
result = (self - mean).square()
if dim is None:
result = result.sum()
else:
result = result.sum(dim, keepdim=keepdim)
    # Determine divisor (torch semantics: the unbiased estimator divides by N - 1)
    divisor = self.nelement() // result.nelement()
    if unbiased:
        divisor -= 1
# Compute mean square error
if divisor in [0, 1]:
return result
return result.div(divisor)
def prod(self, dim=None, keepdim=False):
"""
Returns the product of each row of the `input` tensor in the given
dimension `dim`.
If `keepdim` is `True`, the output tensor is of the same size as `input`
except in the dimension `dim` where it is of size 1. Otherwise, `dim` is
squeezed, resulting in the output tensor having 1 fewer dimension than
`input`.
"""
if dim is None:
return self.flatten().prod(dim=0)
result = self.clone()
while result.size(dim) > 1:
size = result.size(dim)
x, y, remainder = result.split([size // 2, size // 2, size % 2], dim=dim)
result = x.mul_(y)
result = type(self).cat([result, remainder], dim=dim)
# Squeeze result if necessary
if not keepdim:
result = result.squeeze(dim)
return result
def dot(self, y, weights=None):
"""Compute a dot product between two tensors"""
    assert self.size() == y.size(), "Number of elements does not match"
if weights is not None:
assert weights.size() == self.size(), "Incorrect number of weights"
result = self * weights
else:
result = self.clone()
return result.mul(y).sum()
def ger(self, y):
"""Computer an outer product between two vectors"""
assert self.dim() == 1 and y.dim() == 1, "Outer product must be on 1D tensors"
return self.view((-1, 1)).matmul(y.view((1, -1)))
def __cat_stack_helper(op, tensors, *args, **kwargs):
assert op in ["cat", "stack"], "Unsupported op for helper function"
assert isinstance(tensors, list), "%s input must be a list" % op
assert len(tensors) > 0, "expected a non-empty list of CrypTensors"
# Determine op-type
funcs = {"cat": torch_cat, "stack": torch_stack}
func = funcs[op]
if hasattr(tensors[0]._tensor, op):
func = getattr(tensors[0]._tensor, op)
# type coordination
    # note: enumerate starts at tensors[1], so indices must be shifted by one
    for i, tensor in enumerate(tensors[1:]):
        if torch.is_tensor(tensor) or isinstance(tensor, (int, float)):
            tensors[i + 1] = tensors[0].new(tensor)
        assert isinstance(tensors[i + 1], type(tensors[0])), f"{op} tensor type mismatch"
# Operate on all input tensors
result = tensors[0].clone()
result._tensor = func([tensor._tensor for tensor in tensors], *args, **kwargs)
return result
def cat(tensors, *args, **kwargs):
"""Perform tensor concatenation"""
return __cat_stack_helper("cat", tensors, *args, **kwargs)
def stack(tensors, *args, **kwargs):
"""Perform tensor stacking"""
return __cat_stack_helper("stack", tensors, *args, **kwargs)
# Make static methods static
cat = staticmethod(cat)
stack = staticmethod(stack)
# Add remaining regular functions
def _add_regular_function(function_name):
"""
Adds regular function that is applied directly on the underlying
`_tensor` attribute, and stores the result in the same attribute.
"""
def regular_func(self, *args, **kwargs):
result = self.shallow_copy()
result._tensor = getattr(result._tensor, function_name)(*args, **kwargs)
return result
if function_name not in globals():
globals()[function_name] = regular_func
def _add_property_function(function_name):
"""
Adds regular function that is applied directly on the underlying
`_tensor` attribute, and returns the result of that function.
"""
def property_func(self, *args, **kwargs):
return getattr(self._tensor, function_name)(*args, **kwargs)
if function_name not in globals():
globals()[function_name] = property_func
for function_name in __all__:
if function_name in PROPERTY_FUNCTIONS:
continue
_add_regular_function(function_name)
for function_name in PROPERTY_FUNCTIONS:
_add_property_function(function_name)
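# Example (sketch): how the generated wrappers behave. `_Toy` is a stand-in
# exposing the `_tensor` / `shallow_copy` interface they expect.
class _Toy:
    def __init__(self, tensor):
        self._tensor = tensor

    def shallow_copy(self):
        return _Toy(self._tensor)


_Toy.flatten = flatten  # regular function: result is wrapped in a new _Toy
_Toy.numel = numel  # property function: returns the raw value


def _wrapper_example():
    x = _Toy(torch.arange(6).view(2, 3))
    assert x.flatten()._tensor.shape == (6,) and x.numel() == 6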
| CrypTen-main | crypten/common/functions/regular.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
__all__ = [
"_max_pool2d_backward",
"adaptive_max_pool2d",
"adaptive_avg_pool2d",
"max_pool2d",
]
def max_pool2d(
self,
kernel_size,
padding=0,
stride=None,
dilation=1,
ceil_mode=False,
return_indices=False,
):
"""Applies a 2D max pooling over an input signal composed of several
input planes.
"""
max_input = self.clone()
max_input.data, output_size = _pool2d_reshape(
self.data,
kernel_size,
padding=padding,
stride=stride,
dilation=dilation,
ceil_mode=ceil_mode,
# padding with extremely negative values to avoid choosing pads.
# The magnitude of this value should not be too large because
# multiplication can otherwise fail.
pad_value=(-(2**24)),
# TODO: Find a better solution for padding with max_pooling
)
max_vals, argmax_vals = max_input.max(dim=-1, one_hot=True)
max_vals = max_vals.view(output_size)
if return_indices:
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
argmax_vals = argmax_vals.view(output_size + kernel_size)
return max_vals, argmax_vals
return max_vals
def _max_pool2d_backward(
self,
indices,
kernel_size,
padding=None,
stride=None,
dilation=1,
ceil_mode=False,
output_size=None,
):
"""Implements the backwards for a `max_pool2d` call."""
# Setup padding
if padding is None:
padding = 0
if isinstance(padding, int):
padding = padding, padding
assert isinstance(padding, tuple), "padding must be a int, tuple, or None"
p0, p1 = padding
# Setup stride
if stride is None:
stride = kernel_size
if isinstance(stride, int):
stride = stride, stride
    assert isinstance(stride, tuple), "stride must be an int, tuple, or None"
s0, s1 = stride
    # Setup dilation
    if isinstance(dilation, int):
        dilation = dilation, dilation
    assert isinstance(dilation, tuple), "dilation must be an int or tuple"
d0, d1 = dilation
# Setup kernel_size
if isinstance(kernel_size, int):
kernel_size = kernel_size, kernel_size
assert isinstance(padding, tuple), "padding must be a int or tuple"
k0, k1 = kernel_size
assert self.dim() == 4, "Input to _max_pool2d_backward must have 4 dimensions"
assert (
indices.dim() == 6
), "Indices input for _max_pool2d_backward must have 6 dimensions"
# Computes one-hot gradient blocks from each output variable that
# has non-zero value corresponding to the argmax of the corresponding
# block of the max_pool2d input.
kernels = self.view(self.size() + (1, 1)) * indices
# Use minimal size if output_size is not specified.
if output_size is None:
output_size = (
self.size(0),
self.size(1),
s0 * self.size(2) - 2 * p0,
s1 * self.size(3) - 2 * p1,
)
# Account for input padding
result_size = list(output_size)
result_size[-2] += 2 * p0
result_size[-1] += 2 * p1
# Account for input padding implied by ceil_mode
    if ceil_mode:
        # pad each spatial dimension using its own stride / kernel / dilation
        c0 = self.size(-2) * s0 + (k0 - 1) * d0 - output_size[-2]
        c1 = self.size(-1) * s1 + (k1 - 1) * d1 - output_size[-1]
        result_size[-2] += c0
        result_size[-1] += c1
# Sum the one-hot gradient blocks at corresponding index locations.
result = self.new(torch.zeros(result_size, device=kernels.device))
for i in range(self.size(2)):
for j in range(self.size(3)):
left_ind = s0 * i
top_ind = s1 * j
result[
:,
:,
left_ind : left_ind + k0 * d0 : d0,
top_ind : top_ind + k1 * d1 : d1,
] += kernels[:, :, i, j]
# Remove input padding
if ceil_mode:
result = result[:, :, : result.size(2) - c0, : result.size(3) - c1]
result = result[:, :, p0 : result.size(2) - p0, p1 : result.size(3) - p1]
return result
def adaptive_avg_pool2d(self, output_size):
r"""
Applies a 2D adaptive average pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveAvgPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
"""
if output_size is None or output_size[0] is None:
output_size = self.shape[-2:]
if self.shape[-2:] == output_size:
return self.clone()
resized_input, args, kwargs = _adaptive_pool2d_helper(
self, output_size, reduction="mean"
)
return resized_input.avg_pool2d(*args, **kwargs)
def adaptive_max_pool2d(self, output_size, return_indices=False):
r"""Applies a 2D adaptive max pooling over an input signal composed of
several input planes.
See :class:`~torch.nn.AdaptiveMaxPool2d` for details and output shape.
Args:
output_size: the target output size (single integer or
double-integer tuple)
return_indices: whether to return pooling indices. Default: ``False``
"""
if output_size is None or output_size[0] is None:
output_size = self.shape[-2:]
if self.shape[-2:] == output_size:
if return_indices:
return self.clone(), self.new(
torch.ones(self.size() + torch.Size(output_size))
)
return self.clone()
resized_input, args, kwargs = _adaptive_pool2d_helper(
self, output_size, reduction="max"
)
return resized_input.max_pool2d(*args, **kwargs, return_indices=return_indices)
# Helper functions
def _adaptive_pool2d_helper(input, output_size, reduction="mean"):
r"""
Provides a helper that adapts the input size and provides input
args / kwargs to allow pool2d functions to emulate adaptive pool2d
functions.
This function computes the kernel_size, stride, and padding for
pool2d functions and inserts rows along each dimension so that
a constant stride can be used.
"""
import crypten
input = input.clone()
if isinstance(output_size, int):
output_size = (output_size, output_size)
assert len(output_size) == 2, "output_size must be 2-dimensional."
output_size = list(output_size)
for i in range(2):
if output_size[i] is None:
output_size[i] = input.size(i - 2)
# Compute the start_index and end_index for kernels
def compute_kernels(in_size, out_size):
step = in_size / out_size
starts = []
ends = []
max_kernel_size = 0
for j in range(out_size):
# Compute local kernel size
start_index = int(j * step)
end_index = int(math.ceil((j + 1) * step))
k = end_index - start_index
# Update global kernel size
max_kernel_size = k if k > max_kernel_size else max_kernel_size
# Store local kernels
starts.append(start_index)
ends.append(end_index)
return starts, ends, max_kernel_size
# Repeats a row `ind` of `tensor` at dimension `dim` for overlapping kernels
def repeat_row(tensor, dim, ind):
device = tensor.device
x = tensor.index_select(dim, torch.arange(ind, device=device))
y = tensor.index_select(dim, torch.arange(ind, tensor.size(dim), device=device))
repeated_row = tensor.index_select(dim, torch.tensor(ind - 1, device=device))
return crypten.cat([x, repeated_row, y], dim=dim)
# Extends a row where a kernel is smaller than the maximum kernel size
def extend_row(tensor, dim, start_ind, end_ind):
device = tensor.device
if reduction == "mean":
extended_value = tensor.index_select(
dim, torch.arange(start_ind, end_ind, device=device)
)
extended_value = extended_value.mean(dim, keepdim=True)
elif reduction == "max":
extended_value = tensor.index_select(
dim, torch.tensor(start_ind, device=device)
)
else:
raise ValueError(f"Invalid reduction {reduction} for adaptive pooling.")
if start_ind == 0:
return crypten.cat([extended_value, tensor], dim=dim)
x = tensor.index_select(dim, torch.arange(start_ind, device=device))
y = tensor.index_select(
dim, torch.arange(start_ind, tensor.size(dim), device=device)
)
return crypten.cat([x, extended_value, y], dim=dim)
strides = []
for i in range(2):
dim = i - 2 + input.dim()
in_size = input.size(dim)
out_size = output_size[i] if output_size[i] is not None else in_size
# Compute repeats
if out_size > 1:
starts, ends, stride = compute_kernels(in_size, out_size)
added_rows = 0
            # note: `j` avoids shadowing the outer dimension index `i`
            for j in range(out_size):
                start_ind = starts[j]
                end_ind = ends[j]
                # Extend kernel so all kernels have the same size
                k = end_ind - start_ind
                for _ in range(k, stride):
                    input = extend_row(
                        input, dim, start_ind + added_rows, end_ind + added_rows
                    )
                    added_rows += 1
                if j == out_size - 1:
                    break
                # Repeat overlapping rows so stride can be equal to the kernel size
                if end_ind > starts[j + 1]:
input = repeat_row(input, dim, end_ind + added_rows)
added_rows += 1
else:
stride = in_size
strides.append(stride)
strides = tuple(strides)
kernel_sizes = strides
args = (kernel_sizes,)
kwargs = {"stride": strides}
return input, args, kwargs
def _pooling_output_shape(
input_size, kernel_size, pad_l, pad_r, stride, dilation, ceil_mode
):
"""
Generates output shape along a single dimension following conventions here:
https://github.com/pytorch/pytorch/blob/b0424a895c878cb865947164cb0ce9ce3c2e73ef/aten/src/ATen/native/Pool.h#L24-L38
"""
numerator = input_size + pad_l + pad_r - dilation * (kernel_size - 1) - 1
if ceil_mode:
numerator += stride - 1
output_size = numerator // stride + 1
# ensure that the last pooling starts inside the image
# needed to avoid problems in ceil mode
if ceil_mode and (output_size - 1) * stride >= input_size + pad_l:
output_size -= 1
return output_size
def _pool2d_reshape(
input,
kernel_size,
padding=None,
stride=None,
dilation=1,
ceil_mode=False,
pad_value=0,
):
"""Rearrange a 4-d tensor so that each kernel is represented by each row"""
# Setup kernel / stride / dilation values
k = kernel_size
if isinstance(k, int):
k = (k, k)
s = stride
if s is None:
s = k
elif isinstance(s, int):
s = (s, s)
d = dilation
if isinstance(d, int):
d = (d, d)
# Assert input parameters are correct type / size
assert isinstance(k, tuple), "kernel_size must be an int or tuple"
assert isinstance(s, tuple), "stride must be and int, a tuple, or None"
assert len(k) == 2, "kernel_size must be an int or tuple pair"
assert len(s) == 2, "stride must be an int or tuple pair"
assert isinstance(pad_value, int), "pad_value must be an integer"
assert input.dim() >= 2, "Pooling input dimension should be at least 2"
# Apply padding if necessary
if padding is not None:
padding = (padding, padding) if isinstance(padding, int) else padding
assert len(padding) == 2, "Padding must be an integer or a pair"
padding = (padding[0], padding[0], padding[1], padding[1])
else:
padding = (0, 0, 0, 0)
# Compute output size based on parameters
n = input.size()[:-2]
h = _pooling_output_shape(
input.size(-2), k[0], padding[0], padding[1], s[0], d[0], ceil_mode
)
w = _pooling_output_shape(
input.size(-1), k[1], padding[2], padding[3], s[1], d[1], ceil_mode
)
out_size = tuple(n + (h, w))
input = torch.nn.functional.pad(input, padding, value=pad_value)
if ceil_mode:
update_pad = [0, 0, 0, 0]
update_pad[3] = h * s[0] + (k[0] - 1) * d[0] - input.size(-2)
update_pad[1] = w * s[1] + (k[1] - 1) * d[1] - input.size(-1)
input = torch.nn.functional.pad(input, tuple(update_pad), value=pad_value)
# Reshape input to arrange kernels to be represented by rows
kernel_indices = torch.tensor(range(0, k[1] * d[1], d[1]), device=input.device)
kernel_indices = torch.cat(
[kernel_indices + i * input.size(-1) for i in range(0, k[0] * d[0], d[0])]
)
kernel_indices = torch.stack([kernel_indices + i * s[1] for i in range(w)])
offset = input.size(-1)
kernel_indices = torch.cat([kernel_indices + i * s[0] * offset for i in range(h)])
for dim in range(2, input.dim()):
offset *= input.size(-dim)
kernel_indices = torch.stack(
[kernel_indices + i * offset for i in range(input.size(-dim - 1))]
)
output = input.take(kernel_indices)
return output, out_size
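# Example (sketch): the reshape trick in the clear. Each row of `rows` holds
# one 2x2 window, so a max over the last dimension is a 2x2 max pool.
def _pool2d_reshape_example():
    x = torch.arange(16.0).view(1, 1, 4, 4)
    rows, out_size = _pool2d_reshape(x, 2, stride=2)
    pooled = rows.max(dim=-1).values.view(out_size)
    assert pooled.shape == (1, 1, 2, 2)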
| CrypTen-main | crypten/common/functions/pooling.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from . import approximations, dropout, logic, maximum, pooling, power, regular, sampling
__all__ = [
"approximations",
"dropout",
"logic",
"maximum",
"pooling",
"power",
"regular",
"sampling",
]
| CrypTen-main | crypten/common/functions/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
from crypten.common.tensor_types import is_tensor
__all__ = [
"__eq__",
"__ge__",
"__gt__",
"__le__",
"__lt__",
"__ne__",
"abs",
"eq",
"ge",
"gt",
"hardtanh",
"le",
"lt",
"ne",
"relu",
"sign",
"where",
]
def ge(self, y):
"""Returns self >= y"""
return 1 - self.lt(y)
def gt(self, y):
"""Returns self > y"""
return (-self + y)._ltz()
def le(self, y):
"""Returns self <= y"""
return 1 - self.gt(y)
def lt(self, y):
"""Returns self < y"""
return (self - y)._ltz()
def eq(self, y):
"""Returns self == y"""
return 1 - self.ne(y)
def ne(self, y):
"""Returns self != y"""
difference = self - y
difference = type(difference).stack([difference, -difference])
return difference._ltz().sum(0)
__eq__ = eq
__ge__ = ge
__gt__ = gt
__le__ = le
__lt__ = lt
__ne__ = ne
def sign(self):
"""Computes the sign value of a tensor (0 is considered positive)"""
return 1 - 2 * self._ltz()
def abs(self):
"""Computes the absolute value of a tensor"""
return self * self.sign()
def relu(self):
"""Compute a Rectified Linear function on the input tensor."""
return self * self.ge(0)
def hardtanh(self, min_value=-1, max_value=1):
r"""Applies the HardTanh function element-wise
HardTanh is defined as:
.. math::
\text{HardTanh}(x) = \begin{cases}
1 & \text{ if } x > 1 \\
-1 & \text{ if } x < -1 \\
x & \text{ otherwise } \\
\end{cases}
    The range of the linear region :math:`[-1, 1]` can be adjusted using
    :attr:`min_value` and :attr:`max_value`.

    Args:
        min_value: minimum value of the linear region range. Default: -1
        max_value: maximum value of the linear region range. Default: 1
"""
intermediate = crypten.stack([self - min_value, self - max_value]).relu()
intermediate = intermediate[0].sub(intermediate[1])
return intermediate.add_(min_value)
def where(self, condition, y):
"""Selects elements from self or y based on condition
Args:
condition (torch.bool or MPCTensor): when True yield self,
otherwise yield y
y (torch.tensor or MPCTensor): values selected at indices
where condition is False.
Returns: MPCTensor or torch.tensor
"""
if is_tensor(condition):
condition = condition.float()
y_masked = y * (1 - condition)
else:
# encrypted tensor must be first operand
y_masked = (1 - condition) * y
return self * condition + y_masked
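# Example (sketch): the comparison identities above checked in the clear, with
# a plain `< 0` test standing in for the secret-shared _ltz primitive.
def _ltz_identity_example():
    import torch

    x, y = torch.tensor([1.0, 3.0]), torch.tensor([2.0, 2.0])

    def ltz(t):
        return (t < 0).float()

    assert torch.equal(ltz(y - x), (x > y).float())  # gt identity
    assert torch.equal(ltz(x - y) + ltz(y - x), (x != y).float())  # ne identity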
| CrypTen-main | crypten/common/functions/logic.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import crypten
import torch
from crypten.config import cfg
__all__ = [
"argmax",
"argmin",
"max",
"min",
]
def argmax(self, dim=None, keepdim=False, one_hot=True):
"""Returns the indices of the maximum value of all elements in the
`input` tensor.
"""
method = cfg.functions.max_method
if self.dim() == 0:
result = (
self.new(torch.ones((), device=self.device))
if one_hot
else self.new(torch.zeros((), device=self.device))
)
return result
result = _argmax_helper(self, dim, one_hot, method, _return_max=False)
if not one_hot:
result = _one_hot_to_index(result, dim, keepdim, self.device)
return result
def argmin(self, dim=None, keepdim=False, one_hot=True):
"""Returns the indices of the minimum value of all elements in the
`input` tensor.
"""
return (-self).argmax(dim=dim, keepdim=keepdim, one_hot=one_hot)
def max(self, dim=None, keepdim=False, one_hot=True):
"""Returns the maximum value of all elements in the input tensor."""
method = cfg.functions.max_method
if dim is None:
if method in ["log_reduction", "double_log_reduction"]:
# max_result can be obtained directly
max_result = _max_helper_all_tree_reductions(self, method=method)
else:
# max_result needs to be obtained through argmax
with cfg.temp_override({"functions.max_method": method}):
argmax_result = self.argmax(one_hot=True)
max_result = self.mul(argmax_result).sum()
return max_result
else:
argmax_result, max_result = _argmax_helper(
self, dim=dim, one_hot=True, method=method, _return_max=True
)
if max_result is None:
max_result = (self * argmax_result).sum(dim=dim, keepdim=keepdim)
if keepdim:
max_result = (
max_result.unsqueeze(dim)
if max_result.dim() < self.dim()
else max_result
)
if one_hot:
return max_result, argmax_result
else:
return (
max_result,
_one_hot_to_index(argmax_result, dim, keepdim, self.device),
)
def min(self, dim=None, keepdim=False, one_hot=True):
"""Returns the minimum value of all elements in the input tensor."""
result = (-self).max(dim=dim, keepdim=keepdim, one_hot=one_hot)
if dim is None:
return -result
else:
return -result[0], result[1]
# Helper functions
def _argmax_helper_pairwise(enc_tensor, dim=None):
"""Returns 1 for all elements that have the highest value in the appropriate
dimension of the tensor. Uses O(n^2) comparisons and a constant number of
rounds of communication
"""
dim = -1 if dim is None else dim
row_length = enc_tensor.size(dim) if enc_tensor.size(dim) > 1 else 2
# Copy each row (length - 1) times to compare to each other row
a = enc_tensor.expand(row_length - 1, *enc_tensor.size())
# Generate cyclic permutations for each row
b = crypten.stack([enc_tensor.roll(i + 1, dims=dim) for i in range(row_length - 1)])
# Use either prod or sum & comparison depending on size
if row_length - 1 < torch.iinfo(torch.long).bits * 2:
pairwise_comparisons = a.ge(b)
result = pairwise_comparisons.prod(0)
else:
# Sum of columns with all 1s will have value equal to (length - 1).
# Using ge() since it is slightly faster than eq()
pairwise_comparisons = a.ge(b)
result = pairwise_comparisons.sum(0).ge(row_length - 1)
return result, None
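# Editor's note: plaintext sketch of the pairwise comparison trick above,
# assuming plain torch tensors. The maximum is the only element that is >=
# every cyclic shift of the row, so its product of comparisons is 1.
def _pairwise_argmax_demo():
    import torch

    row = torch.tensor([3.0, 7.0, 5.0])
    n = row.numel()
    a = row.expand(n - 1, n)
    b = torch.stack([row.roll(i + 1) for i in range(n - 1)])
    one_hot = a.ge(b).long().prod(0)
    assert torch.equal(one_hot, torch.tensor([0, 1, 0]))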
def _compute_pairwise_comparisons_for_steps(input_tensor, dim, steps):
"""
Helper function that does pairwise comparisons by splitting input
tensor for `steps` number of steps along dimension `dim`.
"""
enc_tensor_reduced = input_tensor.clone()
for _ in range(steps):
m = enc_tensor_reduced.size(dim)
x, y, remainder = enc_tensor_reduced.split([m // 2, m // 2, m % 2], dim=dim)
pairwise_max = crypten.where(x >= y, x, y)
enc_tensor_reduced = crypten.cat([pairwise_max, remainder], dim=dim)
return enc_tensor_reduced
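# Editor's note: plaintext sketch of a single halving step performed by the
# helper above, assuming plain torch tensors: split the dimension in half,
# keep the elementwise winners, and carry over any odd remainder.
def _tree_reduction_step_demo():
    import torch

    v = torch.tensor([4.0, 9.0, 2.0, 7.0, 5.0])
    m = v.numel()
    x, y, remainder = v.split([m // 2, m // 2, m % 2])
    reduced = torch.cat([torch.where(x >= y, x, y), remainder])
    assert torch.equal(reduced, torch.tensor([4.0, 9.0, 5.0]))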
def _max_helper_log_reduction(enc_tensor, dim=None):
"""Returns max along dim `dim` using the log_reduction algorithm"""
if enc_tensor.dim() == 0:
return enc_tensor
input, dim_used = enc_tensor, dim
if dim is None:
dim_used = 0
input = enc_tensor.flatten()
n = input.size(dim_used) # number of items in the dimension
steps = int(math.log(n))
enc_tensor_reduced = _compute_pairwise_comparisons_for_steps(input, dim_used, steps)
# compute max over the resulting reduced tensor with n^2 algorithm
# note that the resulting one-hot vector we get here finds maxes only
# over the reduced vector in enc_tensor_reduced, so we won't use it
with cfg.temp_override({"functions.max_method": "pairwise"}):
enc_max_vec, enc_one_hot_reduced = enc_tensor_reduced.max(dim=dim_used)
return enc_max_vec
def _max_helper_double_log_recursive(enc_tensor, dim):
"""Recursive subroutine for computing max via double log reduction algorithm"""
n = enc_tensor.size(dim)
# compute integral sqrt(n) and the integer number of sqrt(n) size
# vectors that can be extracted from n
sqrt_n = int(math.sqrt(n))
count_sqrt_n = n // sqrt_n
# base case for recursion: no further splits along dimension dim
if n == 1:
return enc_tensor
else:
# split into tensors that can be broken into vectors of size sqrt(n)
# and the remainder of the tensor
size_arr = [sqrt_n * count_sqrt_n, n % sqrt_n]
split_enc_tensor, remainder = enc_tensor.split(size_arr, dim=dim)
# reshape such that dim holds sqrt_n and dim+1 holds count_sqrt_n
updated_enc_tensor_size = [sqrt_n, enc_tensor.size(dim + 1) * count_sqrt_n]
size_arr = [enc_tensor.size(i) for i in range(enc_tensor.dim())]
size_arr[dim], size_arr[dim + 1] = updated_enc_tensor_size
split_enc_tensor = split_enc_tensor.reshape(size_arr)
# recursive call on reshaped tensor
split_enc_max = _max_helper_double_log_recursive(split_enc_tensor, dim)
# reshape the result to have the (dim+1)th dimension as before
# and concatenate the previously computed remainder
size_arr[dim], size_arr[dim + 1] = [count_sqrt_n, enc_tensor.size(dim + 1)]
enc_max_tensor = split_enc_max.reshape(size_arr)
full_max_tensor = crypten.cat([enc_max_tensor, remainder], dim=dim)
# call the max function on dimension dim
with cfg.temp_override({"functions.max_method": "pairwise"}):
enc_max, enc_arg_max = full_max_tensor.max(dim=dim, keepdim=True)
# compute max over the resulting reduced tensor with n^2 algorithm
# note that the resulting one-hot vector we get here finds maxes only
# over the reduced vector in enc_tensor_reduced, so we won't use it
return enc_max
def _max_helper_double_log_reduction(enc_tensor, dim=None):
"""Returns max along dim `dim` using the double_log_reduction algorithm"""
if enc_tensor.dim() == 0:
return enc_tensor
input, dim_used, size_arr = enc_tensor, dim, ()
if dim is None:
dim_used = 0
input = enc_tensor.flatten()
# turn dim_used into a positive number
dim_used = dim_used + input.dim() if dim_used < 0 else dim_used
if input.dim() > 1:
size_arr = [input.size(i) for i in range(input.dim()) if i != dim_used]
# add another dimension to vectorize double log reductions
input = input.unsqueeze(dim_used + 1)
enc_max_val = _max_helper_double_log_recursive(input, dim_used)
enc_max_val = enc_max_val.squeeze(dim_used + 1)
enc_max_val = enc_max_val.reshape(size_arr)
return enc_max_val
def _max_helper_accelerated_cascade(enc_tensor, dim=None):
"""Returns max along dimension `dim` using the accelerated cascading algorithm"""
if enc_tensor.dim() == 0:
return enc_tensor
input, dim_used = enc_tensor, dim
if dim is None:
dim_used = 0
input = enc_tensor.flatten()
n = input.size(dim_used) # number of items in the dimension
    if n < 3:
        with cfg.temp_override({"functions.max_method": "pairwise"}):
            # operate on the (possibly flattened) `input` so `dim_used` is valid
            enc_max, enc_argmax = input.max(dim=dim_used)
        return enc_max
    steps = int(math.log(math.log(math.log(n)))) + 1
    enc_tensor_reduced = _compute_pairwise_comparisons_for_steps(
        input, dim_used, steps
    )
enc_max = _max_helper_double_log_reduction(enc_tensor_reduced, dim=dim_used)
return enc_max
def _max_helper_all_tree_reductions(enc_tensor, dim=None, method="log_reduction"):
"""
Finds the max along `dim` using the specified reduction method. `method`
can be one of [`log_reduction`, `double_log_reduction`, 'accelerated_cascade`]
`log_reduction`: Uses O(n) comparisons and O(log n) rounds of communication
`double_log_reduction`: Uses O(n loglog n) comparisons and O(loglog n) rounds
of communication (Section 2.6.2 in https://folk.idi.ntnu.no/mlh/algkon/jaja.pdf)
`accelerated_cascade`: Uses O(n) comparisons and O(loglog n) rounds of
communication. (See Section 2.6.3 of https://folk.idi.ntnu.no/mlh/algkon/jaja.pdf)
"""
if method == "log_reduction":
return _max_helper_log_reduction(enc_tensor, dim)
elif method == "double_log_reduction":
return _max_helper_double_log_reduction(enc_tensor, dim)
elif method == "accelerated_cascade":
return _max_helper_accelerated_cascade(enc_tensor, dim)
else:
raise RuntimeError("Unknown max method")
def _argmax_helper_all_tree_reductions(enc_tensor, dim=None, method="log_reduction"):
"""
Returns 1 for all elements that have the highest value in the appropriate
dimension of the tensor. `method` can be one of [`log_reduction`,
`double_log_reduction`, `accelerated_cascade`].
`log_reduction`: Uses O(n) comparisons and O(log n) rounds of communication
`double_log_reduction`: Uses O(n loglog n) comparisons and O(loglog n) rounds
of communication (Section 2.6.2 in https://folk.idi.ntnu.no/mlh/algkon/jaja.pdf)
`accelerated_cascade`: Uses O(n) comparisons and O(loglog n) rounds of
communication. (See Section 2.6.3 of https://folk.idi.ntnu.no/mlh/algkon/jaja.pdf)
"""
enc_max_vec = _max_helper_all_tree_reductions(enc_tensor, dim=dim, method=method)
# reshape back to the original size
enc_max_vec_orig = enc_max_vec
if dim is not None:
enc_max_vec_orig = enc_max_vec.unsqueeze(dim)
# compute the one-hot vector over the entire tensor
enc_one_hot_vec = enc_tensor.eq(enc_max_vec_orig)
return enc_one_hot_vec, enc_max_vec
def _argmax_helper(
enc_tensor, dim=None, one_hot=True, method="pairwise", _return_max=False
):
"""
Returns 1 for one randomly chosen element among all the elements that have
the highest value in the appropriate dimension of the tensor. Sets up the CrypTensor
appropriately, and then chooses among the different argmax algorithms.
"""
if enc_tensor.dim() == 0:
result = (
enc_tensor.new(torch.ones(()))
if one_hot
else enc_tensor.new(torch.zeros(()))
)
if _return_max:
return result, None
return result
updated_enc_tensor = enc_tensor.flatten() if dim is None else enc_tensor
if method == "pairwise":
result_args, result_val = _argmax_helper_pairwise(updated_enc_tensor, dim)
elif method in ["log_reduction", "double_log_reduction", "accelerated_cascade"]:
result_args, result_val = _argmax_helper_all_tree_reductions(
updated_enc_tensor, dim, method
)
else:
raise RuntimeError("Unknown argmax method")
# Break ties by using a uniform weighted sample among tied indices
result_args = result_args.weighted_index(dim)
result_args = result_args.view(enc_tensor.size()) if dim is None else result_args
if _return_max:
return result_args, result_val
else:
return result_args
def _one_hot_to_index(tensor, dim, keepdim, device=None):
"""
Converts a one-hot tensor output from an argmax / argmin function to a
tensor containing indices from the input tensor from which the result of the
argmax / argmin was obtained.
"""
if dim is None:
result = tensor.flatten()
result = result * torch.tensor(list(range(tensor.nelement())), device=device)
return result.sum()
else:
size = [1] * tensor.dim()
size[dim] = tensor.size(dim)
result = tensor * torch.tensor(
list(range(tensor.size(dim))), device=device
).view(size)
return result.sum(dim, keepdim=keepdim)
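# Editor's note: plaintext sketch of the one-hot-to-index conversion above,
# using plain torch tensors: a dot product with [0, 1, ..., n-1] recovers the
# index of the single nonzero entry.
def _one_hot_to_index_demo():
    import torch

    one_hot = torch.tensor([0.0, 0.0, 1.0, 0.0])
    index = (one_hot * torch.arange(one_hot.nelement())).sum()
    assert index.item() == 2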
| CrypTen-main | crypten/common/functions/maximum.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import torch
__all__ = [
"bernoulli",
"randn",
"weighted_index",
"weighted_sample",
]
def randn(*sizes, device=None):
"""
Returns a tensor with normally distributed elements. Samples are
generated using the Box-Muller transform with optimizations for
numerical precision and MPC efficiency.
"""
u = crypten.rand(*sizes, device=device).flatten()
odd_numel = u.numel() % 2 == 1
if odd_numel:
u = crypten.cat([u, crypten.rand((1,), device=device)])
n = u.numel() // 2
u1 = u[:n]
u2 = u[n:]
# Radius = sqrt(- 2 * log(u1))
r2 = -2 * u1.log(input_in_01=True)
r = r2.sqrt()
# Theta = cos(2 * pi * u2) or sin(2 * pi * u2)
cos, sin = u2.sub(0.5).mul(6.28318531).cossin()
    # Generate 2 independent normal random variables via the Box-Muller transform
x = r.mul(sin)
y = r.mul(cos)
z = crypten.cat([x, y])
if odd_numel:
z = z[1:]
return z.view(*sizes)
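# Editor's note: plaintext Box-Muller sketch mirroring the encrypted version
# above, with plain torch tensors (illustration only; u1 is clamped away from
# zero so the log is finite).
def _box_muller_demo(n=1000):
    import math

    import torch

    u1 = torch.rand(n).clamp_min(1e-7)
    u2 = torch.rand(n)
    r = (-2 * u1.log()).sqrt()
    theta = 2 * math.pi * u2
    # x and y are (approximately) independent standard normal samples
    x, y = r * theta.cos(), r * theta.sin()
    return torch.cat([x, y])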
def bernoulli(self):
"""Returns a tensor with elements in {0, 1}. The i-th element of the
output will be 1 with probability according to the i-th value of the
input tensor."""
return self > crypten.rand(self.size(), device=self.device)
def weighted_index(self, dim=None):
"""
Returns a tensor with entries that are one-hot along dimension `dim`.
These one-hot entries are set at random with weights given by the input
`self`.
Examples::
>>> encrypted_tensor = MPCTensor(torch.tensor([1., 6.]))
>>> index = encrypted_tensor.weighted_index().get_plain_text()
# With 1 / 7 probability
torch.tensor([1., 0.])
# With 6 / 7 probability
torch.tensor([0., 1.])
"""
if dim is None:
return self.flatten().weighted_index(dim=0).view(self.size())
x = self.cumsum(dim)
max_weight = x.index_select(dim, torch.tensor(x.size(dim) - 1, device=self.device))
r = crypten.rand(max_weight.size(), device=self.device) * max_weight
gt = x.gt(r)
shifted = gt.roll(1, dims=dim)
shifted.data.index_fill_(dim, torch.tensor(0, device=self.device), 0)
return gt - shifted
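# Editor's note: plaintext sketch of the cumulative-sum sampling used above,
# assuming plain torch tensors. A uniform draw in [0, total_weight) falls into
# exactly one cumulative bucket; differencing the shifted comparison vector
# leaves a single 1 at the sampled index.
def _weighted_index_demo():
    import torch

    weights = torch.tensor([1.0, 6.0])
    cumulative = weights.cumsum(0)        # [1., 7.]
    r = torch.rand(()) * cumulative[-1]   # uniform in [0, 7)
    gt = (cumulative > r).float()         # e.g. [0., 1.] when r >= 1
    shifted = gt.roll(1)
    shifted[0] = 0
    return gt - shifted                   # exactly one entry equals 1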
def weighted_sample(self, dim=None):
"""
Samples a single value across dimension `dim` with weights corresponding
to the values in `self`
Returns the sample and the one-hot index of the sample.
Examples::
>>> encrypted_tensor = MPCTensor(torch.tensor([1., 6.]))
>>> index = encrypted_tensor.weighted_sample().get_plain_text()
# With 1 / 7 probability
(torch.tensor([1., 0.]), torch.tensor([1., 0.]))
# With 6 / 7 probability
(torch.tensor([0., 6.]), torch.tensor([0., 1.]))
"""
indices = self.weighted_index(dim)
sample = self.mul(indices).sum(dim)
return sample, indices
| CrypTen-main | crypten/common/functions/sampling.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import crypten
__all__ = ["dropout"]
def dropout(self, p=0.5, training=True, inplace=False):
r"""
Randomly zeroes some of the elements of the input tensor with
probability :attr:`p`.
    Args:
        p: probability of an element to be zeroed. Default: 0.5
        training: apply dropout if set to ``True``. Default: ``True``
        inplace: If set to ``True``, will do this operation in-place.
            Default: ``False``
"""
if p == 0.0:
return self
elif p == 1.0:
return self - self
assert p > 0.0 and p < 1.0, "dropout probability has to be between 0 and 1"
if training and inplace:
logging.warning(
"CrypTen dropout does not support inplace computation during training."
)
if not training:
if inplace:
return self
else:
return self.clone()
rand_tensor = crypten.rand(self.size(), device=self.device)
dropout_tensor = rand_tensor > p
if inplace:
result_tensor = self.div_(1 - p)
result_tensor = result_tensor.mul_(dropout_tensor)
else:
result_tensor = self.div(1 - p)
result_tensor = result_tensor.mul(dropout_tensor)
return result_tensor
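# Editor's note: a minimal usage sketch (assuming an initialized CrypTen
# runtime). Surviving activations are scaled by 1 / (1 - p), matching
# torch.nn.functional.dropout semantics.
def _dropout_demo():
    import torch

    crypten.init()
    x = crypten.cryptensor(torch.ones(4, 4))
    y = x.dropout(p=0.5)
    # decrypted entries are either 0.0 or 2.0 (i.e. 1 / (1 - 0.5))
    return y.get_plain_text()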
| CrypTen-main | crypten/common/functions/dropout.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import torch
from ..tensor_types import is_tensor
__all__ = ["norm", "polynomial", "pos_pow", "pow"]
def pow(self, p, **kwargs):
"""
Computes an element-wise exponent `p` of a tensor, where `p` is an
integer.
"""
if isinstance(p, float) and int(p) == p:
p = int(p)
if not isinstance(p, int):
raise TypeError(
"pow must take an integer exponent. For non-integer powers, use"
" pos_pow with positive-valued base."
)
if p < -1:
return self.reciprocal().pow(-p)
elif p == -1:
return self.reciprocal()
elif p == 0:
# Note: This returns 0 ** 0 -> 1 when inputs have zeros.
# This is consistent with PyTorch's pow function.
return self.new(torch.ones_like(self.data))
elif p == 1:
return self.clone()
elif p == 2:
return self.square()
elif p % 2 == 0:
return self.square().pow(p // 2)
    else:
        # odd exponent: x ** p == x * (x ** 2) ** ((p - 1) // 2)
        return self.square().pow((p - 1) // 2).mul_(self)
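# Editor's note: plaintext sketch of the exponentiation-by-squaring recursion
# implemented above, using plain Python integers (p >= 0 assumed).
def _pow_recursion_demo(x, p):
    if p == 0:
        return 1
    if p % 2 == 0:
        return _pow_recursion_demo(x * x, p // 2)
    # odd exponent: x ** p == x * (x ** 2) ** ((p - 1) // 2)
    return x * _pow_recursion_demo(x * x, (p - 1) // 2)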
def pos_pow(self, p):
"""
Approximates self ** p by computing: :math:`x^p = exp(p * log(x))`
Note that this requires that the base `self` contain only positive values
since log can only be computed on positive numbers.
Note that the value of `p` can be an integer, float, public tensor, or
encrypted tensor.
"""
if isinstance(p, int) or (isinstance(p, float) and int(p) == p):
return self.pow(p)
return self.log().mul_(p).exp()
def polynomial(self, coeffs, func="mul"):
"""Computes a polynomial function on a tensor with given coefficients,
`coeffs`, that can be a list of values or a 1-D tensor.
Coefficients should be ordered from the order 1 (linear) term first,
ending with the highest order term. (Constant is not included).
"""
# Coefficient input type-checking
if isinstance(coeffs, list):
coeffs = torch.tensor(coeffs, device=self.device)
assert is_tensor(coeffs) or crypten.is_encrypted_tensor(
coeffs
), "Polynomial coefficients must be a list or tensor"
assert coeffs.dim() == 1, "Polynomial coefficients must be a 1-D tensor"
# Handle linear case
if coeffs.size(0) == 1:
return self.mul(coeffs)
# Compute terms of polynomial using exponentially growing tree
terms = crypten.stack([self, self.square()])
while terms.size(0) < coeffs.size(0):
highest_term = terms.index_select(
0, torch.tensor(terms.size(0) - 1, device=self.device)
)
new_terms = getattr(terms, func)(highest_term)
terms = crypten.cat([terms, new_terms])
# Resize the coefficients for broadcast
terms = terms[: coeffs.size(0)]
for _ in range(terms.dim() - 1):
coeffs = coeffs.unsqueeze(1)
# Multiply terms by coefficients and sum
return terms.mul(coeffs).sum(0)
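# Editor's note: a minimal usage sketch of `polynomial` (assuming an
# initialized CrypTen runtime). Coefficients start at the linear term, so
# [3.0, 1.0] evaluates 3 * x + x ** 2.
def _polynomial_demo():
    crypten.init()
    x = crypten.cryptensor(torch.tensor([2.0]))
    # 3 * 2 + 2 ** 2 = 10 (up to fixed-point precision)
    return x.polynomial([3.0, 1.0]).get_plain_text()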
def norm(self, p="fro", dim=None, keepdim=False):
"""Computes the p-norm of the input tensor (or along a dimension)."""
if p == "fro":
p = 2
if isinstance(p, (int, float)):
assert p >= 1, "p-norm requires p >= 1"
if p == 1:
if dim is None:
return self.abs().sum()
return self.abs().sum(dim, keepdim=keepdim)
elif p == 2:
if dim is None:
return self.square().sum().sqrt()
return self.square().sum(dim, keepdim=keepdim).sqrt()
elif p == float("inf"):
if dim is None:
return self.abs().max()
return self.abs().max(dim=dim, keepdim=keepdim)[0]
else:
if dim is None:
return self.abs().pos_pow(p).sum().pos_pow(1 / p)
return self.abs().pos_pow(p).sum(dim, keepdim=keepdim).pos_pow(1 / p)
elif p == "nuc":
raise NotImplementedError("Nuclear norm is not implemented")
else:
raise ValueError(f"Improper value p ({p})for p-norm")
| CrypTen-main | crypten/common/functions/power.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
import crypten
import torch
from crypten.config import cfg
__all__ = [
"exp",
"log",
"reciprocal",
"inv_sqrt",
"sqrt",
"_eix",
"cossin",
"cos",
"sin",
"sigmoid",
"tanh",
"erf",
"softmax",
"log_softmax",
]
# Iterative methods:
def exp(self):
r"""Approximates the exponential function using a limit approximation:
.. math::
        exp(x) = \lim_{n \rightarrow \infty} (1 + x / n) ^ n
Here we compute exp by choosing n = 2 ** d for some large d equal to
`iterations`. We then compute (1 + x / n) once and square `d` times.
Set the number of iterations for the limit approximation with
config.exp_iterations.
""" # noqa: W605
iters = cfg.functions.exp_iterations
result = 1 + self.div(2**iters)
for _ in range(iters):
result = result.square()
return result
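# Editor's note: plaintext sketch of the repeated-squaring limit approximation
# above: with d iterations, n = 2 ** d and exp(x) ~= (1 + x / n) ** n.
def _exp_limit_demo(x=1.0, iterations=8):
    result = 1 + x / 2**iterations
    for _ in range(iterations):
        result = result * result
    return result  # ~2.7130 for x = 1.0; the true value is e ~= 2.71828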
def log(self, input_in_01=False):
r"""
Approximates the natural logarithm using 8th order modified
Householder iterations. This approximation is accurate within 2% relative
error on [0.0001, 250].
Iterations are computed by: :math:`h = 1 - x * exp(-y_n)`
.. math::
y_{n+1} = y_n - \sum_k^{order}\frac{h^k}{k}
Args:
input_in_01 (bool) : Allows a user to indicate that the input is in the domain [0, 1],
causing the function optimize for this domain. This is useful for computing
log-probabilities for entropy functions.
We shift the domain of convergence by a constant :math:`a` using the following identity:
.. math::
\ln{u} = \ln {au} - \ln{a}
Since the domain of convergence for CrypTen's log() function is approximately [1e-4, 1e2],
we can set :math:`a=100`.
Configuration parameters:
iterations (int): number of Householder iterations for the approximation
exp_iterations (int): number of iterations for limit approximation of exp
order (int): number of polynomial terms used (order of Householder approx)
"""
if input_in_01:
return log(self.mul(100)) - 4.605170
# Initialization to a decent estimate (found by qualitative inspection):
# ln(x) = x/120 - 20exp(-2x - 1.0) + 3.0
iterations = cfg.functions.log_iterations
exp_iterations = cfg.functions.log_exp_iterations
order = cfg.functions.log_order
term1 = self.div(120)
term2 = exp(self.mul(2).add(1.0).neg()).mul(20)
y = term1 - term2 + 3.0
# 8th order Householder iterations
with cfg.temp_override({"functions.exp_iterations": exp_iterations}):
for _ in range(iterations):
h = 1 - self * exp(-y)
y -= h.polynomial([1 / (i + 1) for i in range(order)])
return y
def reciprocal(self, input_in_01=False):
r"""
Args:
input_in_01 (bool) : Allows a user to indicate that the input is in the range [0, 1],
causing the function optimize for this range. This is useful for improving
the accuracy of functions on probabilities (e.g. entropy functions).
Methods:
'NR' : `Newton-Raphson`_ method computes the reciprocal using iterations
of :math:`x_{i+1} = (2x_i - self * x_i^2)` and uses
:math:`3*exp(1 - 2x) + 0.003` as an initial guess by default
'log' : Computes the reciprocal of the input from the observation that:
:math:`x^{-1} = exp(-log(x))`
Configuration params:
reciprocal_method (str): One of 'NR' or 'log'.
reciprocal_nr_iters (int): determines the number of Newton-Raphson iterations to run
for the `NR` method
reciprocal_log_iters (int): determines the number of Householder
iterations to run when computing logarithms for the `log` method
reciprocal_all_pos (bool): determines whether all elements of the
input are known to be positive, which optimizes the step of
computing the sign of the input.
        reciprocal_initial (tensor): sets the initial value for the
            Newton-Raphson method. By default, this will be set to
            :math:`3*exp(1 - 2x) + 0.003` (matching the initialization used
            in the code below), which allows the method to converge over a
            fairly large domain.
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Newton%27s_method
"""
pos_override = {"functions.reciprocal_all_pos": True}
if input_in_01:
with cfg.temp_override(pos_override):
rec = reciprocal(self.mul(64)).mul(64)
return rec
# Get config options
method = cfg.functions.reciprocal_method
all_pos = cfg.functions.reciprocal_all_pos
initial = cfg.functions.reciprocal_initial
if not all_pos:
sgn = self.sign()
pos = sgn * self
with cfg.temp_override(pos_override):
return sgn * reciprocal(pos)
if method == "NR":
nr_iters = cfg.functions.reciprocal_nr_iters
if initial is None:
# Initialization to a decent estimate (found by qualitative inspection):
# 1/x = 3exp(1 - 2x) + 0.003
result = 3 * (1 - 2 * self).exp() + 0.003
else:
result = initial
for _ in range(nr_iters):
if hasattr(result, "square"):
result += result - result.square().mul_(self)
else:
result = 2 * result - result * result * self
return result
elif method == "log":
log_iters = cfg.functions.reciprocal_log_iters
with cfg.temp_override({"functions.log_iters": log_iters}):
return exp(-log(self))
else:
raise ValueError(f"Invalid method {method} given for reciprocal function")
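# Editor's note: plaintext sketch of the Newton-Raphson update used above for
# 1 / a, with the same initial guess as the code (plain Python floats).
def _reciprocal_nr_demo(a=4.0, iters=10):
    y = 3 * math.exp(1 - 2 * a) + 0.003  # initial guess, valid for a > 0
    for _ in range(iters):
        y = 2 * y - a * y * y  # x_{i+1} = 2 * x_i - a * x_i ** 2
    return y  # converges quadratically to 1 / a = 0.25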
def inv_sqrt(self):
r"""
Computes the inverse square root of the input using the Newton-Raphson method.
Configuration params:
sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run.
sqrt_nr_initial (tensor): sets the initial value for the Newton-Raphson iterations.
By default, this will be set to allow the method to converge over a
fairly large domain.
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method
"""
initial = cfg.functions.sqrt_nr_initial
iters = cfg.functions.sqrt_nr_iters
# Initialize using decent approximation
if initial is None:
y = exp(self.div(2).add(0.2).neg()).mul(2.2).add(0.2)
y -= self.div(1024)
else:
y = initial
# Newton Raphson iterations for inverse square root
for _ in range(iters):
y = y.mul_(3 - self * y.square()).div_(2)
return y
def sqrt(self):
r"""
Computes the square root of the input by computing its inverse square root using
the Newton-Raphson method and multiplying by the input.
Configuration params:
sqrt_nr_iters (int): determines the number of Newton-Raphson iterations to run
sqrt_initial (tensor): sets the initial value for the inverse square root
Newton-Raphson iterations. By default, this will be set to allow convergence
over a fairly large domain.
.. _Newton-Raphson:
https://en.wikipedia.org/wiki/Fast_inverse_square_root#Newton's_method
"""
return inv_sqrt(self).mul_(self)
def _eix(self):
r"""Computes e^(i * self) where i is the imaginary unit.
    Returns (Re{e^(i * self)}, Im{e^(i * self)}) = (cos(self), sin(self))
"""
iterations = cfg.functions.trig_iterations
re = 1
im = self.div(2**iterations)
# First iteration uses knowledge that `re` is public and = 1
re -= im.square()
im *= 2
# Compute (a + bi)^2 -> (a^2 - b^2) + (2ab)i `iterations` times
for _ in range(iterations - 1):
a2 = re.square()
b2 = im.square()
im = im.mul_(re)
im._tensor *= 2
re = a2 - b2
return re, im
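# Editor's note: plaintext sketch of the repeated squaring above using Python
# complex numbers: (1 + i * x / 2 ** d) ** (2 ** d) ~= e^(i * x).
def _eix_demo(x=1.0, iterations=10):
    z = complex(1.0, x / 2**iterations)
    for _ in range(iterations):
        z = z * z
    return z.real, z.imag  # ~= (cos(x), sin(x))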
def cossin(self):
r"""Computes cosine and sine of input via exp(i * x).
Args:
iterations (int): for approximating exp(i * x)
"""
return self._eix()
def cos(self):
r"""Computes the cosine of the input using cos(x) = Re{exp(i * x)}
Args:
iterations (int): for approximating exp(i * x)
"""
return cossin(self)[0]
def sin(self):
r"""Computes the sine of the input using sin(x) = Im{exp(i * x)}
Args:
iterations (int): for approximating exp(i * x)
"""
return cossin(self)[1]
# Logistic Functions
def sigmoid(self):
r"""Computes the sigmoid function using the following definition
.. math::
\sigma(x) = (1 + e^{-x})^{-1}
If a valid method is given, this function will compute sigmoid
using that method:
"chebyshev" - computes tanh via Chebyshev approximation with
truncation and uses the identity:
.. math::
\sigma(x) = \frac{1}{2}tanh(\frac{x}{2}) + \frac{1}{2}
"reciprocal" - computes sigmoid using :math:`1 + e^{-x}` and computing
the reciprocal
""" # noqa: W605
method = cfg.functions.sigmoid_tanh_method
if method == "chebyshev":
tanh_approx = tanh(self.div(2))
return tanh_approx.div(2) + 0.5
elif method == "reciprocal":
ltz = self._ltz()
sign = 1 - 2 * ltz
pos_input = self.mul(sign)
denominator = pos_input.neg().exp().add(1)
# TODO: Set these with configurable parameters
with cfg.temp_override(
{
"functions.exp_iterations": 9,
"functions.reciprocal_nr_iters": 3,
"functions.reciprocal_all_pos": True,
"functions.reciprocal_initial": 0.75,
}
):
pos_output = denominator.reciprocal()
result = pos_output.where(1 - ltz, 1 - pos_output)
# TODO: Support addition with different encoder scales
# result = pos_output + ltz - 2 * pos_output * ltz
return result
else:
raise ValueError(f"Unrecognized method {method} for sigmoid")
def tanh(self):
r"""Computes the hyperbolic tangent function using the identity
.. math::
tanh(x) = 2\sigma(2x) - 1
If a valid method is given, this function will compute tanh using that method:
"chebyshev" - computes tanh via Chebyshev approximation with truncation.
.. math::
        tanh(x) = \sum_{j=1}^{terms} c_{2j - 1} P_{2j - 1} (x / maxval)
where c_i is the ith Chebyshev series coefficient and P_i is ith polynomial.
The approximation is truncated to +/-1 outside [-1, 1].
Args:
terms (int): highest degree of Chebyshev polynomials.
Must be even and at least 6.
"""
method = cfg.functions.sigmoid_tanh_method
if method == "reciprocal":
return self.mul(2).sigmoid().mul(2).sub(1)
elif method == "chebyshev":
terms = cfg.functions.sigmoid_tanh_terms
coeffs = crypten.common.util.chebyshev_series(torch.tanh, 1, terms)[1::2]
tanh_polys = _chebyshev_polynomials(self, terms)
tanh_polys_flipped = (
tanh_polys.unsqueeze(dim=-1).transpose(0, -1).squeeze(dim=0)
)
out = tanh_polys_flipped.matmul(coeffs)
# truncate outside [-maxval, maxval]
return out.hardtanh()
else:
raise ValueError(f"Unrecognized method {method} for tanh")
def _chebyshev_polynomials(self, terms):
r"""Evaluates odd degree Chebyshev polynomials at x
Chebyshev Polynomials of the first kind are defined as
.. math::
P_0(x) = 1, P_1(x) = x, P_n(x) = 2 P_{n - 1}(x) - P_{n-2}(x)
Args:
self (MPCTensor): input at which polynomials are evaluated
terms (int): highest degree of Chebyshev polynomials.
Must be even and at least 6.
Returns:
MPCTensor of polynomials evaluated at self of shape `(terms, *self)`
"""
if terms % 2 != 0 or terms < 6:
raise ValueError("Chebyshev terms must be even and >= 6")
polynomials = [self.clone()]
y = 4 * self.square() - 2
z = y - 1
polynomials.append(z.mul(self))
for k in range(2, terms // 2):
next_polynomial = y * polynomials[k - 1] - polynomials[k - 2]
polynomials.append(next_polynomial)
return crypten.stack(polynomials)
def erf(tensor):
r"""
Approximates the error function of the input tensor using a Taylor approximation.
"""
iters = cfg.functions.erf_iterations
output = tensor.clone()
for n in range(1, iters + 1):
multiplier = ((-1) ** n) / (math.factorial(n) * (2 * n + 1))
output = output.add(tensor.pos_pow(2 * n + 1).mul(multiplier))
return output.mul(2.0 / math.sqrt(math.pi))
# NOTE: This approximation can be numerically unstable for inputs with large
# magnitude, despite the max-subtraction performed below.
def softmax(self, dim, **kwargs):
r"""Compute the softmax of a tensor's elements along a given dimension"""
# 0-d case
if self.dim() == 0:
assert dim == 0, "Improper dim argument"
return self.new(torch.ones_like((self.data)))
if self.size(dim) == 1:
return self.new(torch.ones_like(self.data))
maximum_value = self.max(dim, keepdim=True)[0]
logits = self - maximum_value
numerator = logits.exp()
with cfg.temp_override({"functions.reciprocal_all_pos": True}):
inv_denominator = numerator.sum(dim, keepdim=True).reciprocal()
return numerator * inv_denominator
def log_softmax(self, dim, **kwargs):
r"""Applies a softmax followed by a logarithm.
While mathematically equivalent to log(softmax(x)), doing these two
operations separately is slower, and numerically unstable. This function
uses an alternative formulation to compute the output and gradient correctly.
"""
# 0-d case
if self.dim() == 0:
assert dim == 0, "Improper dim argument"
return self.new(torch.zeros((), device=self.device))
if self.size(dim) == 1:
return self.new(torch.zeros_like(self.data))
maximum_value = self.max(dim, keepdim=True)[0]
logits = self - maximum_value
normalize_term = exp(logits).sum(dim, keepdim=True)
result = logits - normalize_term.log()
return result
| CrypTen-main | crypten/common/functions/approximations.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from .primitives import ArithmeticSharedTensor, BinarySharedTensor
class ptype(Enum):
"""Enumeration defining the private type attributes of encrypted tensors"""
arithmetic = 0
binary = 1
def to_tensor(self):
if self.value == 0:
return ArithmeticSharedTensor
elif self.value == 1:
return BinarySharedTensor
else:
raise ValueError("Cannot convert %s to encrypted tensor" % (self.name))
| CrypTen-main | crypten/mpc/ptype.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from crypten.config import cfg
from crypten.mpc import primitives, provider # noqa: F401
from .context import run_multiprocess
from .mpc import MPCTensor
from .ptype import ptype
__all__ = [
"MPCTensor",
"primitives",
"provider",
"ptype",
"run_multiprocess",
]
# the different private type attributes of an mpc encrypted tensor
arithmetic = ptype.arithmetic
binary = ptype.binary
# Set provider
__SUPPORTED_PROVIDERS = {
"TFP": provider.TrustedFirstParty(),
"TTP": provider.TrustedThirdParty(),
"HE": provider.HomomorphicProvider(),
}
def get_default_provider():
return __SUPPORTED_PROVIDERS[cfg.mpc.provider]
def ttp_required():
return cfg.mpc.provider == "TTP"
| CrypTen-main | crypten/mpc/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import logging
import multiprocessing
import os
import tempfile
from operator import itemgetter
import crypten
from crypten.communicator import DistributedCommunicator
def _launch(func, rank, world_size, rendezvous_file, queue, func_args, func_kwargs):
communicator_args = {
"WORLD_SIZE": world_size,
"RANK": rank,
"RENDEZVOUS": "file://%s" % rendezvous_file,
"DISTRIBUTED_BACKEND": "gloo",
}
for key, val in communicator_args.items():
os.environ[key] = str(val)
crypten.init()
return_value = func(*func_args, **func_kwargs)
crypten.uninit()
queue.put((rank, return_value))
def run_multiprocess(world_size, maxsize=None):
"""Defines decorator to run function across multiple processes
Args:
world_size (int): number of parties / processes to initiate.
maxsize: Enables the user to increase the size of returnable values
(See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue)
"""
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
rendezvous_file = tempfile.NamedTemporaryFile(delete=True).name
if maxsize is None:
queue = multiprocessing.Queue()
else:
queue = multiprocessing.Queue(maxsize)
processes = [
multiprocessing.Process(
target=_launch,
args=(func, rank, world_size, rendezvous_file, queue, args, kwargs),
)
for rank in range(world_size)
]
# Initialize TTP process
if crypten.mpc.ttp_required():
processes += [
multiprocessing.Process(
target=_launch,
args=(
crypten.mpc.provider.TTPServer,
world_size,
world_size,
rendezvous_file,
queue,
(),
{},
),
)
]
# This process will be forked and we need to re-initialize the
# communicator in the children. If the parent process happened to
# call crypten.init(), which might be valid in a Jupyter notebook
# for instance, then the crypten.init() call on the children
# process will not do anything. The call to uninit here makes sure
# we actually get to initialize the communicator on the child
# process. An alternative fix for this issue would be to use spawn
# instead of fork, but we run into issues serializing the function
# in that case.
was_initialized = DistributedCommunicator.is_initialized()
if was_initialized:
crypten.uninit()
for process in processes:
process.start()
for process in processes:
process.join()
if was_initialized:
crypten.init()
successful = [process.exitcode == 0 for process in processes]
if not all(successful):
logging.error("One of the parties failed. Check past logs")
return None
return_values = []
while not queue.empty():
return_values.append(queue.get())
return [value for _, value in sorted(return_values, key=itemgetter(0))]
return wrapper
return decorator
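# Editor's note: a minimal usage sketch of the decorator above. Each call
# forks `world_size` parties and returns their results ordered by rank (or
# None if any party failed). Relies on fork-based multiprocessing.
def _run_multiprocess_demo():
    import torch

    @run_multiprocess(world_size=2)
    def _double():
        x = crypten.cryptensor(torch.tensor([1.0, 2.0]))
        return (x + x).get_plain_text()

    return _double()  # -> [tensor([2., 4.]), tensor([2., 4.])]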
| CrypTen-main | crypten/mpc/context.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from crypten import communicator as comm
from crypten.common.tensor_types import is_tensor
from crypten.common.util import torch_stack
from crypten.config import cfg
from crypten.cuda import CUDALongTensor
from ..cryptensor import CrypTensor
from ..encoder import FixedPointEncoder
from .primitives.binary import BinarySharedTensor
from .primitives.converters import convert
from .ptype import ptype as Ptype
@CrypTensor.register_cryptensor("mpc")
class MPCTensor(CrypTensor):
def __init__(self, tensor, ptype=Ptype.arithmetic, device=None, *args, **kwargs):
"""
Creates the shared tensor from the input `tensor` provided by party `src`.
The `ptype` defines the type of sharing used (default: arithmetic).
The other parties can specify a `tensor` or `size` to determine the size
of the shared tensor object to create. In this case, all parties must
specify the same (tensor) size to prevent the party's shares from varying
in size, which leads to undefined behavior.
Alternatively, the parties can set `broadcast_size` to `True` to have the
`src` party broadcast the correct size. The parties who do not know the
tensor size beforehand can provide an empty tensor as input. This is
guaranteed to produce correct behavior but requires an additional
communication round.
The parties can also set the `precision` and `device` for their share of
the tensor. If `device` is unspecified, it is set to `tensor.device`.
"""
if tensor is None:
raise ValueError("Cannot initialize tensor with None.")
# take required_grad from kwargs, input tensor, or set to False:
default = tensor.requires_grad if torch.is_tensor(tensor) else False
requires_grad = kwargs.pop("requires_grad", default)
# call CrypTensor constructor:
super().__init__(requires_grad=requires_grad)
if device is None and hasattr(tensor, "device"):
device = tensor.device
# create the MPCTensor:
tensor_type = ptype.to_tensor()
        # `tensor is []` is always False (identity test against a fresh list
        # literal); test for an empty list explicitly instead
        if isinstance(tensor, list) and len(tensor) == 0:
self._tensor = torch.tensor([], device=device)
else:
self._tensor = tensor_type(tensor=tensor, device=device, *args, **kwargs)
self.ptype = ptype
@staticmethod
def new(*args, **kwargs):
"""
Creates a new MPCTensor, passing all args and kwargs into the constructor.
"""
return MPCTensor(*args, **kwargs)
@staticmethod
def from_shares(share, precision=None, ptype=Ptype.arithmetic):
result = MPCTensor([])
from_shares = ptype.to_tensor().from_shares
result._tensor = from_shares(share, precision=precision)
result.ptype = ptype
return result
def clone(self):
"""Create a deep copy of the input tensor."""
# TODO: Rename this to __deepcopy__()?
result = MPCTensor([])
result._tensor = self._tensor.clone()
result.ptype = self.ptype
return result
def shallow_copy(self):
"""Create a shallow copy of the input tensor."""
# TODO: Rename this to __copy__()?
result = MPCTensor([])
result._tensor = self._tensor
result.ptype = self.ptype
return result
def copy_(self, other):
"""Copies value of other MPCTensor into this MPCTensor."""
assert isinstance(other, MPCTensor), "other must be MPCTensor"
self._tensor.copy_(other._tensor)
self.ptype = other.ptype
def to(self, *args, **kwargs):
r"""
Depending on the input arguments,
converts underlying share to the given ptype or
performs `torch.to` on the underlying torch tensor
To convert underlying share to the given ptype, call `to` as:
to(ptype, **kwargs)
It will call MPCTensor.to_ptype with the arguments provided above.
Otherwise, `to` performs `torch.to` on the underlying
torch tensor. See
https://pytorch.org/docs/stable/tensors.html?highlight=#torch.Tensor.to
for a reference of the parameters that can be passed in.
Args:
ptype: Ptype.arithmetic or Ptype.binary.
"""
if "ptype" in kwargs:
return self._to_ptype(**kwargs)
elif args and isinstance(args[0], Ptype):
ptype = args[0]
return self._to_ptype(ptype, **kwargs)
else:
share = self.share.to(*args, **kwargs)
if share.is_cuda:
share = CUDALongTensor(share)
self.share = share
return self
def _to_ptype(self, ptype, **kwargs):
r"""
Convert MPCTensor's underlying share to the corresponding ptype
(ArithmeticSharedTensor, BinarySharedTensor)
Args:
ptype (Ptype.arithmetic or Ptype.binary): The ptype to convert
the shares to.
precision (int, optional): Precision of the fixed point encoder when
converting a binary share to an arithmetic share. It will be ignored
if the ptype doesn't match.
bits (int, optional): If specified, will only preserve the bottom `bits` bits
of a binary tensor when converting from a binary share to an arithmetic share.
It will be ignored if the ptype doesn't match.
"""
retval = self.clone()
if retval.ptype == ptype:
return retval
retval._tensor = convert(self._tensor, ptype, **kwargs)
retval.ptype = ptype
return retval
@property
def device(self):
"""Return the `torch.device` of the underlying share"""
return self.share.device
@property
def is_cuda(self):
"""Return True if the underlying share is stored on GPU, False otherwise"""
return self.share.is_cuda
def cuda(self, *args, **kwargs):
"""Call `torch.Tensor.cuda` on the underlying share"""
self.share = CUDALongTensor(self.share.cuda(*args, **kwargs))
return self
def cpu(self):
"""Call `torch.Tensor.cpu` on the underlying share"""
self.share = self.share.cpu()
return self
def get_plain_text(self, dst=None):
"""Decrypts the tensor."""
return self._tensor.get_plain_text(dst=dst)
def reveal(self, dst=None):
"""Decrypts the tensor without any downscaling."""
return self._tensor.reveal(dst=dst)
def __repr__(self):
"""Returns a representation of the tensor useful for debugging."""
debug_mode = cfg.debug.debug_mode
share = self.share
plain_text = self._tensor.get_plain_text() if debug_mode else "HIDDEN"
ptype = self.ptype
return (
f"MPCTensor(\n\t_tensor={share}\n"
f"\tplain_text={plain_text}\n\tptype={ptype}\n)"
)
def __hash__(self):
return hash(self.share)
@property
def share(self):
"""Returns underlying share"""
return self._tensor.share
@share.setter
def share(self, value):
"""Sets share to value"""
self._tensor.share = value
@property
def encoder(self):
"""Returns underlying encoder"""
return self._tensor.encoder
@encoder.setter
def encoder(self, value):
"""Sets encoder to value"""
self._tensor.encoder = value
@staticmethod
def rand(*sizes, device=None):
"""
Returns a tensor with elements uniformly sampled in [0, 1). The uniform
random samples are generated by generating random bits using fixed-point
encoding and converting the result to an ArithmeticSharedTensor.
"""
rand = MPCTensor([])
encoder = FixedPointEncoder()
rand._tensor = BinarySharedTensor.rand(
*sizes, bits=encoder._precision_bits, device=device
)
rand._tensor.encoder = encoder
rand.ptype = Ptype.binary
return rand.to(Ptype.arithmetic, bits=encoder._precision_bits)
# Comparators
def _ltz(self):
"""Returns 1 for elements that are < 0 and 0 otherwise"""
shift = torch.iinfo(torch.long).bits - 1
precision = 0 if self.encoder.scale == 1 else None
result = self._to_ptype(Ptype.binary)
result.share >>= shift
result = result._to_ptype(Ptype.arithmetic, precision=precision, bits=1)
result.encoder._scale = 1
return result
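    # Editor's note: plaintext sketch of the sign-bit extraction in `_ltz`,
    # illustrated with Python integers (hypothetical helper, not CrypTen API):
    # for 64-bit two's-complement values, the top bit is 1 exactly when the
    # value is negative.
    @staticmethod
    def _ltz_plaintext_demo(value=-5):
        shift = torch.iinfo(torch.long).bits - 1  # 63
        as_unsigned = value % 2**64  # two's-complement bit pattern
        return (as_unsigned >> shift) & 1  # 1 if value < 0 else 0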
def eq(self, y):
"""Returns self == y"""
if comm.get().get_world_size() == 2:
return (self - y)._eqz_2PC()
return 1 - self.ne(y)
def ne(self, y):
"""Returns self != y"""
if comm.get().get_world_size() == 2:
return 1 - self.eq(y)
difference = self - y
difference.share = torch_stack([difference.share, -(difference.share)])
return difference._ltz().sum(0)
def _eqz_2PC(self):
"""Returns self == 0"""
# Create BinarySharedTensors from shares
x0 = MPCTensor(self.share, src=0, ptype=Ptype.binary)
x1 = MPCTensor(-self.share, src=1, ptype=Ptype.binary)
# Perform equality testing using binary shares
x0._tensor = x0._tensor.eq(x1._tensor)
x0.encoder = self.encoder
# Convert to Arithmetic sharing
result = x0.to(Ptype.arithmetic, bits=1)
result.encoder._scale = 1
return result
def div(self, y):
r"""Divides each element of :attr:`self` with the scalar :attr:`y` or
each element of the tensor :attr:`y` and returns a new resulting tensor.
For `y` a scalar:
.. math::
\text{out}_i = \frac{\text{self}_i}{\text{y}}
For `y` a tensor:
.. math::
\text{out}_i = \frac{\text{self}_i}{\text{y}_i}
Note for :attr:`y` a tensor, the shapes of :attr:`self` and :attr:`y` must be
`broadcastable`_.
.. _broadcastable:
https://pytorch.org/docs/stable/notes/broadcasting.html#broadcasting-semantics""" # noqa: B950
result = self.clone()
if isinstance(y, CrypTensor):
result.share = torch.broadcast_tensors(result.share, y.share)[0].clone()
elif is_tensor(y):
result.share = torch.broadcast_tensors(result.share, y)[0].clone()
if isinstance(y, MPCTensor):
return result.mul(y.reciprocal())
result._tensor.div_(y)
return result
UNARY_FUNCTIONS = [
"avg_pool2d",
"square",
"neg",
]
BINARY_FUNCTIONS = [
"add",
"sub",
"mul",
"matmul",
"conv1d",
"conv2d",
"conv_transpose1d",
"conv_transpose2d",
]
def _add_unary_passthrough_function(name):
def unary_wrapper_function(self, *args, **kwargs):
result = self.shallow_copy()
result._tensor = getattr(result._tensor, name)(*args, **kwargs)
return result
setattr(MPCTensor, name, unary_wrapper_function)
def _add_binary_passthrough_function(name):
def binary_wrapper_function(self, value, *args, **kwargs):
result = self.shallow_copy()
if isinstance(value, MPCTensor):
value = value._tensor
result._tensor = getattr(result._tensor, name)(value, *args, **kwargs)
return result
setattr(MPCTensor, name, binary_wrapper_function)
for func_name in UNARY_FUNCTIONS:
_add_unary_passthrough_function(func_name)
for func_name in BINARY_FUNCTIONS:
_add_binary_passthrough_function(func_name)
| CrypTen-main | crypten/mpc/mpc.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten.communicator as comm
import torch
from crypten.common.rng import generate_kbit_random_tensor, generate_random_ring_element
from crypten.common.util import count_wraps, torch_stack
from crypten.mpc.primitives import ArithmeticSharedTensor, BinarySharedTensor
from .provider import TupleProvider
class TrustedFirstParty(TupleProvider):
NAME = "TFP"
def generate_additive_triple(self, size0, size1, op, device=None, *args, **kwargs):
"""Generate multiplicative triples of given sizes"""
a = generate_random_ring_element(size0, device=device)
b = generate_random_ring_element(size1, device=device)
c = getattr(torch, op)(a, b, *args, **kwargs)
a = ArithmeticSharedTensor(a, precision=0, src=0)
b = ArithmeticSharedTensor(b, precision=0, src=0)
c = ArithmeticSharedTensor(c, precision=0, src=0)
return a, b, c
def square(self, size, device=None):
"""Generate square double of given size"""
r = generate_random_ring_element(size, device=device)
r2 = r.mul(r)
# Stack to vectorize scatter function
stacked = torch_stack([r, r2])
stacked = ArithmeticSharedTensor(stacked, precision=0, src=0)
return stacked[0], stacked[1]
def generate_binary_triple(self, size0, size1, device=None):
"""Generate xor triples of given size"""
a = generate_kbit_random_tensor(size0, device=device)
b = generate_kbit_random_tensor(size1, device=device)
c = a & b
a = BinarySharedTensor(a, src=0)
b = BinarySharedTensor(b, src=0)
c = BinarySharedTensor(c, src=0)
return a, b, c
def wrap_rng(self, size, device=None):
"""Generate random shared tensor of given size and sharing of its wraps"""
num_parties = comm.get().get_world_size()
r = [
generate_random_ring_element(size, device=device)
for _ in range(num_parties)
]
theta_r = count_wraps(r)
shares = comm.get().scatter(r, 0)
r = ArithmeticSharedTensor.from_shares(shares, precision=0)
theta_r = ArithmeticSharedTensor(theta_r, precision=0, src=0)
return r, theta_r
def B2A_rng(self, size, device=None):
"""Generate random bit tensor as arithmetic and binary shared tensors"""
# generate random bit
r = generate_kbit_random_tensor(size, bitlength=1, device=device)
rA = ArithmeticSharedTensor(r, precision=0, src=0)
rB = BinarySharedTensor(r, src=0)
return rA, rB
| CrypTen-main | crypten/mpc/provider/tfp_provider.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import crypten
import crypten.communicator as comm
import torch
class TupleProvider:
TRACEABLE_FUNCTIONS = [
"generate_additive_triple",
"square",
"generate_binary_triple",
"wrap_rng",
"B2A_rng",
]
_DEFAULT_CACHE_PATH = os.path.normpath(os.path.join(__file__, "../tuple_cache/"))
def __init__(self):
self.tracing = False
self.request_cache = []
self.tuple_cache = {}
@property
def rank(self):
return comm.get().get_rank()
def _get_request_path(self, prefix=None):
if prefix is None:
prefix = self._DEFAULT_CACHE_PATH
return prefix + f"/request_cache-{self.rank}"
def _get_tuple_path(self, prefix=None):
if prefix is None:
prefix = self._DEFAULT_CACHE_PATH
return prefix + f"/tuple_cache-{self.rank}"
def trace(self, tracing=True):
"""Sets tracing attribute.
When tracing is True, provider caches all tuple requests.
When tracing is False, provider attempts to load tuples from cache.
"""
self.tracing = tracing
def trace_once(self):
"""Sets tracing attribute True only if the request cache is empty.
If `trace_once()` is called again, it sets tracing attribute to False
"""
        untraced = len(self.request_cache) == 0
self.trace(tracing=untraced)
def _save_requests(self, filepath=None):
# TODO: Deal with any overwrite issues
if len(self.request_cache) == 0:
crypten.log("Request cache not saved - cache is empty")
return
filepath = self._get_request_path(prefix=filepath)
torch.save(self.request_cache, filepath)
self.request_cache = []
def _load_requests(self, filepath=None):
filepath = self._get_request_path(prefix=filepath)
if os.path.exists(filepath):
self.request_cache = torch.load(filepath)
os.remove(filepath)
else:
crypten.log(f"Cache requests not loaded - File `{filepath}` not found")
def _save_tuples(self, filepath=None):
# TODO: Deal with any overwrite issues
if len(self.tuple_cache) == 0:
crypten.log("Tuple cache not saved - cache is empty")
return
filepath = self._get_tuple_path(prefix=filepath)
torch.save(self.tuple_cache, filepath)
self.tuple_cache = {}
def _load_tuples(self, filepath=None):
filepath = self._get_tuple_path(prefix=filepath)
if os.path.exists(filepath):
self.tuple_cache = torch.load(filepath)
os.remove(filepath)
else:
crypten.log(f"Tuple cache not loaded - File `{filepath}` not found")
def save_cache(self, filepath=None):
"""Saves request and tuple cache to a file.
args:
filepath - base filepath for cache folder (default: "provider/tuple_cache/")
"""
self._save_requests(filepath=filepath)
self._save_tuples(filepath=filepath)
def load_cache(self, filepath=None):
"""Loads request and tuple cache from a file.
args:
filepath - base filepath for cache folder (default: "provider/tuple_cache/")
"""
self._load_requests(filepath=filepath)
self._load_tuples(filepath=filepath)
def __getattribute__(self, func_name):
"""Deals with caching logic"""
if func_name not in TupleProvider.TRACEABLE_FUNCTIONS:
return object.__getattribute__(self, func_name)
# Trace requests while tracing
if self.tracing:
def func_with_trace(*args, **kwargs):
request = (func_name, args, kwargs)
self.request_cache.append(request)
return object.__getattribute__(self, func_name)(*args, **kwargs)
return func_with_trace
# If the cache is empty, call function directly
if len(self.tuple_cache) == 0:
return object.__getattribute__(self, func_name)
# Return results from cache if available
def func_from_cache(*args, **kwargs):
hashable_kwargs = frozenset(kwargs.items())
request = (func_name, args, hashable_kwargs)
# Read from cache
if request in self.tuple_cache.keys():
return self.tuple_cache[request].pop()
# Cache miss
return object.__getattribute__(self, func_name)(*args, **kwargs)
return func_from_cache
def fill_cache(self):
"""Fills tuple_cache with tuples requested in the request_cache"""
# TODO: parallelize / async this
for request in self.request_cache:
func_name, args, kwargs = request
result = object.__getattribute__(self, func_name)(*args, **kwargs)
hashable_kwargs = frozenset(kwargs.items())
hashable_request = (func_name, args, hashable_kwargs)
if hashable_request in self.tuple_cache.keys():
self.tuple_cache[hashable_request].append(result)
else:
self.tuple_cache[hashable_request] = [result]
def generate_additive_triple(self, size0, size1, op, device=None, *args, **kwargs):
"""Generate multiplicative triples of given sizes"""
raise NotImplementedError(
"TupleProvider generate_additive_triple not implemented."
)
def square(self, size, device=None):
"""Generate square double of given size"""
raise NotImplementedError("TupleProvider square not implemented.")
def generate_binary_triple(self, size0, size1, device=None):
"""Generate xor triples of given size"""
raise NotImplementedError(
"TupleProvider generate_binary_triple not implemented."
)
def wrap_rng(self, size, device=None):
"""Generate random shared tensor of given size and sharing of its wraps"""
raise NotImplementedError("TupleProvider wrap_rng not implemented.")
def B2A_rng(self, size, device=None):
"""Generate random bit tensor as arithmetic and binary shared tensors"""
raise NotImplementedError("TupleProvider B2A_rng not implemented.")
| CrypTen-main | crypten/mpc/provider/provider.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import crypten
import crypten.communicator as comm
import torch
import torch.distributed as dist
from crypten.common.rng import generate_kbit_random_tensor, generate_random_ring_element
from crypten.common.util import count_wraps
from crypten.mpc.primitives import ArithmeticSharedTensor, BinarySharedTensor
from .provider import TupleProvider
TTP_FUNCTIONS = ["additive", "square", "binary", "wraps", "B2A"]
class TrustedThirdParty(TupleProvider):
NAME = "TTP"
def generate_additive_triple(self, size0, size1, op, device=None, *args, **kwargs):
"""Generate multiplicative triples of given sizes"""
generator = TTPClient.get().get_generator(device=device)
a = generate_random_ring_element(size0, generator=generator, device=device)
b = generate_random_ring_element(size1, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request c from TTP
c = TTPClient.get().ttp_request(
"additive", device, size0, size1, op, *args, **kwargs
)
else:
# TODO: Compute size without executing computation
c_size = getattr(torch, op)(a, b, *args, **kwargs).size()
c = generate_random_ring_element(c_size, generator=generator, device=device)
a = ArithmeticSharedTensor.from_shares(a, precision=0)
b = ArithmeticSharedTensor.from_shares(b, precision=0)
c = ArithmeticSharedTensor.from_shares(c, precision=0)
return a, b, c
def square(self, size, device=None):
"""Generate square double of given size"""
generator = TTPClient.get().get_generator(device=device)
r = generate_random_ring_element(size, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request r2 from TTP
r2 = TTPClient.get().ttp_request("square", device, size)
else:
r2 = generate_random_ring_element(size, generator=generator, device=device)
r = ArithmeticSharedTensor.from_shares(r, precision=0)
r2 = ArithmeticSharedTensor.from_shares(r2, precision=0)
return r, r2
def generate_binary_triple(self, size0, size1, device=None):
"""Generate binary triples of given size"""
generator = TTPClient.get().get_generator(device=device)
a = generate_kbit_random_tensor(size0, generator=generator, device=device)
b = generate_kbit_random_tensor(size1, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request c from TTP
c = TTPClient.get().ttp_request("binary", device, size0, size1)
else:
size2 = torch.broadcast_tensors(a, b)[0].size()
c = generate_kbit_random_tensor(size2, generator=generator, device=device)
# Stack to vectorize scatter function
a = BinarySharedTensor.from_shares(a)
b = BinarySharedTensor.from_shares(b)
c = BinarySharedTensor.from_shares(c)
return a, b, c
def wrap_rng(self, size, device=None):
"""Generate random shared tensor of given size and sharing of its wraps"""
generator = TTPClient.get().get_generator(device=device)
r = generate_random_ring_element(size, generator=generator, device=device)
if comm.get().get_rank() == 0:
# Request theta_r from TTP
theta_r = TTPClient.get().ttp_request("wraps", device, size)
else:
theta_r = generate_random_ring_element(
size, generator=generator, device=device
)
r = ArithmeticSharedTensor.from_shares(r, precision=0)
theta_r = ArithmeticSharedTensor.from_shares(theta_r, precision=0)
return r, theta_r
def B2A_rng(self, size, device=None):
"""Generate random bit tensor as arithmetic and binary shared tensors"""
generator = TTPClient.get().get_generator(device=device)
# generate random bit
rB = generate_kbit_random_tensor(
size, bitlength=1, generator=generator, device=device
)
if comm.get().get_rank() == 0:
# Request rA from TTP
rA = TTPClient.get().ttp_request("B2A", device, size)
else:
rA = generate_random_ring_element(size, generator=generator, device=device)
rA = ArithmeticSharedTensor.from_shares(rA, precision=0)
rB = BinarySharedTensor.from_shares(rB)
return rA, rB
@staticmethod
def _init():
TTPClient._init()
@staticmethod
def uninit():
TTPClient.uninit()
class TTPClient:
__instance = None
class __TTPClient:
"""Singleton class"""
def __init__(self):
# Initialize connection
self.ttp_group = comm.get().ttp_group
self.comm_group = comm.get().ttp_comm_group
self._setup_generators()
logging.info(f"TTPClient {comm.get().get_rank()} initialized")
def _setup_generators(self):
"""Setup RNG generator shared between each party (client) and the TTPServer"""
seed = torch.empty(size=(), dtype=torch.long)
dist.irecv(
tensor=seed, src=comm.get().get_ttp_rank(), group=self.ttp_group
).wait()
dist.barrier(group=self.ttp_group)
self.generator = torch.Generator(device="cpu")
self.generator.manual_seed(seed.item())
if torch.cuda.is_available():
self.generator_cuda = torch.Generator(device="cuda")
self.generator_cuda.manual_seed(seed.item())
else:
self.generator_cuda = None
def get_generator(self, device=None):
if device is None:
device = "cpu"
device = torch.device(device)
if device.type == "cuda":
return self.generator_cuda
else:
return self.generator
def ttp_request(self, func_name, device, *args, **kwargs):
assert (
comm.get().get_rank() == 0
), "Only party 0 communicates with the TTPServer"
if device is not None:
device = str(device)
message = {
"function": func_name,
"device": device,
"args": args,
"kwargs": kwargs,
}
ttp_rank = comm.get().get_ttp_rank()
comm.get().send_obj(message, ttp_rank, self.ttp_group)
size = comm.get().recv_obj(ttp_rank, self.ttp_group)
result = torch.empty(size, dtype=torch.long, device=device)
comm.get().broadcast(result, ttp_rank, self.comm_group)
return result
@staticmethod
def _init():
"""Initializes a Trusted Third Party client that sends requests"""
if TTPClient.__instance is None:
TTPClient.__instance = TTPClient.__TTPClient()
@staticmethod
def uninit():
"""Uninitializes a Trusted Third Party client"""
del TTPClient.__instance
TTPClient.__instance = None
@staticmethod
def get():
"""Returns the instance of the TTPClient"""
if TTPClient.__instance is None:
raise RuntimeError("TTPClient is not initialized")
return TTPClient.__instance
class TTPServer:
TERMINATE = -1
def __init__(self):
"""Initializes a Trusted Third Party server that receives requests"""
# Initialize connection
crypten.init()
self.ttp_group = comm.get().ttp_group
self.comm_group = comm.get().ttp_comm_group
self.device = "cpu"
self._setup_generators()
ttp_rank = comm.get().get_ttp_rank()
logging.info("TTPServer Initialized")
try:
while True:
# Wait for next request from client
message = comm.get().recv_obj(0, self.ttp_group)
logging.info("Message received: %s" % message)
if message == "terminate":
logging.info("TTPServer shutting down.")
return
function = message["function"]
device = message["device"]
args = message["args"]
kwargs = message["kwargs"]
self.device = device
result = getattr(self, function)(*args, **kwargs)
comm.get().send_obj(result.size(), 0, self.ttp_group)
comm.get().broadcast(result, ttp_rank, self.comm_group)
except RuntimeError as err:
logging.info("Encountered Runtime error. TTPServer shutting down:")
logging.info(f"{err}")
def _setup_generators(self):
"""Create random generator to send to a party"""
ws = comm.get().get_world_size()
seeds = [torch.randint(-(2**63), 2**63 - 1, size=()) for _ in range(ws)]
reqs = [
dist.isend(tensor=seeds[i], dst=i, group=self.ttp_group) for i in range(ws)
]
self.generators = [torch.Generator(device="cpu") for _ in range(ws)]
self.generators_cuda = [
(torch.Generator(device="cuda") if torch.cuda.is_available() else None)
for _ in range(ws)
]
for i in range(ws):
self.generators[i].manual_seed(seeds[i].item())
if torch.cuda.is_available():
self.generators_cuda[i].manual_seed(seeds[i].item())
reqs[i].wait()
dist.barrier(group=self.ttp_group)
def _get_generators(self, device=None):
if device is None:
device = "cpu"
device = torch.device(device)
if device.type == "cuda":
return self.generators_cuda
else:
return self.generators
def _get_additive_PRSS(self, size, remove_rank=False):
"""
Generates a plaintext value from a set of random additive secret shares
generated by each party
"""
gens = self._get_generators(device=self.device)
if remove_rank:
gens = gens[1:]
result = None
for idx, g in enumerate(gens):
elem = generate_random_ring_element(size, generator=g, device=g.device)
result = elem if idx == 0 else result + elem
return result
def _get_binary_PRSS(self, size, bitlength=None, remove_rank=None):
"""
Generates a plaintext value from a set of random binary secret shares
generated by each party
"""
gens = self._get_generators(device=self.device)
if remove_rank:
gens = gens[1:]
result = None
for idx, g in enumerate(gens):
elem = generate_kbit_random_tensor(
size, bitlength=bitlength, generator=g, device=g.device
)
result = elem if idx == 0 else result ^ elem
return result
def additive(self, size0, size1, op, *args, **kwargs):
# Add all shares of `a` and `b` to get plaintext `a` and `b`
a = self._get_additive_PRSS(size0)
b = self._get_additive_PRSS(size1)
c = getattr(torch, op)(a, b, *args, **kwargs)
# Subtract all other shares of `c` from plaintext value of `c` to get `c0`
c0 = c - self._get_additive_PRSS(c.size(), remove_rank=True)
return c0
def square(self, size):
# Add all shares of `r` to get plaintext `r`
r = self._get_additive_PRSS(size)
r2 = r.mul(r)
return r2 - self._get_additive_PRSS(size, remove_rank=True)
def binary(self, size0, size1):
# xor all shares of `a` and `b` to get plaintext `a` and `b`
a = self._get_binary_PRSS(size0)
b = self._get_binary_PRSS(size1)
c = a & b
        # XOR the plaintext value of `c` with all other parties' shares of `c` to get `c0`
c0 = c ^ self._get_binary_PRSS(c.size(), remove_rank=True)
return c0
def wraps(self, size):
r = [generate_random_ring_element(size, generator=g) for g in self.generators]
theta_r = count_wraps(r)
return theta_r - self._get_additive_PRSS(size, remove_rank=True)
def B2A(self, size):
rB = self._get_binary_PRSS(size, bitlength=1)
# Subtract all other shares of `rA` from plaintext value of `rA`
rA = rB - self._get_additive_PRSS(size, remove_rank=True)
return rA
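# ---------------------------------------------------------------------------
# Illustrative sketch (not part of CrypTen; `_demo_share_completion` is a
# hypothetical helper). The `remove_rank` pattern above works because the TTP
# sends party 0 the value c0 = c - (s_1 + ... + s_{n-1}), while every other
# party i derives its share s_i locally from the generator it seeds jointly
# with the TTP, so all shares still sum to the plaintext c:
def _demo_share_completion():
    import torch

    n = 3
    c = torch.tensor([42, -7])  # plaintext the TTP wants to share out
    # shares that parties 1..n-1 would derive from their shared seeds
    others = [torch.randint(-100, 100, c.size()) for _ in range(n - 1)]
    c0 = c - sum(others)  # the only value the TTP must send explicitly
    assert torch.equal(c0 + sum(others), c)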
| CrypTen-main | crypten/mpc/provider/ttp_provider.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .provider import TupleProvider
class HomomorphicProvider(TupleProvider):
NAME = "HE"
def generate_additive_triple(self, size0, size1, op, *args, **kwargs):
"""Generate multiplicative triples of given sizes"""
raise NotImplementedError("HomomorphicProvider not implemented")
def square(self, size):
"""Generate square double of given size"""
raise NotImplementedError("HomomorphicProvider not implemented")
def generate_xor_triple(self, size0, size1):
"""Generate xor triples of given size"""
raise NotImplementedError("HomomorphicProvider not implemented")
def wrap_rng(self, size, num_parties):
"""Generate random shared tensor of given size and sharing of its wraps"""
raise NotImplementedError("HomomorphicProvider not implemented")
def B2A_rng(self, size):
"""Generate random bit tensor as arithmetic and binary shared tensors"""
raise NotImplementedError("HomomorphicProvider not implemented")
| CrypTen-main | crypten/mpc/provider/homomorphic_provider.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .homomorphic_provider import HomomorphicProvider
from .tfp_provider import TrustedFirstParty
from .ttp_provider import TrustedThirdParty, TTPServer
__all__ = ["TrustedFirstParty", "TrustedThirdParty", "TTPServer", "HomomorphicProvider"]
| CrypTen-main | crypten/mpc/provider/__init__.py |
| CrypTen-main | crypten/mpc/provider/tuple_cache/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten.communicator as comm
# dependencies:
import torch
from crypten.common.functions import regular
from crypten.common.rng import generate_random_ring_element
from crypten.common.tensor_types import is_float_tensor, is_int_tensor, is_tensor
from crypten.common.util import torch_stack
from crypten.config import cfg
from crypten.cryptensor import CrypTensor
from crypten.cuda import CUDALongTensor
from crypten.encoder import FixedPointEncoder
from . import beaver, replicated # noqa: F401
SENTINEL = -1
# MPC tensor where shares are additive-sharings.
class ArithmeticSharedTensor:
"""
Encrypted tensor object that uses additive sharing to perform computations.
Additive shares are computed by splitting each value of the input tensor
into n separate random values that add to the input tensor, where n is
the number of parties present in the protocol (world_size).
"""
# constructors:
def __init__(
self,
tensor=None,
size=None,
broadcast_size=False,
precision=None,
src=0,
device=None,
):
"""
Creates the shared tensor from the input `tensor` provided by party `src`.
The other parties can specify a `tensor` or `size` to determine the size
of the shared tensor object to create. In this case, all parties must
specify the same (tensor) size to prevent the party's shares from varying
in size, which leads to undefined behavior.
Alternatively, the parties can set `broadcast_size` to `True` to have the
`src` party broadcast the correct size. The parties who do not know the
tensor size beforehand can provide an empty tensor as input. This is
guaranteed to produce correct behavior but requires an additional
communication round.
The parties can also set the `precision` and `device` for their share of
the tensor. If `device` is unspecified, it is set to `tensor.device`.
"""
# do nothing if source is sentinel:
if src == SENTINEL:
return
# assertions on inputs:
assert (
isinstance(src, int) and src >= 0 and src < comm.get().get_world_size()
), "specified source party does not exist"
if self.rank == src:
assert tensor is not None, "source must provide a data tensor"
if hasattr(tensor, "src"):
assert (
tensor.src == src
), "source of data tensor must match source of encryption"
if not broadcast_size:
assert (
tensor is not None or size is not None
), "must specify tensor or size, or set broadcast_size"
# if device is unspecified, try and get it from tensor:
if device is None and tensor is not None and hasattr(tensor, "device"):
device = tensor.device
# encode the input tensor:
self.encoder = FixedPointEncoder(precision_bits=precision)
if tensor is not None:
if is_int_tensor(tensor) and precision != 0:
tensor = tensor.float()
tensor = self.encoder.encode(tensor)
tensor = tensor.to(device=device)
size = tensor.size()
# if other parties do not know tensor's size, broadcast the size:
if broadcast_size:
size = comm.get().broadcast_obj(size, src)
# generate pseudo-random zero sharing (PRZS) and add source's tensor:
self.share = ArithmeticSharedTensor.PRZS(size, device=device).share
if self.rank == src:
self.share += tensor
@staticmethod
def new(*args, **kwargs):
"""
Creates a new ArithmeticSharedTensor, passing all args and kwargs into the constructor.
"""
return ArithmeticSharedTensor(*args, **kwargs)
@property
def device(self):
"""Return the `torch.device` of the underlying _tensor"""
return self._tensor.device
@property
def is_cuda(self):
"""Return True if the underlying _tensor is stored on GPU, False otherwise"""
return self._tensor.is_cuda
def to(self, *args, **kwargs):
"""Call `torch.Tensor.to` on the underlying _tensor"""
self._tensor = self._tensor.to(*args, **kwargs)
return self
def cuda(self, *args, **kwargs):
"""Call `torch.Tensor.cuda` on the underlying _tensor"""
self._tensor = CUDALongTensor(self._tensor.cuda(*args, **kwargs))
return self
def cpu(self, *args, **kwargs):
"""Call `torch.Tensor.cpu` on the underlying _tensor"""
self._tensor = self._tensor.cpu(*args, **kwargs)
return self
@property
def share(self):
"""Returns underlying _tensor"""
return self._tensor
@share.setter
def share(self, value):
"""Sets _tensor to value"""
self._tensor = value
@staticmethod
def from_shares(share, precision=None, device=None):
"""Generate an ArithmeticSharedTensor from a share from each party"""
result = ArithmeticSharedTensor(src=SENTINEL)
share = share.to(device) if device is not None else share
result.share = CUDALongTensor(share) if share.is_cuda else share
result.encoder = FixedPointEncoder(precision_bits=precision)
return result
@staticmethod
def PRZS(*size, device=None):
"""
Generate a Pseudo-random Sharing of Zero (using arithmetic shares)
This function does so by generating `n` numbers across `n` parties with
each number being held by exactly 2 parties. One of these parties adds
this number while the other subtracts this number.
"""
from crypten import generators
tensor = ArithmeticSharedTensor(src=SENTINEL)
if device is None:
device = torch.device("cpu")
elif isinstance(device, str):
device = torch.device(device)
g0 = generators["prev"][device]
g1 = generators["next"][device]
current_share = generate_random_ring_element(*size, generator=g0, device=device)
next_share = generate_random_ring_element(*size, generator=g1, device=device)
tensor.share = current_share - next_share
return tensor
@staticmethod
def PRSS(*size, device=None):
"""
Generates a Pseudo-random Secret Share from a set of random arithmetic shares
"""
share = generate_random_ring_element(*size, device=device)
tensor = ArithmeticSharedTensor.from_shares(share=share)
return tensor
@property
def rank(self):
return comm.get().get_rank()
def shallow_copy(self):
"""Create a shallow copy"""
result = ArithmeticSharedTensor(src=SENTINEL)
result.encoder = self.encoder
result._tensor = self._tensor
return result
def clone(self):
result = ArithmeticSharedTensor(src=SENTINEL)
result.encoder = self.encoder
result._tensor = self._tensor.clone()
return result
def copy_(self, other):
"""Copies other tensor into this tensor."""
self.share.copy_(other.share)
self.encoder = other.encoder
def __repr__(self):
return f"ArithmeticSharedTensor({self.share})"
def __bool__(self):
"""Override bool operator since encrypted tensors cannot evaluate"""
raise RuntimeError("Cannot evaluate ArithmeticSharedTensors to boolean values")
def __nonzero__(self):
"""__bool__ for backwards compatibility with Python 2"""
raise RuntimeError("Cannot evaluate ArithmeticSharedTensors to boolean values")
def __setitem__(self, index, value):
"""Set tensor values by index"""
if isinstance(value, (int, float)) or is_tensor(value):
value = ArithmeticSharedTensor(value)
assert isinstance(
value, ArithmeticSharedTensor
), "Unsupported input type %s for __setitem__" % type(value)
self.share.__setitem__(index, value.share)
def pad(self, pad, mode="constant", value=0):
"""
Pads the input tensor with values provided in `value`.
"""
assert mode == "constant", (
"Padding with mode %s is currently unsupported" % mode
)
result = self.shallow_copy()
if isinstance(value, (int, float)):
value = self.encoder.encode(value).item()
if result.rank == 0:
result.share = torch.nn.functional.pad(
result.share, pad, mode=mode, value=value
)
else:
result.share = torch.nn.functional.pad(
result.share, pad, mode=mode, value=0
)
elif isinstance(value, ArithmeticSharedTensor):
assert (
value.dim() == 0
), "Private values used for padding must be 0-dimensional"
value = value.share.item()
result.share = torch.nn.functional.pad(
result.share, pad, mode=mode, value=value
)
else:
raise TypeError(
"Cannot pad ArithmeticSharedTensor with a %s value" % type(value)
)
return result
@staticmethod
def stack(tensors, *args, **kwargs):
"""Perform tensor stacking"""
for i, tensor in enumerate(tensors):
if is_tensor(tensor):
tensors[i] = ArithmeticSharedTensor(tensor)
assert isinstance(
tensors[i], ArithmeticSharedTensor
), "Can't stack %s with ArithmeticSharedTensor" % type(tensor)
result = tensors[0].shallow_copy()
result.share = torch_stack(
[tensor.share for tensor in tensors], *args, **kwargs
)
return result
@staticmethod
def reveal_batch(tensor_or_list, dst=None):
"""Get (batched) plaintext without any downscaling"""
if isinstance(tensor_or_list, ArithmeticSharedTensor):
return tensor_or_list.reveal(dst=dst)
assert isinstance(
tensor_or_list, list
), f"Invalid input type into reveal {type(tensor_or_list)}"
shares = [tensor.share for tensor in tensor_or_list]
if dst is None:
return comm.get().all_reduce(shares, batched=True)
else:
return comm.get().reduce(shares, dst, batched=True)
def reveal(self, dst=None):
"""Decrypts the tensor without any downscaling."""
tensor = self.share.clone()
if dst is None:
return comm.get().all_reduce(tensor)
else:
return comm.get().reduce(tensor, dst)
def get_plain_text(self, dst=None):
"""Decrypts the tensor."""
# Edge case where share becomes 0 sized (e.g. result of split)
if self.nelement() < 1:
return torch.empty(self.share.size())
return self.encoder.decode(self.reveal(dst=dst))
def encode_(self, new_encoder):
"""Rescales the input to a new encoding in-place"""
if self.encoder.scale == new_encoder.scale:
return self
elif self.encoder.scale < new_encoder.scale:
scale_factor = new_encoder.scale // self.encoder.scale
self.share *= scale_factor
else:
scale_factor = self.encoder.scale // new_encoder.scale
self = self.div_(scale_factor)
self.encoder = new_encoder
return self
def encode(self, new_encoder):
"""Rescales the input to a new encoding"""
return self.clone().encode_(new_encoder)
def encode_as_(self, other):
"""Rescales self to have the same encoding as other"""
return self.encode_(other.encoder)
def encode_as(self, other):
return self.encode(other.encoder)
def _arithmetic_function_(self, y, op, *args, **kwargs):
return self._arithmetic_function(y, op, inplace=True, *args, **kwargs)
def _arithmetic_function(self, y, op, inplace=False, *args, **kwargs): # noqa:C901
assert op in [
"add",
"sub",
"mul",
"matmul",
"conv1d",
"conv2d",
"conv_transpose1d",
"conv_transpose2d",
], f"Provided op `{op}` is not a supported arithmetic function"
additive_func = op in ["add", "sub"]
public = isinstance(y, (int, float)) or is_tensor(y)
private = isinstance(y, ArithmeticSharedTensor)
if inplace:
result = self
if additive_func or (op == "mul" and public):
op += "_"
else:
result = self.clone()
if public:
y = result.encoder.encode(y, device=self.device)
if additive_func: # ['add', 'sub']
if result.rank == 0:
result.share = getattr(result.share, op)(y)
else:
result.share = torch.broadcast_tensors(result.share, y)[0]
elif op == "mul_": # ['mul_']
result.share = result.share.mul_(y)
else: # ['mul', 'matmul', 'convNd', 'conv_transposeNd']
result.share = getattr(torch, op)(result.share, y, *args, **kwargs)
elif private:
if additive_func: # ['add', 'sub', 'add_', 'sub_']
# Re-encode if necessary:
if self.encoder.scale > y.encoder.scale:
y.encode_as_(result)
elif self.encoder.scale < y.encoder.scale:
result.encode_as_(y)
result.share = getattr(result.share, op)(y.share)
else: # ['mul', 'matmul', 'convNd', 'conv_transposeNd']
protocol = globals()[cfg.mpc.protocol]
result.share.set_(
getattr(protocol, op)(result, y, *args, **kwargs).share.data
)
else:
raise TypeError("Cannot %s %s with %s" % (op, type(y), type(self)))
# Scale by encoder scale if necessary
if not additive_func:
if public: # scale by self.encoder.scale
if self.encoder.scale > 1:
return result.div_(result.encoder.scale)
else:
result.encoder = self.encoder
else: # scale by larger of self.encoder.scale and y.encoder.scale
if self.encoder.scale > 1 and y.encoder.scale > 1:
return result.div_(result.encoder.scale)
elif self.encoder.scale > 1:
result.encoder = self.encoder
else:
result.encoder = y.encoder
return result
def add(self, y):
"""Perform element-wise addition"""
return self._arithmetic_function(y, "add")
def add_(self, y):
"""Perform element-wise addition"""
return self._arithmetic_function_(y, "add")
def sub(self, y):
"""Perform element-wise subtraction"""
return self._arithmetic_function(y, "sub")
def sub_(self, y):
"""Perform element-wise subtraction"""
return self._arithmetic_function_(y, "sub")
def mul(self, y):
"""Perform element-wise multiplication"""
if isinstance(y, int):
result = self.clone()
result.share = self.share * y
return result
return self._arithmetic_function(y, "mul")
def mul_(self, y):
"""Perform element-wise multiplication"""
if isinstance(y, int) or is_int_tensor(y):
self.share *= y
return self
return self._arithmetic_function_(y, "mul")
def div(self, y):
"""Divide by a given tensor"""
result = self.clone()
if isinstance(y, CrypTensor):
result.share = torch.broadcast_tensors(result.share, y.share)[0].clone()
elif is_tensor(y):
result.share = torch.broadcast_tensors(result.share, y)[0].clone()
return result.div_(y)
def div_(self, y):
"""Divide two tensors element-wise"""
# TODO: Add test coverage for this code path (next 4 lines)
if isinstance(y, float) and int(y) == y:
y = int(y)
if is_float_tensor(y) and y.frac().eq(0).all():
y = y.long()
if isinstance(y, int) or is_int_tensor(y):
validate = cfg.debug.validation_mode
if validate:
tolerance = 1.0
tensor = self.get_plain_text()
# Truncate protocol for dividing by public integers:
if comm.get().get_world_size() > 2:
protocol = globals()[cfg.mpc.protocol]
protocol.truncate(self, y)
else:
self.share = self.share.div_(y, rounding_mode="trunc")
# Validate
if validate:
if not torch.lt(
torch.abs(self.get_plain_text() * y - tensor), tolerance
).all():
raise ValueError("Final result of division is incorrect.")
return self
# Otherwise multiply by reciprocal
if isinstance(y, float):
y = torch.tensor([y], dtype=torch.float, device=self.device)
assert is_float_tensor(y), "Unsupported type for div_: %s" % type(y)
return self.mul_(y.reciprocal())
def matmul(self, y):
"""Perform matrix multiplication using some tensor"""
return self._arithmetic_function(y, "matmul")
def conv1d(self, kernel, **kwargs):
"""Perform a 1D convolution using the given kernel"""
return self._arithmetic_function(kernel, "conv1d", **kwargs)
def conv2d(self, kernel, **kwargs):
"""Perform a 2D convolution using the given kernel"""
return self._arithmetic_function(kernel, "conv2d", **kwargs)
def conv_transpose1d(self, kernel, **kwargs):
"""Perform a 1D transpose convolution (deconvolution) using the given kernel"""
return self._arithmetic_function(kernel, "conv_transpose1d", **kwargs)
def conv_transpose2d(self, kernel, **kwargs):
"""Perform a 2D transpose convolution (deconvolution) using the given kernel"""
return self._arithmetic_function(kernel, "conv_transpose2d", **kwargs)
def index_add(self, dim, index, tensor):
"""Perform out-of-place index_add: Accumulate the elements of tensor into the
self tensor by adding to the indices in the order given in index."""
result = self.clone()
return result.index_add_(dim, index, tensor)
def index_add_(self, dim, index, tensor):
"""Perform in-place index_add: Accumulate the elements of tensor into the
self tensor by adding to the indices in the order given in index."""
public = isinstance(tensor, (int, float)) or is_tensor(tensor)
private = isinstance(tensor, ArithmeticSharedTensor)
if public:
enc_tensor = self.encoder.encode(tensor)
if self.rank == 0:
self._tensor.index_add_(dim, index, enc_tensor)
elif private:
self._tensor.index_add_(dim, index, tensor._tensor)
else:
raise TypeError("index_add second tensor of unsupported type")
return self
def scatter_add(self, dim, index, other):
"""Adds all values from the tensor other into self at the indices
specified in the index tensor in a similar fashion as scatter_(). For
each value in other, it is added to an index in self which is specified
by its index in other for dimension != dim and by the corresponding
value in index for dimension = dim.
"""
return self.clone().scatter_add_(dim, index, other)
def scatter_add_(self, dim, index, other):
"""Adds all values from the tensor other into self at the indices
specified in the index tensor in a similar fashion as scatter_(). For
each value in other, it is added to an index in self which is specified
by its index in other for dimension != dim and by the corresponding
value in index for dimension = dim.
"""
public = isinstance(other, (int, float)) or is_tensor(other)
private = isinstance(other, ArithmeticSharedTensor)
if public:
if self.rank == 0:
self.share.scatter_add_(dim, index, self.encoder.encode(other))
elif private:
self.share.scatter_add_(dim, index, other.share)
else:
raise TypeError("scatter_add second tensor of unsupported type")
return self
def avg_pool2d(self, kernel_size, stride=None, padding=0, ceil_mode=False):
"""Perform an average pooling on each 2D matrix of the given tensor
Args:
kernel_size (int or tuple): pooling kernel size.
"""
# TODO: Add check for whether ceil_mode would change size of output and allow ceil_mode when it wouldn't
if ceil_mode:
raise NotImplementedError(
"CrypTen does not support `ceil_mode` for `avg_pool2d`"
)
z = self._sum_pool2d(
kernel_size, stride=stride, padding=padding, ceil_mode=ceil_mode
)
if isinstance(kernel_size, (int, float)):
pool_size = kernel_size**2
else:
pool_size = kernel_size[0] * kernel_size[1]
return z / pool_size
def _sum_pool2d(self, kernel_size, stride=None, padding=0, ceil_mode=False):
"""Perform a sum pooling on each 2D matrix of the given tensor"""
result = self.shallow_copy()
result.share = torch.nn.functional.avg_pool2d(
self.share,
kernel_size,
stride=stride,
padding=padding,
ceil_mode=ceil_mode,
divisor_override=1,
)
return result
# negation and reciprocal:
def neg_(self):
"""Negate the tensor's values"""
self.share.neg_()
return self
def neg(self):
"""Negate the tensor's values"""
return self.clone().neg_()
def square_(self):
protocol = globals()[cfg.mpc.protocol]
self.share = protocol.square(self).div_(self.encoder.scale).share
return self
def square(self):
return self.clone().square_()
def where(self, condition, y):
"""Selects elements from self or y based on condition
Args:
condition (torch.bool or ArithmeticSharedTensor): when True
yield self, otherwise yield y.
y (torch.tensor or ArithmeticSharedTensor): values selected at
indices where condition is False.
Returns: ArithmeticSharedTensor or torch.tensor
"""
if is_tensor(condition):
condition = condition.float()
y_masked = y * (1 - condition)
else:
# encrypted tensor must be first operand
y_masked = (1 - condition) * y
return self * condition + y_masked
def scatter_(self, dim, index, src):
"""Writes all values from the tensor `src` into `self` at the indices
specified in the `index` tensor. For each value in `src`, its output index
is specified by its index in `src` for `dimension != dim` and by the
corresponding value in `index` for `dimension = dim`.
"""
if is_tensor(src):
src = ArithmeticSharedTensor(src)
assert isinstance(
src, ArithmeticSharedTensor
), "Unrecognized scatter src type: %s" % type(src)
self.share.scatter_(dim, index, src.share)
return self
def scatter(self, dim, index, src):
"""Writes all values from the tensor `src` into `self` at the indices
specified in the `index` tensor. For each value in `src`, its output index
is specified by its index in `src` for `dimension != dim` and by the
corresponding value in `index` for `dimension = dim`.
"""
result = self.clone()
return result.scatter_(dim, index, src)
# overload operators:
__add__ = add
__iadd__ = add_
__radd__ = __add__
__sub__ = sub
__isub__ = sub_
__mul__ = mul
__imul__ = mul_
__rmul__ = __mul__
__div__ = div
__truediv__ = div
__itruediv__ = div_
__neg__ = neg
def __rsub__(self, tensor):
"""Subtracts self from tensor."""
return -self + tensor
@property
def data(self):
return self._tensor.data
@data.setter
def data(self, value):
self._tensor.set_(value)
# Register regular functions
for func in regular.__all__:
if not hasattr(ArithmeticSharedTensor, func):
setattr(ArithmeticSharedTensor, func, getattr(regular, func))
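# ---------------------------------------------------------------------------
# Illustrative sketch (not part of CrypTen; `_demo_additive_sharing` is a
# hypothetical helper). Additive sharing splits an encoded value into random
# summands; reconstruction is a sum followed by decoding. A two-party
# plaintext demonstration, assuming the default 16-bit fixed-point scale:
def _demo_additive_sharing():
    import torch

    scale = 2**16
    x = torch.tensor([1.5, -2.25])
    encoded = (x * scale).long()  # fixed-point encoding
    share0 = torch.randint(-(2**62), 2**62, encoded.size())
    share1 = encoded - share0  # shares sum back to the encoded value
    assert torch.equal(share0 + share1, encoded)
    assert torch.allclose((share0 + share1).float() / scale, x)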
| CrypTen-main | crypten/mpc/primitives/arithmetic.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten.communicator as comm
# dependencies:
import torch
from crypten.common.functions import regular
from crypten.common.rng import generate_kbit_random_tensor
from crypten.common.tensor_types import is_tensor
from crypten.common.util import torch_cat, torch_stack
from crypten.cuda import CUDALongTensor
from crypten.encoder import FixedPointEncoder
from . import beaver, circuit
SENTINEL = -1
# MPC tensor where shares are XOR-sharings.
class BinarySharedTensor:
"""
Encrypted tensor object that uses binary sharing to perform computations.
Binary shares are computed by splitting each value of the input tensor
into n separate random values that xor together to the input tensor value,
where n is the number of parties present in the protocol (world_size).
"""
def __init__(
self, tensor=None, size=None, broadcast_size=False, src=0, device=None
):
"""
Creates the shared tensor from the input `tensor` provided by party `src`.
The other parties can specify a `tensor` or `size` to determine the size
of the shared tensor object to create. In this case, all parties must
specify the same (tensor) size to prevent the party's shares from varying
in size, which leads to undefined behavior.
Alternatively, the parties can set `broadcast_size` to `True` to have the
`src` party broadcast the correct size. The parties who do not know the
tensor size beforehand can provide an empty tensor as input. This is
guaranteed to produce correct behavior but requires an additional
communication round.
        The parties can also set the `device` for their share of the tensor.
        If `device` is unspecified, it is set to `tensor.device`. Unlike
        ArithmeticSharedTensor, binary shares carry no fixed-point precision.
"""
# do nothing if source is sentinel:
if src == SENTINEL:
return
# assertions on inputs:
assert (
isinstance(src, int) and src >= 0 and src < comm.get().get_world_size()
), "specified source party does not exist"
if self.rank == src:
assert tensor is not None, "source must provide a data tensor"
if hasattr(tensor, "src"):
assert (
tensor.src == src
), "source of data tensor must match source of encryption"
if not broadcast_size:
assert (
tensor is not None or size is not None
), "must specify tensor or size, or set broadcast_size"
# if device is unspecified, try and get it from tensor:
if device is None and tensor is not None and hasattr(tensor, "device"):
device = tensor.device
# assume zero bits of precision unless encoder is set outside of init:
self.encoder = FixedPointEncoder(precision_bits=0)
if tensor is not None:
tensor = self.encoder.encode(tensor)
tensor = tensor.to(device=device)
size = tensor.size()
# if other parties do not know tensor's size, broadcast the size:
if broadcast_size:
size = comm.get().broadcast_obj(size, src)
# generate pseudo-random zero sharing (PRZS) and add source's tensor:
self.share = BinarySharedTensor.PRZS(size, device=device).share
if self.rank == src:
self.share ^= tensor
@staticmethod
def new(*args, **kwargs):
"""
Creates a new BinarySharedTensor, passing all args and kwargs into the constructor.
"""
return BinarySharedTensor(*args, **kwargs)
@staticmethod
def from_shares(share, precision=None, src=0, device=None):
"""Generate a BinarySharedTensor from a share from each party"""
result = BinarySharedTensor(src=SENTINEL)
share = share.to(device) if device is not None else share
result.share = CUDALongTensor(share) if share.is_cuda else share
result.encoder = FixedPointEncoder(precision_bits=precision)
return result
@staticmethod
def PRZS(*size, device=None):
"""
        Generate a Pseudo-random Sharing of Zero (using binary shares)
This function does so by generating `n` numbers across `n` parties with
each number being held by exactly 2 parties. Therefore, each party holds
two numbers. A zero sharing is found by having each party xor their two
numbers together.
"""
from crypten import generators
tensor = BinarySharedTensor(src=SENTINEL)
if device is None:
device = torch.device("cpu")
elif isinstance(device, str):
device = torch.device(device)
g0 = generators["prev"][device]
g1 = generators["next"][device]
current_share = generate_kbit_random_tensor(*size, device=device, generator=g0)
next_share = generate_kbit_random_tensor(*size, device=device, generator=g1)
tensor.share = current_share ^ next_share
return tensor
@staticmethod
def rand(*size, bits=64, device=None):
"""
        Generate uniform random samples of a given size.
"""
tensor = BinarySharedTensor(src=SENTINEL)
if isinstance(size[0], (torch.Size, tuple)):
size = size[0]
tensor.share = generate_kbit_random_tensor(size, bitlength=bits, device=device)
return tensor
@property
def device(self):
"""Return the `torch.device` of the underlying _tensor"""
return self._tensor.device
@property
def is_cuda(self):
"""Return True if the underlying _tensor is stored on GPU, False otherwise"""
return self._tensor.is_cuda
def to(self, *args, **kwargs):
"""Call `torch.Tensor.to` on the underlying _tensor"""
self._tensor = self._tensor.to(*args, **kwargs)
return self
def cuda(self, *args, **kwargs):
"""Call `torch.Tensor.cuda` on the underlying _tensor"""
self._tensor = CUDALongTensor(self._tensor.cuda(*args, **kwargs))
return self
def cpu(self, *args, **kwargs):
"""Call `torch.Tensor.cpu` on the underlying _tensor"""
self._tensor = self._tensor.cpu(*args, **kwargs)
return self
@property
def rank(self):
return comm.get().get_rank()
@property
def share(self):
"""Returns underlying _tensor"""
return self._tensor
@share.setter
def share(self, value):
"""Sets _tensor to value"""
self._tensor = value
def shallow_copy(self):
"""Create a shallow copy"""
result = BinarySharedTensor(src=SENTINEL)
result.encoder = self.encoder
result._tensor = self._tensor
return result
def clone(self):
result = BinarySharedTensor(src=SENTINEL)
result.encoder = self.encoder
result._tensor = self._tensor.clone()
return result
def copy_(self, other):
"""Copies other tensor into this tensor."""
self.share.copy_(other.share)
self.encoder = other.encoder
def __repr__(self):
return f"BinarySharedTensor({self.share})"
def __bool__(self):
"""Override bool operator since encrypted tensors cannot evaluate"""
raise RuntimeError("Cannot evaluate BinarySharedTensors to boolean values")
def __nonzero__(self):
"""__bool__ for backwards compatibility with Python 2"""
raise RuntimeError("Cannot evaluate BinarySharedTensors to boolean values")
def __ixor__(self, y):
"""Bitwise XOR operator (element-wise) in place"""
if is_tensor(y) or isinstance(y, int):
if self.rank == 0:
self.share ^= y
elif isinstance(y, BinarySharedTensor):
self.share ^= y.share
else:
raise TypeError("Cannot XOR %s with %s." % (type(y), type(self)))
return self
def __xor__(self, y):
"""Bitwise XOR operator (element-wise)"""
result = self.clone()
if isinstance(y, BinarySharedTensor):
broadcast_tensors = torch.broadcast_tensors(result.share, y.share)
result.share = broadcast_tensors[0].clone()
elif is_tensor(y):
broadcast_tensors = torch.broadcast_tensors(result.share, y)
result.share = broadcast_tensors[0].clone()
return result.__ixor__(y)
def __iand__(self, y):
"""Bitwise AND operator (element-wise) in place"""
if is_tensor(y) or isinstance(y, int):
self.share &= y
elif isinstance(y, BinarySharedTensor):
self.share.set_(beaver.AND(self, y).share.data)
else:
raise TypeError("Cannot AND %s with %s." % (type(y), type(self)))
return self
def __and__(self, y):
"""Bitwise AND operator (element-wise)"""
result = self.clone()
# TODO: Remove explicit broadcasts to allow smaller beaver triples
if isinstance(y, BinarySharedTensor):
broadcast_tensors = torch.broadcast_tensors(result.share, y.share)
result.share = broadcast_tensors[0].clone()
elif is_tensor(y):
broadcast_tensors = torch.broadcast_tensors(result.share, y)
result.share = broadcast_tensors[0].clone()
return result.__iand__(y)
def __ior__(self, y):
"""Bitwise OR operator (element-wise) in place"""
xor_result = self ^ y
return self.__iand__(y).__ixor__(xor_result)
def __or__(self, y):
"""Bitwise OR operator (element-wise)"""
return self.__and__(y) ^ self ^ y
def __invert__(self):
"""Bitwise NOT operator (element-wise)"""
result = self.clone()
if result.rank == 0:
result.share ^= -1
return result
def lshift_(self, value):
"""Left shift elements by `value` bits"""
assert isinstance(value, int), "lshift must take an integer argument."
self.share <<= value
return self
def lshift(self, value):
"""Left shift elements by `value` bits"""
return self.clone().lshift_(value)
def rshift_(self, value):
"""Right shift elements by `value` bits"""
assert isinstance(value, int), "rshift must take an integer argument."
self.share >>= value
return self
def rshift(self, value):
"""Right shift elements by `value` bits"""
return self.clone().rshift_(value)
# Circuits
def add(self, y):
"""Compute [self] + [y] for xor-sharing"""
return circuit.add(self, y)
def eq(self, y):
return circuit.eq(self, y)
def ne(self, y):
return self.eq(y) ^ 1
def lt(self, y):
return circuit.lt(self, y)
def le(self, y):
return circuit.le(self, y)
def gt(self, y):
return circuit.gt(self, y)
def ge(self, y):
return circuit.ge(self, y)
def __setitem__(self, index, value):
"""Set tensor values by index"""
if is_tensor(value) or isinstance(value, list):
value = BinarySharedTensor(value)
assert isinstance(
value, BinarySharedTensor
), "Unsupported input type %s for __setitem__" % type(value)
self.share.__setitem__(index, value.share)
@staticmethod
def stack(seq, *args, **kwargs):
"""Stacks a list of tensors along a given dimension"""
assert isinstance(seq, list), "Stack input must be a list"
assert isinstance(
seq[0], BinarySharedTensor
), "Sequence must contain BinarySharedTensors"
result = seq[0].shallow_copy()
result.share = torch_stack(
            [tensor.share for tensor in seq], *args, **kwargs
)
return result
def sum(self, dim=None):
"""Add all tensors along a given dimension using a log-reduction"""
if dim is None:
x = self.flatten()
else:
x = self.transpose(0, dim)
# Add all BinarySharedTensors
while x.size(0) > 1:
extra = None
if x.size(0) % 2 == 1:
extra = x[0]
x = x[1:]
x0 = x[: (x.size(0) // 2)]
x1 = x[(x.size(0) // 2) :]
x = x0 + x1
if extra is not None:
x.share = torch_cat([x.share, extra.share.unsqueeze(0)])
if dim is None:
x = x.squeeze()
else:
x = x.transpose(0, dim).squeeze(dim)
return x
def cumsum(self, *args, **kwargs):
raise NotImplementedError("BinarySharedTensor cumsum not implemented")
def trace(self, *args, **kwargs):
raise NotImplementedError("BinarySharedTensor trace not implemented")
@staticmethod
def reveal_batch(tensor_or_list, dst=None):
"""Get (batched) plaintext without any downscaling"""
if isinstance(tensor_or_list, BinarySharedTensor):
return tensor_or_list.reveal(dst=dst)
assert isinstance(
tensor_or_list, list
), f"Invalid input type into reveal {type(tensor_or_list)}"
shares = [tensor.share for tensor in tensor_or_list]
op = torch.distributed.ReduceOp.BXOR
if dst is None:
return comm.get().all_reduce(shares, op=op, batched=True)
else:
return comm.get().reduce(shares, dst, op=op, batched=True)
def reveal(self, dst=None):
"""Get plaintext without any downscaling"""
op = torch.distributed.ReduceOp.BXOR
if dst is None:
return comm.get().all_reduce(self.share, op=op)
else:
return comm.get().reduce(self.share, dst, op=op)
def get_plain_text(self, dst=None):
"""Decrypts the tensor."""
# Edge case where share becomes 0 sized (e.g. result of split)
if self.nelement() < 1:
return torch.empty(self.share.size())
return self.encoder.decode(self.reveal(dst=dst))
def where(self, condition, y):
"""Selects elements from self or y based on condition
Args:
condition (torch.bool or BinarySharedTensor): when True yield self,
otherwise yield y. Note condition is not bitwise.
y (torch.tensor or BinarySharedTensor): selected when condition is
False.
Returns: BinarySharedTensor or torch.tensor.
"""
if is_tensor(condition):
condition = condition.long()
is_binary = ((condition == 1) | (condition == 0)).all()
assert is_binary, "condition values must be 0 or 1"
# -1 mult expands 0 into binary 00...00 and 1 into 11...11
condition_expanded = -condition
y_masked = y & (~condition_expanded)
elif isinstance(condition, BinarySharedTensor):
condition_expanded = condition.clone()
# -1 mult expands binary while & 1 isolates first bit
condition_expanded.share = -(condition_expanded.share & 1)
# encrypted tensor must be first operand
y_masked = (~condition_expanded) & y
else:
msg = f"condition {condition} must be torch.bool, or BinarySharedTensor"
raise ValueError(msg)
return (self & condition_expanded) ^ y_masked
def scatter_(self, dim, index, src):
"""Writes all values from the tensor `src` into `self` at the indices
specified in the `index` tensor. For each value in `src`, its output index
is specified by its index in `src` for `dimension != dim` and by the
corresponding value in `index` for `dimension = dim`.
"""
if is_tensor(src):
src = BinarySharedTensor(src)
assert isinstance(
src, BinarySharedTensor
), "Unrecognized scatter src type: %s" % type(src)
self.share.scatter_(dim, index, src.share)
return self
def scatter(self, dim, index, src):
"""Writes all values from the tensor `src` into `self` at the indices
specified in the `index` tensor. For each value in `src`, its output index
is specified by its index in `src` for `dimension != dim` and by the
corresponding value in `index` for `dimension = dim`.
"""
result = self.clone()
return result.scatter_(dim, index, src)
# Bitwise operators
__add__ = add
__eq__ = eq
__ne__ = ne
__lt__ = lt
__le__ = le
__gt__ = gt
__ge__ = ge
__lshift__ = lshift
__rshift__ = rshift
# In-place bitwise operators
__ilshift__ = lshift_
__irshift__ = rshift_
# Reversed boolean operations
__radd__ = __add__
__rxor__ = __xor__
__rand__ = __and__
__ror__ = __or__
# Register regular functions
skip_funcs = ["trace", "sum", "cumsum", "pad"] # skip additive functions and pad
for func in regular.__all__:
if func in skip_funcs:
continue
setattr(BinarySharedTensor, func, getattr(regular, func))
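# ---------------------------------------------------------------------------
# Illustrative sketch (not part of CrypTen; `_demo_xor_sharing` is a
# hypothetical helper). XOR sharing splits a value into random bit-strings
# that xor back to the plaintext, and `__or__` above relies on the identity
# a | b == (a & b) ^ a ^ b:
def _demo_xor_sharing():
    import torch

    x = torch.tensor([0b1011, 0b0110])
    share0 = torch.randint(-(2**62), 2**62, x.size())
    share1 = x ^ share0  # shares xor back to the plaintext
    assert torch.equal(share0 ^ share1, x)
    a, b = torch.tensor([0b1100]), torch.tensor([0b1010])
    assert torch.equal(a | b, (a & b) ^ a ^ b)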
| CrypTen-main | crypten/mpc/primitives/binary.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten
import crypten.communicator as comm
import torch
from crypten.common.util import count_wraps
from crypten.config import cfg
class IgnoreEncodings:
"""Context Manager to ignore tensor encodings"""
def __init__(self, list_of_tensors):
self.list_of_tensors = list_of_tensors
self.encodings_cache = [tensor.encoder.scale for tensor in list_of_tensors]
def __enter__(self):
for tensor in self.list_of_tensors:
tensor.encoder._scale = 1
def __exit__(self, exc_type, exc_value, exc_traceback):
for i, tensor in enumerate(self.list_of_tensors):
tensor.encoder._scale = self.encodings_cache[i]
def __beaver_protocol(op, x, y, *args, **kwargs):
"""Performs Beaver protocol for additively secret-shared tensors x and y
1. Obtain uniformly random sharings [a],[b] and [c] = [a * b]
2. Additively hide [x] and [y] with appropriately sized [a] and [b]
3. Open ([epsilon] = [x] - [a]) and ([delta] = [y] - [b])
4. Return [z] = [c] + (epsilon * [b]) + ([a] * delta) + (epsilon * delta)
"""
assert op in {
"mul",
"matmul",
"conv1d",
"conv2d",
"conv_transpose1d",
"conv_transpose2d",
}
if x.device != y.device:
raise ValueError(f"x lives on device {x.device} but y on device {y.device}")
provider = crypten.mpc.get_default_provider()
a, b, c = provider.generate_additive_triple(
x.size(), y.size(), op, device=x.device, *args, **kwargs
)
from .arithmetic import ArithmeticSharedTensor
if cfg.mpc.active_security:
"""
Reference: "Multiparty Computation from Somewhat Homomorphic Encryption"
Link: https://eprint.iacr.org/2011/535.pdf
"""
f, g, h = provider.generate_additive_triple(
x.size(), y.size(), op, device=x.device, *args, **kwargs
)
t = ArithmeticSharedTensor.PRSS(a.size(), device=x.device)
t_plain_text = t.get_plain_text()
rho = (t_plain_text * a - f).get_plain_text()
sigma = (b - g).get_plain_text()
triples_check = t_plain_text * c - h - sigma * f - rho * g - rho * sigma
triples_check = triples_check.get_plain_text()
if torch.any(triples_check != 0):
raise ValueError("Beaver Triples verification failed!")
# Vectorized reveal to reduce rounds of communication
with IgnoreEncodings([a, b, x, y]):
epsilon, delta = ArithmeticSharedTensor.reveal_batch([x - a, y - b])
# z = c + (a * delta) + (epsilon * b) + epsilon * delta
c._tensor += getattr(torch, op)(epsilon, b._tensor, *args, **kwargs)
c._tensor += getattr(torch, op)(a._tensor, delta, *args, **kwargs)
c += getattr(torch, op)(epsilon, delta, *args, **kwargs)
return c
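# Illustrative sketch (not part of CrypTen; `_demo_beaver_identity` is a
# hypothetical helper). In plaintext, step 4 above relies on the identity
#   x * y == c + epsilon * b + a * delta + epsilon * delta
# with c = a * b, epsilon = x - a, and delta = y - b:
def _demo_beaver_identity():
    import torch

    x, y = torch.tensor([3.0]), torch.tensor([5.0])
    a, b = torch.tensor([1.25]), torch.tensor([-2.0])  # "triple" randomness
    c = a * b
    epsilon, delta = x - a, y - b
    assert torch.allclose(c + epsilon * b + a * delta + epsilon * delta, x * y)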
def mul(x, y):
return __beaver_protocol("mul", x, y)
def matmul(x, y):
return __beaver_protocol("matmul", x, y)
def conv1d(x, y, **kwargs):
return __beaver_protocol("conv1d", x, y, **kwargs)
def conv2d(x, y, **kwargs):
return __beaver_protocol("conv2d", x, y, **kwargs)
def conv_transpose1d(x, y, **kwargs):
return __beaver_protocol("conv_transpose1d", x, y, **kwargs)
def conv_transpose2d(x, y, **kwargs):
return __beaver_protocol("conv_transpose2d", x, y, **kwargs)
def square(x):
"""Computes the square of `x` for additively secret-shared tensor `x`
1. Obtain uniformly random sharings [r] and [r2] = [r * r]
2. Additively hide [x] with appropriately sized [r]
3. Open ([epsilon] = [x] - [r])
4. Return z = [r2] + 2 * epsilon * [r] + epsilon ** 2
"""
provider = crypten.mpc.get_default_provider()
r, r2 = provider.square(x.size(), device=x.device)
with IgnoreEncodings([x, r]):
epsilon = (x - r).reveal()
return r2 + 2 * r * epsilon + epsilon * epsilon
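# Illustrative sketch (not part of CrypTen; `_demo_square_identity` is a
# hypothetical helper). The square identity used above,
#   x**2 == r2 + 2 * epsilon * r + epsilon**2  with epsilon = x - r, r2 = r * r,
# checked in plaintext:
def _demo_square_identity():
    import torch

    x, r = torch.tensor([7.0]), torch.tensor([2.5])
    epsilon = x - r
    assert torch.allclose(r * r + 2 * epsilon * r + epsilon * epsilon, x * x)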
def wraps(x):
"""Privately computes the number of wraparounds for a set a shares
To do so, we note that:
[theta_x] = theta_z + [beta_xr] - [theta_r] - [eta_xr]
Where [theta_i] is the wraps for a variable i
[beta_ij] is the differential wraps for variables i and j
[eta_ij] is the plaintext wraps for variables i and j
Note: Since [eta_xr] = 0 with probability 1 - |x| / Q for modulus Q, we
can make the assumption that [eta_xr] = 0 with high probability.
"""
provider = crypten.mpc.get_default_provider()
r, theta_r = provider.wrap_rng(x.size(), device=x.device)
beta_xr = theta_r.clone()
beta_xr._tensor = count_wraps([x._tensor, r._tensor])
with IgnoreEncodings([x, r]):
z = x + r
theta_z = comm.get().gather(z._tensor, 0)
theta_x = beta_xr - theta_r
# TODO: Incorporate eta_xr
if x.rank == 0:
theta_z = count_wraps(theta_z)
theta_x._tensor += theta_z
return theta_x
def truncate(x, y):
"""Protocol to divide an ArithmeticSharedTensor `x` by a constant integer `y`"""
wrap_count = wraps(x)
x.share = x.share.div_(y, rounding_mode="trunc")
# NOTE: The multiplication here must be split into two parts
# to avoid long out-of-bounds when y <= 2 since (2 ** 63) is
# larger than the largest long integer.
correction = wrap_count * 4 * (int(2**62) // y)
x.share -= correction.share
return x
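# Illustrative sketch (not part of CrypTen; `_demo_truncation_correction` is a
# hypothetical helper). Truncating each share independently is off by
# (#wraps) * (2**64 // y), since every wraparound in the share sum changes the
# integer sum by 2**64. With Python ints modulo Q = 2**64:
def _demo_truncation_correction():
    Q = 2**64
    x, y = 1_000_000, 2**16
    s0 = 0xDEADBEEF12345678  # arbitrary first share
    s1 = (x - s0) % Q  # shares sum to x modulo Q
    wrap_count = (s0 + s1) // Q  # wraparounds when reconstructing
    naive = s0 // y + s1 // y
    corrected = naive - wrap_count * (Q // y)
    assert abs(corrected - x // y) <= 1  # exact up to one unit of rounding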
def AND(x, y):
"""
Performs Beaver protocol for binary secret-shared tensors x and y
1. Obtain uniformly random sharings [a],[b] and [c] = [a & b]
2. XOR hide [x] and [y] with appropriately sized [a] and [b]
3. Open ([epsilon] = [x] ^ [a]) and ([delta] = [y] ^ [b])
4. Return [c] ^ (epsilon & [b]) ^ ([a] & delta) ^ (epsilon & delta)
"""
from .binary import BinarySharedTensor
provider = crypten.mpc.get_default_provider()
a, b, c = provider.generate_binary_triple(x.size(), y.size(), device=x.device)
# Stack to vectorize reveal
eps_del = BinarySharedTensor.reveal_batch([x ^ a, y ^ b])
epsilon = eps_del[0]
delta = eps_del[1]
return (b & epsilon) ^ (a & delta) ^ (epsilon & delta) ^ c
def B2A_single_bit(xB):
"""Converts a single-bit BinarySharedTensor xB into an
ArithmeticSharedTensor. This is done by:
1. Generate ArithmeticSharedTensor [rA] and BinarySharedTensor =rB= with
a common 1-bit value r.
2. Hide xB with rB and open xB ^ rB
3. If xB ^ rB = 0, then return [rA], otherwise return 1 - [rA]
Note: This is an arithmetic xor of a single bit.
"""
if comm.get().get_world_size() < 2:
from .arithmetic import ArithmeticSharedTensor
return ArithmeticSharedTensor(xB._tensor, precision=0, src=0)
provider = crypten.mpc.get_default_provider()
rA, rB = provider.B2A_rng(xB.size(), device=xB.device)
z = (xB ^ rB).reveal()
rA = rA * (1 - 2 * z) + z
return rA
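# Illustrative sketch (not part of CrypTen; `_demo_arithmetic_xor` is a
# hypothetical helper). Step 3 above uses the single-bit identity
#   x == r * (1 - 2 * z) + z  where z = x ^ r,
# i.e. an arithmetic XOR. Checked exhaustively on plaintext bits:
def _demo_arithmetic_xor():
    for x in (0, 1):
        for r in (0, 1):
            z = x ^ r
            assert r * (1 - 2 * z) + z == x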
| CrypTen-main | crypten/mpc/primitives/beaver.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
import math
import torch
# Cache masks and constants to skip computation during each call
__BITS = torch.iinfo(torch.long).bits
__LOG_BITS = int(math.log2(torch.iinfo(torch.long).bits))
@functools.lru_cache(maxsize=None)
def __SPK_circuit_constants(device):
"""
Generate the __MASKS, __OUT_MASKS, and __MULTIPLIERS constants
used by __SPK_circuit.
"""
# Cached SPK masks are:
# [0] -> 010101010101....0101 = 01 x 32
# [1] -> 001000100010....0010 = 0010 x 16
    # [2] -> 000010000000....1000 = 00001000 x 8
# [n] -> [2^n 0s, 1, (2^n -1) 0s] x (32 / (2^n))
__MASKS = torch.tensor(
[
6148914691236517205,
2459565876494606882,
578721382704613384,
36029346783166592,
140737488388096,
2147483648,
],
dtype=torch.long,
device=device,
)
__MULTIPLIERS = torch.tensor(
[(1 << (2**iter + 1)) - 2 for iter in range(__LOG_BITS)], device=device
)
__OUT_MASKS = __MASKS * __MULTIPLIERS
return __MASKS, __OUT_MASKS, __MULTIPLIERS
def __SPK_circuit(S, P):
"""
Computes the Set-Propagate-Kill Tree circuit for a set (S, P)
(K is implied by S, P since (SPK) is one-hot)
(See section 6.3 of Damgard, "Unconditionally Secure Constant-Rounds
Multi-Party Computation for Equality, Comparison, Bits and Exponentiation")
At each stage:
S <- S0 ^ (P0 & S1)
P <- P0 & P1
K <- K0 ^ (P0 & K1) <- don't need K since it is implied by S and P
"""
from .binary import BinarySharedTensor
# Vectorize private AND calls to reduce rounds:
SP = BinarySharedTensor.stack([S, P])
__MASKS, __OUT_MASKS, __MULTIPLIERS = __SPK_circuit_constants(SP.device)
# fmt: off
# Tree reduction circuit
for i in range(__LOG_BITS):
in_mask = __MASKS[i] # Start of arrows
out_mask = __OUT_MASKS[i] # End of arrows
not_out_mask = out_mask ^ -1 # Not (end of arrows)
# Set up S0, S1, P0, and P1
P0 = SP[1] & out_mask # Mask P0 from P
S1P1 = SP & in_mask # Mask S1P1 from SP
S1P1._tensor *= __MULTIPLIERS[i] # Fan out S1P1 along arrows
# Update S and P
update = P0 & S1P1 # S0 ^= P0 & S1, P0 = P0 & P1
SP[1] &= not_out_mask
SP ^= update
# fmt: on
return SP[0], SP[1]
def __P_circuit(P):
"""
Computes the Propagate Tree circuit for input P.
The P circuit will return 1 only if the binary of
the input is all ones (i.e. the value is -1).
Otherwise this circuit returns 0
At each stage:
P <- P0 & P1
"""
shift = __BITS // 2
for _ in range(__LOG_BITS):
P &= P << shift # using lshift since rshift was modified to arithmetic
shift //= 2
return P
def __flip_sign_bit(x):
return x ^ -(2**63)
def __get_sign_bit(x):
from .binary import BinarySharedTensor
y = x >> 63
# NOTE: __rshift__ was changed to arithmetic shift
if isinstance(y, BinarySharedTensor):
y.share = y.share.eq(-1).long()
else:
y = y.eq(-1).long()
return y
def add(x, y):
"""Returns x + y from BinarySharedTensors `x` and `y`"""
S = x & y
P = x ^ y
carry, _ = __SPK_circuit(S, P)
return P ^ (carry << 1)
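# Illustrative sketch (not part of CrypTen; `_demo_binary_add` is a
# hypothetical helper). The adder above computes set/propagate bits
# S = x & y and P = x ^ y, resolves the carries, and returns
# x + y == P ^ (carry << 1). A plaintext check that substitutes a naive
# ripple-carry loop for the log-depth SPK tree:
def _demo_binary_add(x=0b1011, y=0b0110, bits=8):
    mask = (1 << bits) - 1
    S, P = x & y, x ^ y
    carry = S
    for _ in range(bits):  # iterate c = S | (P & (c << 1)) to a fixed point
        carry = (S | (P & (carry << 1))) & mask
    assert (P ^ (carry << 1)) & mask == (x + y) & mask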
def eq(x, y):
"""Returns x == y from BinarySharedTensors `x` and `y`"""
bitwise_equal = ~(x ^ y)
P = __P_circuit(bitwise_equal)
return __get_sign_bit(P)
def lt(x, y):
"""Returns x < y from BinarySharedTensors `x` and `y`"""
x, y = __flip_sign_bit(x), __flip_sign_bit(y)
S = y & ~x
P = ~(x ^ y)
S, _ = __SPK_circuit(S, P)
return __get_sign_bit(S)
def le(x, y):
"""Returns x <= y from BinarySharedTensors `x` and `y`"""
x, y = __flip_sign_bit(x), __flip_sign_bit(y)
S = y & ~x
P = ~(x ^ y)
S, P = __SPK_circuit(S, P)
return __get_sign_bit(S ^ P)
def gt(x, y):
"""Returns x > y from BinarySharedTensors `x` and `y`"""
x, y = __flip_sign_bit(x), __flip_sign_bit(y)
S = x & ~y
P = ~(x ^ y)
S, _ = __SPK_circuit(S, P)
return __get_sign_bit(S)
def ge(x, y):
"""Returns x >= y from BinarySharedTensors `x` and `y`"""
x, y = __flip_sign_bit(x), __flip_sign_bit(y)
S = x & ~y
P = ~(x ^ y)
S, P = __SPK_circuit(S, P)
return __get_sign_bit(S ^ P)
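# Illustrative sketch (not part of CrypTen; `_demo_sign_flip` is a
# hypothetical helper). The comparisons above flip the sign bit first
# because, after xor-ing with 2**(bits - 1), signed order coincides with
# unsigned order. Checked exhaustively on 8-bit values:
def _demo_sign_flip(bits=8):
    flip = 1 << (bits - 1)
    mask = (1 << bits) - 1
    values = range(-(2 ** (bits - 1)), 2 ** (bits - 1))
    for a in values:
        for b in values:
            ua, ub = (a & mask) ^ flip, (b & mask) ^ flip
            assert (a < b) == (ua < ub)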
| CrypTen-main | crypten/mpc/primitives/circuit.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .arithmetic import ArithmeticSharedTensor
from .binary import BinarySharedTensor
__all__ = ["ArithmeticSharedTensor", "BinarySharedTensor"]
| CrypTen-main | crypten/mpc/primitives/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# This file implements Replicated Secret Sharing protocols
# from the CryptGPU repo
import crypten.communicator as comm
import torch
def replicate_shares(share_list):
world_size = comm.get().get_world_size()
if world_size < 3:
raise ValueError("Cannot utilize Replicated Sharing securely with < 3 parties.")
rank = comm.get().get_rank()
prev_rank = (rank - 1) % world_size
next_rank = (rank + 1) % world_size
reqs = []
rep_shares = []
for share in share_list:
rep_shares.append(torch.zeros_like(share))
send_req = comm.get().isend(share.contiguous(), dst=next_rank)
recv_req = comm.get().irecv(rep_shares[-1], src=prev_rank)
reqs.extend([send_req, recv_req])
for req in reqs:
req.wait()
# Order [(x1, x2), (y1, y2), ...]
shares = [(share_list[i], rep_shares[i]) for i in range(len(share_list))]
return shares
def __replicated_secret_sharing_protocol(op, x, y, *args, **kwargs):
"""Implements bilinear functions using replicated secret shares.
Shares are input as ArithmeticSharedTensors and are replicated
within this function to perform computations.
The protocol used here is that of section 3.2 of ABY3
(https://eprint.iacr.org/2018/403.pdf).
"""
assert op in {
"mul",
"matmul",
"conv1d",
"conv2d",
"conv_transpose1d",
"conv_transpose2d",
}
x_shares, y_shares = replicate_shares([x.share, y.share])
x1, x2 = x_shares
y1, y2 = y_shares
z = x.shallow_copy()
z.share = getattr(torch, op)(x1, y1, *args, **kwargs)
z.share += getattr(torch, op)(x1, y2, *args, **kwargs)
z.share += getattr(torch, op)(x2, y1, *args, **kwargs)
return z
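# Illustrative sketch (not part of CrypTen; `_demo_rss_mul` is a hypothetical
# helper). With 3-party replicated shares, party i holds (x_i, x_{i-1});
# summing each party's local product x_i*y_i + x_i*y_{i-1} + x_{i-1}*y_i
# covers every cross term of (x_0 + x_1 + x_2) * (y_0 + y_1 + y_2):
def _demo_rss_mul():
    import random

    xs = [random.randint(-100, 100) for _ in range(3)]
    ys = [random.randint(-100, 100) for _ in range(3)]
    z = sum(
        xs[i] * ys[i] + xs[i] * ys[i - 1] + xs[i - 1] * ys[i] for i in range(3)
    )
    assert z == sum(xs) * sum(ys)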
def mul(x, y):
return __replicated_secret_sharing_protocol("mul", x, y)
def matmul(x, y):
return __replicated_secret_sharing_protocol("matmul", x, y)
def conv1d(x, y, **kwargs):
return __replicated_secret_sharing_protocol("conv1d", x, y, **kwargs)
def conv2d(x, y, **kwargs):
return __replicated_secret_sharing_protocol("conv2d", x, y, **kwargs)
def conv_transpose1d(x, y, **kwargs):
return __replicated_secret_sharing_protocol("conv_transpose1d", x, y, **kwargs)
def conv_transpose2d(x, y, **kwargs):
return __replicated_secret_sharing_protocol("conv_transpose2d", x, y, **kwargs)
def square(x):
(x_shares,) = replicate_shares([x.share])
x1, x2 = x_shares
x_square = x1**2 + 2 * x1 * x2
z = x.shallow_copy()
z.share = x_square
return z
def truncate(x, y):
"""Protocol to divide an ArithmeticSharedTensor `x` by a constant integer `y`
using RSS (see ABY3 Figure 2: https://eprint.iacr.org/2018/403.pdf).
Note: This is currently supported under 3PC only. This is because the protocol
requires 2-out-of-N secret sharing since only 2 parties can perform division to
provide statistical guarantees equivalent to 2-out-of-2 truncation.
"""
if comm.get().get_world_size() != 3:
raise NotImplementedError(
"RSS truncation is only implemented for world_size == 3."
)
rank = x.rank
if rank == 0:
x.share = x.share.div(y, rounding_mode="trunc")
elif rank == 1:
x2 = comm.get().recv(x.share, 2)
x.share = x.share.add(x2).div(y, rounding_mode="trunc")
elif rank == 2:
comm.get().send(x.share, 1)
x.share -= x.share
# Add PRZS - this takes the place of r
x.share += x.PRZS(x.size(), device=x.device).share
return x
def AND(x, y):
from .binary import BinarySharedTensor
x_share = x
y_share = y
if isinstance(x, BinarySharedTensor):
x_share = x.share
y_share = y.share
x_shares, y_shares = replicate_shares([x_share, y_share])
x1, x2 = x_shares
y1, y2 = y_shares
z = x.shallow_copy()
z.share = (x1 & y1) ^ (x2 & y1) ^ (x1 & y2)
return z
| CrypTen-main | crypten/mpc/primitives/replicated.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import crypten.communicator as comm
import torch
from crypten.encoder import FixedPointEncoder
from ..ptype import ptype as Ptype
from . import beaver
from .arithmetic import ArithmeticSharedTensor
from .binary import BinarySharedTensor
def _A2B(arithmetic_tensor):
# first try memory-inefficient implementation that takes O(log P) rounds:
try:
binary_tensor = BinarySharedTensor.stack(
[
BinarySharedTensor(arithmetic_tensor.share, src=i)
for i in range(comm.get().get_world_size())
]
)
binary_tensor = binary_tensor.sum(dim=0)
# if we OOM, try memory-efficient implementation that uses O(P) rounds:
except RuntimeError:
binary_tensor = None
for i in range(comm.get().get_world_size()):
binary_share = BinarySharedTensor(arithmetic_tensor.share, src=i)
binary_tensor = binary_share if i == 0 else binary_tensor + binary_share
# return the result:
binary_tensor.encoder = arithmetic_tensor.encoder
return binary_tensor
def _B2A(binary_tensor, precision=None, bits=None):
if bits is None:
bits = torch.iinfo(torch.long).bits
if bits == 1:
binary_bit = binary_tensor & 1
arithmetic_tensor = beaver.B2A_single_bit(binary_bit)
else:
binary_bits = BinarySharedTensor.stack(
[binary_tensor >> i for i in range(bits)]
)
binary_bits = binary_bits & 1
arithmetic_bits = beaver.B2A_single_bit(binary_bits)
multiplier = torch.cat(
[
torch.tensor([1], dtype=torch.long, device=binary_tensor.device) << i
for i in range(bits)
]
)
while multiplier.dim() < arithmetic_bits.dim():
multiplier = multiplier.unsqueeze(1)
arithmetic_tensor = arithmetic_bits.mul_(multiplier).sum(0)
arithmetic_tensor.encoder = FixedPointEncoder(precision_bits=precision)
scale = arithmetic_tensor.encoder._scale // binary_tensor.encoder._scale
arithmetic_tensor *= scale
return arithmetic_tensor
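# Illustrative sketch (not part of CrypTen; `_demo_bit_recomposition` is a
# hypothetical helper). The multi-bit branch above reconstructs the value
# from its arithmetic bit shares as sum_i bit_i * 2**i; in plaintext:
def _demo_bit_recomposition():
    import torch

    x = torch.tensor([13, 7])
    bits = torch.stack([(x >> i) & 1 for i in range(8)])  # bit decomposition
    powers = (torch.tensor([1]) << torch.arange(8)).unsqueeze(1)
    assert torch.equal((bits * powers).sum(0), x)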
def convert(tensor, ptype, **kwargs):
tensor_name = ptype.to_tensor()
if isinstance(tensor, tensor_name):
return tensor
if isinstance(tensor, ArithmeticSharedTensor) and ptype == Ptype.binary:
return _A2B(tensor)
elif isinstance(tensor, BinarySharedTensor) and ptype == Ptype.arithmetic:
return _B2A(tensor, **kwargs)
else:
raise TypeError("Cannot convert %s to %s" % (type(tensor), ptype.__name__))
| CrypTen-main | crypten/mpc/primitives/converters.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
from hashlib import sha256
from typing import List
import crypten.communicator as comm
"""
Note: Python's `random` module is not cryptographically secure; this
implementation should only be used for testing purposes.
"""
class BaseOT:
"""
hardcoded public parameter
log2(__prime) > 128
__generator is a primitive root of __prime
"""
__prime = 631276824160446938136046282957027762913
__generator = 3
__inverse__generator = pow(__generator, (__prime - 2), __prime)
@staticmethod
def string_xor(s1, s2):
"""
XOR of two strings
"""
return "".join(chr(ord(a) ^ ord(b)) for a, b in zip(s1, s2))
def __init__(self, partner_rank):
self.partner_rank = partner_rank
return
def send(self, message0s: List[str], message1s: List[str]):
"""
sender's input is two message lists
"""
        if len(message0s) != len(message1s):
            raise ValueError("inconsistent input size!")
alphas = []
masks_for_message1s = []
for _i in range(len(message1s)):
# pick a random element from Z_p
alpha = random.randint(0, self.__prime - 1)
alphas.append(alpha)
# g^\alpha
mask_for_message1 = pow(self.__generator, alpha, self.__prime)
masks_for_message1s.append(mask_for_message1)
# send mask_for_message1
for i in range(len(message1s)):
comm.get().send_obj(masks_for_message1s[i], self.partner_rank)
# compute (g^\alpha)^-\alpha when waiting for response
# (g^-1)^(\alpha^2) = (g^-1)^(\alpha^2 mod (p-1))
dividers = []
for i in range(len(message1s)):
divider = pow(
self.__inverse__generator,
alphas[i] * alphas[i] % (self.__prime - 1),
self.__prime,
)
dividers.append(divider)
masks_for_choices = []
# recv mask_for_choice
for _i in range(len(message1s)):
mask_for_choice = comm.get().recv_obj(self.partner_rank)
masks_for_choices.append(mask_for_choice)
for i in range(len(message1s)):
masks_for_choices[i] = pow(masks_for_choices[i], alphas[i], self.__prime)
# hash
pad0 = sha256(str(masks_for_choices[i]).encode("utf-8")).hexdigest()
pad1 = sha256(
str(masks_for_choices[i] * dividers[i] % self.__prime).encode("utf-8")
).hexdigest()
            if len(pad0) < len(message0s[i]):
                raise ValueError(str(i) + "-th message0 is too long")
            if len(pad1) < len(message1s[i]):
                raise ValueError(str(i) + "-th message1 is too long")
# encrypt with one time pad
message0_enc = self.string_xor(pad0, message0s[i])
message1_enc = self.string_xor(pad1, message1s[i])
# send message0, message1
comm.get().send_obj(message0_enc, self.partner_rank)
comm.get().send_obj(message1_enc, self.partner_rank)
def receive(self, choices: List[bool]):
"""
choice:
false: pick message0
true: pick message1
"""
betas = []
masks_for_choices = []
for _i in range(len(choices)):
# pick a random element from Z_p
beta = random.randint(0, self.__prime - 1)
mask_for_choice = pow(self.__generator, beta, self.__prime)
betas.append(beta)
masks_for_choices.append(mask_for_choice)
masks_for_message1s = []
for i in range(len(choices)):
# recv mask_for_message1
mask_for_message1 = comm.get().recv_obj(self.partner_rank)
masks_for_message1s.append(mask_for_message1)
if choices[i]:
masks_for_choices[i] = (
masks_for_choices[i] * mask_for_message1
) % self.__prime
for i in range(len(choices)):
# send mask_for_choice
comm.get().send_obj(masks_for_choices[i], self.partner_rank)
keys = []
for i in range(len(choices)):
# compute the hash when waiting for response
key = sha256(
str(pow(masks_for_message1s[i], betas[i], self.__prime)).encode("utf-8")
).hexdigest()
keys.append(key)
rst = []
for i in range(len(choices)):
# recv message0, message1
message0_enc = comm.get().recv_obj(self.partner_rank)
message1_enc = comm.get().recv_obj(self.partner_rank)
if choices[i]:
rst.append(self.string_xor(keys[i], message1_enc))
else:
rst.append(self.string_xor(keys[i], message0_enc))
return rst
| CrypTen-main | crypten/mpc/primitives/ot/baseOT.py |
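The send/receive pair above implements a Diffie–Hellman-style 1-out-of-2 OT (in the spirit of the Chou–Orlandi "simplest OT"). The key agreement it depends on can be checked locally: the receiver's key equals pad0 when choice is false and pad1 when choice is true. A single-process sketch with the same toy parameters (illustrative only, not secure):
import random
prime = 631276824160446938136046282957027762913
g = 3
g_inv = pow(g, prime - 2, prime)
alpha = random.randint(0, prime - 1)  # sender's secret
beta = random.randint(0, prime - 1)   # receiver's secret
A = pow(g, alpha, prime)              # mask_for_message1, sent to receiver
for choice in (False, True):
    B = pow(g, beta, prime)           # mask_for_choice
    if choice:
        B = B * A % prime
    # sender derives both candidate pads from B:
    pad0 = pow(B, alpha, prime)
    pad1 = pad0 * pow(g_inv, alpha * alpha % (prime - 1), prime) % prime
    # receiver derives exactly one of them from A:
    key = pow(A, beta, prime)
    assert key == (pad1 if choice else pad0)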
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from functools import wraps
from crypten.config import cfg
from .debug import configure_logging, MultiprocessingPdb, validate_correctness
pdb = MultiprocessingPdb()
__all__ = ["pdb", "configure_logging", "validate_correctness", "validate_decorator"]
def register_validation(getattr_function):
@wraps(getattr_function)
def validate_attribute(self, name):
# Get dispatched function call
function = getattr_function(self, name)
if not cfg.debug.validation_mode:
return function
# Run validation
return validate_correctness(self, function, name)
return validate_attribute
| CrypTen-main | crypten/debug/__init__.py |
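register_validation is designed to wrap a class's __getattribute__ so every dispatched method can be intercepted. A toy sketch of the same wrapping pattern, with logging in place of validation (illustrative class, no crypten dependency):
from functools import wraps
def register_logging(getattr_function):
    @wraps(getattr_function)
    def logged_attribute(self, name):
        function = getattr_function(self, name)
        print("dispatching %s" % name)
        return function
    return logged_attribute
class Tensorish:
    @register_logging
    def __getattribute__(self, name):
        return object.__getattribute__(self, name)
    def add(self, y):
        return y + 1
Tensorish().add(1)  # prints "dispatching add" before returning 2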
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import pdb as pythondebugger
import sys
from crypten.config import cfg
class MultiprocessingPdb(pythondebugger.Pdb):
"""A Pdb subclass that may be used
from a forked multiprocessing child
"""
def interaction(self, *args, **kwargs):
_stdin = sys.stdin
try:
with open("/dev/stdin") as file:
sys.stdin = file
pythondebugger.Pdb.interaction(self, *args, **kwargs)
finally:
sys.stdin = _stdin
def configure_logging():
"""Configures a logging template useful for debugging multiple processes."""
level = logging.INFO
logging.getLogger().setLevel(level)
logging.basicConfig(
level=level,
format=(
"[%(asctime)s][%(levelname)s][%(filename)s:%(lineno)d]"
+ "[%(processName)s] %(message)s"
),
)
def crypten_print(*args, dst=0, **kwargs):
"""
Prints a message to only parties whose rank is contained by `dst` kwarg (default: 0).
"""
if isinstance(dst, int):
dst = [dst]
assert isinstance(
dst, (list, tuple)
), "print destination must be a list or tuple of party ranks"
import crypten.communicator as comm
if comm.get().get_rank() in dst:
print(*args, **kwargs)
def crypten_log(*args, level=logging.INFO, dst=0, **kwargs):
"""
Logs a message to logger of parties whose rank is contained by `dst` kwarg (default: 0).
Uses logging.INFO as default level.
"""
if isinstance(dst, int):
dst = [dst]
assert isinstance(
dst, (list, tuple)
), "log destination must be a list or tuple of party ranks"
import crypten.communicator as comm
if comm.get().get_rank() in dst:
logging.log(level, *args, **kwargs)
def crypten_print_in_order(*args, **kwargs):
"""
Calls print(*args, **kwargs) on each party in rank order to ensure each party
can print its full message uninterrupted and the full output is deterministic
"""
import crypten.communicator as comm
for i in range(comm.get().get_world_size()):
if comm.get().get_rank() == i:
print(*args, **kwargs)
comm.get().barrier()
def validate_correctness(self, func, func_name, tolerance=0.5):
import crypten
import torch
if not hasattr(torch.tensor([]), func_name):
return func
def validation_function(*args, **kwargs):
with cfg.temp_override({"debug.validation_mode": False}):
# Compute crypten result
result_enc = func(*args, **kwargs)
result = (
result_enc.get_plain_text()
if crypten.is_encrypted_tensor(result_enc)
else result_enc
)
args = list(args)
# Compute torch result for corresponding function
for i, arg in enumerate(args):
if crypten.is_encrypted_tensor(arg):
args[i] = args[i].get_plain_text()
kwargs.pop("input_in_01", None)
for key, value in kwargs.items():
if crypten.is_encrypted_tensor(value):
kwargs[key] = value.get_plain_text()
reference = getattr(self.get_plain_text(), func_name)(*args, **kwargs)
# TODO: Validate properties - Issue is tuples can contain encrypted tensors
if not torch.is_tensor(reference):
return result_enc
# Check sizes match
if result.size() != reference.size():
crypten_log(
f"Size mismatch: Expected {reference.size()} but got {result.size()}"
)
raise ValueError(f"Function {func_name} returned incorrect size")
# Check that results match
diff = (result - reference).abs_()
norm_diff = diff.div(result.abs() + reference.abs()).abs_()
test_passed = norm_diff.le(tolerance) + diff.le(tolerance * 0.1)
test_passed = test_passed.gt(0).all().item() == 1
if not test_passed:
crypten_log(f"Function {func_name} returned incorrect values")
crypten_log("Result %s" % result)
crypten_log("Result - Reference = %s" % (result - reference))
raise ValueError(f"Function {func_name} returned incorrect values")
return result_enc
return validation_function
| CrypTen-main | crypten/debug/debug.py |
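The pass criterion in validate_correctness accepts a result when, elementwise, either the relative error or a tenth-scaled absolute error is within tolerance. A standalone sketch of that check (plain torch, illustrative values):
import torch
def passes_validation(result, reference, tolerance=0.5):
    diff = (result - reference).abs()
    norm_diff = diff.div(result.abs() + reference.abs()).abs()
    # pass if relative error <= tolerance OR absolute error <= tolerance / 10
    passed = norm_diff.le(tolerance) | diff.le(tolerance * 0.1)
    return passed.all().item()
assert passes_validation(torch.tensor([1.01, 2.0]), torch.tensor([1.0, 2.02]))
assert not passes_validation(torch.tensor([5.0]), torch.tensor([1.0]))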
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import multiprocessing
import os
import uuid
import crypten
class MultiProcessLauncher:
# run_process_fn will be run in subprocesses.
def __init__(self, world_size, run_process_fn, fn_args=None):
env = os.environ.copy()
env["WORLD_SIZE"] = str(world_size)
multiprocessing.set_start_method("spawn")
# Use random file so multiple jobs can be run simultaneously
INIT_METHOD = "file:///tmp/crypten-rendezvous-{}".format(uuid.uuid1())
env["RENDEZVOUS"] = INIT_METHOD
self.processes = []
for rank in range(world_size):
process_name = "process " + str(rank)
process = multiprocessing.Process(
target=self.__class__._run_process,
name=process_name,
args=(rank, world_size, env, run_process_fn, fn_args),
)
self.processes.append(process)
if crypten.mpc.ttp_required():
ttp_process = multiprocessing.Process(
target=self.__class__._run_process,
name="TTP",
args=(
world_size,
world_size,
env,
crypten.mpc.provider.TTPServer,
None,
),
)
self.processes.append(ttp_process)
@classmethod
def _run_process(cls, rank, world_size, env, run_process_fn, fn_args):
for env_key, env_value in env.items():
os.environ[env_key] = env_value
os.environ["RANK"] = str(rank)
orig_logging_level = logging.getLogger().level
logging.getLogger().setLevel(logging.INFO)
crypten.init()
logging.getLogger().setLevel(orig_logging_level)
if fn_args is None:
run_process_fn()
else:
run_process_fn(fn_args)
def start(self):
for process in self.processes:
process.start()
def join(self):
for process in self.processes:
process.join()
assert (
process.exitcode == 0
), f"{process.name} has non-zero exit code {process.exitcode}"
def terminate(self):
for process in self.processes:
process.terminate()
| CrypTen-main | examples/multiprocess_launcher.py |
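A minimal usage sketch for MultiProcessLauncher (the worker function and world size are illustrative; each spawned party runs the worker after crypten.init()):
import crypten
from examples.multiprocess_launcher import MultiProcessLauncher
def demo_worker():
    x = crypten.cryptensor([1.0, 2.0, 3.0])
    print(x.get_plain_text())
if __name__ == "__main__":
    launcher = MultiProcessLauncher(2, demo_worker)
    launcher.start()
    launcher.join()
    launcher.terminate()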
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
class AverageMeter:
"""Measures average of a value."""
def __init__(self):
self.reset()
def reset(self):
self.sum = 0.0
self.count = 0
def add(self, value, n=1):
self.sum += value * n
self.count += n
def value(self):
return self.sum / self.count
class AccuracyMeter:
"""Measures top-k accuracy of multi-class predictions."""
def __init__(self, topk=(1,)):
self.reset()
self.topk = topk
self.maxk = max(self.topk)
def reset(self):
self.values = []
def add(self, output, ground_truth):
# compute predicted classes (ordered):
_, prediction = output.topk(self.maxk, 1, True, True)
prediction = prediction.t()
# store correctness values:
correct = prediction.eq(ground_truth.view(1, -1).expand_as(prediction))
self.values.append(correct[: self.maxk])
def value(self):
result = {}
        # concatenate along the sample dimension so batches may differ in size:
        correct = torch.cat(self.values, 1)  # shape: (maxk, total_samples)
        for k in self.topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            result[k] = correct_k.mul_(100.0 / correct.size(1))
return result
| CrypTen-main | examples/meters.py |
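A short usage sketch for the meters above (random logits, so the printed numbers vary):
import torch
meter = AccuracyMeter(topk=(1, 5))
for _ in range(2):
    logits = torch.randn(8, 10)           # batch of 8 examples, 10 classes
    labels = torch.randint(0, 10, (8,))
    meter.add(logits, labels)
print(meter.value())                       # e.g. {1: tensor([12.50]), 5: tensor([56.25])}
loss_meter = AverageMeter()
loss_meter.add(0.9, n=8)
loss_meter.add(0.7, n=8)
print(loss_meter.value())                  # 0.8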
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import numpy as np
import numpy.linalg as nla
import torch
class NoopContextManager:
"""Context manager that does nothing."""
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def onehot(indices):
"""
Converts index vector into one-hot matrix.
"""
assert indices.dtype == torch.long, "indices must be long integers"
assert indices.min() >= 0, "indices must be non-negative"
onehot_vector = torch.zeros(
indices.nelement(), indices.max() + 1, dtype=torch.uint8
)
onehot_vector.scatter_(1, indices.view(indices.nelement(), 1), 1)
return onehot_vector
def kmeans_inference(data, clusters, hard=True, bandwidth=1.0):
"""
Computes cluster assignments for a k-means clustering.
"""
assert clusters.size(1) == data.size(
1
), "cluster dimensionality does not match data dimensionality"
# compute all pairwise distances:
d2_sum = data.pow(2.0).sum(1, keepdim=True)
c2_sum = clusters.pow(2.0).sum(1, keepdim=True)
distances = data.matmul(clusters.t()).mul(-2.0).add_(d2_sum).add_(c2_sum.t())
# compute assignments and return:
if hard:
assignments = distances.argmin(1)
return assignments
else:
similarities = distances.mul_(-1.0 / (2.0 * bandwidth)).exp_()
return similarities
def kmeans(data, K, max_iter=100):
"""
Performs k-means clustering of data into K clusters.
"""
assert K < data.size(0), "more clusters than data points"
# initialize clusters at randomly selected data points:
perm = torch.randperm(data.size(0))
clusters = data[perm[:K], :]
assignments = None
for iter in range(max_iter):
# compute assignments, and stop if converged:
prev_assignments = assignments
assignments = kmeans_inference(data, clusters)
if prev_assignments is not None:
num_changes = assignments.ne(prev_assignments).sum()
logging.info(
"K-means iteration %d: %d assignments changed" % (iter, num_changes)
)
if num_changes == 0:
break
# re-compute cluster means:
for k in range(K):
index = assignments == k
if index.any(): # ignore empty clusters
clusters[k, :] = data[index, :].mean(0)
# done:
return clusters
def pca(data, components):
"""
Finds the `components` top principal components of the data.
"""
assert components > 0 and components < data.size(1), "incorrect # of PCA dimensions"
# We switch to numpy here as torch.symeig gave strange results.
dtype = data.dtype
data = data.numpy()
data -= np.mean(data, axis=0, keepdims=True)
cov = np.cov(data.T)
L, V = nla.eigh(cov)
return torch.tensor(V[:, -components:], dtype=dtype)
def process_mnist_files(raw_dir, processed_dir):
"""
Uncompress zipped train and/or test image and label files, load the
uncompressed data files, and save to .pt files so that datasets.MNIST
can read it directly.
"""
from torchvision import datasets
os.makedirs(processed_dir, exist_ok=True)
def extract_mnist_archive(data_file_name):
"""
        Extract the zipped data file and return the path to the uncompressed data
file.
If the zipped data file does not exist in raw_dir, it returns None.
"""
data_file_archive = os.path.join(raw_dir, data_file_name + ".gz")
if os.path.exists(data_file_archive):
datasets.utils.extract_archive(data_file_archive, processed_dir)
return os.path.join(processed_dir, data_file_name)
else:
return None
train_image_file = extract_mnist_archive("train-images-idx3-ubyte")
train_label_file = extract_mnist_archive("train-labels-idx1-ubyte")
with open(os.path.join(processed_dir, datasets.MNIST.training_file), "wb") as f:
if train_image_file and train_label_file:
training_set = (
datasets.mnist.read_image_file(train_image_file),
datasets.mnist.read_label_file(train_label_file),
)
torch.save(training_set, f)
test_image_file = extract_mnist_archive("t10k-images-idx3-ubyte")
test_label_file = extract_mnist_archive("t10k-labels-idx1-ubyte")
with open(os.path.join(processed_dir, datasets.MNIST.test_file), "wb") as f:
if test_image_file and test_label_file:
test_set = (
datasets.mnist.read_image_file(test_image_file),
datasets.mnist.read_label_file(test_label_file),
)
torch.save(test_set, f)
| CrypTen-main | examples/util.py |
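A quick sketch of the helpers above on synthetic data (assumes onehot, kmeans, and kmeans_inference are in scope):
import torch
labels = torch.tensor([0, 2, 1], dtype=torch.long)
print(onehot(labels))
# tensor([[1, 0, 0],
#         [0, 0, 1],
#         [0, 1, 0]], dtype=torch.uint8)
data = torch.randn(100, 5)
clusters = kmeans(data, K=4, max_iter=10)       # (4, 5) cluster means
assignments = kmeans_inference(data, clusters)  # (100,) hard assignments
print(assignments.shape)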
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from .meters import AccuracyMeter, AverageMeter
from .multiprocess_launcher import MultiProcessLauncher
from .util import NoopContextManager
__all__ = [
"AverageMeter",
"AccuracyMeter",
"NoopContextManager",
"MultiProcessLauncher",
]
| CrypTen-main | examples/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import shutil
import tempfile
import time
import crypten
import crypten.communicator as comm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from examples.meters import AverageMeter
from examples.util import NoopContextManager
from torchvision import datasets, transforms
def run_mpc_cifar(
epochs=25,
start_epoch=0,
batch_size=1,
lr=0.001,
momentum=0.9,
weight_decay=1e-6,
print_freq=10,
model_location="",
resume=False,
evaluate=True,
seed=None,
skip_plaintext=False,
context_manager=None,
):
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
crypten.init()
# create model
model = LeNet()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay
)
# optionally resume from a checkpoint
best_prec1 = 0
if resume:
if os.path.isfile(model_location):
logging.info("=> loading checkpoint '{}'".format(model_location))
checkpoint = torch.load(model_location)
start_epoch = checkpoint["epoch"]
best_prec1 = checkpoint["best_prec1"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logging.info(
"=> loaded checkpoint '{}' (epoch {})".format(
model_location, checkpoint["epoch"]
)
)
else:
raise IOError("=> no checkpoint found at '{}'".format(model_location))
# Data loading code
def preprocess_data(context_manager, data_dirname):
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
with context_manager:
trainset = datasets.CIFAR10(
data_dirname, train=True, download=True, transform=transform
)
testset = datasets.CIFAR10(
data_dirname, train=False, download=True, transform=transform
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=4, shuffle=True, num_workers=2
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=batch_size, shuffle=False, num_workers=2
)
return trainloader, testloader
if context_manager is None:
context_manager = NoopContextManager()
data_dir = tempfile.TemporaryDirectory()
train_loader, val_loader = preprocess_data(context_manager, data_dir.name)
if evaluate:
if not skip_plaintext:
logging.info("===== Evaluating plaintext LeNet network =====")
validate(val_loader, model, criterion, print_freq)
logging.info("===== Evaluating Private LeNet network =====")
input_size = get_input_size(val_loader, batch_size)
private_model = construct_private_model(input_size, model)
validate(val_loader, private_model, criterion, print_freq)
# logging.info("===== Validating side-by-side ======")
# validate_side_by_side(val_loader, model, private_model)
return
# define loss function (criterion) and optimizer
for epoch in range(start_epoch, epochs):
adjust_learning_rate(optimizer, epoch, lr)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, print_freq)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, print_freq)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "LeNet",
"state_dict": model.state_dict(),
"best_prec1": best_prec1,
"optimizer": optimizer.state_dict(),
},
is_best,
)
data_dir.cleanup()
def train(train_loader, model, criterion, optimizer, epoch, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# compute output
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if i % print_freq == 0:
logging.info(
"Epoch: [{}][{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f})\t"
"Prec@5 {:.3f} ({:.3f})".format(
epoch,
i,
len(train_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
def validate_side_by_side(val_loader, plaintext_model, private_model):
"""Validate the plaintext and private models side-by-side on each example"""
# switch to evaluate mode
plaintext_model.eval()
private_model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
# compute output for plaintext
output_plaintext = plaintext_model(input)
# encrypt input and compute output for private
# assumes that private model is encrypted with src=0
input_encr = encrypt_data_tensor_with_src(input)
output_encr = private_model(input_encr)
# log all info
logging.info("==============================")
logging.info("Example %d\t target = %d" % (i, target))
logging.info("Plaintext:\n%s" % output_plaintext)
logging.info("Encrypted:\n%s\n" % output_encr.get_plain_text())
            # only use the first 1000 examples
            if i + 1 >= 1000:
                break
def get_input_size(val_loader, batch_size):
input, target = next(iter(val_loader))
return input.size()
def construct_private_model(input_size, model):
"""Encrypt and validate trained model for multi-party setting."""
# get rank of current process
rank = comm.get().get_rank()
dummy_input = torch.empty(input_size)
# party 0 always gets the actual model; remaining parties get dummy model
if rank == 0:
model_upd = model
else:
model_upd = LeNet()
private_model = crypten.nn.from_pytorch(model_upd, dummy_input).encrypt(src=0)
return private_model
def encrypt_data_tensor_with_src(input):
"""Encrypt data tensor for multi-party setting"""
# get rank of current process
rank = comm.get().get_rank()
# get world size
world_size = comm.get().get_world_size()
if world_size > 1:
# party 1 gets the actual tensor; remaining parties get dummy tensor
src_id = 1
else:
# party 0 gets the actual tensor since world size is 1
src_id = 0
if rank == src_id:
input_upd = input
else:
input_upd = torch.empty(input.size())
private_input = crypten.cryptensor(input_upd, src=src_id)
return private_input
def validate(val_loader, model, criterion, print_freq=10):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
if isinstance(model, crypten.nn.Module) and not crypten.is_encrypted_tensor(
input
):
input = encrypt_data_tensor_with_src(input)
# compute output
output = model(input)
if crypten.is_encrypted_tensor(output):
output = output.get_plain_text()
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if (i + 1) % print_freq == 0:
logging.info(
"\nTest: [{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f}) \t"
"Prec@5 {:.3f} ({:.3f})".format(
i + 1,
len(val_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
logging.info(
" * Prec@1 {:.3f} Prec@5 {:.3f}".format(top1.value(), top5.value())
)
return top1.value()
def save_checkpoint(state, is_best, filename="checkpoint.pth.tar"):
"""Saves checkpoint of plaintext model"""
# only save from rank 0 process to avoid race condition
rank = comm.get().get_rank()
if rank == 0:
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, "model_best.pth.tar")
def adjust_learning_rate(optimizer, epoch, lr=0.01):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
new_lr = lr * (0.1 ** (epoch // 5))
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].flatten().float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
class LeNet(nn.Sequential):
"""
Adaptation of LeNet that uses ReLU activations
"""
# network architecture:
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
| CrypTen-main | examples/mpc_cifar/mpc_cifar.py |
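The one-party-owns-the-model pattern in construct_private_model, reduced to a single-party sketch (shapes are illustrative; with world size 1, party 0 holds both the model and the input, and LeNet above is assumed to be in scope):
import crypten
import torch
crypten.init()
model = LeNet()
dummy_input = torch.empty(1, 3, 32, 32)  # CIFAR-10-shaped input
private_model = crypten.nn.from_pytorch(model, dummy_input).encrypt(src=0)
x_enc = crypten.cryptensor(torch.randn(1, 3, 32, 32))
out = private_model(x_enc).get_plain_text()
print(out.shape)  # torch.Size([1, 10])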
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_cifar example in multiprocess mode:
$ python3 examples/mpc_cifar/launcher.py \
--evaluate \
--resume path-to-model/model.pth.tar \
--batch-size 1 \
--print-freq 1 \
--skip-plaintext \
--multiprocess
To run mpc_cifar example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_cifar/mpc_cifar.py,\
path-to-model/model.pth.tar\
examples/mpc_cifar/launcher.py \
--evaluate \
--resume model.pth.tar \
--batch-size 1 \
--print-freq 1 \
--skip-plaintext
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen Cifar Training")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--epochs", default=25, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256)",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.001,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--weight-decay",
"--wd",
default=1e-6,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
)
parser.add_argument(
"--print-freq",
"-p",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--model-location",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--resume",
default=False,
action="store_true",
help="Resume training from latest checkpoint",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument("--lr-decay", default=0.1, type=float, help="lr decay factor")
parser.add_argument(
"--skip-plaintext",
default=False,
action="store_true",
help="Skip validation for plaintext network",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
# only import here to initialize crypten within the subprocesses
from mpc_cifar import run_mpc_cifar
# Only Rank 0 will display logs.
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
run_mpc_cifar(
args.epochs,
args.start_epoch,
args.batch_size,
args.lr,
args.momentum,
args.weight_decay,
args.print_freq,
args.model_location,
args.resume,
args.evaluate,
args.seed,
args.skip_plaintext,
)
def main(run_experiment):
args = parser.parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/mpc_cifar/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import time
import crypten
import torch
from crypten.config import cfg
def set_precision(bits):
cfg.encoder.precision_bits = bits
def online_learner(
sampler,
backend="mpc",
nr_iters=7,
score_func=None,
monitor_func=None,
checkpoint_func=None,
checkpoint_every=0,
):
"""
Online learner that minimizes linear least squared loss.
Args:
sampler: An iterator that returns one sample at a time. Samples are
assumed to be `dict`s with a `'context'` and a `'rewards'` field.
backend: Which privacy protocol to use (default 'mpc').
score_func: A closure that can be used to plug in exploration mechanisms.
monitor_func: A closure that does logging.
checkpoint_func: A closure that does checkpointing.
        nr_iters: Number of Newton-Raphson iterations to use for private
reciprocal.
"""
# initialize some variables:
total_reward = 0.0
# initialize constructor for tensors:
crypten.set_default_backend(backend)
# loop over dataset:
idx = 0
for sample in sampler():
start_t = time.time()
# unpack sample:
assert "context" in sample and "rewards" in sample, (
"invalid sample: %s" % sample
)
context = crypten.cryptensor(sample["context"])
num_features = context.nelement()
num_arms = sample["rewards"].nelement()
# initialization of model parameters:
if idx == 0:
# initialize accumulators for linear least squares:
A_inv = [torch.eye(num_features).unsqueeze(0) for _ in range(num_arms)]
A_inv = crypten.cat([crypten.cryptensor(A) for A in A_inv])
b = crypten.cryptensor(torch.zeros(num_arms, num_features))
# compute initial weights for all arms:
weights = b.unsqueeze(1).matmul(A_inv).squeeze(1)
# compute score of all arms:
scores = weights.matmul(context)
# plug in exploration mechanism:
if score_func is not None:
score_func(scores, A_inv, b, context)
onehot = scores.argmax()
# In practice only one party opens the onehot vector in order to
# take the action.
selected_arm = onehot.get_plain_text().argmax()
# Once the action is taken, the reward (a scalar) is observed by some
# party and secret shared. Here we simulate that by selecting the
# reward from the rewards vector and then sharing it.
reward = crypten.cryptensor(
(sample["rewards"][selected_arm] > random.random()).view(1).float()
)
# update linear least squares accumulators (using Sherman–Morrison
# formula):
A_inv_context = A_inv.matmul(context)
numerator = A_inv_context.unsqueeze(1).mul(A_inv_context.unsqueeze(2))
denominator = A_inv_context.matmul(context).add(1.0).view(-1, 1, 1)
with crypten.mpc.ConfigManager("reciprocal_nr_iters", nr_iters):
update = numerator.mul_(denominator.reciprocal())
A_inv.sub_(update.mul_(onehot.view(-1, 1, 1)))
b.add_(context.mul(reward).unsqueeze(0).mul_(onehot.unsqueeze(0)))
# update model weights:
weights = b.unsqueeze(1).matmul(A_inv).squeeze(1)
# monitor learning progress: we use the plain reward only for
# monitoring
reward = reward.get_plain_text().item()
total_reward += reward
iter_time = time.time() - start_t
if monitor_func is not None:
monitor_func(idx, reward, total_reward, iter_time)
idx += 1
# checkpointing:
if checkpoint_func is not None and idx % checkpoint_every == 0:
checkpoint_func(
idx,
{
"A_inv": [AA.get_plain_text() for AA in A_inv],
"b": [bb.get_plain_text() for bb in b],
},
)
# signal monitoring closure that we are done:
if monitor_func is not None:
monitor_func(idx, None, None, None, finished=True)
def epsilon_greedy(
sampler,
epsilon=0.0,
backend="mpc",
nr_iters=7,
precision=20,
monitor_func=None,
checkpoint_func=None,
checkpoint_every=0,
):
"""
Run epsilon-greedy linear least squares learner on dataset.
The `sampler` is expected to be an iterator that returns one sample at a time.
Samples are assumed to be `dict`s with a `'context'` and a `'rewards'` field.
    The function takes `epsilon`, `backend`, `nr_iters`, and `precision` as optional
arguments. It also takes an optional `monitor_func` closure that does logging,
and an optional `checkpoint_func` that does checkpointing.
"""
set_precision(precision)
# define scoring function
def score_func(scores, A_inv, b, context):
explore = crypten.bernoulli(torch.tensor([epsilon]))
rand_scores = crypten.rand(*scores.size())
scores.mul_(1 - explore).add_(rand_scores.mul(explore))
# run online learner:
online_learner(
sampler,
backend=backend,
score_func=score_func,
monitor_func=monitor_func,
checkpoint_func=checkpoint_func,
checkpoint_every=checkpoint_every,
nr_iters=nr_iters,
)
| CrypTen-main | examples/bandits/private_contextual_bandits.py |
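Both learners update A_inv via the Sherman–Morrison identity, (A + c c^T)^{-1} = A^{-1} - (A^{-1} c)(A^{-1} c)^T / (1 + c^T A^{-1} c) for symmetric A, which avoids any matrix inversion inside MPC. A plaintext check of the identity (plain torch, illustrative sizes):
import torch
d = 4
M = torch.eye(d, dtype=torch.double) + 0.1 * torch.rand(d, d, dtype=torch.double)
A = M @ M.t()                          # symmetric positive definite
A_inv = torch.inverse(A)
c = torch.rand(d, dtype=torch.double)
A_inv_c = A_inv.mv(c)
numerator = torch.outer(A_inv_c, A_inv_c)
denominator = A_inv_c.dot(c).add(1.0)
updated = A_inv - numerator / denominator
assert torch.allclose(updated, torch.inverse(A + torch.outer(c, c)), atol=1e-8)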
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import random
import time
import torch
def online_learner(
sampler,
dtype=torch.double,
device="cpu",
score_func=None,
monitor_func=None,
checkpoint_func=None,
checkpoint_every=0,
):
"""
Online learner that minimizes linear least squared loss.
The `sampler` is expected to be an iterator that returns one sample at a time.
Samples are assumed to be `dict`s with a `'context'` and a `'rewards'` field.
The function takes a `dtype` and `device` as optional arguments. It also
takes an optional `score_func` closure that can be used to plug in
exploration mechanisms, an optional `monitor_func` closure that does logging,
and an optional `checkpoint_func` that does checkpointing.
"""
# initialize some variables:
total_reward = 0.0
# loop over dataset:
idx = 0
for sample in sampler():
start_t = time.time()
# unpack sample:
assert "context" in sample and "rewards" in sample, (
"invalid sample: %s" % sample
)
context = sample["context"].to(dtype=dtype, device=device)
rewards = sample["rewards"].to(dtype=dtype, device=device)
num_features, num_arms = context.nelement(), rewards.nelement()
# initialization of model parameters:
if idx == 0:
# initialize accumulators for linear least squares:
A_inv = torch.stack(
[
torch.eye(num_features, dtype=dtype, device=device)
for _ in range(num_arms)
],
dim=0,
) # inv(X^T X + I)
b = torch.zeros(num_arms, num_features, dtype=dtype, device=device) # X^T r
# compute initial weights for all arms:
weights = torch.zeros((num_arms, num_features), dtype=dtype, device=device)
for arm in range(num_arms):
weights[arm, :] = b[arm, :].matmul(A_inv[arm, :, :])
# compute score of all arms:
score = torch.matmul(weights, context.view(num_features, 1)).squeeze()
# plug in exploration mechanism:
if score_func is not None:
score_func(score, A_inv, b, context)
# select highest-scoring arm (break ties randomly), and observe reward:
max_score = score.max()
indices = torch.nonzero(score == max_score)
selected_arm = random.choice(indices).item()
reward = float(rewards[selected_arm].item() > random.random())
# update linear least squares accumulators (using Sherman–Morrison formula):
A_inv_context = A_inv[selected_arm, :, :].mv(context)
numerator = torch.outer(A_inv_context, A_inv_context)
denominator = A_inv_context.dot(context).add(1.0)
A_inv[selected_arm, :, :].sub_(numerator.div_(denominator))
b[selected_arm, :].add_(context.mul(reward))
# update model weights:
weights[selected_arm, :] = b[selected_arm, :].matmul(A_inv[selected_arm, :, :])
# monitor learning progress:
total_reward += reward
iter_time = time.time() - start_t
if monitor_func is not None:
monitor_func(idx, reward, total_reward, iter_time)
idx += 1
# checkpointing:
if checkpoint_func is not None and idx % checkpoint_every == 0:
checkpoint_func(idx, {"A_inv": A_inv, "b": b})
# signal monitoring closure that we are done:
if monitor_func is not None:
monitor_func(idx, None, None, None, finished=True)
def epsilon_greedy(
sampler,
epsilon=0.0,
dtype=torch.double,
device="cpu",
monitor_func=None,
checkpoint_func=None,
checkpoint_every=0,
):
"""
Run epsilon-greedy linear least squares learner on dataset.
The `sampler` is expected to be an iterator that returns one sample at a time.
Samples are assumed to be `dict`s with a `'context'` and a `'rewards'` field.
    The function takes a hyperparameter `epsilon`, `dtype`, and `device` as optional
arguments. It also takes an optional `monitor_func` closure that does logging,
and an optional `checkpoint_func` that does checkpointing.
"""
# define scoring function:
def score_func(scores, A_inv, b, context):
# Implement as (p < epsilon) * scores + (p > epsilon) * random
# in order to match private version
explore = random.random() < epsilon
rand_scores = torch.rand_like(scores)
scores.mul_(1 - explore).add_(rand_scores.mul(explore))
# run online learner:
online_learner(
sampler,
dtype=dtype,
device=device,
score_func=score_func,
monitor_func=monitor_func,
checkpoint_func=checkpoint_func,
checkpoint_every=checkpoint_every,
)
def linucb(
sampler,
epsilon=0.1,
dtype=torch.double,
device="cpu",
monitor_func=None,
checkpoint_func=None,
checkpoint_every=0,
):
"""
Run LinUCB contextual bandit learner on dataset.
The `sampler` is expected to be an iterator that returns one sample at a time.
Samples are assumed to be `dict`s with a `'context'` and a `'rewards'` field.
    The function takes a hyperparameter `epsilon`, `dtype`, and `device` as optional
arguments. It also takes an optional `monitor_func` closure that does logging,
and an optional `checkpoint_func` that does checkpointing.
Implementation following https://arxiv.org/pdf/1003.0146.pdf
"""
# define UCB scoring function:
def score_func(scores, A_inv, b, context):
for arm in range(scores.nelement()):
scores[arm] += (
context.matmul(A_inv[arm, :, :]).dot(context).sqrt_().mul_(epsilon)
)
# run online learner:
online_learner(
sampler,
dtype=dtype,
device=device,
score_func=score_func,
monitor_func=monitor_func,
checkpoint_func=checkpoint_func,
checkpoint_every=checkpoint_every,
)
| CrypTen-main | examples/bandits/plain_contextual_bandits.py |
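Both epsilon_greedy variants blend model scores with random scores through a single Bernoulli multiplier, so the private version never needs a data-dependent branch. The same trick in the clear:
import random
import torch
scores = torch.tensor([0.2, 0.9, 0.4])
epsilon = 0.1
explore = random.random() < epsilon        # one Bernoulli draw per round
rand_scores = torch.rand_like(scores)
# explore == 0 keeps the model scores; explore == 1 replaces them entirely
blended = scores.mul(1 - explore).add(rand_scores.mul(explore))
arm = blended.argmax().item()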
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run bandits example in multiprocess mode:
$ python3 examples/bandits/membership_inference.py --multiprocess
To run bandits example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/bandits/launcher.py \
examples/bandits/membership_inference.py
"""
import argparse
import logging
import os
import pickle
import examples.util
import torch
import visdom
from examples.multiprocess_launcher import MultiProcessLauncher
def compute_rewards(weights, dataset, epsilon=0.0):
"""
Perform inference using epsilon-greedy contextual bandit (without updates).
"""
context, rewards = dataset
context = context.type(torch.float32)
# compute scores:
scores = torch.matmul(weights, context.t()).squeeze()
explore = (torch.rand(scores.shape[1]) < epsilon).type(torch.float32)
rand_scores = torch.rand_like(scores)
scores.mul_(1 - explore).add_(rand_scores.mul(explore))
# select arm and observe reward:
selected_arms = scores.argmax(dim=0)
return rewards[range(rewards.shape[0]), selected_arms]
def membership_accuracy(model, positive_set, negative_set, epsilon=0.0):
"""
Measure accuracy of membership inference attacks on model using the specified
positive and negative data sets.
"""
# compute weights for all arms:
weights = model["b"].unsqueeze(1).matmul(model["A_inv"]).squeeze(1)
weights = weights.type(torch.float32)
# compute rewards for both sets:
rewards = {
"positive": compute_rewards(weights, positive_set, epsilon=epsilon),
"negative": compute_rewards(weights, negative_set, epsilon=epsilon),
}
def p_reward(x):
return torch.sum(x).type(torch.float32) / x.numel()
p_reward_pos = p_reward(rewards["positive"])
p_reward_neg = p_reward(rewards["negative"])
advantage = (p_reward_pos - p_reward_neg).abs().item()
return advantage
def parse_args():
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser(description="Perform membership inference attacks")
parser.add_argument(
"--pca", default=20, type=int, help="Number of PCA dimensions (0 for raw data)"
)
parser.add_argument(
"--number_arms",
default=None,
type=int,
help="create arbitrary number of arms via k-means",
)
parser.add_argument(
"--bandwidth",
default=1.0,
type=float,
help="bandwidth of kernel used to assign rewards",
)
parser.add_argument(
"--checkpoint_folder",
default=None,
type=str,
help="folder from which to load checkpointed models",
)
parser.add_argument(
"--permfile", default=None, type=str, help="file with sampling permutation"
)
parser.add_argument(
"--epsilon",
default=0.01,
type=float,
help="exploration parameter (default = 0.01)",
)
parser.add_argument(
"--savefile", default=None, type=str, help="file to pickle advantages"
)
parser.add_argument(
"--visualize", action="store_true", help="visualize results with visdom"
)
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
return parser.parse_args()
def membership_inference(args, load_data_module, download_mnist):
# load clusters:
clusters = None
if args.number_arms is not None:
clusters_file = "clusters_K=%d_pca=%d.torch" % (args.number_arms, args.pca)
clusters_file = os.path.join(load_data_module.MEMOIZE_FOLDER, clusters_file)
logging.info("Loading clusters from file...")
clusters = torch.load(clusters_file)
# load dataset:
train_data, _ = load_data_module.load_data(
split="train", download_mnist_func=download_mnist
)
components = examples.util.pca(train_data, args.pca)
positive_set = load_data_module.load_data(
split="train",
pca=components,
clusters=clusters,
bandwidth=args.bandwidth,
download_mnist_func=download_mnist,
)
negative_set = load_data_module.load_data(
split="test",
pca=components,
clusters=clusters,
bandwidth=args.bandwidth,
download_mnist_func=download_mnist,
)
# get list of checkpoints:
model_files = [
os.path.join(args.checkpoint_folder, filename)
for filename in os.listdir(args.checkpoint_folder)
if filename.endswith(".torch")
]
model_files = sorted(model_files)
iterations = [int(os.path.splitext(f)[0].split("_")[-1]) for f in model_files]
# load permutation used in training:
perm = load_data_module.load_data_sampler(
permfile=args.permfile, download_mnist_func=download_mnist
)
def subset(dataset, iteration):
ids = perm[:iteration]
return tuple(d[ids, :] for d in dataset)
    # measure accuracies of membership inference attacks:
advantage = [
membership_accuracy(
torch.load(model_file),
subset(positive_set, iteration),
negative_set,
epsilon=args.epsilon,
)
for model_file, iteration in zip(model_files, iterations)
]
# save advantages to file:
if args.savefile is not None:
with open(args.savefile, "wb") as fid:
pickle.dump(advantage, fid)
# plot advantages:
if args.visualize:
opts = {
"xlabel": "Number of iterations",
"ylabel": "Accuracy of inference attack",
}
        # visdom plots through a client instance, with Y before X:
        visdom.Visdom().line(advantage, iterations, opts=opts)
def _run_experiment(args):
import launcher
membership_inference(args, launcher, launcher.download_mnist)
def main(run_experiment):
# parse command-line arguments:
args = parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/bandits/membership_inference.py |
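The advantage computed by membership_accuracy is the gap in reward rates between members and non-members of the training set. A toy check with synthetic reward vectors (illustrative only):
import torch
member_rewards = torch.tensor([1, 1, 0, 1])     # 75% reward rate
nonmember_rewards = torch.tensor([1, 0, 0, 0])  # 25% reward rate
def p_reward(x):
    return torch.sum(x).type(torch.float32) / x.numel()
advantage = (p_reward(member_rewards) - p_reward(nonmember_rewards)).abs().item()
print(advantage)  # 0.5: the attacker separates members from non-members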
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run bandits example in multiprocess mode:
$ python3 examples/bandits/launcher.py --multiprocess
To run bandits example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/bandits/plain_contextual_bandits.py,\
examples/bandits/private_contextual_bandits.py \
examples/bandits/launcher.py
"""
import argparse
import logging
import os
import random
import examples.util
import torch
import visdom
from examples.multiprocess_launcher import MultiProcessLauncher
from examples.util import NoopContextManager, process_mnist_files
from torchvision.datasets.mnist import MNIST
def learning_curve(visualizer, idx, value, window=None, title=""):
"""
Appends new value to learning curve, creating new curve if none exists.
"""
opts = {"title": title, "xlabel": "Number of samples", "ylabel": "Reward value"}
window = visualizer.line(
value.view(value.nelement(), 1),
idx,
update=None if window is None else "append",
opts=opts,
win=window,
env="contextual_bandits",
)
return window
def download_mnist(split="train"):
"""
Loads split from the MNIST dataset and returns data.
"""
train = split == "train"
    # If we need to download and uncompress the MNIST dataset,
    # each process must use a separate directory.
mnist_exists = os.path.exists(
os.path.join(
"/tmp/MNIST/processed", MNIST.training_file if train else MNIST.test_file
)
)
if mnist_exists:
mnist_root = "/tmp"
else:
rank = "0" if "RANK" not in os.environ else os.environ["RANK"]
mnist_root = os.path.join("tmp", "bandits", rank)
os.makedirs(mnist_root, exist_ok=True)
# download the MNIST dataset:
with NoopContextManager():
mnist = MNIST(mnist_root, download=not mnist_exists, train=train)
return mnist
def load_data(
split="train",
pca=None,
clusters=None,
bandwidth=1.0,
download_mnist_func=download_mnist,
):
"""
Loads split from the MNIST dataset and returns data.
"""
# download the MNIST dataset:
mnist = download_mnist_func(split)
# preprocess the MNIST dataset:
context = mnist.data.float().div_(255.0)
context = context.view(context.size(0), -1)
# apply PCA:
if pca is not None:
context -= torch.mean(context, dim=0, keepdim=True)
context = context.matmul(pca)
context /= torch.norm(context, dim=1, keepdim=True)
# compute rewards (based on clustering if clusters defined, 0-1 otherwise):
if clusters is not None:
assert clusters.size(1) == context.size(
1
), "cluster dimensionality does not match data dimensionality"
rewards = examples.util.kmeans_inference(
context, clusters, hard=False, bandwidth=bandwidth
)
else:
rewards = examples.util.onehot(mnist.targets.long())
# return data:
return context, rewards
def load_data_sampler(
split="train",
pca=None,
clusters=None,
bandwidth=1.0,
permfile=None,
download_mnist_func=download_mnist,
):
"""
Loads split from the MNIST dataset and returns sampler.
"""
# load dataset:
context, rewards = load_data(
split=split,
pca=pca,
clusters=clusters,
bandwidth=bandwidth,
download_mnist_func=download_mnist_func,
)
if permfile is not None:
perm = torch.load(permfile)
assert perm.shape[0] == context.shape[0], "Incorrect perm size for context."
else:
perm = torch.randperm(context.size(0))
# define simple dataset sampler:
def sampler():
idx = 0
while idx < context.size(0):
yield {"context": context[perm[idx], :], "rewards": rewards[perm[idx], :]}
idx += 1
# return sampler:
return sampler
def parse_args(hostname):
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser(
description="Train contextual bandit model using encrypted learning signal"
)
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--plaintext", action="store_true", help="use a non-private algorithm"
)
parser.add_argument(
"--backend", default="mpc", type=str, help="crypten backend: mpc (default)"
)
parser.add_argument(
"--mnist-split",
default="train",
type=str,
help="The split from the MNIST dataset (default = train)",
)
parser.add_argument(
"--mnist-dir",
default=None,
type=str,
help="path to the dir of MNIST raw data files",
)
parser.add_argument(
"--learner",
default="epsilon_greedy",
type=str,
help="learning algorithm: epsilon_greedy or linucb",
)
parser.add_argument(
"--epsilon",
default=0.01,
type=float,
help="exploration parameter (default = 0.01)",
)
parser.add_argument(
"--visualize", action="store_true", help="visualize results with visdom"
)
parser.add_argument(
"--visdom",
default=hostname,
type=str,
help="visdom server to use (default = %s)" % hostname,
)
parser.add_argument(
"--pca", default=20, type=int, help="Number of PCA dimensions (0 for raw data)"
)
parser.add_argument(
"--precision",
default=20,
type=int,
help="Bits of precision for encoding floats.",
)
parser.add_argument(
"--nr_iters",
default=7,
type=int,
help="Newton-Rhapson iterations for mpc reciprocal",
)
parser.add_argument(
"--number_arms",
default=None,
type=int,
help="create arbitrary number of arms via k-means",
)
parser.add_argument(
"--bandwidth",
default=1.0,
type=float,
help="bandwidth of kernel used to assign rewards",
)
parser.add_argument(
"--memoize_folder",
default="/tmp/kmeans",
type=str,
help="folder to save k-means clusters",
)
parser.add_argument(
"--checkpoint_folder",
default=None,
type=str,
help="folder in which to checkpoint models",
)
parser.add_argument(
"--checkpoint_every",
default=1000,
type=int,
help="checkpoint every K iterations",
)
parser.add_argument(
"--permfile", default=None, type=str, help="file with sampling permutation"
)
parser.add_argument("--seed", default=None, type=int, help="Seed the torch rng")
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
return parser.parse_args()
def get_monitor_func(args, buffers, visualizer, window, title, progress_iter):
"""
Return closure that performs monitoring.
"""
def monitor_func(idx, reward, total_reward, iter_time, finished=False):
def mean(vals):
return torch.DoubleTensor(vals).mean().item()
# flush buffers:
if finished:
for key, val in buffers.items():
buffers[key] = [item for item in val if item is not None]
if finished or (idx > 0 and idx % progress_iter == 0):
logging.info(
"Sample %s; average reward = %2.5f, time %.3f (sec/iter) "
% (idx, mean(buffers["reward"]), mean(buffers["iter_time"]))
)
if args.visualize:
window[0] = learning_curve(
visualizer,
torch.tensor(buffers["idx"], dtype=torch.long),
torch.DoubleTensor(buffers["cumulative_reward"]),
window=window[0],
title=title,
)
for key in buffers.keys():
buffers[key] = [None] * progress_iter
# fill buffers:
if idx is not None:
cur_idx = idx % progress_iter
buffers["idx"][cur_idx] = idx
buffers["reward"][cur_idx] = reward
buffers["cumulative_reward"][cur_idx] = total_reward
buffers["iter_time"][cur_idx] = iter_time
return monitor_func
def get_checkpoint_func(args):
"""
Return closure that performs checkpointing.
"""
def checkpoint_func(idx, model):
if "RANK" not in os.environ or os.environ["RANK"] == 0:
if args.checkpoint_folder is not None:
checkpoint_file = os.path.join(
args.checkpoint_folder, "iter_%05d.torch" % idx
)
torch.save(model, checkpoint_file)
return checkpoint_func
def build_learner(args, bandits, download_mnist):
# set up loggers:
logger = logging.getLogger()
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logger.setLevel(level)
visualizer = visdom.Visdom(args.visdom) if args.visualize else None
# allow comparisons between plain and private algorithm:
if args.seed is not None:
torch.manual_seed(args.seed)
random.seed(args.seed)
if args.plaintext:
logging.info("Using plain text bandit")
kwargs = {"dtype": torch.double, "device": "cpu"}
else:
logging.info(f"Using encrypted bandit with {args.backend}")
kwargs = {
"backend": args.backend,
"precision": args.precision,
"nr_iters": args.nr_iters,
}
# set up variables for progress monitoring:
window = [None]
title = "Cumulative reward (encrypted %s, epsilon = %2.2f)" % (
args.learner,
args.epsilon,
)
progress_iter = 100
buffers = {
key: [None] * progress_iter
for key in ["idx", "reward", "cumulative_reward", "iter_time"]
}
# closures that perform progress monitoring and checkpointing:
monitor_func = get_monitor_func(
args, buffers, visualizer, window, title, progress_iter
)
checkpoint_func = get_checkpoint_func(args)
# compute pca:
context, _ = load_data(
split=args.mnist_split, pca=None, download_mnist_func=download_mnist
)
pca = examples.util.pca(context, args.pca)
# create or load clustering if custom number of arms is used:
clusters = None
if args.number_arms is not None:
clusters_file = "clusters_K=%d_pca=%d.torch" % (args.number_arms, args.pca)
clusters_file = os.path.join(args.memoize_folder, clusters_file)
# load precomputed clusters from file:
if os.path.exists(clusters_file):
logging.info("Loading clusters from file...")
clusters = torch.load(clusters_file)
else:
# load data and allocate clusters:
context, _ = load_data(
split=args.mnist_split, pca=pca, download_mnist_func=download_mnist
)
clusters = context.new((args.number_arms, context.size(1)))
# run clustering in process 0:
if (
not torch.distributed.is_initialized()
or torch.distributed.get_rank() == 0
):
logging.info("Performing clustering to get arms...")
clusters = examples.util.kmeans(context, args.number_arms)
torch.save(clusters, clusters_file)
# if run is distributed, synchronize clusters:
if torch.distributed.is_initialized():
torch.distributed.barrier()
torch.distributed.broadcast(clusters, 0)
# run contextual bandit algorithm on MNIST:
sampler = load_data_sampler(
split=args.mnist_split,
pca=pca,
clusters=clusters,
bandwidth=args.bandwidth,
permfile=args.permfile,
download_mnist_func=download_mnist,
)
assert hasattr(bandits, args.learner), "unknown learner: %s" % args.learner
def learner_func():
getattr(bandits, args.learner)(
sampler,
epsilon=args.epsilon,
monitor_func=monitor_func,
checkpoint_func=checkpoint_func,
checkpoint_every=args.checkpoint_every,
**kwargs,
)
return learner_func
def _run_experiment(args):
if args.plaintext:
import plain_contextual_bandits as bandits
else:
import private_contextual_bandits as bandits
learner_func = build_learner(args, bandits, download_mnist)
import crypten
crypten.init()
learner_func()
def main(run_experiment):
"""
Runs encrypted contextual bandits learning experiment on MNIST.
"""
# parse input arguments:
args = parse_args(os.environ.get("HOSTNAME", "localhost"))
if args.mnist_dir is not None:
process_mnist_files(args.mnist_dir, "/tmp/MNIST/processed")
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
# run all the things:
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/bandits/launcher.py |
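load_data_sampler returns a factory whose generator yields {'context', 'rewards'} dicts, which is the contract every learner above assumes. A sketch of the same contract with synthetic data (make_sampler is an illustrative stand-in):
import torch
def make_sampler(context, rewards):
    def sampler():
        for i in torch.randperm(context.size(0)).tolist():
            yield {"context": context[i, :], "rewards": rewards[i, :]}
    return sampler
context = torch.randn(5, 20)   # 5 samples, 20 features
rewards = torch.rand(5, 10)    # 10 arms
for sample in make_sampler(context, rewards)():
    assert sample["context"].nelement() == 20
    assert sample["rewards"].nelement() == 10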
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import shutil
import tempfile
import time
import warnings
import crypten
import crypten.communicator as comm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from examples.meters import AverageMeter
from examples.util import NoopContextManager, process_mnist_files
from torchvision import datasets, transforms
def run_tfe_benchmarks(
network="B",
epochs=5,
start_epoch=0,
batch_size=256,
lr=0.01,
momentum=0.9,
weight_decay=1e-6,
print_freq=10,
resume="",
evaluate=True,
seed=None,
skip_plaintext=False,
save_checkpoint_dir="/tmp/tfe_benchmarks",
save_modelbest_dir="/tmp/tfe_benchmarks_best",
context_manager=None,
mnist_dir=None,
):
crypten.init()
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
# create model
model = create_benchmark_model(network)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
model.parameters(), lr, momentum=momentum, weight_decay=weight_decay
)
# optionally resume from a checkpoint
best_prec1 = 0
if resume:
if os.path.isfile(resume):
logging.info("=> loading checkpoint '{}'".format(resume))
checkpoint = torch.load(resume)
start_epoch = checkpoint["epoch"]
best_prec1 = checkpoint["best_prec1"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logging.info(
"=> loaded checkpoint '{}' (epoch {})".format(
resume, checkpoint["epoch"]
)
)
else:
logging.info("=> no checkpoint found at '{}'".format(resume))
# Loading MNIST. Normalizing per pytorch/examples/blob/master/mnist/main.py
def preprocess_data(context_manager, data_dirname):
if mnist_dir is not None:
process_mnist_files(
mnist_dir, os.path.join(data_dirname, "MNIST", "processed")
)
download = False
else:
download = True
with context_manager:
if not evaluate:
mnist_train = datasets.MNIST(
data_dirname,
download=download,
train=True,
transform=transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
),
)
mnist_test = datasets.MNIST(
data_dirname,
download=download,
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
train_loader = (
torch.utils.data.DataLoader(
mnist_train, batch_size=batch_size, shuffle=True
)
if not evaluate
else None
)
test_loader = torch.utils.data.DataLoader(
mnist_test, batch_size=batch_size, shuffle=False
)
return train_loader, test_loader
if context_manager is None:
context_manager = NoopContextManager()
warnings.filterwarnings("ignore")
data_dir = tempfile.TemporaryDirectory()
train_loader, val_loader = preprocess_data(context_manager, data_dir.name)
flatten = False
if network == "A":
flatten = True
if evaluate:
if not skip_plaintext:
logging.info("===== Evaluating plaintext benchmark network =====")
validate(val_loader, model, criterion, print_freq, flatten=flatten)
private_model = create_private_benchmark_model(model, flatten=flatten)
logging.info("===== Evaluating Private benchmark network =====")
validate(val_loader, private_model, criterion, print_freq, flatten=flatten)
# validate_side_by_side(val_loader, model, private_model, flatten=flatten)
return
os.makedirs(save_checkpoint_dir, exist_ok=True)
os.makedirs(save_modelbest_dir, exist_ok=True)
for epoch in range(start_epoch, epochs):
adjust_learning_rate(optimizer, epoch, lr)
# train for one epoch
train(
train_loader,
model,
criterion,
optimizer,
epoch,
print_freq,
flatten=flatten,
)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, print_freq, flatten=flatten)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_file = "checkpoint_bn" + network + ".pth.tar"
model_best_file = "model_best_bn" + network + ".pth.tar"
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "Benchmark" + network,
"state_dict": model.state_dict(),
"best_prec1": best_prec1,
"optimizer": optimizer.state_dict(),
},
is_best,
filename=os.path.join(save_checkpoint_dir, checkpoint_file),
model_best=os.path.join(save_modelbest_dir, model_best_file),
)
data_dir.cleanup()
shutil.rmtree(save_checkpoint_dir)
def train(
train_loader, model, criterion, optimizer, epoch, print_freq=10, flatten=False
):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# compute output
if flatten:
input = input.view(input.size(0), -1)
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if i % print_freq == 0:
logging.info(
"Epoch: [{}][{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f})\t"
"Prec@5 {:.3f} ({:.3f})".format(
epoch,
i,
len(train_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
def validate_side_by_side(val_loader, plaintext_model, private_model, flatten=False):
# switch to evaluate mode
plaintext_model.eval()
private_model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
# compute output
if flatten:
input = input.view(input.size(0), -1)
output0 = plaintext_model(input)
encr_input = crypten.cryptensor(input)
output1 = private_model(encr_input)
logging.info("==============================")
logging.info("Example %d\t target = %d" % (i, target))
logging.info("Plaintext:\n%s" % output0)
logging.info("Encrypted:\n%s\n" % output1.get_plain_text())
if i > 1000:
break
def validate(val_loader, model, criterion, print_freq=10, flatten=False):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
# compute output
if flatten:
input = input.view(input.size(0), -1)
if isinstance(model, crypten.nn.Module) and not crypten.is_encrypted_tensor(
input
):
input = crypten.cryptensor(input)
output = model(input)
if crypten.is_encrypted_tensor(output):
output = output.get_plain_text()
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if (i + 1) % print_freq == 0:
logging.info(
"\nTest: [{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f}) \t"
"Prec@5 {:.3f} ({:.3f})".format(
i + 1,
len(val_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
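            # stop after ~100 mini-batches, presumably to keep (encrypted)
            # evaluation runs short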
if i > 100:
break
logging.info(
" * Prec@1 {:.3f} Prec@5 {:.3f}".format(top1.value(), top5.value())
)
return top1.value()
def save_checkpoint(
state, is_best, filename="checkpoint.pth.tar", model_best="model_best.pth.tar"
):
# TODO: use crypten.save_from_party() in future.
rank = comm.get().get_rank()
# only save for process rank = 0
if rank == 0:
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, model_best)
def adjust_learning_rate(optimizer, epoch, lr=0.01):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
new_lr = lr * (0.1 ** (epoch // 5))
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].flatten().float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
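# Example usage of accuracy() with illustrative shapes:
#   output = torch.randn(8, 10)           # logits for 8 samples, 10 classes
#   target = torch.randint(0, 10, (8,))   # ground-truth labels
#   prec1, prec5 = accuracy(output, target, topk=(1, 5))
#   # prec1 and prec5 are 1-element tensors holding percentages in [0, 100]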
def create_benchmark_model(benchmark):
if benchmark == "A":
return NetworkA()
elif benchmark == "B":
return NetworkB()
elif benchmark == "C":
return NetworkC()
else:
raise RuntimeError("Invalid benchmark network")
def create_private_benchmark_model(model, flatten=False):
dummy_input = torch.empty((1, 1, 28, 28))
if flatten:
dummy_input = torch.empty((1, 28 * 28))
private_model = crypten.nn.from_pytorch(model, dummy_input)
private_model.encrypt()
return private_model
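# Note: crypten.nn.from_pytorch traces the model on the dummy input, so the
# converted graph is specialized to that input shape; encrypt() then
# secret-shares the model parameters across the parties.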
class NetworkA(nn.Module):
def __init__(self):
super(NetworkA, self).__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 10)
self.batchnorm1 = nn.BatchNorm1d(128)
self.batchnorm2 = nn.BatchNorm1d(128)
def forward(self, x):
out = self.fc1(x)
out = self.batchnorm1(out)
out = F.relu(out)
out = self.fc2(out)
out = self.batchnorm2(out)
out = F.relu(out)
out = self.fc3(out)
return out
class NetworkB(nn.Module):
def __init__(self):
super(NetworkB, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0)
self.conv2 = nn.Conv2d(16, 16, kernel_size=5, padding=0)
self.fc1 = nn.Linear(16 * 4 * 4, 100)
self.fc2 = nn.Linear(100, 10)
self.batchnorm1 = nn.BatchNorm2d(16)
self.batchnorm2 = nn.BatchNorm2d(16)
self.batchnorm3 = nn.BatchNorm1d(100)
def forward(self, x):
out = self.conv1(x)
out = self.batchnorm1(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = self.conv2(out)
out = self.batchnorm2(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
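        # spatial sizes: 28x28 -> conv5 -> 24x24 -> pool2 -> 12x12
        #                       -> conv5 -> 8x8   -> pool2 -> 4x4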
out = out.view(-1, 16 * 4 * 4)
out = self.fc1(out)
out = self.batchnorm3(out)
out = F.relu(out)
out = self.fc2(out)
return out
class NetworkC(nn.Module):
def __init__(self):
super(NetworkC, self).__init__()
self.conv1 = nn.Conv2d(1, 20, kernel_size=5, padding=0)
self.conv2 = nn.Conv2d(20, 50, kernel_size=5, padding=0)
self.fc1 = nn.Linear(50 * 4 * 4, 500)
self.fc2 = nn.Linear(500, 10)
self.batchnorm1 = nn.BatchNorm2d(20)
self.batchnorm2 = nn.BatchNorm2d(50)
self.batchnorm3 = nn.BatchNorm1d(500)
def forward(self, x):
out = self.conv1(x)
out = self.batchnorm1(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = self.conv2(out)
out = self.batchnorm2(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
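        # same spatial reduction as NetworkB: 28x28 -> 24x24 -> 12x12 -> 8x8 -> 4x4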
out = out.view(-1, 50 * 4 * 4)
out = self.fc1(out)
out = self.batchnorm3(out)
out = F.relu(out)
out = self.fc2(out)
return out
| CrypTen-main | examples/tfe_benchmarks/tfe_benchmarks.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run tfe_benchmarks example in multiprocess mode:
$ python3 examples/tfe_benchmarks/launcher.py --multiprocess
To run tfe_benchmarks example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/tfe_benchmarks/tfe_benchmarks.py \
examples/tfe_benchmarks/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen TFEncrypted Benchmarks")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--network",
default="B",
type=str,
help="choose from networks A, B and C (default: B)",
)
parser.add_argument(
"--epochs", default=5, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256)",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.01,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--weight-decay",
"--wd",
default=1e-6,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
)
parser.add_argument(
"--print-freq",
"-p",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--save-checkpoint-dir",
default="/tmp/tfe_benchmarks",
type=str,
metavar="SAVE",
help="path to the dir to save checkpoint (default: /tmp/tfe_benchmarks)",
)
parser.add_argument(
"--save-modelbest-dir",
default="/tmp/tfe_benchmarks_best",
type=str,
metavar="SAVE",
help="path to the dir to save the best model (default: /tmp/tfe_benchmarks_best)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument("--lr-decay", default=0.1, type=float, help="lr decay factor")
parser.add_argument(
"--skip-plaintext",
default=False,
action="store_true",
help="Skip validation for plaintext network",
)
parser.add_argument(
"--mnist-dir",
default=None,
type=str,
metavar="MNIST",
help="path to the dir of MNIST raw data files",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
# only import here to initialize crypten within the subprocesses
from tfe_benchmarks import run_tfe_benchmarks
# Only Rank 0 will display logs.
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
run_tfe_benchmarks(
args.network,
args.epochs,
args.start_epoch,
args.batch_size,
args.lr,
args.momentum,
args.weight_decay,
args.print_freq,
args.resume,
args.evaluate,
args.seed,
args.skip_plaintext,
os.path.join(args.save_checkpoint_dir, os.environ.get("RANK", "")),
os.path.join(args.save_modelbest_dir, os.environ.get("RANK", "")),
mnist_dir=args.mnist_dir,
)
def main(run_experiment):
args = parser.parse_args()
os.makedirs(args.save_checkpoint_dir, exist_ok=True)
os.makedirs(args.save_modelbest_dir, exist_ok=True)
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/tfe_benchmarks/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import tempfile
import crypten
import torch
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from examples.meters import AccuracyMeter
from examples.util import NoopContextManager
try:
from crypten.nn.tensorboard import SummaryWriter
except ImportError: # tensorboard not installed
SummaryWriter = None
def run_experiment(
model_name,
imagenet_folder=None,
tensorboard_folder="/tmp",
num_samples=None,
context_manager=None,
):
"""Runs inference using specified vision model on specified dataset."""
crypten.init()
# check inputs:
assert hasattr(models, model_name), (
"torchvision does not provide %s model" % model_name
)
if imagenet_folder is None:
imagenet_folder = tempfile.gettempdir()
download = True
else:
download = False
if context_manager is None:
context_manager = NoopContextManager()
# load dataset and model:
with context_manager:
model = getattr(models, model_name)(pretrained=True)
model.eval()
dataset = datasets.ImageNet(imagenet_folder, split="val", download=download)
# define appropriate transforms:
transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
to_tensor_transform = transforms.ToTensor()
# encrypt model:
dummy_input = to_tensor_transform(dataset[0][0])
dummy_input.unsqueeze_(0)
encrypted_model = crypten.nn.from_pytorch(model, dummy_input=dummy_input)
encrypted_model.encrypt()
# show encrypted model in tensorboard:
if SummaryWriter is not None:
writer = SummaryWriter(log_dir=tensorboard_folder)
writer.add_graph(encrypted_model)
writer.close()
# loop over dataset:
meter = AccuracyMeter()
for idx, sample in enumerate(dataset):
# preprocess sample:
image, target = sample
image = transform(image)
image.unsqueeze_(0)
target = torch.tensor([target], dtype=torch.long)
# perform inference using encrypted model on encrypted sample:
encrypted_image = crypten.cryptensor(image)
encrypted_output = encrypted_model(encrypted_image)
# measure accuracy of prediction
output = encrypted_output.get_plain_text()
meter.add(output, target)
# progress:
logging.info(
"[sample %d of %d] Accuracy: %f" % (idx + 1, len(dataset), meter.value()[1])
)
if num_samples is not None and idx == num_samples - 1:
break
# print final accuracy:
logging.info("Accuracy on all %d samples: %f" % (len(dataset), meter.value()[1]))
| CrypTen-main | examples/mpc_imagenet/mpc_imagenet.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_imagenet example in multiprocess mode:
$ python3 examples/mpc_imagenet/launcher.py --multiprocess
To run mpc_imagenet example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_imagenet/mpc_imagenet.py \
examples/mpc_imagenet/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
from mpc_imagenet import run_experiment
# input arguments:
parser = argparse.ArgumentParser(description="Encrypted inference of vision models")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--model",
default="resnet18",
type=str,
help="torchvision model to use for inference (default: resnet18)",
)
parser.add_argument(
"--imagenet_folder",
default=None,
type=str,
help="folder containing the ImageNet dataset",
)
parser.add_argument(
"--tensorboard_folder",
default="/tmp",
type=str,
help="folder in which tensorboard performs logging (default: /tmp)",
)
parser.add_argument(
"--num_samples",
default=None,
type=int,
help="number of samples to test on (default: all)",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
# only worker with rank 0 will display logging information:
level = logging.INFO
rank = "0"
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
rank = os.environ["RANK"]
logging.getLogger().setLevel(level)
tensorboard_folder = "/tmp/mpc_imagenet/" + rank
os.makedirs(tensorboard_folder, exist_ok=True)
run_experiment(
args.model,
imagenet_folder=args.imagenet_folder,
tensorboard_folder=tensorboard_folder,
num_samples=args.num_samples,
)
def main():
args = parser.parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, _run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
_run_experiment(args)
if __name__ == "__main__":
main()
| CrypTen-main | examples/mpc_imagenet/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_linear_svm example in multiprocess mode:
$ python3 examples/mpc_linear_svm/launcher.py --multiprocess
To run mpc_linear_svm example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_linear_svm/mpc_linear_svm.py \
examples/mpc_linear_svm/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen Linear SVM Training")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--epochs", default=50, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--examples", default=50, type=int, metavar="N", help="number of examples per epoch"
)
parser.add_argument(
"--features",
default=100,
type=int,
metavar="N",
help="number of features per example",
)
parser.add_argument(
"--lr", "--learning-rate", default=0.5, type=float, help="initial learning rate"
)
parser.add_argument(
"--skip_plaintext",
default=False,
action="store_true",
help="skip evaluation for plaintext svm",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
logging.basicConfig(
level=level,
format="%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s",
)
from mpc_linear_svm import run_mpc_linear_svm
run_mpc_linear_svm(
args.epochs, args.examples, args.features, args.lr, args.skip_plaintext
)
def main(run_experiment):
args = parser.parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/mpc_linear_svm/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
import crypten
import torch
from examples.meters import AverageMeter
def train_linear_svm(features, labels, epochs=50, lr=0.5, print_time=False):
# Initialize random weights
w = features.new(torch.randn(1, features.size(0)))
b = features.new(torch.randn(1))
if print_time:
pt_time = AverageMeter()
end = time.time()
for epoch in range(epochs):
# Forward
label_predictions = w.matmul(features).add(b).sign()
# Compute accuracy
correct = label_predictions.mul(labels)
accuracy = correct.add(1).div(2).mean()
if crypten.is_encrypted_tensor(accuracy):
accuracy = accuracy.get_plain_text()
        # print accuracy from rank 0 only (all parties hold the same plaintext value)
if crypten.communicator.get().get_rank() == 0:
            print(
                f"Epoch {epoch} --- Training Accuracy {accuracy.item() * 100:.2f}%"
            )
# Backward
        # hinge-loss gradient: (1 - correct) / 2 is 1 for misclassified
        # examples and 0 otherwise, so loss_grad = -label on violations
        loss_grad = -labels * (1 - correct) * 0.5
b_grad = loss_grad.mean()
w_grad = loss_grad.matmul(features.t()).div(loss_grad.size(1))
# Update
w -= w_grad * lr
b -= b_grad * lr
if print_time:
iter_time = time.time() - end
pt_time.add(iter_time)
logging.info(" Time %.6f (%.6f)" % (iter_time, pt_time.value()))
end = time.time()
return w, b
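# Minimal usage sketch (illustrative sizes; assumes crypten.init() was called):
#   x = crypten.cryptensor(torch.randn(100, 50))       # 100 features, 50 examples
#   y = crypten.cryptensor(torch.randn(1, 50).sign())  # labels in {-1, +1}
#   w, b = train_linear_svm(x, y, epochs=10, lr=0.5)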
def evaluate_linear_svm(features, labels, w, b):
"""Compute accuracy on a test set"""
predictions = w.matmul(features).add(b).sign()
correct = predictions.mul(labels)
accuracy = correct.add(1).div(2).mean().get_plain_text()
if crypten.communicator.get().get_rank() == 0:
print("Test accuracy %.2f%%" % (accuracy.item() * 100))
def run_mpc_linear_svm(
epochs=50, examples=50, features=100, lr=0.5, skip_plaintext=False
):
crypten.init()
# Set random seed for reproducibility
torch.manual_seed(1)
# Initialize x, y, w, b
x = torch.randn(features, examples)
w_true = torch.randn(1, features)
b_true = torch.randn(1)
y = w_true.matmul(x) + b_true
y = y.sign()
if not skip_plaintext:
logging.info("==================")
logging.info("PyTorch Training")
logging.info("==================")
w_torch, b_torch = train_linear_svm(x, y, lr=lr, print_time=True)
# Encrypt features / labels
x = crypten.cryptensor(x)
y = crypten.cryptensor(y)
logging.info("==================")
logging.info("CrypTen Training")
logging.info("==================")
w, b = train_linear_svm(x, y, lr=lr, print_time=True)
if not skip_plaintext:
logging.info("PyTorch Weights :")
logging.info(w_torch)
logging.info("CrypTen Weights:")
logging.info(w.get_plain_text())
if not skip_plaintext:
logging.info("PyTorch Bias :")
logging.info(b_torch)
logging.info("CrypTen Bias:")
logging.info(b.get_plain_text())
| CrypTen-main | examples/mpc_linear_svm/mpc_linear_svm.py |