#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import json
import sys
sys.path.append("./FLSim")
from FLSim.flsim.utils.config_utils import fl_config_from_json
def bool_flag(s):
"""
Parse boolean arguments from the command line.
"""
if s.lower() in ["False", "false"]:
return False
elif s.lower() in ["True", "true"]:
return True
else:
raise argparse.ArgumentTypeError("invalid value for a boolean flag")
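# Illustrative use of bool_flag (editor's sketch, not part of the original source;
# "--my-flag" is a hypothetical argument name):
#   p = argparse.ArgumentParser()
#   p.add_argument("--my-flag", type=bool_flag, default="False")
#   p.parse_args(["--my-flag", "true"]).my_flag   # -> True
#   p.parse_args(["--my-flag", "yes"])            # -> argparse error ("invalid value for a boolean flag")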
def flsim_args(parser):
parser.add_argument(
"--dp-level",
default="user_level",
type=str,
help="FLSim DP level (User or item level DP). Defaults to user_level.",
)
parser.add_argument(
"--gpu-mem-minimiser",
default="False",
type=bool_flag,
help="FLSim, whether to use the GPUMemoryMinimiser",
)
parser.add_argument(
"--debug-config",
default="False",
type=bool_flag,
help="For debugging: Whether to use FLSim debug configs (without CanarySyncTrainer)",
)
parser.add_argument(
"--users-per-round",
default=1,
type=int,
help="FLSim, Sets the number of users per round for training + attacking FL models",
)
parser.add_argument(
"--client-epochs",
default=1,
type=int,
help="FLSim, number of local epochs per user",
)
parser.add_argument(
"--num-local-updates",
default=-1,
type=int,
help="FLSim, number of local updates made by a user. -1 if users have varying number of local batches (default)",
)
parser.add_argument(
"--server-clip-const",
default=1,
type=int,
help="Sets the FLSim 'clipping_value' parameter. This is the clipping constant of model updates.",
)
parser.add_argument(
"--canary-design-reverse-server-clip",
default=False,
type=bool_flag,
help="For debugging: If True, will design and test on unclipped server updates, but will still train the model on clipped server updates",
)
parser.add_argument(
"--insert-canary-as-batch",
default=False,
type=bool_flag,
help="Whether to insert the canary as a sample or an entire batch. Does not need to be set, will be updated based on canary-insert-batch-index",
)
parser.add_argument(
"--canary-insert-global-round",
default=-1,
type=int,
help="FLSim, the global round to insert the canary into, overrides canary-insert-epoch",
)
parser.add_argument(
"--canary-insert-offset",
default=1,
type=int,
help="FLSim, used in train_and_freeze and continuous testing and is the round period between attacks",
)
parser.add_argument(
"--canary-insert-batch-index",
default="batch",
type=str,
help="FLSim, the batch index to insert the canary. Options: 0,-1, 'batch', Default: batch (i.e inserts canary on its own)",
)
parser.add_argument(
"--canary-design-local-models",
type=bool_flag,
default=False,
help="For debugging: If True and canary_insert_batch_index=-1, then design canaries on the (num_local_updates-1)th model",
)
parser.add_argument(
"--canary-insert-train-acc",
default=-1,
type=int,
help="In FLSim, inserts canary after model achieves train acc >= canary-insert-train-acc, overrides canary-insert-epoch and canary-insert-global-round",
)
parser.add_argument(
"--canary-insert-test-acc",
default=-1,
type=int,
help="In FLSim, inserts canary after model achieves given test acc, overrides canary-insert-epoch, canary-insert-global-round and canary-insert-train-acc",
)
parser.add_argument(
"--canary-insert-type",
default="",
type=str,
help="Types: train (acc), test (acc)",
)
parser.add_argument(
"--canary-test-type",
default="freeze",
type=str,
help="Takes values: 'freeze', 'train_and_freeze', 'continuous'",
)
parser.add_argument(
"--canary-insert-acc-threshold",
default=-1,
type=int,
help="FLSim, Round or accuracy to design canary at and begin CANIFE attack",
)
parser.add_argument(
"--canary-insert-epsilon",
default=-1,
type=float,
help="FLSim, train model to target epsilon before inserting canary, Default: -1 (disabled)",
)
parser.add_argument(
"--epsilon",
default=-1,
type=float,
help="FLSim, will calibrate noise_multiplier to guarantee epsilon over fl-epochs Default -1 (disabled)",
)
parser.add_argument(
"--fl-server-lr",
default=-1,
type=float,
help="FLSim server lr, Default: -1 (uses FLSim config default)",
)
parser.add_argument(
"--fl-client-lr",
default=-1,
type=float,
help="FLSim client lr, Default: -1 (uses FLSim config default)",
)
parser.add_argument(
"--fl-dropout",
default=0,
type=float,
help="FLSim, model dropout if using simpleconv, Default: 0 (no dropout)",
)
parser.add_argument(
"--fl-checkpoint-only",
default=False,
type=bool_flag,
help="FLSim, Train until canary insertion, save checkpoint and then exit",
)
parser.add_argument(
"--fl-load-checkpoint",
default=False,
type=bool_flag,
help="FLSim, Attempt to load the checkpoint of the experiments parameters if possible, otherwise train from scratch",
)
parser.add_argument(
"--fl-epochs",
default=-1,
type=int,
help="FLSim number of epochs Default: -1 (uses FLSim config epochs)",
)
parser.add_argument(
"--local-batch-size",
default="",
type=str,
help="FLSim, Local batch size of FLSim clients",
)
parser.add_argument(
"--override-noise-multiplier",
default="False",
type=bool_flag,
help="FLSim, If True, will override noise multiplier with epsilon/sigma even when loading a DP checkpoint",
)
def canary_args(parser):
parser.add_argument(
"--canary-normalize-optim-grad",
default="True",
type=bool_flag,
help="Normalize grad",
)
# Takes values: Random, Image, Text
parser.add_argument(
"--canary-init",
default="random",
type=str,
help="CANIFE, Method for initialising the canary sample. Default: Randomly initialised (from token space or image space)",
)
parser.add_argument(
"--canary-epochs",
default=5000,
type=int,
help="CANIFE, number of canary design iterations",
)
parser.add_argument(
"--canary-iters",
default=1,
type=int,
help="How many times to repeat the canary experiment. Default: 1",
)
parser.add_argument(
"--canary-clip-const",
default=1,
type=float,
help="CANIFE, Canary sample-grad clip factor. Only used for debugging.",
)
# loss1 - Square dot product with batch mean
# loss2 - Square dot product with per sample gradients
parser.add_argument(
"--canary-loss",
default="loss2",
type=str,
help="CANIFE, Canary loss to use. Defaults to loss2 (First term of Eq1 in paper)",
)
parser.add_argument(
"--canary-norm-matching",
default="True",
type=bool_flag,
help="CANIFE, If True, will optimise canary sample to have gradient matched to canary-norm-constant",
)
parser.add_argument(
"--canary-norm-loss",
default="hinge_squared",
type=str,
help="For debugging: hinge vs hinge_squared",
)
parser.add_argument(
"--canary-norm-constant",
default=1,
type=int,
help="CANIFE, If canary_norm_matching=True, will optimise canary to have norm >= canary-norm-consant",
)
# sample_grads = Orthogonal to sample grads
# model_updates = Orthogonal to model updates
parser.add_argument(
"--canary-design-type",
default="model_updates",
type=str,
help="CANIFE, whether to design on clipped model updates or on clipped sample grads. Default: model_updates",
)
# freeze / holdout
# exact
parser.add_argument(
"--canary-setup",
default="exact",
type=str,
help="CANIFE, Whether to form the design pool of mock clients from a holdout (test) set or 'exact' (design on current rounds clients)",
)
parser.add_argument(
"--canary-insert-epoch",
default="1",
type=str,
help="FLSim, Epoch to design canary from and carry out CANIFE attack",
)
parser.add_argument(
"--canary-num-test-batches",
default=50,
type=int,
help="Number of batches (from the training set) to test canary against",
)
parser.add_argument(
"--canary-design-sample-size",
default="",
type=str,
help="CANIFE, Design pool sample size. If empty will be inferred from canary-design-minibatch-size",
)
parser.add_argument(
"--canary-design-pool-size",
default="",
type=str,
help="CANIFE, Design pools size. If not empty and using model updates, will override sample size",
)
parser.add_argument(
"--canary-design-minibatch-size",
default="",
type=str,
help="CANIFE, Design optimisation minibatch size. If empty will be set to canary_design_sample_size or users_per_round",
)
parser.add_argument(
"--benchmark-design",
default="False",
type=bool_flag,
help="CANIFE, Whether to track canary design time or not. Default: False",
)
parser.add_argument(
"--scale-canary-test",
default="False",
type=bool_flag,
help="CANIFE, Debugging"
)
def parse_args():
parser = argparse.ArgumentParser(description="PyTorch CIFAR10 Mad Canaries")
canary_args(parser)
flsim_args(parser)
parser.add_argument(
"--task",
default="FLSim",
type=str,
help="Task",
)
parser.add_argument(
"--model-arch",
default="simpleconv",
type=str,
help="Model arch options: lstm, resnet, simpleconv, shakes_lstm",
)
parser.add_argument(
"--num-classes",
default=10,
type=int,
help="",
)
parser.add_argument(
"--sigma",
type=float,
default=0,
metavar="S",
help="Noise multiplier for DP (default 0)",
)
parser.add_argument(
"--delta",
type=float,
default=1e-5,
metavar="D",
help="Target DP delta (default: 1e-5)",
)
parser.add_argument(
"--disable-dp",
type=bool_flag,
default=False,
help="Not used in FLSim/CANIFE. Disable privacy training and just train with vanilla SGD.",
)
parser.add_argument(
"--skip-acc",
type=bool_flag,
default=False,
help="If True, does not benchmark accuracy when loading a checkpointed model in central canary attack",
)
parser.add_argument(
"--checkpoint",
type=bool_flag,
default=True,
help="Save checkpoints every checkpoint_round during training",
)
parser.add_argument(
"--checkpoint-path",
type=str,
default="./local_checkpoints",
help="path of checkpoints (saving/loading)",
)
parser.add_argument(
"--plot-path",
type=str,
default="",
help="Will output experiment results to DUMP_PATH/PLOT_PATH. Default: '' ",
)
parser.add_argument(
"--dump-path",
type=str,
default="./local_checkpoints",
help="Output path of experiment run.",
)
parser.add_argument(
"--checkpoint-round",
type=int,
default=5,
metavar="k",
help="Not used. FLSim, Checkpoint every k rounds",
)
parser.add_argument(
"--dataset",
type=str,
default="CIFAR10",
help="Options: CIFAR10, celeba, shakespeare, sent140",
)
parser.add_argument(
"--data-root",
type=str,
default="../cifar10",
help="Location of LEAF datsets or CIFAR10",
)
parser.add_argument(
"--device", type=str, default="cpu", help="Device on which to run the code. Values: cpu or gpu"
)
parser.add_argument(
"--master-port",
default=12568,
type=str,
help="Slurm master port",
)
parser.add_argument(
"--debug",
type=int,
default=0,
help="debug level (default: 0)",
)
parser.add_argument(
"--prettify-samples",
type=bool_flag,
default="False",
help="CANIFE, For debugging. Disables data augmentation + outputs canary samples",
)
return parser.parse_args()
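# Illustrative invocation (editor's sketch; all flags are defined above, the values are arbitrary,
# and launcher.py is the entry point that calls parse_args):
#   python launcher.py --task FLSim --dataset CIFAR10 --model-arch simpleconv \
#       --users-per-round 100 --epsilon 10 --canary-design-type model_updates \
#       --canary-epochs 5000 --canary-setup exact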
def create_flsim_cfg(args, base_config="./FLSim/examples/configs/"):
config_map = {
"CIFAR10_True": "cifar10_resnet_canary_sample_level.json",
"CIFAR10_False": "cifar10_resnet_canary_user_level.json",
"celeba_True": "celeba_example.json",
"celeba_False": "celeba_resnet_canary_user_level.json",
"sent140_True": "sent140_config.json",
"sent140_False": "sent140_canary_user_level.json",
"femnist_False": "femnist_config.json",
"shakespeare_False": "shakespeare_config.json"
}
config_key = f"{args.dataset}_{args.debug_config}"
config_name = config_map.get(config_key, None)
if config_name is None:
raise Exception("No viable config provided")
base_config += config_name
with open(base_config, "r") as config_file:
json_config = json.load(config_file)
if args.dp_level == "server_level":
json_config["config"]["trainer"]["server"]["privacy_setting"]["clipping_value"] = args.flsim_server_clip_const
cfg = fl_config_from_json(json_config["config"])
if args.canary_insert_type != "":
if args.canary_insert_type == "train":
args.canary_insert_train_acc = args.canary_insert_acc_threshold
elif args.canary_insert_type == "test":
args.canary_insert_test_acc = args.canary_insert_acc_threshold
if args.canary_insert_batch_index == "batch":
args.insert_canary_as_batch = True
else:
args.canary_insert_batch_index = int(args.canary_insert_batch_index)
# Data args
if args.local_batch_size != "":
cfg["data"]["local_batch_size"] = int(args.local_batch_size)
if args.dataset == "CIFAR10":
cfg["data"]["examples_per_user"] = max(args.local_batch_size, 1)*max(args.num_local_updates,1)
cfg["data"]["data_root"] = args.data_root
cfg["data"]["canary_iters"] = args.canary_iters
cfg["data"]["debug_config"] = args.debug_config
# Model args
cfg["model"]["model_arch"] = args.model_arch
cfg["model"]["dropout"] = args.fl_dropout
# Trainer args
cfg["trainer"]["checkpoint_only"] = args.fl_checkpoint_only
cfg["trainer"]["load_checkpoint"] = args.fl_load_checkpoint
if not args.debug_config:
args.canary_insert_epoch = int(args.canary_insert_epoch)
dict_args = copy.deepcopy(vars(args))
cfg["trainer"]["users_per_round"] = args.users_per_round
cfg["trainer"]["args"] = dict_args
cfg["trainer"]["client"]["epochs"] = args.client_epochs
if args.fl_server_lr != -1:
cfg["trainer"]["server"]["server_optimizer"]["lr"] = args.fl_server_lr
if args.fl_client_lr != -1:
cfg["trainer"]["client"]["optimizer"]["lr"] = args.fl_client_lr
if "privacy_setting" in cfg["trainer"]["server"]:
cfg["trainer"]["server"]["privacy_setting"]["clipping_value"] = args.server_clip_const
cfg["trainer"]["server"]["privacy_setting"]["target_delta"] = args.delta
cfg["trainer"]["server"]["privacy_setting"]["noise_multiplier"] = args.sigma
if args.fl_epochs != -1:
cfg["trainer"]["epochs"] = args.fl_epochs
if args.canary_test_type == "train_and_freeze" and args.epsilon > 0:
cfg["trainer"]["always_keep_trained_model"] = True
return cfg
| canife-main | arg_handler.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
import matplotlib.pyplot as plt
sys.path.append("./FLSim")
from arg_handler import create_flsim_cfg, parse_args
from FLSim.examples.canary_example import run
from FLSim.flsim.common.logger import Logger
plt.rcParams.update({
# "text.usetex": True,
"font.family": "sans-serif",
"font.sans-serif": ["Helvetica"]})
logging.basicConfig(
format="%(asctime)s:%(levelname)s:%(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
stream=sys.stdout,
)
logger = logging.getLogger("ddp")
logger.setLevel(level=logging.INFO)
num_class_map = {"CIFAR10": 10, "imagenet": 1000, "sent140": 2, "femnist": 62, "celeba": 2, "shakespeare": 80}
# ----------------- Args + Main -----------------
if __name__ == "__main__":
args = parse_args()
if not args.debug_config:
args.canary_design_minibatch_size = int(args.users_per_round) if args.canary_design_minibatch_size == "num_users" else args.canary_design_minibatch_size
args.canary_design_pool_size = int(args.users_per_round) if args.canary_design_pool_size == "num_users" else args.canary_design_pool_size
if args.canary_design_type == "sample_grads": # Defaults for sample grads
if args.canary_design_pool_size != "": # Design pool size overrides design sample size
args.canary_design_sample_size = args.canary_design_pool_size
else:
args.canary_design_sample_size = 32 if args.canary_design_minibatch_size == "" else args.canary_design_minibatch_size
args.canary_design_pool_size = args.canary_design_sample_size
args.canary_design_minibatch_size = args.canary_design_sample_size if args.canary_design_minibatch_size == "" else args.canary_design_minibatch_size
args.local_batch_size = 128 if args.local_batch_size == "" else args.local_batch_size
else: # Defaults for model_updates
args.local_batch_size = 128 if args.local_batch_size == "" else args.local_batch_size
if args.canary_design_minibatch_size == "":
args.canary_design_minibatch_size = int(args.users_per_round) if args.canary_design_type == "model_updates" else int(args.local_batch_size)
args.canary_design_sample_size = int(args.local_batch_size) * abs(args.num_local_updates) * int(args.canary_design_minibatch_size) if args.canary_design_sample_size == "" else args.canary_design_sample_size
if args.canary_design_pool_size != "":
args.canary_design_sample_size = int(args.canary_design_pool_size) * abs(args.num_local_updates) * int(args.local_batch_size)
args.canary_design_sample_size = int(args.canary_design_sample_size)
args.canary_design_minibatch_size = int(args.canary_design_minibatch_size)
args.local_batch_size = int(args.local_batch_size)
args.canary_design_pool_size = int(args.canary_design_pool_size) if args.canary_design_pool_size != "" else -1
args.num_classes = num_class_map[args.dataset]
if args.task == "FLSim": # Run FLSim with a canary attack
# Load config and run flsim
if args.debug == 1:
Logger.set_logging_level(logging.DEBUG)
cfg = create_flsim_cfg(args)
print(args.dataset)
run(cfg)
| canife-main | launcher.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
class Canary():
def __init__(self, data, init_data, class_label, init_loss=0, init_grad=None, canary_grad=None, final_loss=0, health=0) -> None:
"""Canary class
Args:
data: Tensor of final optimised canary
init_data: Tensor of initial canary (before optimisation)
class_label: Canary class
init_loss (int, optional): Initial canary loss. Defaults to 0.
init_grad (tensor, optional): Initial canary gradient. Defaults to None.
canary_grad (tensor, optional): Final canary gradient. Defaults to None.
final_loss (int, optional): Final loss after optimisation. Defaults to 0.
health (int, optional): Canary health between 0-1. Defaults to 0.
"""
self.data = data
self.init_data = init_data
self.final_loss = final_loss
self.init_loss = init_loss
self.class_label = class_label
self.health = health
self.grad = canary_grad
self.init_grad = init_grad
self.health = health
| canife-main | canife/canary.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa
from .canary import Canary
from .canary_analyser import CanaryAnalyser
from .canary_designer import CanaryDesigner
from .canary_designer_nlp import CanaryDesignerNLP
| canife-main | canife/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import random
from collections import defaultdict
from timeit import default_timer as timer
import numpy as np
import torch
import torch.autograd as autograd
import torch.optim as optim
from hydra.utils import instantiate
from canife import Canary
from canife.utils import (
clip_grad,
compute_batch_grad,
compute_local_update,
compute_sample_grads,
count_params,
display_gpu_mem,
)
class CanaryDesigner():
def __init__(self, grad_sample_module, canary_class=None, canary_loss="loss1", canary_norm_loss="hinge_squared", canary_design_type="sample_grads", canary_epochs=1000,
canary_init="random", canary_preprocess=None, canary_clip_const=1, local_batch_size=128, canary_insert_batch_index=0, canary_design_local_models=False,
server_clip_const=1, client_lr=1, num_classes=10, logger=None, local_updates=1, local_epochs=1, optimizer_config=None, dp_level="sample_level",
gpu_mem_minimiser=False, canary_norm_matching=False, canary_norm_constant=50, canary_normalize_optim_grad=True,
in_channels=3, image_size=32, benchmark_design=False, **kwargs) -> None:
self.canary_init = canary_init
self.canary_loss = canary_loss
self.canary_norm_loss = canary_norm_loss
self.canary_norm_matching = canary_norm_matching
self.canary_norm_constant = canary_norm_constant
self.canary_normalize_optim_grad = canary_normalize_optim_grad
self.canary_design_type = canary_design_type
self.canary_class = canary_class
self.canary_epochs = canary_epochs
self.canary_clip_const = canary_clip_const
self.canary_preprocess = canary_preprocess
self.local_batch_size = local_batch_size
self.canary_insert_batch_index = canary_insert_batch_index
self.canary_design_local_models = canary_design_local_models
self.canary_design_bias = 0
self.canary_losses = canary_loss
self.canary_type = "image"
self.local_updates = local_updates
self.local_epochs = local_epochs
self.server_clip_const = server_clip_const
self.client_lr = client_lr
self.dp_level = dp_level
self.num_classes = num_classes
self.gpu_mem_minimiser = gpu_mem_minimiser
self.logger = logger
self.grad_sample_module = grad_sample_module
self.optimizer_config = optimizer_config
self.in_channels = in_channels
self.image_size = image_size
self.benchmark_design = benchmark_design
self.benchmark_times = []
# If user-level, design canary on unclipped gradients
if self.dp_level == "user_level":
self.canary_clip_const = float('inf')
def get_analyser_args(self):
"""Returns attributes of CanaryDesigner which can be used to populate args when creating a CanaryAnalyser
Returns:
dict: attributes of CanaryDesigner
"""
return self.__dict__
def set_grad_sample_module(self, grad_sample_module):
"""
Args:
grad_sample_module (GradSampleModule): GradSampleModule to be used to compute per-sample gradients when designing the canary
"""
self.grad_sample_module = grad_sample_module
def _compute_clipped_grad(self, model, criterion, batch, device="cpu"):
"""Computes the clipped gradients of a batch
Args:
model: nn.Module to compute clipped grad
criterion: Loss function
batch: Batch to compute gradients from
device (optional): Torch device. Defaults to "cpu".
Returns:
Clipped gradient of batch
"""
grad = compute_batch_grad(model, criterion, batch, device=device)
# clip canary grad
return clip_grad(grad, self.canary_clip_const)
def _init_canary(self, canary_design_loader):
"""Initialises canary
Args:
canary_design_loader: Canary design pool, required for image initialisation
Returns:
canary: Canary as a tensor
"""
if self.canary_init == "random":
canary = torch.rand(size=(1,self.in_channels,self.image_size,self.image_size))
canary = canary if self.canary_preprocess is None else self.canary_preprocess(canary)
self.canary_class = random.randint(0, self.num_classes-1)
else:
if self.canary_design_type == "sample_grads": # The specific shapes of design loaders depends on sample_grads vs model_updates
canary = next(iter(canary_design_loader))[0][0].clone().view(1,self.in_channels,self.image_size,self.image_size)
self.canary_class = next(iter(canary_design_loader))[1][0].clone().item()
else:
canary = next(iter(canary_design_loader))[0][0][0].clone().view(1,self.in_channels,self.image_size,self.image_size)
self.canary_class = next(iter(canary_design_loader))[0][1][0].clone().item()
return canary.clone()
def _compute_local_update(self, model, criterion, local_batches, device):
"""Computes a model update from a mock client who has local_batches
Args:
model: nn.Module
criterion: Loss function
local_batches: Clients local batches
device: torch device
Returns:
model_update: The clipped local model update (the 1/lr * expected batch size rescaling is currently commented out below)
"""
initial_model_state = copy.deepcopy(model.state_dict())
model_optimizer = instantiate(self.optimizer_config, model=model)
local_model_state, local_model_before_insert, _ = compute_local_update(model, criterion, model_optimizer, local_batches, expected_batch_size=self.local_batch_size, local_epochs=self.local_epochs, reverse_batch_scaling=False, device=device)
# Difference original and local model
local_update = torch.tensor([]).to(device)
for name, param in model.named_parameters():
if param.requires_grad:
local_update = torch.cat([local_update, (initial_model_state[name].data-local_model_state[name].data).flatten().detach().clone()])
model.load_state_dict(initial_model_state) # Revert changes made by multiple local updates
self.logger.debug(f"Mock client local update {local_update}, server clip const {self.server_clip_const}")
# (1/lr)*B*clip(local update)
# return (1/self.client_lr)*self.local_batch_size*clip_grad(local_update.cpu(), self.server_clip_const), local_model_before_insert
return clip_grad(local_update.cpu(), self.server_clip_const), local_model_before_insert
def _compute_aggregated_design_vectors(self, model, grad_dim, canary_design_loader, criterion, device):
"""Computes aggregated design vectors to craft canary on
Args:
model: nn.Module
grad_dim: Gradient dimension of model
canary_design_loader: Design loader
criterion: Loss function
device: torch device
Returns:
aggregated_design_vec: Either the aggregated sum of per-sample-gradients (if canary_design_type == sample_grads) or aggregated model updates (if canary_design_type == model_updates)
batch_design_vecs: Individual per-sample gradients or individual model updates from mock design clients
local_model_states: The final states of local models if canary_design_type=="model_updates"
"""
aggregated_design_vec = torch.zeros(size=(grad_dim,))
batch_design_vecs = torch.tensor([])
local_model_states = []
if self.canary_design_type == "sample_grads":
batch_design_vecs = torch.zeros((grad_dim, ))
elif self.canary_design_type == "model_updates":
batch_design_vecs = torch.zeros((len(canary_design_loader), grad_dim))
self.logger.info(" Computing sample grads/model updates from canary design pool...")
for i, design_batch in enumerate(canary_design_loader):
if i % 10 == 0:
self.logger.debug(f" Computing sample grads/model updates of canary design batch={i+1}")
if self.canary_design_type == "model_updates": # Scaled and clipped model updates
local_update, before_insert_model_state = self._compute_local_update(model, criterion, design_batch, device) # The design batch is a mock client's local data
batch_design_vecs[i] = local_update
aggregated_design_vec += local_update
local_model_states.append(before_insert_model_state)
self.logger.debug(f"Mock client {i} scaled local update {local_update}")
if i == 0:
self.logger.info(f"Local design updates are scaled by B={self.local_batch_size}, lr={self.client_lr}, clip const={self.server_clip_const}")
elif self.canary_design_type == "gradient_pool":
global_state = copy.deepcopy(self.grad_sample_module.state_dict())
model_optimizer = instantiate(self.optimizer_config, model=self.grad_sample_module)
_, _, local_step_sample_grads = compute_local_update(self.grad_sample_module, criterion, model_optimizer, design_batch, device=device, compute_sample_grads=True)
self.grad_sample_module.load_state_dict(global_state) # Revert changes made by multiple local updates
batch_design_vecs = torch.cat([batch_design_vecs, local_step_sample_grads], dim=0)
aggregated_design_vec += local_step_sample_grads.sum(axis=0)
else:
batch_design_vecs, _ = compute_sample_grads(self.grad_sample_module, criterion, design_batch, device=device, clipping_const=self.canary_clip_const)
aggregated_design_vec += batch_design_vecs.sum(axis=0)
return aggregated_design_vec, batch_design_vecs, local_model_states
# Will be overriden for NLP
def _init_canary_optimisation(self, canary_design_loader, device):
"""Initialises canaries for optimisation
Args:
canary_design_loader: Design pool
device: Torch device
Returns:
init_canary: Initial Canary for metrics
canary: Tensor canary to optimise
canary_class: Tensor class of canary
canary_optimizer: Optimizer over canary
"""
init_canary = self._init_canary(canary_design_loader)
canary = init_canary.clone().to(device) # Clone because we keep the initial canary for statistics
canary.requires_grad = True
canary_class = torch.tensor([self.canary_class]).to(device)
base_lr = 1
canary_optimizer = optim.Adam([canary], lr=base_lr)
return init_canary, canary, canary_class, canary_optimizer
# Will be overriden for NLP
def _forward_pass_canary(self, model, canary):
"""Runs a forward pass on a canary given a model
Args:
model: nn.Module
canary: canary tensor
Returns:
output: Output of model(canary)
"""
model.train()
model.zero_grad()
output = model(canary)
return output
# Will be overriden for NLP
def _post_process_canary(self, model, criterion, canary, canary_class, device="cpu"):
"""Computes final gradient from the canary
Args:
model: nn.Module
criterion: Loss function
canary: tensor
canary_class: tensor
device (optional): torch device, defaults to "cpu".
Returns:
canary: Final canary after post-processing
canary_grad: Final canary gradient
"""
canary_grad = self._compute_clipped_grad(model, criterion, [canary, canary_class], device=device).detach().cpu()
return canary, canary_grad
def _optimise(self, model, criterion, canary_design_loader, device="cpu"):
""" Optimise over model and design loader to craft a canary
Args:
model: nn.Module
criterion: Loss function
canary_design_loader: DataLoader or list of tensors that mimics the batch structure of a DataLoader
device (str, optional): Torch device, defaults to "cpu".
Returns:
canary: Canary object
"""
display_gpu_mem(prefix="Start of optim", device=device, logger=self.logger)
init_canary, canary, canary_class, canary_optimizer = self._init_canary_optimisation(canary_design_loader, device)
model = model.to(device)
model.zero_grad()
# Init optim
grad_dim = count_params(model)
self.logger.info(f" Grad Dim {grad_dim}")
canary_loss = torch.tensor(float("inf"))
initial_model_state = copy.deepcopy(model.state_dict())
local_model_states = []
t, initial_canary_loss = 0,0
optim_stats = defaultdict(list)
best_canary = [float("inf"), None, 0]
optim_improving = True
aggregated_design_vec = torch.tensor([])
x_grad_norm = 0
display_gpu_mem(prefix="After moving model", device=device, logger=self.logger)
# Compute the aggregated (sum or mean) grads of the canary design set and batch sample grads (if it fits into memory)
if self.canary_loss == "loss1" or self.canary_design_sample_size <= self.canary_design_minibatch_size or self.canary_design_type != "sample_grads":
aggregated_design_vec, batch_design_vecs, local_model_states = self._compute_aggregated_design_vectors(model, grad_dim, canary_design_loader, criterion, device)
display_gpu_mem(prefix="After grad sample comp", device=device, logger=self.logger)
self.logger.info("\n ===== Beginning canary optimization... =====")
self.logger.info(f"Canary optimizer {canary_optimizer}")
if self.canary_loss != "loss1" and (self.canary_design_sample_size <= self.canary_design_minibatch_size or self.canary_design_type != "sample_grads"): # i.e no minibatches
target = batch_design_vecs # loss2 when sample grads fit into memory or when designing against model updates
gradient_norms = torch.norm(target, dim=1)
self.logger.info(f"Design norms {gradient_norms}") # Model updates or sample gradients
self.logger.info(f"Average design norm {torch.mean(gradient_norms)}")
else:
target = aggregated_design_vec # loss1, optimisation target is the aggregated gradients or model updates
display_gpu_mem(prefix="After target comp", device=device, logger=self.logger)
parameters = []
for p in model.parameters():
if p.requires_grad:
parameters.append(p)
loss1_target = target.to(device)
epoch_target = loss1_target
self.logger.debug(f"Pre-optim model arch {model}, {sum([p.flatten().sum() for p in model.parameters()])}")
display_gpu_mem(prefix="Target moved to gpu", device=device, logger=self.logger)
# grad_dim = 1
model.zero_grad()
while (t<=self.canary_epochs) and optim_improving:
t+= 1
if (t+1) % 100 == 0:
loss_mean = np.mean(optim_stats["canary_loss"][-100:])
self.logger.info(f" Canary optimisation, epoch={t}, initial loss={initial_canary_loss.item()}, average loss (last 100 iters)={loss_mean}, last loss={canary_loss.item()}")
self.logger.debug(f" Canary grad (w.r.t canary loss) t={t}, {x_grad_norm}")
if self.benchmark_design:
start = timer()
# Calculate loss of canary
canary_optimizer.zero_grad()
if (t+1) % 100 == 0 or t==1:
display_gpu_mem(prefix=f"Start of optim t={t}", device=device, logger=self.logger)
if len(local_model_states) > 0 and self.canary_insert_batch_index == -1 and self.canary_design_local_models:
model.load_state_dict(local_model_states[random.randint(0, len(local_model_states)-1)]) # Randomly sample a local model to compute canary grad from
if self.canary_loss == "loss2" and self.canary_design_sample_size > self.canary_design_minibatch_size or self.canary_loss == "loss_both": # minibatching
if self.canary_design_type == "sample_grads": # Minibatch sample grads
design_batch = next(iter(canary_design_loader))
epoch_target, _ = compute_sample_grads(self.grad_sample_module, criterion, design_batch, device, move_grads_to_cpu=False, clipping_const=self.canary_clip_const)
else: # Minibatch model updates
idx = torch.ones(target.shape[0]).multinomial(num_samples=self.canary_design_minibatch_size, replacement=False).to(device)
epoch_target = target[idx]
if (t+1) % 100 == 0 or t==1:
display_gpu_mem(prefix=f"Minibatch optim t={t}", device=device, logger=self.logger)
output = self._forward_pass_canary(model, canary)
loss = criterion(output, canary_class)
self.logger.debug(f"Model canary {canary}, norm={torch.norm(canary)}")
self.logger.debug(f"Model output {output}")
self.logger.debug(f" Model loss t={t}, {loss}")
canary_loss = torch.zeros(1, requires_grad=True).to(device)
# hvp
grad_f = autograd.grad(loss, parameters, create_graph=True, retain_graph=True)
grad_f = torch.cat([g.flatten() for g in grad_f])
self.logger.debug(f" Autograd grad_f t={t}, {grad_f}\n")
self.logger.debug(f" Sum grad_f t={t}, {torch.sum(grad_f)}\n")
temp_grad = grad_f.clone().detach().cpu()
# Norm loss
if self.canary_norm_matching and self.canary_norm_constant-torch.norm(grad_f) > 0:
if self.canary_norm_loss == "hinge_squared":
canary_loss = canary_loss + grad_dim*((self.canary_norm_constant-torch.norm(grad_f)))**2
else:
canary_loss = canary_loss + grad_dim*((self.canary_norm_constant-torch.norm(grad_f)))
# Normalise canary grad
if self.canary_normalize_optim_grad:
grad_f = torch.nn.functional.normalize(grad_f, dim=0)*self.server_clip_const
canary_loss = canary_loss + (grad_dim*(torch.sum(grad_f.view(1,-1) * epoch_target, dim=(1))**2).sum()/epoch_target.shape[0]) # Loss 1/2 term
if self.canary_loss == "loss_both":
canary_loss += (grad_dim*(torch.sum(grad_f.view(1,-1) * loss1_target, dim=(1))**2).sum()/loss1_target.shape[0])
self.logger.debug(f" Canary loss t={t}, {canary_loss}\n")
canary_loss.backward()
canary_loss = canary_loss.detach().cpu()
initial_canary_loss = canary_loss if t==1 else initial_canary_loss
optim_stats["canary_loss"].append(canary_loss.item())
optim_stats["canary_norm"].append(torch.norm(temp_grad).norm().item())
x_grad_norm = torch.norm(canary.grad.detach()).cpu()
if (t+1) % 100 == 0 or t==1:
display_gpu_mem(prefix=f"Pre-end of optim t={t}", device=device, logger=self.logger)
if t < self.canary_epochs:
canary_optimizer.step()
model.zero_grad()
# if canary_loss < best_canary[0]:
if True and t == self.canary_epochs:
best_canary = [canary_loss.detach().cpu(), canary.detach().clone().cpu(), t]
if (t+1) % 100 == 0 or t==1:
display_gpu_mem(prefix=f"End of optim t={t}", device=device, logger=self.logger)
if self.benchmark_design:
end = timer()
self.benchmark_times.append(end-start)
best_canary_loss, canary, best_t = best_canary
# Computes grad of canary from the model
# For NLP this will sample the canary and compute the exact gradient
canary, canary_grad = self._post_process_canary(model, criterion, canary, canary_class, device=device)
init_canary, init_canary_grad = self._post_process_canary(model, criterion, init_canary, canary_class, device=device)
self.logger.debug(f"Clipped gradient computed {torch.sum(canary_grad)}, {canary_grad}")
self.logger.info(f" Grad Descent for canary...t={t}")
self.logger.info(f" Best canary at t={best_t}, {best_canary_loss}")
canary_health = ((initial_canary_loss-best_canary_loss) / initial_canary_loss).item()
self.logger.info(f" Canary Norm {torch.norm(canary_grad).item()}")
self.logger.info(f" Canary Health {canary_health}")
if self.canary_loss == "loss1" or self.canary_design_sample_size <= self.canary_design_minibatch_size or self.canary_design_type == "model_updates":
aggregated_design_vec = aggregated_design_vec/self.canary_design_pool_size
self.canary_design_bias = -torch.dot(canary_grad/torch.norm(canary_grad), aggregated_design_vec).cpu().detach().item()
self.logger.info(f"Canary grad {canary_grad}")
self.logger.info(f"Canary grad normalised {canary_grad/torch.norm(canary_grad)}")
self.logger.info(f"Dot Product <Canary/||grad(can)||, S> {-self.canary_design_bias}")
self.logger.info(f"Dot Product <Canary/||grad(can)||, S+canary> {torch.dot(canary_grad/torch.norm(canary_grad), aggregated_design_vec + (canary_grad/torch.norm(canary_grad))).cpu().detach().item()}")
self.logger.info(f"Canary batch gradients {aggregated_design_vec + canary_grad/torch.norm(canary_grad)}")
self.logger.info(f" x.grad Norm {x_grad_norm}\n\n")
self.canary_losses = optim_stats["canary_loss"]
self.canary_norms = optim_stats["canary_norm"]
model.load_state_dict(initial_model_state)
return Canary(canary, init_canary, canary_class.item(), init_loss=initial_canary_loss.item(), init_grad=init_canary_grad,
final_loss=best_canary_loss.item(), canary_grad=canary_grad, health=canary_health)
def _update_design_params(self, canary_design_loader, clients_per_round, design_minibatch_size=None, varying_local_batches=False):
"""Updates relevant design params (canary_design_sample_size, canary_design_pool_size, canary_design_minibatch_size)
will infer this from the canary_design_loader and other provided args
Args:
canary_design_loader: Design loader
design_minibatch_size (optional): To override the inferred minibatch size. Defaults to None, which sets the minibatch size to clients_per_round
varying_local_batches (bool): If True then clients have varying batch sizes. Defaults to False.
"""
example_design_batch = next(iter(canary_design_loader))[0] if self.canary_design_type == "sample_grads" else canary_design_loader[0] # Either a batch of sample gradients or a mock client
num_local_updates = -1
if self.canary_design_type == "sample_grads":
self.canary_design_minibatch_size = example_design_batch.shape[0]
self.local_batch_size = self.canary_design_minibatch_size
self.canary_design_sample_size = len(canary_design_loader) * self.canary_design_minibatch_size
self.canary_design_pool_size = self.canary_design_sample_size
else:
if not varying_local_batches:
self.local_batch_size = example_design_batch[0][0].shape[0]
num_local_updates = len(example_design_batch)
self.canary_design_minibatch_size = design_minibatch_size if design_minibatch_size else clients_per_round
self.canary_design_sample_size = sum([sum([batch[0].shape[0] for batch in mock_client]) for mock_client in canary_design_loader])
self.canary_design_pool_size = len(canary_design_loader)
if self.canary_design_type == "gradient_pool":
self.canary_design_pool_size = self.canary_design_sample_size
if self.canary_design_type == "model_updates" and self.canary_design_minibatch_size > self.canary_design_pool_size:
self.canary_design_minibatch_size = self.canary_design_pool_size
self.logger.info(f"Designer inferred design sample size={self.canary_design_sample_size}, design pool={self.canary_design_pool_size}, minibatch size={self.canary_design_minibatch_size}, local updates={num_local_updates}, local client batch size={self.local_batch_size}")
def design(self, model, criterion, canary_design_loader, clients_per_round=100, varying_local_batches=False, canary_design_minibatch_size=None, device="cpu"):
"""Designs a canary from a given model and design pool (canary_design_loader)
Args:
model: nn.Module
criterion: Loss function
canary_design_loader: Design loader
varying_local_batches (bool, optional): If True, design clients contain varying batch sizes. Defaults to False.
canary_design_minibatch_size (optional): Minibatch size for designing. Defaults to None.
device (optional): Torch device to design on, defaults to "cpu".
Returns:
canary: Canary object
"""
assert self.grad_sample_module is not None, "Must set_grad_sample_module before designing a canary"
display_gpu_mem(prefix="Start of design", device=device, logger=self.logger) # For debugging
self.grad_sample_module.to(device)
display_gpu_mem(prefix="Grad sample module moved", device=device, logger=self.logger) # For debugging
self.logger.debug(f"Design model arch {model}, {sum([p.flatten().sum() for p in model.parameters()])}")
# Infer design parameters such as the design pool + sample size from the canary_design_loader
self._update_design_params(canary_design_loader, clients_per_round, design_minibatch_size=canary_design_minibatch_size, varying_local_batches=varying_local_batches)
# Optimise and find canary
canary = self._optimise(model, criterion, canary_design_loader, device)
# To avoid GPU mem issues with FLSim if using GPUMemoryMinimiser
if self.gpu_mem_minimiser:
self.grad_sample_module.to("cpu")
model.to("cpu")
return canary
| canife-main | canife/canary_designer.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import re
import string
import unicodedata
from typing import List
import torch
# Sent140 character embeddings
class TextProcessorSent140():
def __init__(self):
self.all_letters = {c: i for i, c in enumerate(string.printable)}
self.reverse_map_all_letters = {i: c for i, c in enumerate(string.printable)}
self.num_letters = len(self.all_letters)
self.vocab_size = self.num_letters+1
self.UNK: int = self.num_letters
def unicodeToAscii(self, s):
return "".join(
c
for c in unicodedata.normalize("NFD", s)
if unicodedata.category(c) != "Mn" and c in self.all_letters
)
def split_line(self, line):
"""split given line/phrase into list of words
Args:
line: string representing phrase to be split
Return:
list of strings, with each string representing a word
"""
return re.findall(r"[\w']+|[.,!?;]", line)
def flatten_list(self, nested_list):
return list(itertools.chain.from_iterable(nested_list))
def line_to_indices(self, line: str, max_seq_len: int):
line_list = self.split_line(line) # split phrase in words
line_list = line_list
chars = self.flatten_list([list(word) for word in line_list])
# padding
indices: List[int] = [
self.all_letters.get(letter, self.UNK)
for i, letter in enumerate(chars)
if i < max_seq_len
]
indices = indices + ([self.UNK] * (max_seq_len - len(indices)))
return indices
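# Illustrative behaviour (editor's sketch): each character is mapped to its position in
# string.printable and the result is padded with the UNK index up to max_seq_len, e.g.
#   tp = TextProcessorSent140()
#   tp.line_to_indices("Hi!", max_seq_len=5)
#   # -> [index of 'H', index of 'i', index of '!', tp.UNK, tp.UNK], with tp.UNK == len(string.printable)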
# Assume input is a tensor of indices
def index_sequence_to_text(self, indices):
line = ""
for i in indices:
line += self.reverse_map_all_letters.get(i.item(), "�")
return line
def text_to_index_sequence(self, text):
return torch.tensor([self.all_letters.get(c, self.UNK) for c in text])
# Preprocessing for Shakespeare
class TextProcessorShakes():
def __init__(self) -> None:
self.all_letters = (
"\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
)
self.vocab_size = len(self.all_letters)
def word_to_indices(self, word):
"""returns a list of character indices
Args:
word: string
Return:
indices: int list with length len(word)
"""
indices = []
for c in word:
indices.append(self.all_letters.find(c))
return indices
def index_sequence_to_text(self, indices):
line = ""
for i in indices:
line += self.all_letters[i]
return line
def _one_hot(self, index, size):
"""returns one-hot vector with given size and value 1 at given index"""
vec = [0 for _ in range(size)]
vec[int(index)] = 1
return vec
def letter_to_vec(self, letter):
"""returns one-hot representation of given letter"""
index = self.all_letters.find(letter)
return index # _one_hot(index, NUM_LETTERS)
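# Illustrative round trip (editor's sketch): word_to_indices maps each character to its
# position in all_letters and index_sequence_to_text inverts the mapping, e.g.
#   tp = TextProcessorShakes()
#   tp.index_sequence_to_text(tp.word_to_indices("Hello there"))   # -> "Hello there"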
def get_plot_path(args, exp_num=1, file_suffix=".png"):
plot_name = args.model_arch + "_" + args.canary_loss + "_B=" + str(args.local_batch_size)
if args.canary_setup == "holdout":
plot_name += "_CanaryDesign=" + str(args.canary_design_sample_size) + "_" + str(args.canary_design_minibatch_size)
plot_name += "_" + args.canary_setup + "_checkpoint_epoch=" + str(args.canary_insert_epoch) + "_iter=" + str(exp_num)
plot_path = args.dump_path + args.plot_path + "/" + plot_name + file_suffix
return plot_path
def state_dict_to_cpu(state_dict):
"""Moves a state dict from GPU to CPU
Args:
state_dict: model state dict (on GPU)
Returns:
state_dict: model state dict (on CPU)
"""
for k,v in state_dict.items():
state_dict[k] = v.detach().clone().cpu()
return state_dict
def display_gpu_mem(device, logger=None, prefix=""):
"""Debug function - displays device GPU memory statistics
Args:
device: GPU device
logger (_type_, optional): Optional logger. Defaults to None.
prefix (str, optional): Add prefix to debug output. Defaults to "".
"""
if str(device) != "cpu":
mem = torch.cuda.mem_get_info(device=device)
if logger is None:
print(prefix, torch.cuda.mem_get_info(device=device))
else:
logger.debug(f"{prefix} {mem} {round((mem[1] - mem[0]) / 1024**3, 4)}Gb used")
def count_params(model):
"""Counts number of parameters (that require grad) in a model
Args:
model: Model to count params
Returns:
Total number of parameters (that require grad)
"""
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def clip_grad(grad, clip_const=1):
"""Clip gradient
Args:
grad (tensor): Gradient to clip
clip_const (int, optional): Clipping constant. Defaults to 1.
Returns:
Clipped gradient tensor
"""
if torch.norm(grad) > clip_const:
grad = grad*clip_const / torch.norm(grad)
return grad
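# Worked example (editor's sketch): clip_grad rescales a gradient to have L2 norm at most clip_const.
#   g = torch.ones(4)                # L2 norm = 2.0
#   clip_grad(g, clip_const=1.0)     # -> tensor([0.5, 0.5, 0.5, 0.5]), norm 1.0
#   clip_grad(g, clip_const=3.0)     # -> unchanged, norm already below the constant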
def compute_batch_grad(model, criterion, batch, device="cpu"):
"""Computes average gradients of a batch
Args:
model: nn.Module
criterion: Loss function
batch: Batch to compute average gradients
device (str, optional): Torch device. Defaults to "cpu".
Returns:
Batch gradients, moved to cpu
"""
model.to(device)
model.zero_grad()
img = batch[0].to(device)
target = batch[1].to(device)
outputs = model(img)
batch_losses = criterion(outputs, target)
batch_losses.backward()
batch_grads = torch.tensor([]).to(device)
for p in model.parameters():
if p.requires_grad:
batch_grads = torch.cat([batch_grads, p.grad.detach().clone().flatten()])
model.zero_grad()
return batch_grads.cpu()
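# Illustrative usage (editor's sketch; the model, loss and random batch below are arbitrary stand-ins):
#   model = torch.nn.Sequential(torch.nn.Flatten(), torch.nn.Linear(3 * 32 * 32, 10))
#   criterion = torch.nn.CrossEntropyLoss()
#   batch = (torch.randn(8, 3, 32, 32), torch.randint(0, 10, (8,)))
#   flat_grad = compute_batch_grad(model, criterion, batch)   # 1-D CPU tensor of length count_params(model)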
def compute_sample_grads(grad_sample_module, criterion, batch, device="cpu", move_grads_to_cpu=True, clipping=True, clipping_const=1):
"""Computes per-sample gradients given a GSM and a batch
Args:
grad_sample_module: GradSampleModule
criterion: Loss function
batch: Batch to compute per-sample grads of
device (str, optional): Defaults to "cpu".
move_grads_to_cpu (bool, optional): If True will move all sample grads to cpu. Defaults to True.
clipping (bool, optional): Whether to clip per-sample-gradients. Defaults to True.
clipping_const (int, optional): Clipping const. Defaults to 1.
Returns:
batch_grads: Per-sample gradients of batch
clip_count: Number of per-sample gradients that were clipped
"""
grad_sample_module.to(device)
grad_sample_module.zero_grad()
img = batch[0].to(device)
target = batch[1].to(device)
outputs = grad_sample_module(img)
batch_losses = criterion(outputs, target)
batch_losses.backward()
batch_grads = torch.hstack([p.grad_sample.detach().clone().view(img.shape[0], -1) for p in grad_sample_module.parameters()])
clip_count = 0
if clipping:
for i, grad in enumerate(batch_grads):
grad_norm = torch.norm(grad)
if grad_norm > clipping_const:
clip_count += 1
batch_grads[i] = batch_grads[i]*clipping_const / grad_norm
# Calling zero-grad of GradSampleModule without DPOptimizer doesn't remove sample grads (?)
grad_sample_module.zero_grad()
for p in grad_sample_module.parameters():
p.grad_sample = None
if move_grads_to_cpu:
return batch_grads.cpu(), clip_count
else:
return batch_grads, clip_count
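# Illustrative usage (editor's sketch; assumes an Opacus GradSampleModule wraps the model,
# as done elsewhere in this repo):
#   from opacus import GradSampleModule
#   gsm = GradSampleModule(model)
#   grads, n_clipped = compute_sample_grads(gsm, criterion, batch, clipping_const=1.0)
#   # grads has shape (batch_size, num_params); each row has L2 norm <= 1.0 after clipping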
def compute_local_update(model, criterion, optimizer, batches, reverse_batch_scaling=True, expected_batch_size=1, compute_sample_grads=False, local_epochs=1, device="cpu"):
"""Computes a model update given a set of local batches
Args:
model: nn.Module
criterion: Loss function
optimizer: Model optimizer
batches: Mock client local batches
reverse_batch_scaling (bool, optional): Reverse 1/B averaging, multiplies gradients by B/expected B. Defaults to True.
expected_batch_size (int, optional): The expected batch size. Defaults to 1.
compute_sample_grads (bool, optional): Whether to also compute per-sample gradients. If True expects model to be a GSM. Defaults to False.
local_epochs (int, optional): Number of local epochs to perform. Defaults to 1.
device (str, optional): Defaults to "cpu".
Returns:
local_model_state: The model state dict after the local training. Can be used to compute a model update by differencing with global model.
local_model_before_insert: Local model at step n-1 where n is the number of local batches
sample_grads: The per-sample grads, defaults to an empty tensor if compute_sample_grads=False
"""
model.to(device)
sample_grads = torch.tensor([])
local_model_before_insert = None
for epochs in range(local_epochs):
for i, batch in enumerate(batches):
img, target = batch
model.zero_grad()
if i == len(batches)-1:
local_model_before_insert = state_dict_to_cpu(copy.deepcopy(model.state_dict()))
img = img.to(device)
target = target.to(device)
outputs = model(img)
batch_losses = criterion(outputs, target)
batch_losses.backward()
if reverse_batch_scaling:
for p in model.parameters():
p.grad *= (img.shape[0]/expected_batch_size)
if compute_sample_grads:
sample_grads = torch.cat((sample_grads, torch.hstack([p.grad_sample.clone().cpu().view(img.shape[0], -1) for p in model.parameters()])), dim=0)
optimizer.step()
model.zero_grad()
return model.state_dict(), local_model_before_insert, sample_grads
| canife-main | canife/utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn.functional as F
from canife import CanaryDesigner
from canife.utils import TextProcessorSent140, TextProcessorShakes
class CanaryDesignerNLP(CanaryDesigner):
def __init__(self, grad_sample_module, canary_class=None, canary_loss="loss1", canary_norm_loss="hinge_squared", canary_design_type="sample_grads", canary_epochs=1000, canary_init="random", canary_preprocess=None, canary_clip_const=1,
local_batch_size=128, canary_insert_batch_index=0, canary_design_local_models=False, server_clip_const=1, client_lr=1,
num_classes=10, logger=None, local_updates=1, local_epochs=1, optimizer_config=None, dp_level="sample_level", gpu_mem_minimiser=False,
canary_norm_matching=False, canary_norm_constant=50, canary_normalize_optim_grad=True,
benchmark_design=False, **kwargs) -> None:
super().__init__(grad_sample_module=grad_sample_module, canary_class=canary_class, canary_loss=canary_loss, canary_norm_loss=canary_norm_loss, canary_design_type=canary_design_type, canary_epochs=canary_epochs,
canary_init=canary_init, canary_preprocess=canary_preprocess, canary_clip_const=canary_clip_const, local_batch_size=local_batch_size, canary_insert_batch_index=canary_insert_batch_index,
canary_design_local_models=canary_design_local_models, server_clip_const=server_clip_const, client_lr=client_lr,
num_classes = num_classes, logger=logger, local_updates=local_updates, local_epochs=local_epochs, optimizer_config=optimizer_config, dp_level=dp_level, gpu_mem_minimiser=gpu_mem_minimiser, canary_norm_matching=canary_norm_matching,
canary_norm_constant=canary_norm_constant, canary_normalize_optim_grad=canary_normalize_optim_grad, benchmark_design=benchmark_design, **kwargs)
self.text_processor = TextProcessorShakes() if kwargs["dataset"] == "shakespeare" else TextProcessorSent140()
self.canary_type = "nlp"
def _init_canary_optimisation(self, canary_design_loader, device):
"""Initialises canaries for optimisation
Args:
canary_design_loader: Design pool
device: Torch device
Returns:
init_canary: Initial Canary for metrics
canary: Tensor canary to optimise
canary_class: Tensor class of canary
canary_optimizer: Optimizer over canary
"""
init_canary = self._init_canary(canary_design_loader)
canary = init_canary.clone().to(device) # Clone because we keep the initial canary for statistics
canary.requires_grad = True
canary_class = torch.tensor([self.canary_class]).to(device)
canary_optimizer = torch.optim.Adam([canary], lr=0.1)
return init_canary, canary, canary_class, canary_optimizer
def _init_canary(self, canary_design_loader):
"""Initialises canary
Args:
canary_design_loader: Canary design pool, required to infer sequence length for text initialisation
Returns:
canary: Canary as a tensor
"""
# Initialise log coeffs
if self.canary_design_type == "sample_grads":
example_seq = next(iter(canary_design_loader))[0][0].clone()
self.canary_class = next(iter(canary_design_loader))[1][0].clone().item()
else:
example_seq = next(iter(canary_design_loader))[0][0][0].clone()
self.canary_class = next(iter(canary_design_loader))[0][1][0].clone().item()
if self.canary_init == "random":
log_coeffs = torch.rand(len(example_seq), self.text_processor.vocab_size)
self.canary_class = random.randint(0, self.num_classes-1)
else:
log_coeffs = torch.zeros(len(example_seq), self.text_processor.vocab_size)
indices = torch.arange(log_coeffs.size(0)).long()
log_coeffs[indices, example_seq] = 12
self.logger.info(f"Log coeffs initialised shape={log_coeffs.shape}")
return log_coeffs
def _forward_pass_canary(self, model, canary):
"""Runs a forward pass on a canary given a model
Uses the Gumbel softmax method of Guo et al. (2021) (https://arxiv.org/abs/2104.13733)
Args:
model: nn.Module
canary: canary tensor
Returns:
output: Output of model(canary)
"""
model.train()
model.zero_grad()
# Gumbel softmax the log coeffs
coeffs = F.gumbel_softmax(canary, hard=False) # T x V
# Form soft embeddings
embedding_weights = model.__dict__["_modules"]["embedding"].weight
inputs_embeds = (coeffs @ embedding_weights) # T x D
# Forward pass through model (using soft embeddings as input)
pred = model(None, input_embeds=inputs_embeds.unsqueeze(0))
return pred
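# Shape sketch (editor's note): for a canary of T token positions over a vocabulary of size V,
# F.gumbel_softmax(canary, hard=False) gives a (T, V) matrix of relaxed one-hot rows; multiplying
# by the (V, D) embedding matrix yields (T, D) "soft" embeddings, so the forward pass stays
# differentiable with respect to the canary log-coefficients. "embedding" is assumed to be the
# name of the model's embedding module, as used above.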
def _post_process_canary(self, model, criterion, canary, canary_class, device="cpu"):
"""Computes final gradient from the canary. Converts token distribution to text sample
Args:
model: nn.Module
criterion: Loss function
canary: tensor
canary_class: tensor
device (optional): torch device, defaults to "cpu".
Returns:
canary: Final canary after post-processing
canary_grad: Final canary gradient
"""
# self._plot_canary_dist(canary)
canary = F.gumbel_softmax(canary, hard=True).argmax(1).unsqueeze(0).long()
canary_grad = self._compute_clipped_grad(model, criterion, [canary, canary_class], device=device).detach().cpu()
return canary, canary_grad
def _plot_canary_dist(self, canary):
"""
For debugging. Plots the token distribution of the canary.
Args:
canary: canary token distribution to plot
"""
coeffs = F.gumbel_softmax(canary, hard=False)
for row in coeffs:
row = np.array(row)
sns.barplot(x=list(range(0, len(row))), y=row)
plt.plot()
plt.pause(1)
plt.clf()
| canife-main | canife/canary_designer_nlp.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from opacus import GradSampleModule
from scipy.stats import binomtest
from canife.utils import TextProcessorSent140, TextProcessorShakes, compute_sample_grads
from privacy_lint.privacy_lint.attack_results import AttackResults
class CanaryAnalyser():
def __init__(self, plot_path, result_path, grad_sample_module=None, canary_epochs=1000, canary_loss="loss1", canary_norm_matching=None, canary_design_type="sample_grads", canary_setup="holdout", canary_init="random",
canary_design_minibatch_size=0, canary_design_sample_size = 0, canary_design_pool_size=0, local_batch_size=128, canary_clip_const=1, canary_insert_train_acc=0, canary_insert_test_acc=0, canary_losses=None, canary_norms=None,
canary_design_reverse_server_clip=False, canary_design_bias=0, canary_insert_epoch="unknown", canary_insert_global_round=-1, canary_insert_batch_index=-1, canary_insert_acc_threshold=-1, canary_normalize_optim_grad=True,
canary_design_local_models=False, local_updates=1, local_epochs=1, canary_type="image", delta=1e-5, sigma=0, epsilon=float('inf'), sample_rate=1, checkpoint_train_acc = 0, checkpoint_test_acc = 0,
model_arch="unknown", dataset="unknown", task="canary_attack", dp_level="sample_level", logger=None, benchmark_times=None, server_clip_const=1,
actual_sample_size=0, actual_pool_size=0 , actual_minibatch_size=0, canary_norm_constant=1, canary_norm_loss="hinge_squared", scale_canary_test=False, **kwargs) -> None:
self.reset()
self.epsilon = epsilon
self.delta = delta
self.sigma = sigma
self.sample_rate = sample_rate
self.global_round = 0
self.canary_type = canary_type
self.canary_loss = canary_loss
self.canary_losses = canary_losses
self.canary_norms = canary_norms
self.canary_epochs = canary_epochs
self.canary_init = canary_init
self.canary_design_type = canary_design_type
self.canary_setup = canary_setup
self.canary_clip_const = canary_clip_const
self.canary_design_minibatch_size = canary_design_minibatch_size
self.canary_design_sample_size = canary_design_sample_size
self.canary_design_pool_size = canary_design_pool_size
self.scale_canary_test = scale_canary_test
self.actual_sample_size = actual_sample_size
self.actual_pool_size = actual_pool_size
self.actual_minibatch_size = actual_minibatch_size
self.canary_design_reverse_server_clip = canary_design_reverse_server_clip
self.canary_design_bias = canary_design_bias
self.local_batch_size = local_batch_size
self.canary_norm_matching = canary_norm_matching
self.canary_norm_constant = canary_norm_constant
self.canary_norm_loss = canary_norm_loss
self.canary_normalize_optim_grad = canary_normalize_optim_grad
self.model_arch = model_arch
self.dataset = dataset
self.canary_insert_epoch = canary_insert_epoch
self.canary_insert_global_round = canary_insert_global_round
self.canary_insert_batch_index = canary_insert_batch_index
self.canary_insert_train_acc = canary_insert_train_acc
self.canary_insert_test_acc = canary_insert_test_acc
self.canary_insert_acc_threshold = canary_insert_acc_threshold
self.canary_design_local_models = canary_design_local_models
self.local_updates = local_updates
self.local_epochs = local_epochs
self.num_clients = "N/A"
self.server_clip_const = server_clip_const
self.accuracy_metrics = {"train": [], "eval": [], "test": []} # Used to track model accuracies
self.checkpoint_train_acc = checkpoint_train_acc
self.checkpoint_test_acc = checkpoint_test_acc
self.empirical_eps_tracker = []
self.logger = logger
self.dp_level = dp_level
self.task = task
self.base_plot_path = plot_path
self.base_result_path = result_path
self.grad_sample_module = grad_sample_module
self.benchmark_times = benchmark_times if benchmark_times else []
self.text_processor = TextProcessorSent140() if dataset == "sent140" else TextProcessorShakes()
def reset(self):
""" Resets attributes that track a canary attack
"""
self.canary_healths = []
self.canaries = []
self.canary_dot_prods = {"with_canary": [], "without_canary": []}
self.init_canary_dot_prods = {"with_canary": [], "without_canary": []}
self.batch_clip_percs = []
self.clip_rates = []
self.num_tests = 0
def _plot_canary_hist(self, canary_metrics, suffix=""):
""" Plots canary histogram and associated attack metrics for a canary that is being analysed
Args:
canary_metrics (dict): Dict of canary metrics
suffix (str, optional): Plot name suffix. Defaults to "".
"""
if np.isnan(np.sum(canary_metrics["dot_prods_without_canary"])) or np.isnan(np.sum(canary_metrics["dot_prods_with_canary"])):
self.logger.info("WARNING - Some dot products are NaN, these are being removed for plotting...")
canary_metrics["dot_prods_without_canary"] = np.array(canary_metrics["dot_prods_without_canary"])[~np.isnan(canary_metrics["dot_prods_without_canary"])]
canary_metrics["dot_prods_with_canary"] = np.array(canary_metrics["dot_prods_with_canary"])[~np.isnan( canary_metrics["dot_prods_with_canary"])]
if len(canary_metrics["dot_prods_without_canary"]) == 0 or len(canary_metrics["dot_prods_with_canary"]) == 0 :
self.logger.info("Dot products were empty, likely all nans, optimisation has failed. Canary norm is likely 0...")
return
bins = None  # use matplotlib's default binning (an explicit bin count such as 25 can be set here)
plt.hist(canary_metrics["dot_prods_without_canary"], bins=bins, label="Without canary (" + self.canary_design_type + "), m=" + str(round(canary_metrics["without_canary_mean"], 5)) + " std=" + str(round(canary_metrics["without_canary_sd"], 5)))
plt.hist(canary_metrics["dot_prods_with_canary"], bins=bins, label="W/ canary (" + self.canary_design_type + ") m=" + str(round(canary_metrics["with_canary_mean"], 5)) + " std=" + str(round(canary_metrics["with_canary_sd"], 5)))
plt.vlines(canary_metrics["mia_threshold"], ymin=0, ymax=10, color="red")
plot_title = self.task + " " + self.dp_level + " num_clients=" + str(self.num_clients) + " local_steps=" + str(self.local_updates) + " init=" + self.canary_init + "\n"
plot_title += "Design: naive" if self.canary_design_type == "naive" else f"Design: {self.canary_design_type } {self.canary_loss}"
plot_title += f" Local Batch Size={self.local_batch_size} epoch={self.canary_insert_epoch}, round={self.canary_insert_global_round}"
if len(self.accuracy_metrics["train"]) > 0 and len(self.accuracy_metrics["test"]) > 0:
plot_title += f" (Train, Test): {round(self.accuracy_metrics['train'][-1],2)}, {round(self.accuracy_metrics['test'][-1],2)}"
if self.canary_setup == "holdout" and self.canary_design_type != "naive":
plot_title += f"\n Design Sample={self.canary_design_sample_size} Design Pool={self.canary_design_pool_size}"
if self.canary_loss != "loss1":
plot_title += f" Minibatch= {self.canary_design_minibatch_size}"
if self.canary_setup == "exact" and self.canary_design_type != "naive":
plot_title += "\n Canary Health (min, max, mean): {min}, {max}, {mean}".format(min=str(round(np.min(canary_metrics["canary_health_list"]), 4)),
max=str(np.round(max(canary_metrics["canary_health_list"]),4)), mean=str(round(np.mean(canary_metrics["canary_health_list"]), 4)))
else:
plot_title += f"\n Canary norm={round(canary_metrics['canary_norm'],3)} Canary Health: {round(canary_metrics['canary_health_list'][0],5)}"
plot_title += f" (Acc, Max Acc, AUC): {round(canary_metrics['mia_acc'], 4)}, {round(canary_metrics['mia_max_acc'],4)}, {round(canary_metrics['mia_auc'],4)}"
plot_title += f"\n (eps, delta)=({round(canary_metrics['initial_epsilon'],4)}, {canary_metrics['initial_delta']}), sigma={round(canary_metrics['final_sigma'],4)}, empirical= {round(canary_metrics['empirical_eps'],4)}, ({round(canary_metrics['empirical_eps_lower'],4)}, {round(canary_metrics['empirical_eps_upper'],4)})"
plt.title(plot_title, fontdict={"fontsize": 10})
plt.ylabel("Freq")
plt.xlabel(r'<S, grad(canary)>')
plt.legend()
plt.tight_layout()
full_path = self.plot_path + suffix + ".png"
plt.savefig(full_path, bbox_inches='tight')
self.logger.info(f" Plot Saved: {full_path}")
plt.clf()
def _plot_canary_losses(self):
"""Plots the optimisation loss of an analysed canary.
"""
smoothed_loss = np.mean(np.array(self.canary_losses)[:(len(self.canary_losses)//100)*100].reshape(-1,100), axis=1)
data_list = [("canary_norms", self.canary_norms), ("canary_loss_full", self.canary_losses),
("canary_loss_last_epochs", self.canary_losses[-1000:]), ("canary_loss_smoothed", smoothed_loss)]
for item in data_list:
name, data = item
plt.plot(range(0, len(data)), data)
plt.title(name)
plt.ylabel(name)
plt.xlabel("Epoch")
plt.tight_layout()
full_path = self.plot_path + f"_{name}.png"
plt.savefig(full_path)
self.logger.info(f" Plot Saved: {full_path}")
plt.clf()
def _plot_pr_curve(self, precision, recall, auprc=0, suffix=""):
"""Plots pr curves of an analysed canary
Args:
precision (list): Precision values
recall (list): Recall values
auprc (int, optional): Optional AUPRC to display in the plot title. Defaults to 0.
suffix (str, optional): Plot name suffix. Defaults to "".
"""
for i in range(recall.shape[0]-1):
plt.plot((recall[i],recall[i]),(precision[i],precision[i+1]),'b-')
plt.plot((recall[i],recall[i+1]),(precision[i+1],precision[i+1]),'b-')
plt.title(f"PR Curve - MAP={auprc}")
plt.xlabel("Recall")
plt.ylabel("Precision")
plot_name = self.plot_path + "_pr_curve_" + suffix
plt.savefig(plot_name)
self.logger.info(f" Plot Saved: {plot_name}")
plt.clf()
def _save_results(self, canary_metrics, additional_args):
"""Checkpoint analysed canary attack
Args:
canary_metrics (dict): All canary metrics to checkpoint
additional_args (dict): Additional args, e.g. from a CanaryDesigner
"""
all_args = canary_metrics
all_args.update(self.__dict__)
remove_list = ["grad_sample_module", "canaries", "logger", "canary_losses", "text_processor", "canary_dot_prods", "init_canary_dot_prods", "canary_norms"]
for attr in remove_list:
all_args.pop(attr)
if additional_args is not None:
all_args.update(vars(additional_args))
experiment_dict = {}
all_args["canary_health"] = all_args["canary_health_list"][0] if len(all_args["canary_health_list"]) == 1 else np.mean(all_args["canary_health_list"])
columns = list(canary_metrics.keys())
row = [all_args[col] for col in columns]
experiment_dict["row"] = row
experiment_dict["columns"] = columns
torch.save(experiment_dict, self.result_path + ".tar")
self.logger.info(f" Experiment metrics saved {self.result_path}")
self.logger.info(f"Saved columns {columns}")
self.logger.info(f"Canary insert epoch={all_args['canary_insert_epoch']}, global round={all_args['canary_insert_global_round']}")
def _save_canary(self, batched_canary, title):
"""Saves an output of the designed canary. Either as an image of a .txt for NLP
Args:
batched_canary: Batch with a single canary
title: Title of the canary output file
"""
if self.canary_type == "image":
if self.dataset == "femnist":
plt.imshow(np.transpose(batched_canary[0]).numpy(), cmap="gray")
else:
plt.imshow(np.transpose(batched_canary[0].numpy(), (1, 2, 0)))
plt.title(title)
plt.axis("off")
plt.savefig(self.plot_path + "_" + title + ".png")
plt.clf()
elif self.canary_type == "nlp":
try:
with open(self.plot_path + "_" + title + ".txt", 'w') as f:
f.write(self.text_processor.index_sequence_to_text(batched_canary[0]))
except Exception:
plt.clf()
self.logger.info("Saving nlp error...")
def ci_eps(self, fp, fn, n_pos, n_neg, delta=1e-5, bound="lower"):
"""Calculate the 95% CI for empirial epsilon via the Clopper-Pearson method
Args:
fp (_type_): False positives
fn (function): False negatives
n_pos (_type_): Number of positive examples
n_neg (_type_): Number of negative examples
delta (_type_, optional): DP delta. Defaults to 10e-5.
bound (str, optional): "upper" or "lower" CI bounds. Defaults to "lower".
Returns:
empirial eps
"""
fp = int(fp)
fn = int(fn)
fp_result = binomtest(k=fp, n=n_pos)
fn_result = binomtest(k=fn, n=n_neg)
if bound == "lower":
fp_hi = fp_result.proportion_ci().high
fn_hi = fn_result.proportion_ci().high
else:
fp_hi = fp_result.proportion_ci().low
fn_hi = fn_result.proportion_ci().low
return self.empirical_eps(1-fn_hi,fp_hi, delta=delta, type=bound)
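# Illustrative sketch of the Clopper-Pearson step used in ci_eps (counts are hypothetical):
#
#   from scipy.stats import binomtest
#
#   k, n = 3, 100                                   # e.g. 3 errors observed over 100 trials
#   ci = binomtest(k=k, n=n).proportion_ci()        # exact (Clopper-Pearson) 95% interval by default
#   print(ci.low, ci.high)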
def empirical_eps(self, tpr, fpr, delta=1e-5, type=""):
"""Calculate empirical epsilon
Args:
tpr: True Positive Rate (TPR)
fpr: False Positive Rate (FPR)
delta: DP delta. Defaults to 1e-5.
type (str, optional): "lower" or "upper" for CI calculations. Defaults to "".
Returns:
empirical eps
"""
x = []
if 1-tpr > 0:
x.append((1-delta-fpr)/(1-tpr))
if fpr > 0:
x.append((1-delta-(1-tpr))/fpr)
if len(x) <= 1 or max(x) < 0:
print(f"Warning empirical eps=inf, type={type} - {fpr}, {1-tpr}")
x = [float("inf")]
return math.log(max(x))
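# Worked example of the bound above (rates are hypothetical, delta taken as 0 for clarity):
# with TPR = 0.9 and FPR = 0.05,
#   (1 - delta - FPR) / FNR = 0.95 / 0.10 = 9.5
#   (1 - delta - FNR) / FPR = 0.90 / 0.05 = 18.0
# so empirical_eps(0.9, 0.05, delta=0) = log(max(9.5, 18.0)) = log(18) ~ 2.89.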
def _compute_empirical_eps(self, attack_results: AttackResults, use_max_acc_threshold=False):
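"""Maximises empirical epsilon over attack score thresholds (or evaluates it at the
max-accuracy threshold) and attaches Clopper-Pearson confidence bounds.
Args:
attack_results (AttackResults): Attack scores with/without the canary
use_max_acc_threshold (bool, optional): If True, use the max-accuracy threshold instead of maximising over all thresholds. Defaults to False.
Returns:
(empirical_eps, lower CI, upper CI, fp, fn, tp, tn)
"""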
n_pos, n_neg = len(attack_results.scores_train), len(attack_results.scores_test)
delta = 1/(n_pos + n_neg)
max_empirical_eps = 0
_, scores = attack_results._get_scores_and_labels_ordered()
tpr_fpr = attack_results.get_tpr_fpr()
if use_max_acc_threshold: # Calculate empirical eps from max acc threshold
max_acc_thresh = attack_results.get_max_accuracy_threshold()[0]
tp = int((attack_results.scores_train >= max_acc_thresh).sum().item())
fp = int((attack_results.scores_test >= max_acc_thresh).sum().item())
max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
max_tpr, max_fpr = max_tp / (max_tp + max_fn), max_fp/(max_fp+max_tn)
max_empirical_eps = self.empirical_eps(max_tpr, max_fpr, delta=delta)
else: # Maximise empirical eps over TPR/FPR
for i, t in enumerate(scores):
tpr, fpr = tpr_fpr[0][i], tpr_fpr[1][i]
empirical_eps = self.empirical_eps(tpr, fpr, delta=delta)
acc = attack_results.get_accuracy(t)
if empirical_eps > max_empirical_eps and (empirical_eps != float("inf") or acc == 1):
tp = int((attack_results.scores_train >= t).sum().item())
fp = int((attack_results.scores_test >= t).sum().item())
max_empirical_eps = empirical_eps
max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
max_tpr, max_fpr = tpr, fpr
empirical_eps_lower = self.ci_eps(max_fp, max_fn, n_pos=n_pos, n_neg=n_neg, delta=delta)
empirical_eps_upper = self.ci_eps(max_fp, max_fn, bound="upper", n_pos=n_pos, n_neg=n_neg, delta=delta)
return max_empirical_eps, empirical_eps_lower, empirical_eps_upper, max_fp, max_fn, max_tp, max_tn
def _compute_canary_metrics(self, initial_privacy_budget, final_privacy_budget, type="canary", correct_bias=False, plot_prc=True, **kwargs):
"""Computes canary and attack metrics for checkpointing
Args:
initial_privacy_budget (dict): Initial privacy budget of the model
final_privacy_budget (dict): Final privacy budget at the attack round
type (str, optional): Type of canary metrics, either "init" or "canary". Defaults to "canary".
correct_bias (bool, optional): Debugging, if True computes corrected bias metrics. Defaults to False.
plot_prc (bool, optional): If True will plot PR curves. Defaults to True.
Returns:
canary_metrics: dict of canary metrics to checkpoint
"""
canary_metrics = {}
canary_metrics.update(kwargs)
bias = self.canary_design_bias if correct_bias else 0
canary_metrics["with_canary_mean"] = np.round(np.mean(canary_metrics["dot_prods_with_canary"], axis=0)+bias,10)
canary_metrics["with_canary_var"] = np.round(np.var(canary_metrics["dot_prods_with_canary"], axis=0),10)
canary_metrics["without_canary_mean"] = np.round(np.mean(canary_metrics["dot_prods_without_canary"], axis=0)+bias,10)
canary_metrics["without_canary_var"] = np.round(np.var(canary_metrics["dot_prods_without_canary"], axis=0),10)
results = AttackResults(torch.tensor(canary_metrics["dot_prods_with_canary"])+bias, torch.tensor(canary_metrics["dot_prods_without_canary"])+bias)
max_accuracy_threshold, max_accuracy = results.get_max_accuracy_threshold()
tpr, fpr = results.get_tpr_fpr()
precision, recall = results.get_precision_recall()
auprc = results.get_map()
canary_metrics["mia_auc"] = results.get_auc()
canary_metrics["mia_threshold"] = max_accuracy_threshold
canary_metrics["mia_max_acc"] = max_accuracy
canary_metrics["mia_acc"] = results.get_accuracy(threshold=0.5).item()
if plot_prc:
self._plot_pr_curve(precision, recall, auprc=auprc, suffix=type)
n_pos = len(results.scores_test)
n_neg = len(results.scores_train)
n = n_pos + n_neg
self.logger.info(f"=== Computing metrics for type={type}")
self.logger.info(f"Number of tests={self.num_tests}, without={len(results.scores_train)}, with={len(results.scores_test)}, n={n}")
empirical_eps, empirical_eps_lower, empirical_eps_upper, fp, fn, tp, tn = self._compute_empirical_eps(attack_results=results, use_max_acc_threshold=False)
self.logger.info(f"n={n}, tp={tp}, fp={fp}, tn={tn}, fn={fn}")
fpr = fp/(fp+tn)
fnr = fn/(fn+tp)
tpr = 1-fnr
self.logger.info(f"FPR={fpr}, TPR={tpr}, FNR={fnr}")
self.logger.info(f"Type={type}, Acc= {canary_metrics['mia_acc']}, empirical eps={empirical_eps}, lower, upper =({empirical_eps_lower},{empirical_eps_upper})\n")
canary_metrics["fp"] = fp
canary_metrics["fn"] = fn
canary_metrics["tp"] = tp
canary_metrics["tn"] = tn
canary_metrics["empirical_eps_lower"] = empirical_eps_lower
canary_metrics["empirical_eps_upper"] = empirical_eps_upper
canary_metrics["empirical_eps"] = empirical_eps
canary_metrics["without_canary_sd"] = math.sqrt(canary_metrics["without_canary_var"])
canary_metrics["with_canary_sd"] = math.sqrt(canary_metrics["with_canary_var"])
canary_metrics["sd_gap"] = abs(canary_metrics["without_canary_sd"] - canary_metrics["with_canary_sd"])
canary_metrics["loss_gap"] = np.min(canary_metrics["dot_prods_with_canary"])+bias - np.max(canary_metrics["dot_prods_without_canary"])+bias
canary_metrics["batch_clip_percs"] = kwargs["batch_clip_percs"]
if type == 'canary':
self.empirical_eps_tracker.append((canary_metrics["empirical_eps_lower"], canary_metrics["empirical_eps"], canary_metrics["empirical_eps_upper"]))
self._add_privacy_metrics(canary_metrics, initial_privacy_budget, type="initial")
self._add_privacy_metrics(canary_metrics, final_privacy_budget, type="final")
return canary_metrics
def _add_privacy_metrics(self, metrics, privacy_budget, type="final"):
"""Adds privacy budget to canary metrics
Args:
metrics (Canary metrics): Canary metrics
privacy_budget (dict): Privacy budget
type (str, optional): Type. Defaults to "final".
"""
metrics[f"{type}_epsilon"] = privacy_budget["epsilon"]
metrics[f"{type}_delta"] = privacy_budget["delta"]
metrics[f"{type}_sigma"] = privacy_budget["sigma"]
def add_clip_rate(self, clip_rate):
"""Add a clip rate e.g. a % of model updates that were clipped in the current test round
Args:
clip_rate (float): clip percentage
"""
self.clip_rates.append(clip_rate)
def add_canary(self, canary):
"""Add a canary to be analysed
Args:
canary (Canary): canary
"""
self.canaries.append(canary)
def set_canary(self, canary):
"""Set a canary, replacing all old canaries being tracked
Args:
canary
"""
self.canaries = [canary]
def reset_canaries(self):
"""Reset all tracked canaries
"""
self.canaries = []
def set_grad_sample_module(self, model):
"""Set GradSampleModule, not used in FLSim
Args:
model (GSM)
"""
self.grad_sample_module = GradSampleModule(copy.deepcopy(model))
def set_accuracy_metrics(self, accuracy_metrics):
"""Set accuracy metrics of model to checkpoint in canary_metrics
Args:
accuracy_metrics: FLSim accuracy metrics
"""
self.accuracy_metrics = accuracy_metrics
self.current_train_acc = accuracy_metrics["train"][-1] if len(accuracy_metrics["train"]) > 0 else 0
self.current_test_acc = accuracy_metrics["test"][-1] if len(accuracy_metrics["test"]) > 0 else 0
def test_against_batch(self, criterion, batch, canary, device="cpu"):
"""Debugging only, not used in FLSim.
Args:
criterion: torch criterion
batch: Batch to test canary presence
canary (Canary): canary
device (str, optional): torch device. Defaults to "cpu".
"""
assert self.grad_sample_module is not None, "Must set_grad_sample_module() before testing with a batch"
if canary not in self.canaries:
self.add_canary(canary)
# Compute required gradients
batch_sample_grads, clip_count = compute_sample_grads(self.grad_sample_module, criterion, batch, device=device, clipping_const=self.canary_clip_const)
clip_perc = round(clip_count / self.local_batch_size, 8)*100
self.batch_clip_percs.append(clip_perc)
self.logger.info(f" Clip Percentage {clip_perc}")
aggregated_batch_grad = torch.sum(batch_sample_grads, axis=0)
canary_norm = torch.norm(canary.grad).item()
self.logger.info(f" Canary grad norm: {canary_norm}\n")
self.canary_dot_prods["without_canary"].append(torch.dot(canary.grad, aggregated_batch_grad).item())
self.init_canary_dot_prods["without_canary"].append(torch.dot(canary.init_grad, aggregated_batch_grad).item())
self.canary_dot_prods["with_canary"].append(torch.dot(canary.grad, aggregated_batch_grad + canary.grad).item())
self.init_canary_dot_prods["with_canary"].append(torch.dot(canary.init_grad, aggregated_batch_grad+canary.init_grad).item())
self.num_tests += 1
def test_against_agg_grad(self, canary, aggregated_model, lr, num_clients, clip_factor=1, type="with"):
"""Tests canary against aggregated model udpates by computing a dot-product score.
Args:
canary (Canary): canary to test
aggregated_model (tensor): Aggregated clipped (noisy) model updates
lr: Client lr
num_clients: Number of clients in the current round
clip_factor (int, optional): Clip factor (not used). Defaults to 1.
type (str, optional): Type of the test, either "with" or "without" canary. Defaults to "with".
"""
self.num_clients = num_clients
aggregated_batch_grad = torch.tensor([])
for p in aggregated_model.parameters():
aggregated_batch_grad = torch.cat([aggregated_batch_grad, p.detach().clone().cpu().flatten()])
aggregated_batch_grad = num_clients * aggregated_batch_grad * 1/clip_factor
self.logger.debug(f"Aggregated grads {aggregated_batch_grad}")
self.logger.debug(f"Norm of aggregated grads {torch.norm(aggregated_batch_grad)}")
self.logger.debug(f"Clip factor {clip_factor}, num clients {num_clients}, lr {lr}, Batch size {self.local_batch_size}")
self.logger.debug(f"Aggregated scaled grads {aggregated_batch_grad}")
self.logger.info(f"Canary grad norm {torch.norm(canary.grad)}, Canary clip const {self.canary_clip_const}")
# if self.canary_design_type == "sample_grads": # Designing against unclipped updates (must unscale)
# aggregated_batch_grad = (1/lr) * num_clients * self.local_batch_size * aggregated_batch_grad * 1/clip_factor
# else: # Designing against clipped and scaled updates (so no need to unscale)
# Aggregate attack scores
if type == "with" or type == "without":
if self.canary_design_reverse_server_clip:
canary_dot_prod = torch.dot(canary.grad/torch.norm(canary.grad)**2, aggregated_batch_grad).item()
else:
if self.scale_canary_test and torch.norm(canary.grad) < self.server_clip_const:
canary_dot_prod = torch.dot(canary.grad/(torch.norm(canary.grad)**2)*self.server_clip_const, aggregated_batch_grad).item()
else:
canary_dot_prod = torch.dot((canary.grad/torch.norm(canary.grad)*self.server_clip_const)/self.server_clip_const**2, aggregated_batch_grad).item()
self.canary_dot_prods[type+"_canary"].append(canary_dot_prod)
# Aggregate canary init scores
init_dot_prod = torch.dot(canary.init_grad/torch.norm(canary.init_grad), aggregated_batch_grad).item()
if type == "without":
self.init_canary_dot_prods[type+"_canary"].append(init_dot_prod)
elif type == "with_init":
self.init_canary_dot_prods["with_canary"].append(init_dot_prod)
self.num_tests += 1
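# Illustrative sketch of the attack score in the default branch above (values and
# shapes are hypothetical; the real code flattens the aggregated model's parameters):
#
#   import torch
#
#   C = 1.0                                    # server clipping constant
#   canary_grad = torch.randn(10)
#   agg_update = torch.randn(10)               # sum of (clipped, noisy) client updates
#   unit_canary = canary_grad / torch.norm(canary_grad)
#   score = torch.dot(unit_canary * C / C**2, agg_update)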
def analyse(self, global_round=0, initial_privacy_budget=None, final_privacy_budget=None, one_step_budget=None,
disable_init_metrics=False, disable_bias_metrics=False,
plot_hists=True, plot_canaries=True, plot_losses=True, plot_prc=True, args=None):
"""Analyse current set canary and checkpoint associated attack metrics and plots
Args:
global_round (int, optional): Global FL round of the attack. Defaults to 0.
initial_privacy_budget (dict, optional): Initial model privacy budget. Defaults to None.
final_privacy_budget (dict, optional): Current model privacy budget. Defaults to None.
one_step_budget (dict, optional): Model one-step budget. Defaults to None.
disable_init_metrics (bool, optional): If True will not compute canary init metrics. Defaults to False.
disable_bias_metrics (bool, optional): If True will not compute bias-corrected metrics. Defaults to False.
plot_hists (bool, optional): If False will not plot attack histograms. Defaults to True.
plot_canaries (bool, optional): If False will not output canaries. Defaults to True.
plot_losses (bool, optional): If False will not output canary optimisation loss plots. Defaults to True.
plot_prc (bool, optional): If False will not plot PR curves. Defaults to True.
args (dict, optional): Additional args for checkpointing. Defaults to None.
"""
assert len(self.canaries) > 0, "Cannot analyse() before calling test_against_agg_grad() or test_against_batch() at least once"
if final_privacy_budget is None:
final_privacy_budget = {"epsilon": float('inf'), "delta": 0, "sigma": 0}
if initial_privacy_budget is None:
initial_privacy_budget = {"epsilon": 0, "delta": 0, "sigma": 0}
if one_step_budget is None:
one_step_budget = {"epsilon": 0, "delta": 0, "sigma": 0}
self.global_round = global_round
self.plot_path = self.base_plot_path + f"_global_round={global_round}"
self.result_path = self.base_result_path + f"_global_round={global_round}"
self.canary_healths = [canary.health for canary in self.canaries]
canary_norm = np.mean([torch.norm(canary.grad).item() for canary in self.canaries])
init_norm = np.mean([torch.norm(canary.init_grad).item() for canary in self.canaries])
final_loss = np.mean([canary.final_loss for canary in self.canaries])
init_loss = np.mean([canary.init_loss for canary in self.canaries])
# Save initial and final canaries
if plot_canaries:
self._save_canary(self.canaries[0].data, "final_canary class " + str(self.canaries[0].class_label))
self._save_canary(self.canaries[0].init_data, "init_canary class " + str(self.canaries[0].class_label))
# Compute and save canary metrics
canary_metrics = self._compute_canary_metrics(initial_privacy_budget, final_privacy_budget, type="canary", plot_prc=plot_prc, dot_prods_with_canary=self.canary_dot_prods["with_canary"], dot_prods_without_canary=self.canary_dot_prods["without_canary"],
canary_norm=canary_norm,
canary_health_list=self.canary_healths, batch_clip_percs=self.batch_clip_percs, final_loss=final_loss)
if not disable_bias_metrics:
canary_bias_corrected_metrics = self._compute_canary_metrics(initial_privacy_budget, final_privacy_budget, type="bias_canary", plot_prc=False, correct_bias=True, dot_prods_with_canary=self.canary_dot_prods["with_canary"], dot_prods_without_canary=self.canary_dot_prods["without_canary"],
canary_norm=canary_norm,
canary_health_list=self.canary_healths, batch_clip_percs=self.batch_clip_percs, final_loss=final_loss)
canary_bias_corrected_metrics["mia_threshold"] = 0.5
canary_metrics["bias_corrected_acc"] = canary_bias_corrected_metrics["mia_acc"]
if not disable_init_metrics:
init_canary_metrics = self._compute_canary_metrics(initial_privacy_budget, final_privacy_budget, type="init", plot_prc=plot_prc, dot_prods_with_canary=self.init_canary_dot_prods["with_canary"], dot_prods_without_canary=self.init_canary_dot_prods["without_canary"],
canary_norm=init_norm,
canary_health_list=[0], batch_clip_percs=self.batch_clip_percs, final_loss=init_loss)
canary_metrics["sd_improvement"] = "n/a" if disable_init_metrics else init_canary_metrics["without_canary_sd"] - canary_metrics["without_canary_sd"]
canary_metrics["init_without_canary_sd"] = "n/a" if disable_init_metrics else init_canary_metrics["without_canary_sd"]
canary_metrics["init_with_canary_sd"] = "n/a" if disable_init_metrics else init_canary_metrics["with_canary_sd"]
canary_metrics["mia_acc_improvement"] = "n/a" if disable_init_metrics else canary_metrics["mia_max_acc"] - init_canary_metrics["mia_max_acc"]
canary_metrics["dot_prods_with_init_canary"] = "n/a" if disable_init_metrics else self.init_canary_dot_prods["with_canary"]
canary_metrics["dot_prods_without_init_canary"] = "n/a" if disable_init_metrics else self.init_canary_dot_prods["without_canary"]
canary_metrics["one_step_eps"] = one_step_budget["epsilon"]
self.logger.info(f"One step privacy metrics (no sampling) (eps,delta)={one_step_budget['epsilon']}, {one_step_budget['delta']}, sigma={one_step_budget['sigma']}")
self.logger.info(f"Initial privacy metrics (eps,delta)={canary_metrics['initial_epsilon']}, {canary_metrics['initial_delta']}, sigma={canary_metrics['initial_sigma']}")
self.logger.info(f"Final privacy metrics (eps,delta)={canary_metrics['final_epsilon']}, {canary_metrics['final_delta']}, sigma={canary_metrics['final_sigma']}, sample rate={self.sample_rate}")
self.logger.info(f"Empirical epsilon tracker {self.empirical_eps_tracker}\n")
self.logger.info(f"Checkpoint train acc {self.checkpoint_train_acc} Checkpoint test acc {self.checkpoint_test_acc}")
self.logger.info(f"Current train acc {self.current_train_acc} Current test acc {self.current_test_acc}")
self.logger.info(f"All accuracy metrics {self.accuracy_metrics}\n")
if not disable_init_metrics:
self.logger.info(f" SD Improvement: {canary_metrics['sd_improvement']}")
self.logger.info(f" MIA Acc Improvement: {canary_metrics['mia_acc_improvement']}\n")
# Save canary metrics
self._save_results(canary_metrics, args)
# Plot and save dot product histograms
if plot_hists:
self._plot_canary_hist(canary_metrics, suffix="_canary") # Final canary
if not disable_bias_metrics:
self._plot_canary_hist(canary_bias_corrected_metrics, suffix="_bias_corrected_canary") # Final canary (bias-corrected)
if not disable_init_metrics:
self._plot_canary_hist(init_canary_metrics, suffix="_init") # Initial canary
# Save canary optim losses
if plot_losses:
self._plot_canary_losses()
self.logger.info(f"Minibatch, sample size, pool size, {self.canary_design_minibatch_size, self.canary_design_sample_size, self.canary_design_pool_size}")
self.logger.info(f"Actual minibatch, sample size, pool size, {self.actual_minibatch_size, self.actual_sample_size, self.actual_pool_size}") | canife-main | canife/canary_analyser.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import pandas as pd
import torch
def extract_sweep(root_dir="local_checkpoints", csv_name=""):
rows = []
full_path = root_dir
tar_path = full_path + "/**/*.tar"
print("Full path", full_path)
for file in glob.glob(tar_path, recursive=True):
exp_checkpoint = torch.load(file)
row = exp_checkpoint["row"]
# row.append(exp_checkpoint["batch_clip_percs"])
columns = exp_checkpoint["columns"]
columns.extend(["train_acc", "test_acc"])
row.extend([-1,-1])
if "accuracy_metrics" in columns:
metrics = row[columns.index("accuracy_metrics")]
if len(metrics["train"]) > 0:
train_acc = metrics["train"][-1]
row[-2] = train_acc
if len(metrics["test"]) > 0:
test_acc = metrics["test"][-1]
row[-1] = test_acc
rows.append(row)
df = pd.DataFrame(rows, columns=columns)
print(df.info(memory_usage="deep"))
save_path = f"{args.path}{args.csv_name}"
df.to_csv(save_path)
print(f"Sweep extracted saved to {save_path}...")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Extract canife experiment")
parser.add_argument("--path", type=str, help= "Path to location of experiment output")
parser.add_argument("--csv-name", type=str, help= "Name of output .csv")
args = parser.parse_args()
extract_sweep(csv_name=args.csv_name, root_dir=args.path) | canife-main | plotting/extract_exp.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import pathlib
import sys
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
sys.path.append("../canife")
sys.path.append("../privacy_lint")
from collections import defaultdict
from opacus.accountants import RDPAccountant
from opacus.accountants.utils import get_noise_multiplier
from canife import CanaryAnalyser
from privacy_lint.privacy_lint.attack_results import AttackResults
BASE_PATH = str(pathlib.Path(__file__).parent.resolve())
sns.set_theme(style="whitegrid")
def set_fontsize(size=14):
usetex = matplotlib.checkdep_usetex(True)
tex_fonts = {
"text.usetex": usetex,
"font.family": "serif",
"axes.labelsize": size,
"font.size": size,
"legend.fontsize": size,
"xtick.labelsize": size,
"ytick.labelsize": size
}
plt.rcParams.update(tex_fonts)
FONT_SIZE = 20
set_fontsize(FONT_SIZE)
# convert pandas col names to readable plot labels
column_map = {
"global_round": r"Global Round ($r$)",
"empirical_eps_upper": r"$\hat{\varepsilon}_U$",
"empirical_eps_lower": r"$\hat{\varepsilon}_L$",
"empirical_eps": r"$\hat{\varepsilon}$",
"current_test_acc": r"Model Test Accuracy",
"current_train_acc": r"Model Train Accuracy",
"canary_health": r"Canary Health",
"mia_acc": r"Attack Accuracy ($\gamma = 0.5$)",
"mia_max_acc": r"Attack Accuracy",
"mia_max_acc_rolling": r"Attack Accuracy",
"acc_rolling": r"Attack Accuracy",
"final_epsilon": r"Privacy Budget ($\varepsilon$)",
"one_step_eps": r"One-step $\varepsilon$",
"num_clients": r"Clients Per Round",
"canary_design_pool_size": r"Design Pool Size ($m$)",
"canary_design_sample_size": "Design Sample Size",
"average_sd": r"Mean Standard Deviation",
"with_canary_sd": r"With Canary SD",
"without_canary_sd": r"Without Canary SD",
"mia_auc": r"Attack AUC",
"empirical_global_eps": r"$\hat{\varepsilon}}$",
"epsilon": r"$\varepsilon$",
"canary_epochs": r"Design Iterations ($t$)",
"canary_norm_constant": r"Canary Gradient Norm Constant",
"dataset": "Dataset"
}
def print_full(x):
pd.set_option('display.max_rows', len(x))
pd.set_option('display.max_columns', len(x.columns))
print(x)
pd.reset_option('display.max_rows')
def format_axis(ax):
xlabel = ax.xaxis.get_label()._text
ylabel = ax.yaxis.get_label()._text
xlabel = column_map.get(xlabel, xlabel)
ylabel = column_map.get(ylabel, ylabel)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
def save_plot(name="", fig=None):
plt.tight_layout()
if fig:
fig.savefig(f"{BASE_PATH}/{name}.pdf", bbox_inches='tight', format="pdf")
else:
plt.savefig(f"{BASE_PATH}/{name}.pdf", bbox_inches='tight', format="pdf")
plt.clf()
def extract_epsilon_metrics(df, override_empirical_eps=False, use_max_acc=False):
extra_cols = defaultdict(list)
if override_empirical_eps:
print("Extracting empirical epsilon data...")
analyser = CanaryAnalyser(None, None, None)
for idx, x in df.iterrows():
with_dot_prods = ast.literal_eval(x["dot_prods_with_canary"].replace('nan,', ''))
without_dot_prods = ast.literal_eval(x["dot_prods_without_canary"].replace('nan,', ''))
results = AttackResults(torch.tensor(with_dot_prods), torch.tensor(without_dot_prods))
max_acc_thresh = results.get_max_accuracy_threshold()[0]
n_pos, n_neg = len(results.scores_train), len(results.scores_test)
max_empirical_eps = 0
_, scores = results._get_scores_and_labels_ordered()
tpr_fpr = results.get_tpr_fpr()
# delta = 1e-5
delta = 1/(n_pos + n_neg)
if use_max_acc: # Calculate empirical eps from max acc threshold
tp = int((results.scores_train >= max_acc_thresh).sum().item())
fp = int((results.scores_test >= max_acc_thresh).sum().item())
max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
max_tpr, max_fpr = max_tp / (max_tp + max_fn), max_fp/(max_fp+max_tn)
max_empirical_eps = analyser.empirical_eps(max_tpr, max_fpr, delta=delta)
else: # Maximise empirical eps over TPR/FPR
for i, t in enumerate(scores):
tpr, fpr = tpr_fpr[0][i], tpr_fpr[1][i]
empirical_eps = analyser.empirical_eps(tpr, fpr, delta=delta)
acc = results.get_accuracy(t)
if empirical_eps > max_empirical_eps and (empirical_eps != float("inf") or acc == 1):
tp = int((results.scores_train >= t).sum().item())
fp = int((results.scores_test >= t).sum().item())
max_empirical_eps = empirical_eps
max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
max_tpr, max_fpr = tpr, fpr
lower_eps = analyser.ci_eps(max_fp, max_fn, n_pos, n_neg, delta=delta)
upper_eps = analyser.ci_eps(max_fp, max_fn, bound="upper", n_pos=n_pos, n_neg=n_neg, delta=delta)
extra_cols["fp"].append(max_fp)
extra_cols["fn"].append(max_fn)
extra_cols["tp"].append(max_tp)
extra_cols["tn"].append(max_tn)
extra_cols["empirical_eps_lower"].append(lower_eps)
extra_cols["empirical_eps_upper"].append(upper_eps)
extra_cols["empirical_eps"].append(max_empirical_eps)
for col in extra_cols.keys():
df[col] = extra_cols[col]
print("Empirical epsilon data added...")
def extract_global_empirical_eps(df, skip_ci=True):
df["empirical_global_eps"] = 0
df["empirical_global_eps_lower"] = 0
df["empirical_global_eps_upper"] = 0
df = df.sort_values(by="global_round")
eps_list = df["epsilon"].unique()
sample_rate = df["sample_rate"].unique()[0]
print(f"Eps list {eps_list}, sample rate={sample_rate}")
for eps in eps_list:
temp_df = df[df["epsilon"] == eps]
temp_df = temp_df.sort_values(by="global_round")
df_eps = temp_df["empirical_eps"].values
steps = temp_df["global_round"].values
theoretical_sigma = temp_df["final_sigma"].mean()
empirical_global_eps = calculate_global_eps(df_eps, theoretical_sigma=theoretical_sigma, steps=steps, sample_rate=sample_rate)
df.loc[df["epsilon"] == eps, 'empirical_global_eps'] = empirical_global_eps
print(f"eps={eps} estimate done...")
if not skip_ci:
empirical_global_eps = calculate_global_eps(temp_df["empirical_eps_lower"].clip(lower=0.2).values, theoretical_sigma=theoretical_sigma, steps=steps, sample_rate=sample_rate)
df.loc[df["epsilon"] == eps, 'empirical_global_eps_lower'] = empirical_global_eps
print(f"eps={eps} lower done...")
empirical_global_eps = calculate_global_eps(temp_df["empirical_eps_upper"].values, theoretical_sigma=theoretical_sigma, steps=steps, sample_rate=sample_rate)
df.loc[df["epsilon"] == eps, 'empirical_global_eps_upper'] = empirical_global_eps
print(f"eps={eps} upper done...\n")
return df
def compute_one_step_eps(sample_rate, noise, delta=1e-5):
accountant = RDPAccountant()
history_step = (noise, sample_rate, 1)
accountant.history.append(history_step)
current_budget = accountant.get_privacy_spent(delta=delta)
return current_budget[0]
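# Hedged usage sketch of the helper above (noise value is hypothetical): the epsilon
# spent by a single full-participation round with noise multiplier 1.0.
#
#   eps = compute_one_step_eps(sample_rate=1.0, noise=1.0, delta=1e-5)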
def calculate_global_eps(empirical_per_step_epsilons, theoretical_sigma, sample_rate=0.01, steps=1000, delta=1e-5, n=100, verbose=False):
if type(steps) == int:
steps = range(1, steps+1)
accountant = RDPAccountant()
previous_step = 0
if verbose:
theoretical_accountant = RDPAccountant()
one_step_theoretical_eps = compute_one_step_eps(1, theoretical_sigma, delta)
budgets = []
for i,step in enumerate(steps):
# One-step sigma based on current empirical eps
if empirical_per_step_epsilons[i] == float("inf"): # Resort to theoretical sigma if empirical eps is inf
estimated_sigma = theoretical_sigma
else:
estimated_sigma = get_noise_multiplier(target_epsilon=max(empirical_per_step_epsilons[i], 0.15),
target_delta=1/n,
sample_rate=1,
steps=1)
# Assume noise is constant for step-previous_step rounds (i.e time between last estimate and current)
history_step = (estimated_sigma, sample_rate, step-previous_step)
accountant.history.append(history_step)
previous_step = step
current_budget = accountant.get_privacy_spent(delta=delta)
budgets.append(current_budget[0])
if verbose:
estimated_sigma_theoretical = get_noise_multiplier(target_epsilon=one_step_theoretical_eps,
target_delta=delta,
sample_rate=1,
steps=1)
history_step = (estimated_sigma_theoretical, sample_rate, step-previous_step)
theoretical_accountant.history.append(history_step)
theoretical_eps = theoretical_accountant.get_privacy_spent(delta=delta)[0]
print(f"i={i}, global round={step}")
print(f"Estimated empirical one-step sigma = {estimated_sigma} vs theoretical = {estimated_sigma_theoretical}")
print(f"Estimated empirical one-step epsilon = {empirical_per_step_epsilons[i]} vs theoretical = {one_step_theoretical_eps}")
print(f"Accumulated empirical budget {budgets[-1]} vs theoretical {theoretical_eps}\n")
return budgets
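# Hedged usage sketch (the per-round estimates are hypothetical): compose a handful of
# per-round empirical epsilons into a global budget, assuming each estimate holds for
# every round since the previous one.
#
#   per_round_eps = [1.2, 1.1, 1.3, 1.0]
#   rounds = [100, 200, 300, 400]
#   budgets = calculate_global_eps(per_round_eps, theoretical_sigma=1.0,
#                                  sample_rate=0.01, steps=rounds, delta=1e-5, n=200)
#   # budgets[-1] is the accumulated empirical epsilon after round 400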
def load_sweep(name, relative_path=False, override_empirical_eps=False):
if relative_path:
df = pd.read_csv(name)
else:
df = pd.read_csv(BASE_PATH + "/" + name + ".csv")
df["sd_gap"] = np.sqrt(df["without_canary_var"]) - np.sqrt(df["with_canary_var"])
print(df.columns)
# For compatibility with old sweeps where some metrics were tensors
if df["mia_acc"].dtype == object:
for s in ["tensor", "(", ")"]:
df["mia_acc"] = df["mia_acc"].str.replace(s, "")
df["mia_acc"] = df["mia_acc"].astype("float64")
extract_epsilon_metrics(df, override_empirical_eps=override_empirical_eps)
return df
def plot(csv_name):
dataset = "sent140"
model = "lstm"
xlim = 8900
main_df = load_sweep(f"{csv_name}", relative_path=True, override_empirical_eps=False)
main_df["epsilon"] = main_df["epsilon"].astype("int")
main_df = main_df[main_df["dataset"] == dataset]
main_df = main_df[main_df["epsilon"].isin([10,30,50])]
# Per-round empirical eps comparison
for eps in main_df["epsilon"].unique():
plot_df = main_df.copy()
plot_df = plot_df[plot_df["epsilon"] == eps]
plot_df.replace([float("inf")], np.nan, inplace=True)
for y in ["one_step_eps"]:
plot_df = main_df.copy()
plot_df = plot_df[plot_df["epsilon"] == eps]
ax = sns.lineplot(data=plot_df, x="global_round", y="empirical_eps", markers=False, label=r"$\hat{\varepsilon}_r$")
plt.fill_between(plot_df["global_round"].values, plot_df["empirical_eps_lower"].values, plot_df["empirical_eps_upper"].values, alpha=.3)
sns.lineplot(data=plot_df, x="global_round", y=y, markers=False, label=r"$\varepsilon_r$", ax=ax)
plt.ylim(0)
plt.xlim(0, xlim)
plt.tight_layout()
plt.draw()
plt.legend(loc='upper right', bbox_to_anchor=(1, 0.95))
format_axis(ax)
ax.set_ylabel(r"Privacy Budget ($\varepsilon$)")
save_plot(name=f"{dataset}_{eps}_{model}_per_round_eps")
# Global empirical eps comparison
plot_df = main_df.copy()
main_palette = sns.color_palette("deep", 3)
palette_dict = {10: main_palette[0], 30: main_palette[1], 50: main_palette[2]}
palette = [palette_dict[eps] for eps in plot_df["epsilon"].unique()]
ax = sns.lineplot(data=plot_df, x="global_round", y="final_epsilon", hue="epsilon", linestyle="--", palette=palette)
sns.lineplot(data=plot_df, x="global_round", y="empirical_global_eps", hue="epsilon", ax=ax, label='_nolegend_', palette=palette)
plt.xlim(0, xlim)
format_axis(ax)
hand, labl = ax.get_legend_handles_labels()
handout=[]
lablout=[]
for h,l in zip(hand,labl):
if l not in lablout:
lablout.append(l)
handout.append(h)
legend1 = plt.legend(handout, lablout, title=r"$\varepsilon$")
plt.ylim(0, 50)
linestyles = ['-', "--"]
dummy_lines = []
titles = [r"Empirical $\hat{\varepsilon}$", r"Theoretical $\varepsilon$"]
for b_idx, b in enumerate(titles):
dummy_lines.append(ax.plot([],[], c="black", ls = linestyles[b_idx])[0])
plt.legend([dummy_lines[i] for i in [0,1]], titles, loc="upper left", bbox_to_anchor=(0.4,0.6))
ax.add_artist(legend1)
save_plot(name=f"{dataset}_global_eps")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Plot example canife experiment")
parser.add_argument("--csv-path", type=str, help= "Path to output .csv for plotting")
args = parser.parse_args()
global_eps_path = args.csv_path.split(".csv")[0] + "_extracted.csv"
global_eps_csv_file = Path(global_eps_path)
csv_file = Path(args.csv_path)
if not csv_file.is_file():
raise FileNotFoundError(f"Output .csv does not exist at the given file path {args.csv_path}")
if not global_eps_csv_file.is_file():
df = pd.read_csv(args.csv_path)
df = extract_global_empirical_eps(df)
df.to_csv(global_eps_csv_file)
plot(csv_name=global_eps_csv_file) | canife-main | plotting/example_plotter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import os
import pandas as pd
import torch
USERNAME = os.getlogin()
print(f"USERNAME: {USERNAME}")
def extract_sweep(root_dir="saved_sweeps", csv_name=""):
rows = []
full_path = root_dir
tar_path = full_path + "/**/*.tar"
print("Full path", full_path)
for file in glob.glob(tar_path, recursive=True):
exp_checkpoint = torch.load(file)
row = exp_checkpoint["row"]
columns = exp_checkpoint["columns"]
columns.extend(["train_acc", "test_acc"])
row.extend([-1,-1])
if "accuracy_metrics" in columns:
metrics = row[columns.index("accuracy_metrics")]
if len(metrics["train"]) > 0:
train_acc = metrics["train"][-1]
row[-2] = train_acc
if len(metrics["test"]) > 0:
test_acc = metrics["test"][-1]
row[-1] = test_acc
rows.append(row)
df = pd.DataFrame(rows, columns=columns)
print(df.info(memory_usage="deep"))
save_path = f"/checkpoints/{USERNAME}/" + csv_name + ".csv"
df.to_csv(f"/checkpoints/{USERNAME}/" + csv_name + ".csv")
print(f"Sweep extracted saved to {save_path}...")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Extract canife sweep")
parser.add_argument("--sweep", type=str, help= "Name of saved sweep")
args = parser.parse_args()
extract_sweep(csv_name=args.sweep, root_dir=f"/checkpoints/{USERNAME}/canife/{args.sweep}") | canife-main | plotting/extract_aws.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from setuptools import find_packages, setup
# 3.6.8 is the final Windows binary release for 3.6.x
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 6
REQUIRED_MICRO = 8
version = {}
with open("flsim/version.py") as fp:
exec(fp.read(), version)
__version__ = version["__version__"]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR, REQUIRED_MICRO):
error = (
"Your version of python ({major}.{minor}.{micro}) is too old. You need "
"python >= {required_major}.{required_minor}.{required_micro}"
).format(
major=sys.version_info.major,
minor=sys.version_info.minor,
micro=sys.version_info.micro,
required_major=REQUIRED_MAJOR,
required_minor=REQUIRED_MINOR,
required_micro=REQUIRED_MICRO,
)
sys.exit(error)
src_dir = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r", encoding="utf8") as fh:
long_description = fh.read()
requirements_txt = os.path.join(src_dir, "requirements.txt")
with open(requirements_txt, encoding="utf8") as f:
required = f.read().splitlines()
dev_required = []
setup(
name="flsim",
version=__version__,
author="The FLSim Team",
description="Federated Learning Simulator (FLSim) is a flexible, standalone core library that simulates FL settings with a minimal, easy-to-use API. FLSim is domain-agnostic and accommodates many use cases such as vision and text.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://flsim.ai",
project_urls={
"Documentation": "https://flsim.ai/api",
"Source": "https://github.com/facebookresearch/flsim",
},
license="Apache-2.0",
install_requires=required,
extras_require={"dev": dev_required},
packages=find_packages(),
keywords=[
"PyTorch",
"Federated Learning",
"FL",
"On device training",
"Differential Privacy",
"Secure Aggregation",
"Privacy Preserving Machine Learning",
"PPML",
"PPAI",
],
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Scientific/Engineering",
],
python_requires=f">={REQUIRED_MAJOR}.{REQUIRED_MINOR}.{REQUIRED_MICRO}",
)
| canife-main | FLSim/setup.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/examples/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
"""
import copy
import json
import os
import random
from typing import Any, Iterator, List, Tuple
import flsim.configs # noqa
import hydra # @manual
import numpy as np
import torch
import torch.nn as nn
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
DataLoader,
DataProvider,
FLModel,
LEAFDataLoader,
LEAFDataProvider,
MetricsReporter,
Resnet18,
SequentialSharder,
SimpleConvNet,
)
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.datasets.cifar import CIFAR10
from canife.utils import TextProcessorSent140, TextProcessorShakes, get_plot_path
IMAGE_SIZE = 32
# Datasets
class ShakespeareDataset(Dataset):
SEED = 7
def __init__(
self,
data_root=None,
num_users=None,
):
self.text_processor = TextProcessorShakes()
with open(data_root, "r") as f:
dataset = json.load(f)
user_ids = dataset["users"]
random.seed(self.SEED)
num_users = num_users if num_users is not None else len(user_ids)
user_ids = random.sample(user_ids, min(len(user_ids), num_users))
print(f"Creating dataset with {num_users} users")
# Filter train and test datasets based on user_ids list
self.dataset = dataset
self.data = {}
self.targets = {}
# Populate self.data and self.targets
for user_id, user_data in self.dataset["user_data"].items():
if user_id not in user_ids:
continue
self.data[user_id] = list(user_data["x"])
self.targets[user_id] = list(user_data["y"])
def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
for user_id in self.data.keys():
yield self.__getitem__(user_id)
def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
if user_id not in self.data or user_id not in self.targets:
raise IndexError(f"User {user_id} is not in dataset")
user_utterances = self.process_x(self.data[user_id])
user_targets = self.process_y(self.targets[user_id])
return user_utterances, user_targets
def __len__(self) -> int:
return len(self.data)
def get_user_ids(self):
return self.data.keys()
def process_x(self, raw_x_batch):
x_batch = [self.text_processor.word_to_indices(word) for word in raw_x_batch]
x_batch = torch.LongTensor(x_batch)
return x_batch
def process_y(self, raw_y_batch):
y_batch = [self.text_processor.letter_to_vec(c) for c in raw_y_batch]
return y_batch
class CelebaDataset(Dataset):
def __init__(
self,
data_root,
image_root,
num_users=None,
transform=None,
target_transform=None,
):
with open(data_root, "r") as f:
self.dataset = json.load(f)
user_ids = self.dataset["users"]
num_users = num_users if num_users is not None else len(user_ids)
user_ids = random.sample(user_ids, min(len(user_ids), num_users))
self.transform = transform
self.target_transform = target_transform
self.image_root = image_root
self.image_folder = ImageFolder(image_root, transform)
self.data = {}
self.targets = {}
# Populate self.data and self.targets
for user_id, user_data in self.dataset["user_data"].items():
if user_id in user_ids:
self.data[user_id] = [
int(os.path.splitext(img_path)[0]) for img_path in user_data["x"]
]
self.targets[user_id] = list(user_data["y"])
def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
for user_id in self.data.keys():
yield self.__getitem__(user_id)
def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
if user_id not in self.data or user_id not in self.targets:
raise IndexError(f"User {user_id} is not in dataset")
user_imgs = []
for image_index in self.data[user_id]:
user_imgs.append(self.image_folder[image_index - 1][0])
user_targets = self.targets[user_id]
if self.target_transform is not None:
user_targets = [self.target_transform(target) for target in user_targets]
return user_imgs, user_targets
def __len__(self) -> int:
return len(self.data)
class Sent140Dataset(Dataset):
def __init__(self, data_root, max_seq_len):
self.data_root = data_root
self.max_seq_len = max_seq_len
self.text_processor = TextProcessorSent140()
self.vocab_size = self.text_processor.vocab_size
self.embedding_size = 300
with open(data_root, "r") as f:
self.dataset = json.load(f)
self.data = {}
self.targets = {}
self.num_classes = 2
# Populate self.data and self.targets
for user_id, user_data in self.dataset["user_data"].items():
self.data[user_id] = self.process_x(list(user_data["x"]))
self.targets[user_id] = self.process_y(list(user_data["y"]))
def __len__(self):
return len(self.data)
def __iter__(self):
for user_id in self.data.keys():
yield self.__getitem__(user_id)
def __getitem__(self, user_id: str):
if user_id not in self.data or user_id not in self.targets:
raise IndexError(f"User {user_id} is not in dataset")
return self.data[user_id], self.targets[user_id]
def process_x(self, raw_x_batch):
x_batch = [e[4] for e in raw_x_batch]
x_batch = [self.text_processor.line_to_indices(e, self.max_seq_len) for e in x_batch]
x_batch = torch.LongTensor(x_batch)
return x_batch
def process_y(self, raw_y_batch):
y_batch = [int(e) for e in raw_y_batch]
return y_batch
class FemnistDatasetChunked(Dataset):
IMAGE_SIZE = (28, 28)
def __init__(
self,
data_root,
num_users=None,
transform=None,
target_transform=None,
):
with open(data_root, "r") as f:
dataset = json.load(f)
user_ids = []
for _, chunk_data in dataset:
user_ids.extend(list(chunk_data["user_data"].keys()))
num_users = num_users if num_users is not None else len(user_ids)
user_ids = random.sample(user_ids, min(len(user_ids), num_users))
print(f"Creating dataset with {num_users} users")
self.transform = transform
self.target_transform = target_transform
self.data = {}
self.targets = {}
# Populate self.data and self.targets
for _, chunk_data in dataset:
for user_id in user_ids:
if user_id in set(chunk_data["users"]):
self.data[user_id] = [
np.array(img) for img in chunk_data["user_data"][user_id]["x"]
]
self.targets[user_id] = list(chunk_data["user_data"][user_id]["y"])
def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
for user_id in self.data.keys():
yield self.__getitem__(user_id)
def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
if user_id not in self.data or user_id not in self.targets:
return [], []
user_imgs, user_targets = self.data[user_id], self.targets[user_id]
user_imgs = [
Image.fromarray(img.reshape(FemnistDataset.IMAGE_SIZE)) for img in user_imgs
]
user_imgs = [self.transform(img) for img in user_imgs]
if self.target_transform is not None:
user_targets = [self.target_transform(target) for target in user_targets]
return user_imgs, user_targets
def __len__(self) -> int:
return len(self.data)
class FemnistDataset(Dataset):
IMAGE_SIZE = (28, 28)
def __init__(
self,
data_root,
num_users=None,
transform=None,
target_transform=None,
):
with open(data_root, "r") as f:
dataset = json.load(f)
user_ids = dataset["users"]
num_users = num_users if num_users is not None else len(user_ids)
user_ids = random.sample(user_ids, min(len(user_ids), num_users))
print(f"Creating dataset with {num_users} users")
self.transform = transform
self.target_transform = target_transform
self.data = {}
self.targets = {}
# Populate self.data and self.targets
for user_id in user_ids:
if user_id in set(dataset["users"]):
self.data[user_id] = [
np.array(img) for img in dataset["user_data"][user_id]["x"]
]
self.targets[user_id] = list(dataset["user_data"][user_id]["y"])
def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
for user_id in self.data.keys():
yield self.__getitem__(user_id)
def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
if user_id not in self.data or user_id not in self.targets:
return [], []
user_imgs, user_targets = self.data[user_id], self.targets[user_id]
user_imgs = [
Image.fromarray(img.reshape(FemnistDataset.IMAGE_SIZE)) for img in user_imgs
]
user_imgs = [self.transform(img) for img in user_imgs]
if self.target_transform is not None:
user_targets = [self.target_transform(target) for target in user_targets]
return user_imgs, user_targets
def __len__(self) -> int:
return len(self.data)
# NLP Models
class Sent140StackedLSTMModel(nn.Module):
def __init__(
self, seq_len, num_classes, emb_size, n_hidden, vocab_size, dropout_rate, **kwargs
):
super(Sent140StackedLSTMModel, self).__init__()
self.seq_len = seq_len
self.num_classes = num_classes
self.n_hidden = n_hidden
self.vocab_size = vocab_size
self.emb_size = emb_size
self.dropout_rate = dropout_rate
self.embedding = nn.Embedding(self.vocab_size, self.emb_size)
self.stacked_lstm = nn.LSTM(
self.emb_size, self.n_hidden, 2, batch_first=True, dropout=self.dropout_rate
)
self.fc1 = nn.Linear(self.n_hidden, self.num_classes)
self.dropout = nn.Dropout(p=self.dropout_rate)
# self.out = nn.Linear(128, self.num_classes)
def set_embedding_weights(self, emb_matrix, trainable=False):
self.embedding.weight = torch.nn.Parameter(emb_matrix)
if not trainable:
self.embedding.weight.requires_grad = False
def forward(self, features, input_embeds=None):
# seq_lens = torch.sum(features != (self.vocab_size - 1), 1) - 1
if features is not None:
x = self.embedding(features)
else:
x = input_embeds
outputs, _ = self.stacked_lstm(x)
# outputs = outputs[torch.arange(outputs.size(0)), seq_lens]
pred = self.fc1(self.dropout(outputs[:, -1]))
return pred
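# Hedged instantiation sketch (hyperparameters are hypothetical): the model accepts
# either token indices ("features") or precomputed soft embeddings ("input_embeds"),
# which is the hook the canary designer relies on.
#
#   model = Sent140StackedLSTMModel(seq_len=25, num_classes=2, emb_size=300,
#                                   n_hidden=100, vocab_size=10000, dropout_rate=0.1)
#   tokens = torch.randint(0, 10000, (4, 25))      # batch of token indices
#   logits = model(tokens)                         # standard path, shape (4, 2)
#   soft = torch.rand(1, 25, 300)                  # (batch, seq_len, emb_size)
#   logits_soft = model(None, input_embeds=soft)   # soft-embedding path, shape (1, 2)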
class ShakespeareModel(nn.Module):
def __init__(self, seq_len, num_classes, n_hidden, dropout_rate=0.0, **kwargs):
super(ShakespeareModel, self).__init__()
self.seq_len = seq_len
self.num_classes = num_classes # Number of characters supported
self.n_hidden = n_hidden
self.dropout_rate = dropout_rate
self.embedding = nn.Embedding(self.num_classes, 8)
self.stacked_lstm = nn.LSTM(
8, self.n_hidden, 2, batch_first=True, dropout=self.dropout_rate
)
self.out = nn.Linear(self.n_hidden, self.num_classes)
def forward(self, features, input_embeds=None):
if features is not None:
x = self.embedding(features)
else:
x = input_embeds
outputs, _ = self.stacked_lstm(x)
pred = self.out(outputs[:, -1])
return pred
# Data providers
def build_data_provider_shakespeare(data_config):
# Local testing
# train_split = "/data/train/all_data_0_2_keep_0_train_9.json"
# test_split = "/data/test/all_data_0_2_keep_0_test_9.json"
# Full splits
train_split = "/data/train/all_data_0_0_keep_0_train_9.json"
test_split = "/data/test/all_data_0_0_keep_0_test_9.json"
train_dataset = ShakespeareDataset(data_root=data_config.data_root + train_split)
test_dataset = ShakespeareDataset(data_root=data_config.data_root + test_split)
dataloader = LEAFDataLoader(
train_dataset,
test_dataset,
test_dataset,
batch_size=data_config.local_batch_size,
drop_last=True,
)
data_provider = LEAFDataProvider(dataloader)
return data_provider
def build_data_provider_sent140(
local_batch_size, vocab_size, num_users, user_dist, max_seq_len, drop_last, data_path
):
train_dataset = Sent140Dataset(
data_root=data_path + "/data/train/all_data_0_15_keep_1_train_6.json",
max_seq_len=max_seq_len,
)
eval_dataset = Sent140Dataset(
data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
max_seq_len=max_seq_len,
)
test_dataset = Sent140Dataset(
data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
max_seq_len=max_seq_len,
)
dataloader = LEAFDataLoader(
train_dataset,
eval_dataset,
test_dataset,
batch_size=local_batch_size,
drop_last=drop_last,
)
data_provider = LEAFDataProvider(dataloader)
return data_provider, train_dataset.vocab_size, train_dataset.embedding_size
def build_data_provider_cifar10(data_root, local_batch_size, examples_per_user, drop_last: bool = False, disable_aug=False):
if disable_aug:
transform_list = [transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)), transforms.ToTensor()]
else:
transform_list = [
transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
transform = transforms.Compose(transform_list)
train_dataset = CIFAR10(
root=data_root, train=True, download=False, transform=transform
)
val_dataset = CIFAR10(
root=data_root, train=False, download=False, transform=transform
)
test_dataset = CIFAR10(
root=data_root, train=False, download=False, transform=transform
)
sharder = SequentialSharder(examples_per_shard=examples_per_user)
fl_data_loader = DataLoader(
train_dataset, val_dataset, test_dataset, sharder, local_batch_size, drop_last
)
data_provider = DataProvider(fl_data_loader)
print(f"Clients in total: {data_provider.num_train_users()}")
return data_provider
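# Note (added for clarity): SequentialSharder, as used here, assigns `examples_per_user`
# consecutive samples to each simulated client, so with the standard 50,000-image
# CIFAR-10 train split, examples_per_user=50 would give 1,000 clients; the exact count
# is reported by the print above.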
def build_data_provider_celeba(data_config, trainer_config, disable_aug=False):
IMAGE_SIZE: int = 32
if disable_aug:
IMAGE_SIZE = 128
transform_list = [transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)), transforms.ToTensor()]
else:
transform_list = [
transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
transform = transforms.Compose(transform_list)
# Local testing
# train_split = "/data/train/all_data_0_01_keep_0_train_9.json" if not "celeba_iid" in trainer_config.args.dataset else "/data/train/all_data_0_01_01_keep_1_train_9.json"
# test_split = "/data/test/all_data_0_01_keep_0_test_9.json" if not "celeba_iid" in trainer_config.args.dataset else "/data/test/all_data_0_01_01_keep_1_test_9.json"
# GPU Debug (Non-IID)
# train_split = "/data/train/all_data_0_1_keep_1_train_9.json"
# test_split = "/data/test/all_data_0_1_keep_1_test_9.json"
train_split = "/data/train/all_data_0_0_keep_0_train_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/train/all_data_0_0_0_keep_0_train_9_iid.json"
test_split = "/data/test/all_data_0_0_keep_0_test_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/test/all_data_0_0_0_keep_0_test_9_iid.json"
train_dataset = CelebaDataset( # data_root arg should be leaf/celeba
data_root=data_config.data_root + train_split,
image_root=data_config.data_root+"/data/raw/",
transform=transform,
)
test_dataset = CelebaDataset(
data_root=data_config.data_root + test_split,
transform=transform,
image_root=train_dataset.image_root,
)
print(
f"Created datasets with {len(train_dataset)} train users and {len(test_dataset)} test users"
)
dataloader = LEAFDataLoader(
train_dataset,
test_dataset,
test_dataset,
batch_size=data_config.local_batch_size,
drop_last=data_config.drop_last,
)
# data_provider = LEAFDataProvider(dataloader)
data_provider = DataProvider(dataloader)
print(f"Training clients in total: {data_provider.num_train_users()}")
return data_provider
def build_data_provider_femnist(data_config, disable_aug=False):
if disable_aug:
transform_list = [transforms.ToTensor()]
else:
transform_list = [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)),]
transform = transforms.Compose(transform_list)
# Local debugging
train_split = data_config.data_root + "/data/train/all_data_0_niid_05_keep_0_train_9.json"
test_split = data_config.data_root + "/data/test/all_data_0_niid_05_keep_0_test_9.json"
train_dataset = FemnistDataset(
data_root=train_split,
transform=transform,
)
test_dataset = FemnistDataset(
data_root=test_split,
transform=transform,
)
print(
f"Created datasets with {len(train_dataset)} train users and {len(test_dataset)} test users"
)
dataloader = LEAFDataLoader(
train_dataset,
test_dataset,
test_dataset,
batch_size=data_config.local_batch_size,
)
data_provider = LEAFDataProvider(dataloader)
print(f"Training clients in total: {data_provider.num_train_users()}")
return data_provider
def _get_checkpoint_path(cfg):
filename = cfg.args.checkpoint_path
filename += f"/FLSim_dp={cfg.args.dp_level}_model={cfg.args.model_arch}_dataset={cfg.args.dataset}_num_clients={cfg.args.users_per_round}_test_size={cfg.args.local_batch_size}"
filename += f"_insert_test_acc={cfg.args.canary_insert_test_acc}_insert_train_acc={cfg.args.canary_insert_train_acc}_client_epochs={cfg.args.client_epochs}"
if cfg.args.epsilon != -1 or cfg.args.sigma != 0:
if cfg.args.epsilon != -1:
filename += f"_private_eps={cfg.args.epsilon}_delta={cfg.args.delta}"
else:
filename += f"_private_sigma={cfg.args.sigma}_delta={cfg.args.delta}"
filename += ".tar"
return filename
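# Example of a generated path (illustrative values only), written as one string:
#   <checkpoint_path>/FLSim_dp=user_level_model=resnet_dataset=CIFAR10_num_clients=10
#   _test_size=128_insert_test_acc=-1_insert_train_acc=-1_client_epochs=1.tar
# with "_private_eps=..." or "_private_sigma=..." inserted before ".tar" when DP is
# enabled (epsilon != -1 or sigma != 0).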
def _load_checkpoint(trainer_cfg, model, device="cpu"):
checkpoint_path = _get_checkpoint_path(trainer_cfg)
print(f"\n====== Attempting to load checkpoint {checkpoint_path} ======")
checkpoint = {}
try:
checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
model.load_state_dict(checkpoint["state_dict"])
if "epsilon" not in checkpoint:
checkpoint["epsilon"] = float("inf")
if "delta" not in checkpoint:
checkpoint["delta"] = max(0, trainer_cfg.args.delta)
if "noise_multiplier" not in checkpoint:
checkpoint["noise_multiplier"] = max(0, trainer_cfg.args.sigma)
if "steps" not in checkpoint:
checkpoint["steps"] = -1 # Let CanarySyncTrainer compute this
if "train_acc" not in checkpoint:
checkpoint["train_acc"] = 0
if "test_acc" not in checkpoint:
checkpoint["test_acc"] = 0
print(f"Checkpointed FL model loaded successfully epoch={checkpoint['epoch']}, round={checkpoint['round']}")
print(f"Checkpointed model DP guarantees (eps, delta)=({checkpoint['epsilon']}, {checkpoint['delta']}) sigma={checkpoint['noise_multiplier']}")
# TODO: Rework this?
trainer_cfg.args.canary_insert_epoch = 1
trainer_cfg.args.canary_insert_test_acc = -1
trainer_cfg.args.canary_insert_train_acc = -1
except FileNotFoundError:
print("Checkpoint not found for the specific combination of parameters, resorting to training model from scratch")
return checkpoint
def create_model(model_config, data_config, in_channels, vocab_size, emb_size):
if model_config.model_arch == "resnet":
model = Resnet18(num_classes=model_config.num_classes, in_channels=in_channels)
elif model_config.model_arch == "lstm":
model = Sent140StackedLSTMModel(
seq_len=data_config.max_seq_len,
num_classes=model_config.num_classes,
emb_size=emb_size,
n_hidden=model_config.n_hidden,
vocab_size=vocab_size,
dropout_rate=model_config.dropout,
)
elif model_config.model_arch == "shakes_lstm":
model = ShakespeareModel(
seq_len=model_config.seq_len,
n_hidden=model_config.n_hidden,
num_classes=model_config.num_classes,
dropout_rate=model_config.dropout,
)
else:
model = SimpleConvNet(num_classes=model_config.num_classes, in_channels=in_channels, dropout_rate=model_config.dropout)
return model
def create_data_provider(trainer_config, data_config):
in_channels, vocab_size, emb_size = 0, 0, 0
if trainer_config.args.dataset == "CIFAR10":
data_provider = build_data_provider_cifar10(
data_root=data_config.data_root,
local_batch_size=data_config.local_batch_size,
examples_per_user=data_config.examples_per_user,
drop_last=False,
disable_aug=trainer_config.args.prettify_samples
)
in_channels = 3
elif "celeba" in trainer_config.args.dataset:
data_provider = build_data_provider_celeba(data_config, trainer_config, disable_aug=trainer_config.args.prettify_samples)
in_channels = 3
elif "femnist" in trainer_config.args.dataset:
data_provider = build_data_provider_femnist(data_config, disable_aug=trainer_config.args.prettify_samples)
in_channels = 1
elif "shakespeare" in trainer_config.args.dataset:
data_provider = build_data_provider_shakespeare(data_config)
else:
data_provider, vocab_size, emb_size = build_data_provider_sent140(
local_batch_size=data_config.local_batch_size,
vocab_size=data_config.vocab_size,
num_users=data_config.num_users,
user_dist=data_config.user_dist,
max_seq_len=data_config.max_seq_len,
drop_last=False,
data_path=data_config.data_root,
)
return data_provider, in_channels, vocab_size, emb_size
# Main
def main_worker(
trainer_config,
data_config,
model_config,
use_cuda_if_available: bool = True,
distributed_world_size: int = 1,
) -> None:
original_trainer_config = copy.deepcopy(trainer_config) # If loading checkpoints, the trainer config is modified to change canary insert epochs to 1
emb_size, vocab_size = 0,0 # For sent140
checkpoint_path = _get_checkpoint_path(trainer_config)
if (trainer_config.args.fl_load_checkpoint) and not os.path.isfile(checkpoint_path):
print(f"Checkpoint {checkpoint_path} does not exist, experiment exiting early...")
return
if trainer_config.checkpoint_only:
print(f"Checkpoint only run - will save checkpoint as {checkpoint_path}")
data_provider, in_channels, vocab_size, emb_size = create_data_provider(trainer_config, data_config)
for exp_num in range(0, data_config.canary_iters):
torch.cuda.empty_cache()
trainer_config = copy.deepcopy(original_trainer_config)
if not data_config.debug_config:
trainer_config["plot_path"] = get_plot_path(trainer_config.args, exp_num=exp_num, file_suffix="")
trainer_config["result_path"] = get_plot_path(trainer_config.args, exp_num, ".tar")
model = create_model(model_config, data_config, in_channels, vocab_size, emb_size)
print(model)
cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
checkpoint = {}
if trainer_config.load_checkpoint:
checkpoint = _load_checkpoint(trainer_config, model, device)
global_model = FLModel(model, device)
if cuda_enabled:
global_model.fl_cuda()
trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
final_model, eval_score = trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
checkpoint=checkpoint
)
if trainer_config.checkpoint_only and not trainer.insert_acc_achieved:
trainer.logger.info("Failed to achieve insert accuracy, checkpointing model anyway...")
trainer._checkpoint_model(trainer_config.epochs, 1, final=True)
if not hasattr(trainer, "canary_analyser") and data_config.canary_iters > 1:
trainer.logger.info("Experiment ended early - either checkpoint only or model failed to reach insertion epoch/accuracy for canary testing")
return
@hydra.main(config_path=None, config_name="celeba_config", version_base="1.1")
def run(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg))
trainer_config = cfg.trainer
data_config = cfg.data
model_config = cfg.model
main_worker(
trainer_config,
data_config,
model_config,
cfg.use_cuda_if_available,
cfg.distributed_world_size,
)
if __name__ == "__main__":
cfg = maybe_parse_json_config()
run(cfg)
| canife-main | FLSim/examples/canary_example.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""In this tutorial, we will train a binary classifier on LEAF's CelebA dataset with FLSim.
Before running this file, you need to download the dataset and partition the data by users.
1. Clone the leaf dataset by running `git clone https://github.com/TalwalkarLab/leaf.git`
2. Change directory to celeba: `cd leaf/data/celeba || exit`
3. Download the data from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
- Download or request the metadata files `identity_CelebA.txt` and `list_attr_celeba.txt`,
and place them inside the data/raw folder.
- Download the celebrity faces dataset from the same site. Place the images in a folder
named `img_align_celeba` in the same folder as above.
4. Run the pre-processing script:
- `./preprocess.sh --sf 0.01 -s niid -t 'user' --tf 0.90 -k 1 --spltseed 1`
Typical usage example:
python3 celeba_example.py --config-file configs/celeba_config.json
"""
import json
import os
import random
from typing import Any, Iterator, List, Tuple
import flsim.configs # noqa
import hydra # @manual
import torch
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
DataProvider,
FLModel,
LEAFDataLoader,
MetricsReporter,
Resnet18,
SimpleConvNet,
)
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.datasets import ImageFolder
from canife.utils import get_plot_path
class CelebaDataset(Dataset):
def __init__(
self,
data_root,
image_root,
num_users=None,
transform=None,
target_transform=None,
):
with open(data_root, "r+") as f:
self.dataset = json.load(f)
user_ids = self.dataset["users"]
num_users = num_users if num_users is not None else len(user_ids)
user_ids = random.sample(user_ids, min(len(user_ids), num_users))
self.transform = transform
self.target_transform = target_transform
self.image_root = image_root
self.image_folder = ImageFolder(image_root, transform)
self.data = {}
self.targets = {}
# Populate self.data and self.targets
for user_id, user_data in self.dataset["user_data"].items():
if user_id in user_ids:
self.data[user_id] = [
int(os.path.splitext(img_path)[0]) for img_path in user_data["x"]
]
self.targets[user_id] = list(user_data["y"])
def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
for user_id in self.data.keys():
yield self.__getitem__(user_id)
def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
if user_id not in self.data or user_id not in self.targets:
raise IndexError(f"User {user_id} is not in dataset")
user_imgs = []
for image_index in self.data[user_id]:
user_imgs.append(self.image_folder[image_index - 1][0])
user_targets = self.targets[user_id]
if self.target_transform is not None:
user_targets = [self.target_transform(target) for target in user_targets]
return user_imgs, user_targets
def __len__(self) -> int:
return len(self.data)
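# Note on CelebaDataset.__getitem__ above (added for clarity, assuming the standard
# CelebA naming): LEAF lists each user's images by filename (e.g. "000123.jpg"), so the
# stem is parsed to an int and decremented by one to index the 0-based ImageFolder built
# over data/raw.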
def build_data_provider(data_config, trainer_config):
IMAGE_SIZE: int = 32
transform = transforms.Compose(
[
transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
# Local testing
# train_split = "/data/train/all_data_0_01_keep_0_train_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/train/all_data_0_01_01_keep_0_train_9_iid.json"
# test_split = "/data/test/all_data_0_01_keep_0_test_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/test/all_data_0_01_01_keep_0_test_9_iid.json"
train_split = "/data/train/all_data_0_0_keep_0_train_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/train/all_data_0_0_0_keep_0_train_9_iid.json"
test_split = "/data/test/all_data_0_0_keep_0_test_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/test/all_data_0_0_0_keep_0_test_9_iid.json"
train_dataset = CelebaDataset( # data_root arg should be leaf/celeba
data_root=data_config.data_root + train_split,
image_root=data_config.data_root+"/data/raw/",
transform=transform,
)
test_dataset = CelebaDataset(
data_root=data_config.data_root + test_split,
transform=transform,
image_root=train_dataset.image_root,
)
print(
f"Created datasets with {len(train_dataset)} train users and {len(test_dataset)} test users"
)
dataloader = LEAFDataLoader(
train_dataset,
test_dataset,
test_dataset,
batch_size=data_config.local_batch_size,
drop_last=data_config.drop_last,
)
# data_provider = LEAFDataProvider(dataloader)
data_provider = DataProvider(dataloader)
print(f"Training clients in total: {data_provider.num_train_users()}")
return data_provider
def _get_checkpoint_path(cfg):
filename = cfg.args.checkpoint_path
filename += f"/FLSim_dp={cfg.args.dp_level}_model={cfg.args.model_arch}_dataset={cfg.args.dataset}_num_clients={cfg.args.users_per_round}_test_size={cfg.args.local_batch_size}"
filename += f"_insert_test_acc={cfg.args.canary_insert_test_acc}_insert_train_acc={cfg.args.canary_insert_train_acc}"
filename += ".tar"
return filename
def main_worker(
trainer_config,
data_config,
model_config,
use_cuda_if_available: bool = True,
distributed_world_size: int = 1,
) -> None:
checkpoint_path = _get_checkpoint_path(trainer_config)
if (trainer_config.args.fl_load_checkpoint) and not os.path.isfile(checkpoint_path):
print(f"Checkpoint {checkpoint_path} does not exist, experiment exiting early...")
return
data_provider = build_data_provider(data_config, trainer_config)
for exp_num in range(0, data_config.canary_iters):
torch.cuda.empty_cache()
if not data_config.debug_config:
trainer_config["plot_path"] = get_plot_path(trainer_config.args, exp_num=exp_num, file_suffix="")
trainer_config["result_path"] = get_plot_path(trainer_config.args, exp_num, ".tar")
if model_config.model_arch == "resnet":
model = Resnet18(num_classes=2)
else:
model = SimpleConvNet(num_classes=2, dropout_rate=model_config.dropout)
cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
print(model)
# pyre-fixme[6]: Expected `Optional[str]` for 2nd param but got `device`.
global_model = FLModel(model, device)
if cuda_enabled:
global_model.fl_cuda()
trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
final_model, eval_score = trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
test_metrics = trainer.test(
data_provider=data_provider,
metrics_reporter=MetricsReporter([Channel.STDOUT]),
)
if hasattr(trainer, "canary_analyser") and trainer.canary_analyser:
trainer.accuracy_metrics["test"].append(test_metrics["Accuracy"])
trainer.canary_analyser.set_accuracy_metrics(trainer.accuracy_metrics)
trainer.logger.info(f"Final accuracy metrics {trainer.accuracy_metrics}")
trainer.logger.info("Analysing canary tests...")
trainer.canary_analyser.analyse()
else:
if data_config.canary_iters > 1:
trainer.logger.info("Experiment ended early - either checkpoint only or model failed to reach insertion epoch/accuracy for canary testing")
return
@hydra.main(config_path=None, config_name="celeba_config", version_base="1.1")
def run(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg))
trainer_config = cfg.trainer
data_config = cfg.data
model_config = cfg.model
main_worker(
trainer_config,
data_config,
model_config,
cfg.use_cuda_if_available,
cfg.distributed_world_size,
)
if __name__ == "__main__":
cfg = maybe_parse_json_config()
run(cfg)
| canife-main | FLSim/examples/old_examples/celeba_example.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""In this tutorial, we will train a binary sentiment classifier on LEAF's Sent140 dataset with FLSim.
Before running this file, you need to download the dataset and partition the data by users. We
provide the script get_data.sh for this purpose.
Typical usage example:
FedAvg
python3 sent140_example.py --config-file configs/sent140_config.json
FedBuff + SGDM
python3 sent140_example.py --config-file configs/sent140_fedbuff_config.json
"""
import itertools
import json
import re
import string
import unicodedata
from typing import List
import flsim.configs # noqa
import hydra # @manual
import torch
import torch.nn as nn
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
FLModel,
LEAFDataLoader,
LEAFDataProvider,
MetricsReporter,
)
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset
class Sent140StackedLSTMModel(nn.Module):
def __init__(
self, seq_len, num_classes, emb_size, n_hidden, vocab_size, dropout
):
super(Sent140StackedLSTMModel, self).__init__()
self.seq_len = seq_len
self.num_classes = num_classes
self.n_hidden = n_hidden
self.vocab_size = vocab_size
self.emb_size = emb_size
self.dropout = dropout
self.embedding = nn.Embedding(self.vocab_size + 1, self.emb_size)
self.stacked_lstm = nn.LSTM(
self.emb_size, self.n_hidden, 2, batch_first=True, dropout=self.dropout
)
self.fc1 = nn.Linear(self.n_hidden, self.num_classes)
self.dropout = nn.Dropout(p=self.dropout)
self.out = nn.Linear(128, self.num_classes)
def set_embedding_weights(self, emb_matrix, trainable=False):
self.embedding.weight = torch.nn.Parameter(emb_matrix)
if not trainable:
self.embedding.weight.requires_grad = False
def forward(self, features):
seq_lens = torch.sum(features != (self.vocab_size - 1), 1) - 1
x = self.embedding(features)
outputs, _ = self.stacked_lstm(x)
outputs = outputs[torch.arange(outputs.size(0)), seq_lens]
pred = self.fc1(self.dropout(outputs))
return pred
class Sent140Dataset(Dataset):
def __init__(self, data_root, max_seq_len):
self.data_root = data_root
self.max_seq_len = max_seq_len
self.all_letters = {c: i for i, c in enumerate(string.printable)}
self.num_letters = len(self.all_letters)
self.UNK: int = self.num_letters
self.vocab_size = 9930
self.embedding_size = 300
with open(data_root, "r+") as f:
self.dataset = json.load(f)
self.data = {}
self.targets = {}
self.num_classes = 2
# Populate self.data and self.targets
for user_id, user_data in self.dataset["user_data"].items():
self.data[user_id] = self.process_x(list(user_data["x"]))
self.targets[user_id] = self.process_y(list(user_data["y"]))
def __len__(self):
return len(self.data)
def __iter__(self):
for user_id in self.data.keys():
yield self.__getitem__(user_id)
def __getitem__(self, user_id: str):
if user_id not in self.data or user_id not in self.targets:
raise IndexError(f"User {user_id} is not in dataset")
return self.data[user_id], self.targets[user_id]
def unicodeToAscii(self, s):
return "".join(
c
for c in unicodedata.normalize("NFD", s)
if unicodedata.category(c) != "Mn" and c in self.all_letters
)
def line_to_indices(self, line: str, max_seq_len: int):
line_list = self.split_line(line) # split phrase in words
chars = self.flatten_list([list(word) for word in line_list])
# padding
indices: List[int] = [
self.all_letters.get(letter, self.UNK)
for i, letter in enumerate(chars)
if i < max_seq_len
]
indices = indices + ([self.UNK] * (max_seq_len - len(indices)))
return indices
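    # Worked example for line_to_indices (added for illustration, using the class
    # defaults): with max_seq_len=5, line_to_indices("hi") splits the line into words,
    # flattens to the characters ['h', 'i'], maps them to their positions in
    # string.printable (17 and 18), and pads with self.UNK (100, since
    # len(string.printable) == 100), giving [17, 18, 100, 100, 100].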
def process_x(self, raw_x_batch):
x_batch = [e[4] for e in raw_x_batch]
x_batch = [self.line_to_indices(e, self.max_seq_len) for e in x_batch]
x_batch = torch.LongTensor(x_batch)
return x_batch
def process_y(self, raw_y_batch):
y_batch = [int(e) for e in raw_y_batch]
return y_batch
def split_line(self, line):
"""split given line/phrase into list of words
Args:
line: string representing phrase to be split
Return:
list of strings, with each string representing a word
"""
return re.findall(r"[\w']+|[.,!?;]", line)
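    # Example for split_line (added for illustration): the regex keeps words (including
    # apostrophes) and common punctuation as separate tokens, e.g.
    # split_line("Hello, world!") -> ['Hello', ',', 'world', '!'].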
def flatten_list(self, nested_list):
return list(itertools.chain.from_iterable(nested_list))
def build_data_provider_vocab(
local_batch_size, vocab_size, num_users, user_dist, max_seq_len, drop_last, data_path
):
train_dataset = Sent140Dataset(
data_root=data_path + "/data/train/all_data_0_15_keep_1_train_6.json",
max_seq_len=max_seq_len,
)
eval_dataset = Sent140Dataset(
data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
max_seq_len=max_seq_len,
)
test_dataset = Sent140Dataset(
data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
max_seq_len=max_seq_len,
)
dataloader = LEAFDataLoader(
train_dataset,
eval_dataset,
test_dataset,
batch_size=local_batch_size,
drop_last=drop_last,
)
data_provider = LEAFDataProvider(dataloader)
return data_provider, train_dataset.vocab_size, train_dataset.embedding_size
def main_worker(
trainer_config,
model_config,
data_config,
use_cuda_if_available: bool = True,
distributed_world_size: int = 1,
) -> None:
data_provider, vocab_size, emb_size = build_data_provider_vocab(
local_batch_size=data_config.local_batch_size,
vocab_size=data_config.vocab_size,
num_users=data_config.num_users,
user_dist=data_config.user_dist,
max_seq_len=data_config.max_seq_len,
drop_last=False,
data_path=data_config.data_root
)
model = Sent140StackedLSTMModel(
seq_len=data_config.max_seq_len,
num_classes=model_config.num_classes,
emb_size=emb_size,
n_hidden=model_config.n_hidden,
vocab_size=vocab_size,
        dropout=model_config.dropout_rate,
)
cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
print(model)
# pyre-fixme[6]: Expected `Optional[str]` for 2nd param but got `device`.
global_model = FLModel(model, device)
if cuda_enabled:
global_model.fl_cuda()
trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
final_model, eval_score = trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=distributed_world_size,
)
trainer.test(
data_provider=data_provider,
metrics_reporter=MetricsReporter([Channel.STDOUT]),
)
@hydra.main(config_path=None, config_name="sent140_config", version_base="1.1")
def run(cfg: DictConfig) -> None:
print(OmegaConf.to_yaml(cfg))
trainer_config = cfg.trainer
model_config = cfg.model
data_config = cfg.data
main_worker(trainer_config, model_config, data_config)
if __name__ == "__main__":
cfg = maybe_parse_json_config()
run(cfg)
| canife-main | FLSim/examples/old_examples/sent140_example.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""In this tutorial, we will train an image classifier with FLSim to simulate a federated learning training environment.
With this tutorial, you will learn the following key components of FLSim:
1. Data loading
2. Model construction
3. Trainer construction
Typical usage example:
python3 cifar10_example.py --config-file configs/cifar10_config.json
"""
import flsim.configs # noqa
import hydra
import torch
from flsim.data.data_sharder import SequentialSharder
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
DataLoader,
DataProvider,
FLModel,
MetricsReporter,
Resnet18,
SimpleConvNet,
)
from hydra.utils import instantiate
from omegaconf import DictConfig
from torchvision import transforms
from torchvision.datasets.cifar import CIFAR10
from canife.utils import get_plot_path
IMAGE_SIZE = 32
def build_data_provider(data_root, local_batch_size, examples_per_user, drop_last: bool = False):
transform = transforms.Compose(
[
transforms.Resize(IMAGE_SIZE),
transforms.CenterCrop(IMAGE_SIZE),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
train_dataset = CIFAR10(
root=data_root, train=True, download=False, transform=transform
)
val_dataset = CIFAR10(
root=data_root, train=False, download=False, transform=transform
)
test_dataset = CIFAR10(
root=data_root, train=False, download=False, transform=transform
)
sharder = SequentialSharder(examples_per_shard=examples_per_user)
fl_data_loader = DataLoader(
train_dataset, val_dataset, test_dataset, sharder, local_batch_size, drop_last
)
data_provider = DataProvider(fl_data_loader)
print(f"Clients in total: {data_provider.num_train_users()}")
return data_provider
def main(
trainer_config,
data_config,
model_config,
use_cuda_if_available: bool = True,
) -> None:
data_provider = build_data_provider(
data_root=data_config.data_root,
local_batch_size=data_config.local_batch_size,
examples_per_user=data_config.examples_per_user,
drop_last=False,
)
for exp_num in range(0, data_config.canary_iters):
if not data_config.debug_config:
trainer_config["plot_path"] = get_plot_path(trainer_config.args, exp_num=exp_num, file_suffix="")
trainer_config["result_path"] = get_plot_path(trainer_config.args, exp_num, ".tar")
cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
if model_config.model_arch == "resnet":
model = Resnet18(num_classes=10)
else:
model = SimpleConvNet(num_classes=10, dropout_rate=model_config.dropout)
# pyre-fixme[6]: Expected `Optional[str]` for 2nd param but got `device`.
global_model = FLModel(model, device)
if cuda_enabled:
global_model.fl_cuda()
trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
print(f"Created {trainer_config._target_}")
metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
final_model, eval_score = trainer.train(
data_provider=data_provider,
metrics_reporter=metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=1,
)
test_metrics = trainer.test(
data_provider=data_provider,
metrics_reporter=MetricsReporter([Channel.STDOUT]),
)
if hasattr(trainer, "canary_analyser") and trainer.canary_analyser:
trainer.accuracy_metrics["test"].append(test_metrics["Accuracy"])
trainer.canary_analyser.set_accuracy_metrics(trainer.accuracy_metrics)
trainer.logger.info(f"Final accuracy metrics {trainer.accuracy_metrics}")
trainer.logger.info("Analysing canary tests...")
trainer.canary_analyser.analyse()
else:
if data_config.canary_iters > 1:
trainer.logger.info("Experiment ended early - either checkpoint only or model failed to reach insertion epoch/accuracy for canary testing")
return
@hydra.main(config_path=None, config_name="cifar10_tutorial", version_base="1.1")
def run(cfg: DictConfig) -> None:
# print(OmegaConf.to_yaml(cfg))
trainer_config = cfg.trainer
data_config = cfg.data
model_config = cfg.model
main(
trainer_config,
data_config,
model_config
)
if __name__ == "__main__":
cfg = maybe_parse_json_config()
run(cfg)
| canife-main | FLSim/examples/old_examples/cifar10_example.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import json
from typing import Any, Dict
def _handle_optimizer(trainer):
if "optimizer" not in trainer:
return
trainer["client"] = trainer["client"] if "client" in trainer else {}
client = trainer["client"]
client["optimizer"] = trainer["optimizer"]
del trainer["optimizer"]
optimizer = client["optimizer"]
if "type" not in optimizer:
pass
elif "sgd" == optimizer["type"].lower():
optimizer["_base_"] = "base_optimizer_sgd"
elif "fedprox" == optimizer["type"].lower():
optimizer["_base_"] = "base_optimizer_fedprox"
optimizer.pop("type", None)
def _handle_optimizer_in_client(client):
if "optim_config" not in client:
return
client["optimizer"] = client["optim_config"]
del client["optim_config"]
optimizer = client["optimizer"]
if "type" not in optimizer:
pass
elif "sgd" == optimizer["type"].lower():
optimizer["_base_"] = "base_optimizer_sgd"
elif "fedprox" == optimizer["type"].lower():
optimizer["_base_"] = "base_optimizer_fedprox"
optimizer.pop("type", None)
def _handle_lr_scheduler(trainer):
if "local_lr_scheduler" not in trainer:
return
trainer["client"] = trainer["client"] if "client" in trainer else {}
client = trainer["client"]
client["lr_scheduler"] = trainer["local_lr_scheduler"]
del trainer["local_lr_scheduler"]
lr_scheduler = client["lr_scheduler"]
if "type" not in lr_scheduler:
pass
elif "constant" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_constant_lr_scheduler"
elif "default" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_constant_lr_scheduler"
elif "batch_size_normalizer" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_lr_batch_size_normalizer_scheduler"
elif "armijo_line_search" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_armijo_line_search_lr_scheduer"
lr_scheduler.pop("type", None)
def _handle_lr_scheduler_in_client(client):
if "local_lr_scheduler" not in client:
return
client["lr_scheduler"] = client["local_lr_scheduler"]
del client["local_lr_scheduler"]
lr_scheduler = client["lr_scheduler"]
if "type" not in lr_scheduler:
pass
elif "constant" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_constant_lr_scheduler"
elif "default" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_constant_lr_scheduler"
elif "batch_size_normalizer" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_lr_batch_size_normalizer_scheduler"
elif "armijo_line_search" == lr_scheduler["type"].lower():
lr_scheduler["_base_"] = "base_armijo_line_search_lr_scheduer"
lr_scheduler.pop("type", None)
def _handle_trainer_to_client_params(trainer):
trainer["client"] = trainer["client"] if "client" in trainer else {}
client = trainer["client"]
if "user_epochs_per_round" in trainer:
client["epochs"] = trainer["user_epochs_per_round"]
del trainer["user_epochs_per_round"]
if "max_clip_norm_normalized" in trainer:
client["max_clip_norm_normalized"] = trainer["max_clip_norm_normalized"]
del trainer["max_clip_norm_normalized"]
if client["max_clip_norm_normalized"] is False:
del client["max_clip_norm_normalized"]
if "random_seed" in trainer:
client["random_seed"] = trainer["random_seed"]
del trainer["random_seed"]
if "store_local_models_and_optimizers" in trainer:
client["store_models_and_optimizers"] = trainer[
"store_local_models_and_optimizers"
]
del trainer["store_local_models_and_optimizers"]
if "shuffle_user_batch_ordering" in trainer:
client["shuffle_batch_order"] = trainer["shuffle_user_batch_ordering"]
del trainer["shuffle_user_batch_ordering"]
def _handle_renaming_client_params(client):
if (
"max_clip_norm_normalized" in client
and client["max_clip_norm_normalized"] is False
):
del client["max_clip_norm_normalized"]
if "store_local_models_and_optimizers" in client:
client["store_models_and_optimizers"] = client[
"store_local_models_and_optimizers"
]
del client["store_local_models_and_optimizers"]
if "shuffle_user_batch_ordering" in client:
client["shuffle_batch_order"] = client["shuffle_user_batch_ordering"]
del client["shuffle_user_batch_ordering"]
def _handle_timeout_simulator(trainer):
if "timeout_simulator_config" not in trainer:
return
trainer["timeout_simulator"] = trainer["timeout_simulator_config"]
del trainer["timeout_simulator_config"]
timeout_simulator = trainer["timeout_simulator"]
if "type" not in timeout_simulator:
pass
elif "never" == timeout_simulator["type"].lower():
timeout_simulator["_base_"] = "base_never_timeout_simulator"
elif "default" == timeout_simulator["type"].lower():
timeout_simulator["_base_"] = "base_never_timeout_simulator"
elif "gaussian" == timeout_simulator["type"].lower():
timeout_simulator["_base_"] = "base_gaussian_timeout_simulator"
timeout_simulator.pop("type", None)
if "base_gaussian_timeout_simulator" == timeout_simulator.get("_base_", None):
timeout_simulator["duration_distribution_generator"] = {}
if "mean_per_example" in timeout_simulator:
timeout_simulator["duration_distribution_generator"][
"training_duration_mean"
] = timeout_simulator["mean_per_example"]
del timeout_simulator["mean_per_example"]
if "std_per_example" in timeout_simulator:
timeout_simulator["duration_distribution_generator"][
"training_duration_sd"
] = timeout_simulator["std_per_example"]
del timeout_simulator["std_per_example"]
if "min_duration_per_example" in timeout_simulator:
timeout_simulator["duration_distribution_generator"][
"training_duration_min"
] = timeout_simulator["min_duration_per_example"]
del timeout_simulator["min_duration_per_example"]
def _handle_active_user_selector(trainer):
if "active_user_selector" not in trainer:
return
active_user_selector = trainer["active_user_selector"]
if "type" not in active_user_selector:
pass
elif "uniformly_random" == active_user_selector["type"].lower():
active_user_selector["_base_"] = "base_uniformly_random_active_user_selector"
elif "sequential" == active_user_selector["type"].lower():
active_user_selector["_base_"] = "base_sequential_active_user_selector"
elif "random_round_robin" == active_user_selector["type"].lower():
active_user_selector["_base_"] = "base_random_round_robin_active_user_selector"
elif "number_of_samples" == active_user_selector["type"].lower():
active_user_selector["_base_"] = "base_number_of_samples_active_user_selector"
elif "high_loss" == active_user_selector["type"].lower():
active_user_selector["_base_"] = "base_high_loss_active_user_selector"
elif "diversity_reporting" == active_user_selector["type"].lower():
active_user_selector["_base_"] = "base_diversity_reporting_user_selector"
elif "diversity_statistics_reporting" == active_user_selector["type"].lower():
active_user_selector[
"_base_"
] = "base_diversity_statistics_reporting_user_selector"
elif "uniformlydiversity_maximizing_random" == active_user_selector["type"].lower():
active_user_selector["_base_"] = "base_diversity_maximizing_user_selector"
active_user_selector.pop("type", None)
def _handle_aggregator_reducer(aggregator):
if "reducer_config" not in aggregator:
return
aggregator["reducer"] = aggregator["reducer_config"]
del aggregator["reducer_config"]
reducer = aggregator["reducer"]
if "type" not in reducer:
pass
elif "roundreducer" == reducer["type"].lower():
reducer["_base_"] = "base_reducer"
elif "dproundreducer" == reducer["type"].lower():
reducer["_base_"] = "base_dp_reducer"
elif "secureroundreducer" == reducer["type"].lower():
reducer["_base_"] = "base_secure_reducer"
elif "weighteddproundreducer" == reducer["type"].lower():
reducer["_base_"] = "base_weighted_dp_reducer"
reducer.pop("type", None)
if "fixedpoint_config" in reducer:
reducer["fixedpoint"] = reducer["fixedpoint_config"]
del reducer["fixedpoint_config"]
if (
len(reducer["fixedpoint"].keys()) != 1
or "all-layers" not in reducer["fixedpoint"].keys()
):
raise Exception(
"per-layer config for fixedpoint in secure round reducer "
"is no longer supported. Your (old) fixedpoint config should "
"have all-layers as the key for this script to work. "
"Please reach out to FL Simulator Users workplace group if "
"you need per-layer fixedpoint config support."
)
reducer["fixedpoint"] = reducer["fixedpoint"]["all-layers"]
reducer["fixedpoint"]["_base_"] = "base_fixedpoint"
def _handle_aggregator(trainer): # noqa
is_async_trainer = "async" in trainer["_base_"]
if "aggregator" not in trainer:
return
aggregator = trainer["aggregator"]
if "type" not in aggregator:
pass
elif "default" == aggregator["type"].lower() and not is_async_trainer:
aggregator["_base_"] = "base_fed_avg_sync_aggregator"
elif "fedavg" == aggregator["type"].lower() and not is_async_trainer:
aggregator["_base_"] = "base_fed_avg_sync_aggregator"
elif "fedavgwithlr" == aggregator["type"].lower() and not is_async_trainer:
aggregator["_base_"] = "base_fed_avg_with_lr_sync_aggregator"
elif "fedadam" == aggregator["type"].lower() and not is_async_trainer:
aggregator["_base_"] = "base_fed_adam_sync_aggregator"
elif "fedlars" == aggregator["type"].lower() and not is_async_trainer:
aggregator["_base_"] = "base_fed_lars_sync_aggregator"
elif "fedlamb" == aggregator["type"].lower() and not is_async_trainer:
aggregator["_base_"] = "base_fed_lamb_sync_aggregator"
elif "fedavgwithlr" == aggregator["type"].lower() and is_async_trainer:
aggregator["_base_"] = "base_fed_avg_with_lr_async_aggregator"
elif "fedadam" == aggregator["type"].lower() and is_async_trainer:
aggregator["_base_"] = "base_fed_adam_async_aggregator"
elif (
"asyncfedavgwithlrmithmomentum" == aggregator["type"].lower()
and is_async_trainer
):
aggregator["_base_"] = "base_fed_avg_with_lr_with_momentum_async_aggregator"
elif "hybridfedavgwithlr" == aggregator["type"].lower() and is_async_trainer:
aggregator["_base_"] = "base_fed_avg_with_lr_hybrid_aggregator"
elif "hybridfedadam" == aggregator["type"].lower() and is_async_trainer:
aggregator["_base_"] = "base_fed_adam_hybrid_aggregator"
aggregator.pop("type", None)
_handle_aggregator_reducer(aggregator)
def _handle_training_start_time_distribution(teg):
if "training_start_time_distr" not in teg:
return
teg["training_start_time_distribution"] = teg["training_start_time_distr"]
del teg["training_start_time_distr"]
tstd = teg["training_start_time_distribution"]
if "type" not in tstd:
pass
elif "constant" == tstd["type"].lower():
tstd["_base_"] = "base_constant_training_start_time_distribution"
elif "poisson" == tstd["type"].lower():
tstd["_base_"] = "base_poisson_training_start_time_distribution"
tstd.pop("type", None)
def _handle_duration_distribution_generator(teg):
if "training_duration_distr" not in teg:
return
teg["duration_distribution_generator"] = teg["training_duration_distr"]
del teg["training_duration_distr"]
ddg = teg["duration_distribution_generator"]
if "type" not in ddg:
pass
elif "per_example_gaussian" == ddg["type"].lower():
ddg["_base_"] = "base_per_example_gaussian_duration_distribution"
elif "per_user_half_normal" == ddg["type"].lower():
ddg["_base_"] = "base_per_user_half_normal_duration_distribution"
elif "per_user_gaussian" == ddg["type"].lower():
ddg["_base_"] = "base_per_user_gaussian_duration_distribution"
elif "per_user_uniform" == ddg["type"].lower():
ddg["_base_"] = "base_per_user_uniform_duration_distribution"
elif "per_user_exponential" == ddg["type"].lower():
ddg["_base_"] = "base_per_user_exponential_duration_distribution"
elif "training_duration_from_list" == ddg["type"].lower():
ddg["_base_"] = "base_duration_distribution_from_list"
ddg.pop("type", None)
def _handle_training_event_generator(trainer):
if "training_event_generator_config" not in trainer:
return
trainer["training_event_generator"] = trainer["training_event_generator_config"]
del trainer["training_event_generator_config"]
teg = trainer["training_event_generator"]
if "type" not in teg:
pass
elif "async_training_event_generator" == teg["type"].lower():
teg["_base_"] = "base_async_training_event_generator"
elif "async_training_event_generator_from_list" == teg["type"].lower():
teg["_base_"] = "base_async_training_event_generator_from_list"
teg.pop("type", None)
_handle_training_start_time_distribution(teg)
_handle_duration_distribution_generator(teg)
def _handle_async_weight(trainer): # noqa
if (
"staleness_weight_config" not in trainer
and "example_weight_config" not in trainer
):
return
trainer["async_weight"] = {}
async_weight = trainer["async_weight"]
if "staleness_weight_config" in trainer:
async_weight["staleness_weight"] = trainer["staleness_weight_config"]
del trainer["staleness_weight_config"]
staleness_weight = async_weight["staleness_weight"]
if "type" not in staleness_weight:
pass
elif "default" == staleness_weight["type"].lower():
staleness_weight["_base_"] = "base_constant_staleness_weight"
elif "constant" == staleness_weight["type"].lower():
staleness_weight["_base_"] = "base_constant_staleness_weight"
elif "threshold" == staleness_weight["type"].lower():
staleness_weight["_base_"] = "base_threshold_staleness_weight"
elif "polynomial" == staleness_weight["type"].lower():
staleness_weight["_base_"] = "base_polynomial_staleness_weight"
staleness_weight.pop("type", None)
if "example_weight_config" in trainer:
async_weight["example_weight"] = trainer["example_weight_config"]
del trainer["example_weight_config"]
example_weight = async_weight["example_weight"]
if "type" not in example_weight:
pass
elif "default" == example_weight["type"].lower():
example_weight["_base_"] = "base_equal_example_weight"
elif "equal" == example_weight["type"].lower():
example_weight["_base_"] = "base_equal_example_weight"
elif "linear" == example_weight["type"].lower():
example_weight["_base_"] = "base_linear_example_weight"
elif "sqrt" == example_weight["type"].lower():
example_weight["_base_"] = "base_sqrt_example_weight"
elif "log10" == example_weight["type"].lower():
example_weight["_base_"] = "base_log10_example_weight"
example_weight.pop("type", None)
def _handle_private_client_config(trainer):
if "private_client_config" not in trainer:
return
# check if client is already present through trainer params
trainer["client"] = {
**trainer.get("client", {}),
**trainer["private_client_config"],
}
del trainer["private_client_config"]
client = trainer["client"]
client["_base_"] = "base_dp_client"
client.pop("type", None)
_handle_renaming_client_params(client)
_handle_optimizer_in_client(client)
_handle_lr_scheduler_in_client(client)
def _handle_private_reducer_config(trainer):
if "private_reducer_config" not in trainer:
return
trainer["reducer"] = trainer["private_reducer_config"]
del trainer["private_reducer_config"]
reducer = trainer["reducer"]
if "type" not in reducer:
pass
elif "dproundreducer" == reducer["type"].lower():
reducer["_base_"] = "base_dp_reducer"
elif "weighteddproundreducer" == reducer["type"].lower():
reducer["_base_"] = "base_weighted_dp_reducer"
else:
raise Exception("invalid reducer type for private sync trainer")
reducer.pop("type", None)
def _handle_data_and_model(new_config):
if "data_config" in new_config:
new_config["data"] = new_config["data_config"]
del new_config["data_config"]
if "model_config" in new_config:
new_config["model"] = new_config["model_config"]
del new_config["model_config"]
if "local_batch_size" in new_config:
new_config["data"] = new_config.get("data", {})
new_config["data"]["local_batch_size"] = new_config["local_batch_size"]
del new_config["local_batch_size"]
if "use_resnet" in new_config:
new_config["model"] = new_config.get("model", {})
new_config["model"]["use_resnet"] = new_config["use_resnet"]
del new_config["use_resnet"]
def convert_old_fl_trainer_config_to_new(trainer):
# handle trainer types
if "synctrainer" == trainer["type"].lower():
trainer["_base_"] = "base_sync_trainer"
elif "asynctrainer" == trainer["type"].lower():
trainer["_base_"] = "base_async_trainer"
elif "privatesynctrainer" == trainer["type"].lower():
trainer["_base_"] = "base_private_sync_trainer"
del trainer["type"]
if "channel_config" in trainer:
trainer["channel"] = trainer["channel_config"]
del trainer["channel_config"]
# handle trainer --> client params
_handle_optimizer(trainer)
_handle_lr_scheduler(trainer)
_handle_trainer_to_client_params(trainer)
# handle trainer base params
_handle_timeout_simulator(trainer)
_handle_active_user_selector(trainer)
# handle sync/async/private trainer params
_handle_aggregator(trainer)
_handle_training_event_generator(trainer)
_handle_async_weight(trainer)
# handle private trainer params
_handle_private_client_config(trainer)
_handle_private_reducer_config(trainer)
def get_new_pytext_fl_trainer_config(fl_trainer):
new_config = {}
new_config["trainer"] = fl_trainer
trainer = new_config["trainer"]
convert_old_fl_trainer_config_to_new(trainer)
return new_config
def get_new_fl_config(
old_config: Dict[str, Any], flsim_example: bool = False
) -> Dict[str, Any]: # noqa
new_config = copy.deepcopy(old_config)
new_config["trainer"] = new_config["trainer_config"]
del new_config["trainer_config"]
trainer = new_config["trainer"]
convert_old_fl_trainer_config_to_new(trainer)
# specifically for fl examples and baseline
if flsim_example:
_handle_data_and_model(new_config)
new_config = {"config": new_config}
return new_config
def create_new_fl_config_from_old_json(
old_config_json_path,
new_config_json_path=None,
flsim_example=False,
):
new_config = {}
with open(old_config_json_path) as f:
old_config = json.load(f)
new_config = get_new_fl_config(old_config, flsim_example)
if new_config_json_path is None:
print(new_config)
else:
with open(new_config_json_path, "w") as fo:
json.dump(new_config, fo, indent=4)
return new_config
if __name__ == "__main__":
warning_msg = """ NOTE:\n
-----\n
THIS CONFIG CONVERTER IS A HACK AND IS NOT FULLY TESTED. \n
DO NOT RELY ON THIS CONVERTER SCRIPT BEING AVAILABLE FOR A LONG TIME. \n
IF YOU HAVE A LOT OF CONFIGS TO CONVERT, PLEASE DO SO ASAP. \n
\n
WARNING:\n
--------\n
THIS SCRIPT BLINDLY CONVERTS THE INPUT CONFIG TO THE NEW FORMAT AND DOES NOT VALIDATE \n
THE INPUT. IF YOU SUPPLY THE WRONG CONFIG, THE ERROR MESSAGE WILL BE THROWN BY FLSIM \n
AND NOT THIS SCRIPT.\n
\n
======================================================================================\n
\n
"""
parser = argparse.ArgumentParser(
description="Convert old FLSim JSON config to new format."
)
parser.add_argument("-o", "--old", type=str, help="path to old json config")
parser.add_argument(
"-n", "--new", type=str, default=None, help="path to new json config"
)
parser.add_argument(
"--flsim_example",
default=False,
action="store_true",
help="also modify data and model configs for flsim repo examples",
)
args = parser.parse_args()
if args.old is None:
parser.print_help()
exit(1)
print(warning_msg)
create_new_fl_config_from_old_json(args.old, args.new, args.flsim_example)
print("Conversion successful")
| canife-main | FLSim/scripts/old_config_converter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import json
import pkg_resources
from flsim.common.pytest_helper import assertTrue
from scripts.old_config_converter import get_new_fl_config
OLD_CONFIGS = [
"configs/fedbuff_fedadam_old.json",
"configs/async_fedsgd_old.json",
"configs/sync_fedavg_old.json",
"configs/privatesync_fedsgd_old.json",
]
NEW_CONFIGS = [
"configs/fedbuff_fedadam_new.json",
"configs/async_fedsgd_new.json",
"configs/sync_fedavg_new.json",
"configs/privatesync_fedsgd_new.json",
]
class TestOldConfigConveter:
def test_conversion(self) -> None:
for old_file_path, new_file_path in zip(OLD_CONFIGS, NEW_CONFIGS):
old_file_path = pkg_resources.resource_filename(__name__, old_file_path)
new_file_path = pkg_resources.resource_filename(__name__, new_file_path)
with open(old_file_path) as old_file:
old = json.load(old_file)
with open(new_file_path) as new_file:
new = json.load(new_file)
converted_old = get_new_fl_config(old, flsim_example=True)
assertTrue(dict(converted_old) == dict(new))
| canife-main | FLSim/scripts/tests/test_old_config_converter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
__version__ = "0.0.1"
| canife-main | FLSim/flsim/version.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file defines the concept of a base client for a
federated learning setting. It also defines a basic config
for an FL client.
Note:
This is just a base class and needs to be overridden
for different use cases.
"""
from __future__ import annotations
import logging
import random
from dataclasses import dataclass
from typing import Any, List, Optional, Tuple
import torch
from flsim.channels.base_channel import IdentityChannel
from flsim.channels.message import Message
from flsim.common.logger import Logger
from flsim.common.timeout_simulator import (
NeverTimeOutSimulator,
NeverTimeOutSimulatorConfig,
TimeOutSimulator,
)
from flsim.data.data_provider import IFLUserData
from flsim.interfaces.metrics_reporter import IFLMetricsReporter
from flsim.interfaces.model import IFLModel
from flsim.optimizers.local_optimizers import (
LocalOptimizerConfig,
LocalOptimizerSGDConfig,
)
from flsim.optimizers.optimizer_scheduler import (
ConstantLRSchedulerConfig,
OptimizerScheduler,
OptimizerSchedulerConfig,
)
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.cuda import DEFAULT_CUDA_MANAGER, ICudaStateManager
from flsim.utils.fl.common import FLModelParamUtils
from hydra.utils import instantiate
from omegaconf import OmegaConf
class Client:
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(
self,
*,
dataset: IFLUserData,
channel: Optional[IdentityChannel] = None,
timeout_simulator: Optional[TimeOutSimulator] = None,
store_last_updated_model: Optional[bool] = False,
cuda_manager: ICudaStateManager = DEFAULT_CUDA_MANAGER,
name: Optional[str] = None,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=ClientConfig,
**kwargs,
)
self.dataset = dataset
self.cuda_state_manager = cuda_manager
self.channel = channel or IdentityChannel()
self.timeout_simulator = timeout_simulator or NeverTimeOutSimulator(
**OmegaConf.structured(NeverTimeOutSimulatorConfig())
)
self.store_last_updated_model = store_last_updated_model
self.name = name or "unnamed_client"
# base lr needs to match LR in optimizer config, overwrite it
# pyre-ignore [16]
self.cfg.lr_scheduler.base_lr = self.cfg.optimizer.lr
self.per_example_training_time = (
self.timeout_simulator.simulate_per_example_training_time()
)
self.ref_model = None
self.num_samples = 0
self.times_selected = 0
self._tracked = {}
self.last_updated_model = None
# self.logger.setLevel(logging.INFO)
# TODO: Canary modifications
self.is_canary_testing = False
self.has_canary_inserted = False
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.optimizer, "_target_"):
cfg.optimizer = LocalOptimizerSGDConfig()
if OmegaConf.is_missing(cfg.lr_scheduler, "_target_"):
cfg.lr_scheduler = ConstantLRSchedulerConfig()
@property
def seed(self) -> Optional[int]:
"""if should set random_seed or not."""
# pyre-fixme[16]: `Client` has no attribute `cfg`.
return self.cfg.random_seed
@property
def model_deltas(self) -> List[IFLModel]:
"""
        Return the stored deltas for all rounds in which
        this user was selected.
"""
return [self._tracked[s]["delta"] for s in range(self.times_selected)]
@property
def optimizers(self) -> List[Any]:
"""Look at {self.model}"""
return [self._tracked[s]["optimizer"] for s in range(self.times_selected)]
@property
def weights(self) -> List[float]:
"""Look at {self.model}"""
return [self._tracked[s]["weight"] for s in range(self.times_selected)]
# TODO: Canary modifiction
def flag_as_canary_inserted(self):
self.has_canary_inserted = True
# TODO: Canary modifiction
def enable_canary_testing(self):
self.is_canary_testing = True
# TODO: Canary modification
def disable_canary_testing(self):
self.is_canary_testing = False
def generate_local_update(
self, model: IFLModel, metrics_reporter: Optional[IFLMetricsReporter] = None
) -> Tuple[IFLModel, float]:
r"""
wrapper around all functions called on a client for generating an
updated local model.
Note:
-----
        Only pass a ``metrics_reporter`` if reporting is needed, i.e. if
        report_metrics will be called on the reporter; otherwise reports will be
        accumulated in memory.
"""
updated_model, weight, optimizer = self.copy_and_train_model(
model, metrics_reporter=metrics_reporter
)
# 4. Store updated model if being tracked
if self.store_last_updated_model:
self.last_updated_model = FLModelParamUtils.clone(updated_model)
# 5. compute delta
delta = self.compute_delta(
before=model, after=updated_model, model_to_save=updated_model
)
# 6. track state of the client
self.track(delta=delta, weight=weight, optimizer=optimizer)
return delta, weight
def copy_and_train_model(
self,
model: IFLModel,
epochs: Optional[int] = None,
optimizer: Optional[torch.optim.Optimizer] = None,
optimizer_scheduler: Optional[OptimizerScheduler] = None,
metrics_reporter: Optional[IFLMetricsReporter] = None,
) -> Tuple[IFLModel, float, torch.optim.Optimizer]:
"""Copy the model then use that model to train on the client's train split
Note: Optional optimizer and optimizer_scheduler are there for easier testing
Returns:
Tuple[IFLModel, float, torch.optim.Optimizer]: The trained model, the client's weight, the optimizer used
"""
# 1. pass through channel, set initial state
updated_model = self.receive_through_channel(model)
# 2. set up model and optimizer in the client
updated_model, default_optim, default_scheduler = self.prepare_for_training(
updated_model
)
optim = default_optim if optimizer is None else optimizer
optim_scheduler = (
default_scheduler if optimizer_scheduler is None else optimizer_scheduler
)
# 3. kick off training on client
updated_model, weight = self.train(
updated_model,
optim,
optim_scheduler,
metrics_reporter=metrics_reporter,
epochs=epochs,
)
# self.logger.info(f"Client {self.name} has model update {self.}")
return updated_model, weight, optim
def compute_delta(
self, before: IFLModel, after: IFLModel, model_to_save: IFLModel
) -> IFLModel:
"""
Computes the delta between the before training and after training model
"""
FLModelParamUtils.subtract_model(
minuend=before.fl_get_module(),
subtrahend=after.fl_get_module(),
difference=model_to_save.fl_get_module(),
)
return model_to_save
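# Sign convention (per the subtract_model call above): the returned "delta" is
# `before - after`, i.e. the global model minus the locally trained model,
# written in-place into model_to_save.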
def receive_through_channel(self, model: IFLModel) -> IFLModel:
"""
Receives a reference to a state (referred to as model state_dict)
over the channel. Any channel effect is applied as part of this
receive function.
"""
# keep a reference to global model
self.ref_model = model
# need to clone the model because it's a reference to the global model
# modifying model will modify the global model
message = self.channel.server_to_client(
Message(model=FLModelParamUtils.clone(model))
)
return message.model
def prepare_for_training(
self, model: IFLModel
) -> Tuple[IFLModel, torch.optim.Optimizer, OptimizerScheduler]:
"""
1- instantiate a model with the given initial state
2- create an optimizer
"""
# inform cuda_state_manager that we're about to train a model
# it may move model to GPU
self.cuda_state_manager.before_train_or_eval(model)
# put model in train mode
model.fl_get_module().train()
# create optimizer
# pyre-fixme[16]: `Client` has no attribute `cfg`.
optimizer = instantiate(self.cfg.optimizer, model=model.fl_get_module())
optimizer_scheduler = instantiate(self.cfg.lr_scheduler, optimizer=optimizer)
return model, optimizer, optimizer_scheduler
def get_total_training_time(self) -> float:
return self.timeout_simulator.simulate_training_time(
self.per_example_training_time, self.dataset.num_train_examples()
)
def stop_training(self, num_examples_processed) -> bool:
training_time = self.timeout_simulator.simulate_training_time(
self.per_example_training_time, num_examples_processed
)
return self.timeout_simulator.user_timeout(training_time)
def train(
self,
model: IFLModel,
optimizer: Any,
optimizer_scheduler: OptimizerScheduler,
metrics_reporter: Optional[IFLMetricsReporter] = None,
epochs: Optional[int] = None,
) -> Tuple[IFLModel, float]:
total_samples = 0
# NOTE currently weight = total_samples, this might be a bad strategy
# plus there are privacy implications that must be taken into account.
num_examples_processed = 0 # number of examples processed during training
# pyre-ignore[16]:
epochs = epochs if epochs is not None else self.cfg.epochs
epochs = 1 if self.has_canary_inserted else epochs # TODO: Canary modification
if self.seed is not None:
torch.manual_seed(self.seed)
for epoch in range(epochs):
if self.stop_training(num_examples_processed):
break
# if the user has too many examples and times out, we want to process a
# different portion of the dataset each time
dataset = list(self.dataset.train_data())
if self.cfg.shuffle_batch_order:
random.shuffle(dataset)
for batch in dataset:
sample_count = self._batch_train(
model=model,
optimizer=optimizer,
training_batch=batch,
epoch=epoch,
metrics_reporter=metrics_reporter,
optimizer_scheduler=optimizer_scheduler,
)
self.post_batch_train(epoch, model, sample_count, optimizer)
total_samples += 0 if epoch else sample_count
num_examples_processed += sample_count
# stop training depending on time-out condition
if self.stop_training(num_examples_processed):
break
# tell cuda manager we're done with training
# cuda manager may move model out of GPU memory if needed
self.cuda_state_manager.after_train_or_eval(model)
self.logger.debug(
f"Processed {num_examples_processed} of {self.dataset.num_train_examples()}"
)
self.post_train(model, total_samples, optimizer)
# if training stops early, use partial training weight
example_weight = min([num_examples_processed, total_samples])
return model, float(example_weight)
def post_train(self, model: IFLModel, total_samples: int, optimizer: Any):
pass
def post_batch_train(
self, epoch: int, model: IFLModel, sample_count: int, optimizer: Any
):
pass
def track(self, delta: IFLModel, weight: float, optimizer: Any):
"""Tracks metric when the client is selected multiple times"""
# pyre-fixme[16]: `Client` has no attribute `cfg`.
if self.cfg.store_models_and_optimizers:
self._tracked[self.times_selected] = {
"delta": FLModelParamUtils.clone(delta),
"weight": weight,
"optimizer": optimizer,
}
self.times_selected += 1
def eval(
self,
model: IFLModel,
dataset: Optional[IFLUserData] = None,
metrics_reporter: Optional[IFLMetricsReporter] = None,
):
"""
Client evaluates the model based on its evaluation data split
"""
data = dataset or self.dataset
self.cuda_state_manager.before_train_or_eval(model)
with torch.no_grad():
if self.seed is not None:
torch.manual_seed(self.seed)
model.fl_get_module().eval()
for batch in data.eval_data():
batch_metrics = model.get_eval_metrics(batch)
if metrics_reporter is not None:
metrics_reporter.add_batch_metrics(batch_metrics)
model.fl_get_module().train()
self.cuda_state_manager.after_train_or_eval(model)
def _batch_train(
self,
model,
optimizer,
training_batch,
epoch,
metrics_reporter,
optimizer_scheduler,
) -> int:
"""Trainer for NewDocModel based FL Tasks
Run a single iteration of minibatch-gradient descent on a single user.
Compatible with the new tasks in which the model is responsible for
arranging its inputs, targets and context.
Return number of examples in the batch.
"""
optimizer.zero_grad()
batch_metrics = model.fl_forward(training_batch)
loss = batch_metrics.loss
loss.backward()
# pyre-fixme[16]: `Client` has no attribute `cfg`.
if self.cfg.max_clip_norm_normalized is not None:
max_norm = self.cfg.max_clip_norm_normalized
FLModelParamUtils.clip_gradients(
max_normalized_l2_norm=max_norm, model=model.fl_get_module()
)
num_examples = batch_metrics.num_examples
# TODO: Canary modification
# self.logger.debug(f"Client {self.name} before scaling {torch.sum(torch.cat([p.grad.flatten() for p in model.model.parameters()]))}")
if self.is_canary_testing:
self.logger.debug(f"Client {self.name}, num of examples {num_examples}")
self.logger.debug(f"Client {self.name}, norm of gradient batch: {torch.norm(torch.cat([p.grad.flatten() for p in model.model.parameters()]))}")
for n,p in model.model.named_parameters():
if self.has_canary_inserted and num_examples == 1 and len(self.dataset._user_batches) == 1: # Batch insert only
self.logger.debug(f"Canary inserted as batch - canary gradient scaled by 1/lr={self.cfg.optimizer.lr}")
p.grad *= 1/self.cfg.optimizer.lr # Preserve canary gradient after stepping
# adjust lr and take a step
optimizer_scheduler.step(batch_metrics, model, training_batch, epoch)
optimizer.step()
if metrics_reporter is not None:
metrics_reporter.add_batch_metrics(batch_metrics)
return num_examples
@dataclass
class ClientConfig:
_target_: str = fullclassname(Client)
_recursive_: bool = False
epochs: int = 1 # No. of epochs for local training
optimizer: LocalOptimizerConfig = LocalOptimizerConfig()
lr_scheduler: OptimizerSchedulerConfig = OptimizerSchedulerConfig()
max_clip_norm_normalized: Optional[float] = None # gradient clip value
only_federated_params: bool = True # flag to only use certain params
random_seed: Optional[int] = None # random seed for deterministic response
shuffle_batch_order: bool = False # shuffle the ordering of batches
store_models_and_optimizers: bool = False # store per-round deltas and optimizers (see Client.track)
track_multiple_selection: bool = False # track if the client appears in 2+ rounds.
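def _example_client_usage(user_data, global_model):
    """Editor's illustrative sketch (not part of FLSim): builds a Client from a
    structured ClientConfig and produces one local update. `user_data` stands in
    for any IFLUserData implementation and `global_model` for any IFLModel; both
    are assumed to be provided by the caller."""
    cfg = OmegaConf.structured(ClientConfig(epochs=1, store_models_and_optimizers=True))
    client = Client(**cfg, dataset=user_data, name="client_0")
    # delta is the (global - local) model difference; weight is the number of
    # examples processed (possibly partial if a timeout simulator fires)
    delta, weight = client.generate_local_update(global_model)
    return delta, weight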
| canife-main | FLSim/flsim/clients/base_client.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore
from .base_client import ClientConfig
from .dp_client import DPClientConfig
ConfigStore.instance().store(
name="base_client",
node=ClientConfig,
group="client",
)
ConfigStore.instance().store(
name="base_dp_client",
node=DPClientConfig,
group="client",
)
| canife-main | FLSim/flsim/clients/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file defines the concept of a differentially private
client where a sample level dp is enforced during training.
"""
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Optional
import torch
from flsim.channels.base_channel import IdentityChannel
from flsim.clients.base_client import Client, ClientConfig
from flsim.common.timeout_simulator import TimeOutSimulator
from flsim.data.data_provider import IFLUserData
from flsim.interfaces.model import IFLModel
from flsim.privacy.common import PrivacyBudget, PrivacySetting
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.cuda import DEFAULT_CUDA_MANAGER, ICudaStateManager
from opacus import GradSampleModule
from opacus.accountants import RDPAccountant
from opacus.optimizers import DPOptimizer
class DPClient(Client):
def __init__(
self,
*,
dataset: IFLUserData,
channel: Optional[IdentityChannel] = None,
timeout_simulator: Optional[TimeOutSimulator] = None,
store_last_updated_model: Optional[bool] = False,
name: Optional[str] = None,
cuda_manager: ICudaStateManager = DEFAULT_CUDA_MANAGER,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=DPClientConfig,
**kwargs,
)
super().__init__(
dataset=dataset,
channel=channel,
timeout_simulator=timeout_simulator,
store_last_updated_model=store_last_updated_model,
name=name,
cuda_manager=cuda_manager,
**kwargs,
)
self.dataset_length = -1
self.privacy_steps = 0
self._privacy_budget = PrivacyBudget()
self.privacy_on = (
# pyre-fixme[16]: `DPClient` has no attribute `cfg`.
self.cfg.privacy_setting.noise_multiplier >= 0
and self.cfg.privacy_setting.clipping_value < float("inf")
)
if self.privacy_on:
self.accountant = RDPAccountant()
self.grad_sample_module = None
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _get_dataset_stats(self, model: IFLModel):
batch_size = 0
for batch in self.dataset.train_data():
batch_size = model.get_num_examples(batch)
break
return batch_size, self.dataset.num_train_examples()
@property
def privacy_budget(self) -> PrivacyBudget:
return self._privacy_budget
def prepare_for_training(self, model: IFLModel):
"""
1- call parent's prepare_for_training
2- attach the privacy_engine
"""
model, optimizer, optimizer_scheduler = super().prepare_for_training(model)
if self.privacy_on:
batch_size, self.dataset_length = self._get_dataset_stats(model)
sample_rate = batch_size / self.dataset_length
self.grad_sample_module = GradSampleModule(model.fl_get_module())
# pyre-fixme[16]: `DPClient` has no attribute `cfg`.
if self.cfg.privacy_setting.noise_seed is not None:
generator = torch.Generator()
# pyre-fixme[16]
generator.manual_seed(self.cfg.privacy_setting.noise_seed)
else:
generator = None
optimizer = DPOptimizer(
optimizer=optimizer,
noise_multiplier=self.cfg.privacy_setting.noise_multiplier,
max_grad_norm=self.cfg.privacy_setting.clipping_value,
expected_batch_size=batch_size,
generator=generator,
)
def accountant_hook(optim: DPOptimizer):
self.accountant.step(
noise_multiplier=optim.noise_multiplier,
sample_rate=sample_rate * optim.accumulated_iterations,
)
optimizer.attach_step_hook(accountant_hook)
return model, optimizer, optimizer_scheduler
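# Accounting note (editor's addition): the hook attached above fires on each
# DPOptimizer step, recording one RDP accountant step with the client's noise
# multiplier and sample_rate = batch_size / dataset_length (scaled by
# accumulated_iterations when gradients are accumulated across micro-batches).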
def _get_privacy_budget(self) -> PrivacyBudget:
if self.privacy_on and self.dataset_length > 0:
# pyre-fixme[16]: `DPClient` has no attribute `cfg`.
delta = self.cfg.privacy_setting.target_delta
eps = self.accountant.get_epsilon(delta=delta)
return PrivacyBudget(epsilon=eps, delta=delta)
else:
return PrivacyBudget()
def post_batch_train(
self, epoch: int, model: IFLModel, sample_count: int, optimizer: Any
):
if self.privacy_on and sample_count > optimizer.expected_batch_size:
raise ValueError(
"Batch size was not properly calculated!"
" Calculated Epsilons are not Correct"
)
def post_train(self, model: IFLModel, total_samples: int, optimizer: Any):
if not self.privacy_on:
self.logger.debug(f"Privacy Engine is not enabled for client: {self.name}!")
return
if self.dataset_length != total_samples:
DPClient.logger.warning(
"Calculated privacy budgets were not Accurate." " Fixing the problem."
)
sample_rate = float(optimizer.expected_batch_size) / total_samples
self.accountant.steps = [
(noise, sample_rate, num_steps)
for noise, _, num_steps in self.accountant.steps
]
self._privacy_budget = self._get_privacy_budget()
DPClient.logger.debug(f"Privacy Budget: {self._privacy_budget}")
# detach the engine to be safe, (not necessary if model is not reused.)
self.grad_sample_module.to_standard_module()
# re-add the detached engine so that can be saved along with optimizer
optimizer.accountant = self.accountant
@dataclass
class DPClientConfig(ClientConfig):
"""
Contains configurations for a dp user (sample-level dp)
"""
_target_: str = fullclassname(DPClient)
privacy_setting: PrivacySetting = PrivacySetting()
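def _example_dp_client_usage(user_data):
    """Editor's illustrative sketch (not part of FLSim): sample-level DP is active
    only when noise_multiplier >= 0 and clipping_value is finite (see
    DPClient.__init__). `user_data` stands in for any IFLUserData and is assumed
    to be provided by the caller."""
    from omegaconf import OmegaConf  # local import; not otherwise used in this module

    setting = PrivacySetting(noise_multiplier=1.0, clipping_value=1.0)
    cfg = OmegaConf.structured(DPClientConfig(privacy_setting=setting))
    dp_client = DPClient(**cfg, dataset=user_data)
    assert dp_client.privacy_on
    return dp_client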
| canife-main | FLSim/flsim/clients/dp_client.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Optional, Tuple
from flsim.channels.base_channel import IdentityChannel
from flsim.clients.base_client import Client, ClientConfig
from flsim.common.timeout_simulator import TimeOutSimulator
from flsim.interfaces.model import IFLModel
from flsim.utils.async_trainer.async_user_selector import (
AsyncUserSelector,
AsyncUserSelectorInfo,
)
from flsim.utils.async_trainer.device_state import (
DeviceState,
TrainingSchedule,
TrainingScheduleFactory,
TrainingState,
)
from flsim.utils.async_trainer.training_event_generator import IEventGenerator
from flsim.utils.cuda import DEFAULT_CUDA_MANAGER, ICudaStateManager
from flsim.utils.fl.common import FLModelParamUtils
from omegaconf import OmegaConf
class AsyncClientFactory:
@classmethod
def create(
cls,
current_time: float,
event_generator: IEventGenerator,
user_selector: AsyncUserSelector,
client_config: ClientConfig,
cuda_manager: ICudaStateManager = DEFAULT_CUDA_MANAGER,
timeout_simulator: Optional[TimeOutSimulator] = None,
channel: Optional[IdentityChannel] = None,
):
user_info = user_selector.get_random_user()
training_schedule = TrainingScheduleFactory.create(
current_time, event_generator, user_info.user_data.num_train_examples()
)
client = Client(
**OmegaConf.structured(client_config),
dataset=user_info.user_data,
name=f"client_{user_info.user_index}",
timeout_simulator=timeout_simulator,
channel=channel,
cuda_manager=cuda_manager,
)
return AsyncClientDevice(training_schedule, client, user_info)
class AsyncClientDevice(DeviceState):
r"""
Class to represent a single async device. This class is responsible for
maintaining the training state and training the local model
"""
def __init__(
self,
training_schedule: TrainingSchedule,
client: Client,
user_info: AsyncUserSelectorInfo,
):
self.client: Client = client
self.local_model: IFLModel = None # pyre-ignore[8]
self.model_seqnum: int = -1
self.user_info = user_info
self.training_schedule = training_schedule
super().__init__(training_schedule)
def training_started(
self, model_seqnum: int, init_model: Optional[IFLModel] = None
) -> None:
r"""
Starts the client training event by saving a copy of the current global model and seqnum
"""
if init_model is not None:
self.local_model = self.client.receive_through_channel(init_model)
super().training_started()
self.model_seqnum = model_seqnum
def train_local_model(
self, metrics_reporter: Any = None
) -> Tuple[IFLModel, IFLModel, float]:
r"""
Performs local training loop
"""
assert (
self.local_model is not None
), "Client has not started training, local_model is None"
# 1. Save the init model to compute delta
before_train_local = FLModelParamUtils.clone(self.local_model)
# 2. Get ready for training
self.local_model, optim, optim_scheduler = self.client.prepare_for_training(
self.local_model
)
# 3. Train model on local data
after_train_local, weight = self.client.train(
self.local_model, optim, optim_scheduler, metrics_reporter
)
# 4. Compute delta
delta = self.client.compute_delta(
before_train_local, after_train_local, model_to_save=before_train_local
)
# 5. Track client models if specified by config
self.client.track(delta=delta, weight=weight, optimizer=optim)
return delta, after_train_local, weight
def is_waiting_to_start(self):
return self.training_state == TrainingState.WAITING_FOR_START
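def _example_async_client_lifecycle(
    event_generator, user_selector, client_config, global_model
):
    """Editor's illustrative sketch (not part of FLSim): the typical lifecycle of an
    AsyncClientDevice. All four arguments are assumed to be built elsewhere (see
    flsim/clients/tests/test_async_client.py for concrete constructions)."""
    device = AsyncClientFactory.create(
        current_time=0,
        event_generator=event_generator,
        user_selector=user_selector,
        client_config=client_config,
    )
    device.training_started(model_seqnum=1, init_model=global_model)
    delta, local_model, weight = device.train_local_model(metrics_reporter=None)
    device.training_ended()
    return delta, local_model, weight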
| canife-main | FLSim/flsim/clients/async_client.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/clients/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import List, Type
import pytest
from flsim.clients.async_client import AsyncClientDevice, AsyncClientFactory
from flsim.clients.base_client import ClientConfig
from flsim.common.pytest_helper import assertEqual, assertIsNotNone
from flsim.data.data_provider import FLDataProviderFromList
from flsim.utils.async_trainer.async_user_selector import (
RandomAsyncUserSelector,
RoundRobinAsyncUserSelector,
)
from flsim.utils.async_trainer.device_state import TrainingState
from flsim.utils.async_trainer.training_event_generator import (
AsyncTrainingEventGenerator,
AsyncTrainingEventGeneratorConfig,
AsyncTrainingEventGeneratorFromList,
AsyncTrainingEventGeneratorFromListConfig,
EventTimingInfo,
PoissonAsyncTrainingStartTimeDistrConfig,
)
from flsim.utils.data.fake_data_utils import create_mock_data_provider
from flsim.utils.sample_model import DummyAlphabetFLModel, MockFLModel
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from flsim.utils.timing.training_duration_distribution import (
DurationDistributionConfig,
PerExampleGaussianDurationDistributionConfig,
PerUserGaussianDurationDistributionConfig,
)
from omegaconf import OmegaConf
@pytest.fixture(scope="class")
def prepare_shared_client_config(request) -> None:
request.cls.shared_client_config = OmegaConf.structured(
ClientConfig(
epochs=1,
max_clip_norm_normalized=0,
only_federated_params=True,
random_seed=1,
store_models_and_optimizers=False,
)
)
@pytest.mark.usefixtures("prepare_shared_client_config")
class TestAsyncClientDeviceGeneration:
def _verify_event(
self,
client: AsyncClientDevice,
expected_start_time: int,
expected_end_time: int,
) -> None:
assertEqual(client.training_schedule.start_time, expected_start_time)
assertEqual(client.training_schedule.end_time, expected_end_time)
def test_provide_client_event_generation(self) -> None:
r"""
Check if client provider returns the client with the correct
start time and end time
"""
# (start time, duration)
event_list = [
EventTimingInfo(prev_event_start_to_current_start=1, duration=3),
EventTimingInfo(prev_event_start_to_current_start=2, duration=5),
EventTimingInfo(prev_event_start_to_current_start=2, duration=1),
EventTimingInfo(prev_event_start_to_current_start=10, duration=10),
]
start_times_gaps = [val.prev_event_start_to_current_start for val in event_list]
start_times = [
sum(start_times_gaps[0 : (x + 1)]) for x in range(0, len(start_times_gaps))
]
durations = [d.duration for d in event_list]
end_times = [t[0] + t[1] for t in zip(start_times, durations)]
event_generator = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(training_events=event_list)
)
)
num_users = len(event_list)
data_provider = create_mock_data_provider(
num_users=num_users, examples_per_user=1
)
user_selector = RandomAsyncUserSelector(data_provider=data_provider)
current_time = 0
for start, end in zip(start_times, end_times):
client = AsyncClientFactory.create(
current_time=current_time,
event_generator=event_generator,
user_selector=user_selector,
# pyre-ignore [16]: for pytest
client_config=self.shared_client_config,
)
self._verify_event(client, expected_start_time=start, expected_end_time=end)
# how we move forward in time in async is by setting the current time
# to start time of the client on top of the heap
current_time = client.next_event_time()
def test_sequential_client_training_schedule(self) -> None:
r"""
Check that when the training event generator produces TrainingSchedules sequentially
(with mean and SD of training time equal to 0), clients are truly produced sequentially:
i.e., if client A starts training, client A ends training before any other client starts
"""
num_users = 100
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=10
)
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=0, training_duration_sd=0
)
event_generator = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
examples_per_user = 1
data_provider = create_mock_data_provider(
num_users=num_users, examples_per_user=examples_per_user
)
user_selector = RandomAsyncUserSelector(data_provider=data_provider)
current_time = 0
clients = []
for _ in range(num_users):
client = AsyncClientFactory.create(
current_time=current_time,
event_generator=event_generator,
user_selector=user_selector,
# pyre-ignore [16]: for pytest
client_config=self.shared_client_config,
)
assertEqual(
client.training_schedule.start_time, client.training_schedule.end_time
)
current_time = client.next_event_time()
clients.append(client)
# verify that clients were produced and hence trained sequentially
for client_1, client_2 in zip(clients, clients[1:]):
# check that client_1 should end training before client_2 start training
assert (
client_1.training_schedule.end_time
<= client_2.training_schedule.start_time
)
# check that start time is strictly monotonic increasing
assert (
client_1.training_schedule.start_time
< client_2.training_schedule.start_time
)
def _build_clients_training_duration_dist(
self, duration_distr_config: Type[DurationDistributionConfig], num_users: int
) -> List[AsyncClientDevice]:
r"""
Per-Example-Gaussian:
training_duration = num_examples * training_duration_per_example
Per-User-Gaussian:
training_duration = training_duration_per_example
Use a config where training time is completely determined by
number of examples. Eg:
- GaussianDuration,PoissonStartTime
- training_rate: very high
- mean training time: very high
num_examples_per_user = [n, n-1, n-2, ..., 3, 2, 1]
"""
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=1000
)
duration_distr = duration_distr_config(
training_duration_mean=1000,
training_duration_sd=0,
)
event_generator = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
num_examples_per_user = list(reversed(range(1, num_users + 1)))
data = [
[1] * num_example
for num_example, _ in zip(num_examples_per_user, range(num_users))
]
data_provider = FLDataProviderFromList(
train_user_list=data,
eval_user_list=data,
test_user_list=data,
model=MockFLModel(),
)
user_selector = RoundRobinAsyncUserSelector(data_provider=data_provider)
clients = []
current_time = 0
for _ in range(num_users):
client = AsyncClientFactory.create(
current_time=current_time,
event_generator=event_generator,
user_selector=user_selector,
# pyre-ignore [16]: for pytest
client_config=self.shared_client_config,
)
current_time = client.next_event_time()
clients.append(client)
return clients
def test_training_duration_per_example_gaussian(self) -> None:
r"""
Per-Example-Gaussian:
The last user should finish first.
So training finish time should be: [user n, user n-1, ..., user2, user1]
"""
num_users = 50
clients = self._build_clients_training_duration_dist(
duration_distr_config=PerExampleGaussianDurationDistributionConfig,
num_users=num_users,
)
# check that end time is strictly monotonic decreasing
for client_1, client_2 in zip(clients, clients[1:]):
assert (
client_1.training_schedule.end_time
> client_2.training_schedule.end_time
)
def test_training_duration_per_user_gaussian(self) -> None:
r"""
Per-User-Gaussian:
First user should finish first
So training finish time should be: [user 1, user 2, user 3, .... user n]
"""
num_users = 50
clients = self._build_clients_training_duration_dist(
duration_distr_config=PerUserGaussianDurationDistributionConfig,
num_users=num_users,
)
# check that end time is strictly monotonic increasing
for client_1, client_2 in zip(clients, clients[1:]):
assert (
client_1.training_schedule.end_time
< client_2.training_schedule.end_time
)
@pytest.fixture(scope="class")
def prepare_async_client_device(request) -> None:
request.cls.shared_client_config = OmegaConf.structured(
ClientConfig(
epochs=1,
max_clip_norm_normalized=0,
only_federated_params=True,
random_seed=1,
store_models_and_optimizers=False,
)
)
request.cls.event_list = [
EventTimingInfo(prev_event_start_to_current_start=1, duration=3),
EventTimingInfo(prev_event_start_to_current_start=2, duration=5),
EventTimingInfo(prev_event_start_to_current_start=2, duration=1),
EventTimingInfo(prev_event_start_to_current_start=10, duration=10),
]
request.cls.event_generator = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(
training_events=request.cls.event_list
)
)
)
@pytest.mark.usefixtures("prepare_async_client_device")
class TestAsyncClientDevice:
def _build_data_provider(
self, num_examples, examples_per_user: int, user_batch_size: int, global_model
) -> FLDataProviderFromList:
dummy_dataset = DummyAlphabetDataset(num_examples)
data_provider, _ = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, examples_per_user, user_batch_size, global_model
)
return data_provider
def test_async_client_start_training(self) -> None:
global_model = DummyAlphabetFLModel()
examples_per_user = 10
data_provider = self._build_data_provider(
num_examples=100,
examples_per_user=examples_per_user,
user_batch_size=2,
global_model=global_model,
)
user_selector = RandomAsyncUserSelector(data_provider=data_provider)
client = AsyncClientFactory.create(
current_time=0,
# pyre-ignore [16]: for pytest
event_generator=self.event_generator,
user_selector=user_selector,
# pyre-ignore [16]: for pytest
client_config=self.shared_client_config,
)
current_seqnum = 1
# verify that client is waiting to start
assert client.is_waiting_to_start()
client.training_started(model_seqnum=current_seqnum, init_model=global_model)
# verify that we saved a copy of the global model
assertIsNotNone(client.local_model)
# verify that client has the correct seq num
assertEqual(client.model_seqnum, current_seqnum)
# verify that client state is training
assert not client.is_waiting_to_start()
assertEqual(client.training_state, TrainingState.TRAINING)
def test_async_client_training(self) -> None:
num_examples = 10
examples_per_user = 10
user_batch_size = 2
training_start_time = 1
training_duration = 3
training_end_time = training_start_time + training_duration
global_model = DummyAlphabetFLModel()
data_provider = self._build_data_provider(
num_examples=num_examples,
examples_per_user=examples_per_user,
user_batch_size=user_batch_size,
global_model=global_model,
)
event_list = [
EventTimingInfo(
prev_event_start_to_current_start=training_start_time,
duration=training_duration,
)
]
event_generator = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(training_events=event_list)
)
)
user_selector = RandomAsyncUserSelector(data_provider=data_provider)
client = AsyncClientFactory.create(
current_time=0,
event_generator=event_generator,
user_selector=user_selector,
# pyre-ignore [16]: for pytest
client_config=self.shared_client_config,
)
current_seqnum = 1
assertEqual(client.next_event_time(), training_start_time)
client.training_started(model_seqnum=current_seqnum, init_model=global_model)
(
client_delta,
final_local_model,
num_examples_trained,
) = client.train_local_model(metrics_reporter=None)
client.training_ended()
assertEqual(num_examples_trained, examples_per_user)
assertEqual(client.training_state, TrainingState.TRAINING_FINISHED)
assertEqual(client.next_event_time(), training_end_time)
assertEqual(client.model_seqnum, current_seqnum)
def test_async_client_less_than(self) -> None:
num_examples = 10
examples_per_user = 10
user_batch_size = 2
global_model = DummyAlphabetFLModel()
data_provider = self._build_data_provider(
num_examples=num_examples,
examples_per_user=examples_per_user,
user_batch_size=user_batch_size,
global_model=global_model,
)
user_selector = RandomAsyncUserSelector(data_provider=data_provider)
# two clients
# client 1 starts training at 1
# client 2 starts training at 2
# verify that client 1 will be less than client 2
event_list = [
EventTimingInfo(prev_event_start_to_current_start=1, duration=1),
EventTimingInfo(prev_event_start_to_current_start=2, duration=1),
]
event_generator = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(training_events=event_list)
)
)
client_1 = AsyncClientFactory.create(
current_time=0,
event_generator=event_generator,
user_selector=user_selector,
# pyre-ignore [16]: for pytest
client_config=self.shared_client_config,
)
client_2 = AsyncClientFactory.create(
current_time=0,
event_generator=event_generator,
user_selector=user_selector,
client_config=self.shared_client_config,
)
assert client_1 < client_2
# two clients currently training (training_state=TRAINING_STARTED)
# client a ends training at 2
# client b ends training at 3
# verify that client a will be less than client b
event_list = [
EventTimingInfo(prev_event_start_to_current_start=1, duration=1),
EventTimingInfo(prev_event_start_to_current_start=1, duration=2),
]
event_generator = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(training_events=event_list)
)
)
client_a = AsyncClientFactory.create(
current_time=0,
event_generator=event_generator,
user_selector=user_selector,
client_config=self.shared_client_config,
)
client_b = AsyncClientFactory.create(
current_time=0,
event_generator=event_generator,
user_selector=user_selector,
client_config=self.shared_client_config,
)
client_b.training_started(1, global_model)
assert client_a < client_b
| canife-main | FLSim/flsim/clients/tests/test_async_client.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from unittest.mock import MagicMock
import pytest
import torch
import torch.nn as nn
from flsim.clients.base_client import Client, ClientConfig
from flsim.clients.dp_client import DPClient, DPClientConfig
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertFalse,
assertIsInstance,
assertNotEqual,
assertTrue,
)
from flsim.common.timeout_simulator import (
GaussianTimeOutSimulator,
GaussianTimeOutSimulatorConfig,
NeverTimeOutSimulator,
NeverTimeOutSimulatorConfig,
)
from flsim.data.data_provider import IFLUserData
from flsim.optimizers.local_optimizers import (
LocalOptimizerFedProxConfig,
LocalOptimizerSGD,
LocalOptimizerSGDConfig,
)
from flsim.optimizers.optimizer_scheduler import (
ConstantLRScheduler,
ConstantLRSchedulerConfig,
)
from flsim.privacy.common import PrivacySetting
from flsim.utils import test_utils as utils
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.timing.training_duration_distribution import (
PerExampleGaussianDurationDistributionConfig,
)
from omegaconf import OmegaConf
from opacus.accountants.analysis import rdp as privacy_analysis
from opacus.optimizers import DPOptimizer
def calc_eps(
sample_rate: float, noise_multiplier: float, steps: int, alphas, delta: float
) -> float:
rdp = privacy_analysis.compute_rdp(
q=sample_rate, noise_multiplier=noise_multiplier, steps=steps, orders=alphas
)
eps, _ = privacy_analysis.get_privacy_spent(orders=alphas, rdp=rdp, delta=delta)
return eps
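# Illustrative usage of the helper above (editor's addition), mirroring
# TestDPClient.test_epsilon below, where sample_rate = 1 / num_batches and
# steps = num_batches:
#
#     eps = calc_eps(sample_rate=1.0 / 5, noise_multiplier=1.5, steps=5,
#                    alphas=RDPAccountant.DEFAULT_ALPHAS, delta=1e-5)
#
# (RDPAccountant comes from opacus.accountants; the test accesses the same
# alphas via clnt.accountant.DEFAULT_ALPHAS.)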
@pytest.fixture(scope="class")
def prepare_client_test(request) -> None:
request.cls.num_batches = 2
request.cls.batch_size = 10
@pytest.mark.usefixtures("prepare_client_test")
class ClientTestBase:
def _fake_data(self, num_batches=None, batch_size=None):
num_batches = num_batches or self.num_batches
batch_size = batch_size or self.batch_size
torch.manual_seed(0)
dataset = [torch.rand(batch_size, 2) for _ in range(num_batches)]
dataset = utils.DatasetFromList(dataset)
return utils.DummyUserData(dataset, utils.SampleNet(utils.TwoFC()))
def _get_client(
self,
data=None,
store_models_and_optimizers: bool = False,
timeout_simulator=None,
):
data = data or self._fake_data()
config = ClientConfig(
store_models_and_optimizers=store_models_and_optimizers,
lr_scheduler=ConstantLRSchedulerConfig(),
)
return Client(
**OmegaConf.structured(config),
dataset=data,
timeout_simulator=timeout_simulator,
)
def _get_dp_client(
self,
data=None,
noise_multiplier: int = 1,
clipping_value: int = 1,
store_models_and_optimizers: bool = False,
):
privacy_setting = PrivacySetting(
noise_multiplier=noise_multiplier, clipping_value=clipping_value
)
config = DPClientConfig(
store_models_and_optimizers=store_models_and_optimizers,
privacy_setting=privacy_setting,
)
return DPClient(
**OmegaConf.structured(config), dataset=(data or self._fake_data())
)
def _train(self, data: IFLUserData, model, optim) -> None:
# basically re-write training logic
model.fl_get_module().train()
for batch in data.train_data():
optim.zero_grad()
_batch = model.fl_create_training_batch(batch)
loss = model.fl_forward(_batch).loss
loss.backward()
optim.step()
def _run_client_eval_test(self, client) -> None:
"""
Test that client eval puts the model in eval mode and switches it back to
train mode after the evaluation loop is finished
"""
class Net(utils.SampleNet):
def get_eval_metrics(self, batch):
assert (
self.sample_nn.training is False
), "Client should call eval after setting model.eval()"
return self.sample_nn(batch)
input_dim = 2
model = Net(nn.Linear(input_dim, 1))
model.fl_get_module().train()
client.eval(model=model)
assert model.fl_get_module().training
class TestBaseClient(ClientTestBase):
def test_storage(self) -> None:
client = self._get_client(store_models_and_optimizers=True)
model0 = utils.SampleNet(utils.TwoFC())
model1 = utils.SampleNet(utils.TwoFC())
delta1, weight1 = client.generate_local_update(model0)
delta1 = FLModelParamUtils.clone(delta1)
delta2, weight2 = client.generate_local_update(model1)
assertEqual(client.times_selected, 2)
# model1 should be the first model stored
assertAlmostEqual(weight1, client.weights[0])
mismatched = utils.verify_models_equivalent_after_training(
delta1, client.model_deltas[0]
)
assertEqual(mismatched, "", mismatched)
# model2 should be second model stored
assertAlmostEqual(weight2, client.weights[1])
mismatched = utils.verify_models_equivalent_after_training(
delta2, client.model_deltas[1]
)
assertEqual(mismatched, "", mismatched)
def test_receive_through_channel(self) -> None:
# expected channel effects,
clnt = self._get_client()
model = utils.SampleNet(utils.TwoFC())
# check channel is a pass-through
model2 = clnt.receive_through_channel(model)
mismatched = utils.verify_models_equivalent_after_training(model2, model)
assertEqual(mismatched, "", mismatched)
def test_prepare_for_training(self) -> None:
clnt = self._get_client()
model = utils.SampleNet(utils.TwoFC())
try:
# should work
model2, optim, optim_sch = clnt.prepare_for_training(model)
except BaseException as e:
assertTrue(False, e)
# pyre-fixme[61]: `model2` is undefined, or not always defined.
mismatched = utils.verify_models_equivalent_after_training(model2, model)
assertEqual(mismatched, "", mismatched)
# expect correct type of optimizer
# pyre-fixme[61]: `optim` is undefined, or not always defined.
assertIsInstance(optim, LocalOptimizerSGD)
# pyre-fixme[61]: `optim_sch` is undefined, or not always defined.
assertIsInstance(optim_sch, ConstantLRScheduler)
def test_train(self) -> None:
data = self._fake_data(num_batches=5, batch_size=10)
clnt = self._get_client(data)
model = utils.SampleNet(utils.TwoFC())
model, optim, optim_sch = clnt.prepare_for_training(model)
model2, optim2, _ = clnt.prepare_for_training(FLModelParamUtils.clone(model))
# value checked in previous test
try:
# should work
model, weight = clnt.train(model, optim, optim_sch, None)
except BaseException as e:
assertTrue(False, e)
# pyre-fixme[61]: `weight` is undefined, or not always defined.
assertAlmostEqual(weight, 5 * 10)
self._train(data, model2, optim2)
mismatched = utils.verify_models_equivalent_after_training(model2, model)
assertEqual(mismatched, "", mismatched)
def test_generate_local_update(self) -> None:
clnt = self._get_client()
model = utils.SampleNet(utils.TwoFC())
model.fl_get_module().fill_all(0.1)
clnt.train = MagicMock(return_value=(model, 12.34))
clnt.compute_delta = MagicMock(return_value=model)
try:
# should work
delta, weight = clnt.generate_local_update(model)
except BaseException as e:
assertTrue(False, e)
# pyre-fixme[61]: `weight` is undefined, or not always defined.
assertAlmostEqual(12.34, weight)
mismatched = utils.verify_models_equivalent_after_training(model, model)
assertEqual(mismatched, "", mismatched)
# pyre-fixme[61]: `delta` is undefined, or not always defined.
mismatched = utils.verify_models_equivalent_after_training(delta, model)
assertEqual(mismatched, "", mismatched)
def test_fed_prox_sgd_equivalent(self) -> None:
"""
Test FedProx under the following scenarios:
FedProx == SGD iff
1. FedProx with mu = 0 == SGD
2. FedProx with mu = x == SGD(weight_decay=mu)
FedProx != SGD if
1. mu > 0 and SGD(weight_decay=0)
"""
# scenario 1
data = self._fake_data()
prox_client = Client(
dataset=data,
**OmegaConf.structured(
ClientConfig(
optimizer=LocalOptimizerFedProxConfig(mu=0),
)
),
)
client = Client(
dataset=data,
**OmegaConf.structured(
ClientConfig(
optimizer=LocalOptimizerSGDConfig(),
)
),
)
init_model = utils.SampleNet(utils.TwoFC())
delta, weight = client.generate_local_update(
FLModelParamUtils.clone(init_model)
)
prox_delta, weight = prox_client.generate_local_update(
FLModelParamUtils.clone(init_model)
)
mismatched = utils.verify_models_equivalent_after_training(
prox_delta, delta, init_model
)
assertEqual(mismatched, "", mismatched)
# scenario 2
init_model = utils.SampleNet(utils.TwoFC())
init_model.fl_get_module().fill_all(0.0)
mu = 1.0
prox_client = Client(
dataset=data,
**OmegaConf.structured(
ClientConfig(
optimizer=LocalOptimizerFedProxConfig(mu=mu),
)
),
)
client = Client(
dataset=data,
**OmegaConf.structured(
ClientConfig(
optimizer=LocalOptimizerSGDConfig(weight_decay=mu),
)
),
)
delta, _ = client.generate_local_update(FLModelParamUtils.clone(init_model))
prox_delta, _ = prox_client.generate_local_update(
FLModelParamUtils.clone(init_model)
)
mismatched = utils.verify_models_equivalent_after_training(
prox_delta, delta, init_model
)
assertEqual(mismatched, "", mismatched)
# negative case
# FedProx != SGD if mu > 0 and SGD has no weight decay
init_model = utils.SampleNet(utils.TwoFC())
init_model.fl_get_module().fill_all(0.0)
mu = 1.0
prox_client = Client(
dataset=data,
**OmegaConf.structured(
ClientConfig(
optimizer=LocalOptimizerFedProxConfig(mu=mu),
)
),
)
client = Client(
dataset=data,
**OmegaConf.structured(
ClientConfig(
optimizer=LocalOptimizerSGDConfig(weight_decay=0),
)
),
)
delta, _ = client.generate_local_update(FLModelParamUtils.clone(init_model))
prox_delta, _ = prox_client.generate_local_update(
FLModelParamUtils.clone(init_model)
)
mismatched = utils.verify_models_equivalent_after_training(
prox_delta, delta, init_model
)
assertNotEqual(mismatched, "", mismatched)
def test_device_perf_generation(self) -> None:
"""
Test that the client's per-example training time is always generated,
either using the TimeOutSimulator given in __init__
or by creating a NeverTimeOutSimulator if none is provided.
"""
data = self._fake_data(num_batches=5, batch_size=10)
# pass gaussian timeout into client
cfg = GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=1.0,
fl_stopping_time=1.0,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=0.0
),
)
gaussian_timeout_simulator = GaussianTimeOutSimulator(
**OmegaConf.structured(cfg)
)
clnt_gaussian_timeout = self._get_client(
data, timeout_simulator=gaussian_timeout_simulator
)
assertEqual(clnt_gaussian_timeout.per_example_training_time, 1.0)
# pass never timeout to clients
clnt_never_timeout = self._get_client(
data,
timeout_simulator=NeverTimeOutSimulator(
**OmegaConf.structured(NeverTimeOutSimulatorConfig())
),
)
assertEqual(clnt_never_timeout.per_example_training_time, 0.0)
# default created never timeout within clients
clnt_default = self._get_client(data)
assertEqual(clnt_default.per_example_training_time, 0.0)
def test_total_training_time(self) -> None:
"""
total training time for Gaussian with mean 1.0 and std 0.0
equals min(number of client examples, timeout_wall_per_round)
"""
data = self._fake_data(num_batches=5, batch_size=10)
# pass gaussian timeout into client with a big timeout wall
cfg_big_wall = GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=99999.0,
fl_stopping_time=1.0,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=0.0
),
)
gaussian_timeout_simulator = GaussianTimeOutSimulator(
**OmegaConf.structured(cfg_big_wall)
)
clnt_gaussian_timeout = self._get_client(
data, timeout_simulator=gaussian_timeout_simulator
)
num_examples = 5 * 10
assertEqual(clnt_gaussian_timeout.get_total_training_time(), num_examples)
# pass gaussian timeout into client with a small timeout wall
timeout_wall = 25.0
cfg_small_wall = GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=timeout_wall,
fl_stopping_time=1.0,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=0.0
),
)
gaussian_timeout_simulator = GaussianTimeOutSimulator(
**OmegaConf.structured(cfg_small_wall)
)
clnt_gaussian_timeout = self._get_client(
data, timeout_simulator=gaussian_timeout_simulator
)
assertEqual(clnt_gaussian_timeout.get_total_training_time(), timeout_wall)
# pass never timeout to clients
clnt_never_timeout = self._get_client(
data,
timeout_simulator=NeverTimeOutSimulator(
**OmegaConf.structured(NeverTimeOutSimulatorConfig())
),
)
assertEqual(clnt_never_timeout.get_total_training_time(), 0.0)
def test_partial_training(self) -> None:
"""
Produce two training instances expected to give the same training results:
1. a client with n (an even number of) batches and a timeout wall just
long enough to train half of the batches
2. a client with n/2 batches and no timeout wall
Check that model1 == model2
"""
n_batches = 2
bs = 10
data = self._fake_data(num_batches=n_batches, batch_size=bs)
# only feasible to process half of the batches, client sends back partial results
expected_processed_samples = n_batches * bs / 2
cfg = GaussianTimeOutSimulatorConfig(
timeout_wall_per_round=expected_processed_samples,
fl_stopping_time=1.0,
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=1.0, training_duration_sd=0.0
),
)
gaussian_timeout_simulator = GaussianTimeOutSimulator(
**OmegaConf.structured(cfg)
)
clnt_gaussian_timeout = self._get_client(
data, timeout_simulator=gaussian_timeout_simulator
)
torch.manual_seed(0)
model_init = utils.SampleNet(utils.TwoFC())
model1, optim1, optim_sch1 = clnt_gaussian_timeout.prepare_for_training(
FLModelParamUtils.clone(model_init)
)
torch.manual_seed(0)
partial_model, partial_weight = clnt_gaussian_timeout.train(
model1, optim1, optim_sch1, None
)
assertEqual(partial_weight, expected_processed_samples)
# no timeout, but client only has half of the data
n_batches = int(n_batches / 2)
data = self._fake_data(num_batches=n_batches, batch_size=bs)
clnt = self._get_client(data)
model2, optim2, optim_sch2 = clnt.prepare_for_training(
FLModelParamUtils.clone(model_init)
)
torch.manual_seed(0)
full_model, full_weight = clnt.train(model2, optim2, optim_sch2, None)
assertEqual(full_weight, expected_processed_samples)
mismatched = utils.verify_models_equivalent_after_training(
full_model, partial_model
)
assertEqual(mismatched, "", mismatched)
def test_logging_level(self) -> None:
clnt = self._get_client()
assertTrue(utils.check_inherit_logging_level(clnt, 50))
assertTrue(utils.check_inherit_logging_level(clnt, 10))
def test_base_client_eval(self) -> None:
client = self._get_client()
self._run_client_eval_test(client)
class TestDPClient(ClientTestBase):
def test_privacy_engine_properly_initialized(self) -> None:
data = self._fake_data(num_batches=11, batch_size=3)
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
# pyre-fixme[6]: Expected `int` for 3rd param but got `float`.
clnt = self._get_dp_client(data, noise_multiplier=0.1, clipping_value=2.0)
model = utils.SampleNet(utils.TwoFC())
model, optim, optim_sch = clnt.prepare_for_training(model)
assertIsInstance(optim, DPOptimizer)
assertEqual(optim.noise_multiplier, 0.1)
assertEqual(optim.max_grad_norm, 2.0)
assertEqual(optim.expected_batch_size, 3)
def test_privacy_turned_off(self) -> None:
data = self._fake_data(num_batches=11, batch_size=3)
# clipping value of inf means privacy is off no matter what the noise multiplier
clnt = self._get_dp_client(
data,
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
noise_multiplier=0.0,
# pyre-fixme[6]: Expected `int` for 3rd param but got `float`.
clipping_value=float("inf"),
)
assertFalse(clnt.privacy_on)
clnt = self._get_dp_client(
data,
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
noise_multiplier=1.0,
# pyre-fixme[6]: Expected `int` for 3rd param but got `float`.
clipping_value=float("inf"),
)
assertFalse(clnt.privacy_on)
# negative noise multiplier should turn off the privacy engine
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
# pyre-fixme[6]: Expected `int` for 3rd param but got `float`.
clnt = self._get_dp_client(data, noise_multiplier=-1.0, clipping_value=0.1)
assertFalse(clnt.privacy_on)
def test_prepare_for_training(self) -> None:
clnt = self._get_dp_client()
model = utils.SampleNet(utils.TwoFC())
model2, optim, optim_sch = clnt.prepare_for_training(model)
mismatched = utils.verify_models_equivalent_after_training(model2, model)
assertEqual(mismatched, "")
# expect correct type of optimizer
assertIsInstance(optim, DPOptimizer)
assertIsInstance(optim.original_optimizer, LocalOptimizerSGD)
assertIsInstance(optim_sch, ConstantLRScheduler)
def test_storage(self) -> None:
client = self._get_dp_client(store_models_and_optimizers=True)
model0 = utils.SampleNet(utils.TwoFC())
delta, weight1 = client.generate_local_update(model0)
assertEqual(client.times_selected, 1)
# test existence of privacy_engine
# model1 should be the first model stored
optim = client.optimizers[0]
assertIsInstance(optim, DPOptimizer)
def test_no_noise_no_clip(self) -> None:
data = self._fake_data(3, 4)
model = utils.SampleNet(utils.TwoFC())
private_model = FLModelParamUtils.clone(model)
clnt = self._get_client(data)
delta, weight = clnt.generate_local_update(model)
# set noise to 0 and clipping to a large number
private_clnt = self._get_dp_client(
data, noise_multiplier=0, clipping_value=1000
)
private_delta, private_weight = private_clnt.generate_local_update(
private_model
)
mismatched = utils.verify_models_equivalent_after_training(model, private_model)
mismatched_delta = utils.verify_models_equivalent_after_training(
delta, private_delta
)
assertAlmostEqual(weight, private_weight)
assertEqual(mismatched, "", mismatched)
assertEqual(mismatched_delta, "", mismatched_delta)
def test_only_clip(self) -> None:
data = self._fake_data(4, 4)
model = utils.SampleNet(utils.TwoFC())
private_model = FLModelParamUtils.clone(model)
clnt = self._get_client(data)
delta, weight = clnt.generate_local_update(model)
private_clnt = self._get_dp_client(
data,
noise_multiplier=0,
# pyre-fixme[6]: Expected `int` for 3rd param but got `float`.
clipping_value=0.01,
)
private_delta, private_weight = private_clnt.generate_local_update(
private_model
)
mismatched = utils.verify_models_equivalent_after_training(delta, private_delta)
assertAlmostEqual(weight, private_weight)
assertNotEqual(mismatched, "")
def test_noise_and_clip(self) -> None:
data = self._fake_data(4, 4)
model = utils.SampleNet(utils.TwoFC())
private_model = FLModelParamUtils.clone(model)
clnt = self._get_client(data)
delta, weight = clnt.generate_local_update(model)
private_clnt = self._get_dp_client(
data,
noise_multiplier=1,
# pyre-fixme[6]: Expected `int` for 3rd param but got `float`.
clipping_value=0.01,
)
private_delta, private_weight = private_clnt.generate_local_update(
private_model
)
mismatched_delta = utils.verify_models_equivalent_after_training(
delta, private_delta
)
assertAlmostEqual(weight, private_weight)
assertNotEqual(mismatched_delta, "")
def test_epsilon(self) -> None:
noise_multiplier = 1.5
clipping_value = 0.01
num_batches = 5
batch_size = 6
data = self._fake_data(num_batches, batch_size)
model = utils.SampleNet(utils.TwoFC())
# pyre-fixme[6]: Expected `int` for 2nd param but got `float`.
# pyre-fixme[6]: Expected `int` for 3rd param but got `float`.
clnt = self._get_dp_client(data, noise_multiplier, clipping_value, True)
model, weight = clnt.generate_local_update(model)
alphas = clnt.accountant.DEFAULT_ALPHAS
delta = 1e-5
eps_from_script = calc_eps(
1.0 / num_batches, noise_multiplier, num_batches, alphas, delta
)
eps_from_client, _ = clnt.accountant.get_privacy_spent(
delta=delta, alphas=alphas
)
assertAlmostEqual(eps_from_script, eps_from_client)
def test_dp_client_eval(self) -> None:
dp_client = self._get_dp_client()
self._run_client_eval_test(dp_client)
| canife-main | FLSim/flsim/clients/tests/test_client.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file contains the noise generation function and the required
DP parameters that an entity such as an FL server uses for user-level DP.
"""
import logging
import math
import os
from abc import ABC, abstractmethod
from dataclasses import dataclass
from typing import List, Optional
import numpy as np
import torch
from flsim.common.logger import Logger
from flsim.privacy.common import PrivacyBudget, PrivacySetting
from opacus.accountants.analysis import rdp as privacy_analysis
from torch import nn
class PrivacyEngineNotAttachedException(Exception):
"""
Exception class to be thrown from User Privacy Engine in case
the User Privacy Engine is not attached.
"""
pass
class IPrivacyEngine(ABC):
def __init__(
self,
privacy_setting: PrivacySetting,
users_per_round: int,
num_total_users: int,
):
self.setting = privacy_setting
self.users_per_round = users_per_round
self.num_total_users = num_total_users
self.steps = 0
@abstractmethod
def attach(self, global_model: nn.Module) -> None:
"""
Attach the privacy engine to the global model by setting
a reference model
"""
pass
@abstractmethod
def add_noise(self, model_diff: nn.Module, sensitivity: float) -> None:
pass
@abstractmethod
def get_privacy_spent(self, target_delta: Optional[float] = None) -> PrivacyBudget:
pass
class GaussianPrivacyEngine(IPrivacyEngine):
"""
DP-SGD privacy engine where noise is independent
and comes from a gaussian distribution
"""
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(
self,
privacy_setting: PrivacySetting,
users_per_round: int,
num_total_users: int,
) -> None:
super().__init__(privacy_setting, users_per_round, num_total_users)
self.noise_multiplier = privacy_setting.noise_multiplier
self.target_delta = privacy_setting.target_delta
self.alphas = privacy_setting.alphas
self.user_sampling_rate = float(users_per_round) / num_total_users
self.device = None
self.random_number_generator = None
self.noise_seed = privacy_setting.noise_seed
def attach(self, global_model: nn.Module):
self.device = next(global_model.parameters()).device
noise_seed = (
int.from_bytes(os.urandom(8), byteorder="big", signed=True)
if self.noise_seed is None
else self.noise_seed
)
torch.cuda.manual_seed_all(noise_seed) # safe to call even if no gpu.
self.random_number_generator = torch.Generator( # pyre-ignore
device=self.device
)
# pyre-fixme[16]: `Generator` has no attribute `manual_seed`.
self.random_number_generator.manual_seed(noise_seed)
self.logger.debug("User privacy engine attached.")
def add_noise(self, model_diff: nn.Module, sensitivity: float) -> None:
"""
Adds noise to the parameters of model_diff. This operation is in-place
(model_diff is modified by this method).
Noise is sampled from a normal distribution with mean 0 and
standard deviation equal to sensitivity * noise_multiplier.
Parameters
----------
model_diff : nn.Module
Noise will be added to the parameters of this model.
sensitivity : float
The sensitivity of the noise that will be added.
"""
with torch.no_grad():
for _, parameter in model_diff.named_parameters():
noise = self._generate_noise(parameter.shape, sensitivity)
parameter.copy_(parameter + noise)
self.steps += 1
def _generate_noise(self, size, sensitivity: float) -> torch.Tensor:
if self.device is None or self.random_number_generator is None:
random_gen = "no" if self.random_number_generator is None else "yes"
raise PrivacyEngineNotAttachedException(
"User Privacy Engine is not attached to the global model. "
f"(device={self.device}, random number generator exists: {random_gen})."
"Call attach() function first before calling."
)
if self.noise_multiplier > 0 and sensitivity > 0:
return torch.normal(
0,
self.noise_multiplier * sensitivity,
size,
device=self.device,
generator=self.random_number_generator,
)
return torch.zeros(size, device=self.device)
def get_privacy_spent(self, target_delta: Optional[float] = None):
if target_delta is None:
target_delta = self.target_delta
rdp = privacy_analysis.compute_rdp(
q=self.user_sampling_rate,
noise_multiplier=self.noise_multiplier,
steps=self.steps,
orders=self.alphas,
)
eps, opt_alpha = privacy_analysis.get_privacy_spent(
orders=self.alphas, rdp=rdp, delta=target_delta
)
self.logger.info(
f"User-level DP Privacy Parameters:"
f"\tuser sampling rate = {100 * self.user_sampling_rate:.3g}%"
f"\tnoise_multiplier = {self.noise_multiplier}"
f"\tsteps = {self.steps}\n satisfies "
f"DP with Ɛ = {eps:.3g} "
f"and δ = {target_delta}."
f" The optimal α is {opt_alpha}."
)
if opt_alpha == max(self.alphas) or opt_alpha == min(self.alphas):
self.logger.debug(
"The privacy estimate is likely to be improved by expanding "
"the set of alpha orders."
)
return PrivacyBudget(eps, opt_alpha, target_delta)
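# Illustrative usage sketch (not part of the library API surface; the variable
# names below are assumptions chosen for the example): the engine is attached to
# the global model once, noise is added to each round's aggregated model delta,
# and the spent (epsilon, delta) budget can be queried at any point.
#
#   engine = GaussianPrivacyEngine(privacy_setting, users_per_round=10, num_total_users=1000)
#   engine.attach(global_model)          # sets the device and seeds the noise generator
#   engine.add_noise(model_diff, sensitivity=clipping_value)  # sensitivity chosen by the caller
#   budget = engine.get_privacy_spent()  # PrivacyBudget(epsilon, alpha, delta)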
@dataclass
class TreeNode:
start: int
end: int
height: int
efficient: bool
@property
def weight(self):
return (1 / (2 - math.pow(2, -self.height))) ** 0.5 if self.efficient else 1.0
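# The `weight` above is, roughly speaking, the variance-reduction trick used for
# "efficient" aggregation trees: a height-h node's noise is scaled by
# sqrt(1 / (2 - 2**(-h))). Worked values (an illustration only, obtained by
# evaluating the formula): height 0 -> 1.0, height 1 -> sqrt(1/1.5) ~= 0.816,
# height 2 -> sqrt(1/1.75) ~= 0.756.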
class TreePrivacyEngine(IPrivacyEngine):
"""
    DP-FTRL privacy engine where the noise is the cumulative noise from
    a private binary tree
"""
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(
self,
privacy_setting: PrivacySetting,
users_per_round: int,
num_total_users: int,
restart_rounds: int = 100,
efficient_tree: bool = True,
) -> None:
super().__init__(privacy_setting, users_per_round, num_total_users)
self.num_leaf = min(num_total_users, restart_rounds * users_per_round)
self.restart_rounds = restart_rounds
self.num_tree: int = 1
self.ref_model = None
self.device = None
self.tree = TreePrivacyEngine.build_tree(self.num_leaf, efficient_tree)
@classmethod
def build_tree(cls, num_leaf: int, efficient_tree: bool = True) -> List[TreeNode]:
tree = [TreeNode(-1, -1, -1, True)] * (2 * num_leaf)
# store leaf nodes at back of array
for i, j in enumerate(range(num_leaf, num_leaf * 2)):
tree[j] = TreeNode(start=i, end=i, height=0, efficient=efficient_tree)
# fill in prefix sum internal nodes
for i in range(num_leaf - 1, 0, -1):
left = tree[i * 2]
right = tree[i * 2 + 1]
height = int(math.log2(abs(right.end - left.start) + 1))
tree[i] = TreeNode(
start=left.start, end=right.end, height=height, efficient=efficient_tree
)
return tree
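    # Layout sketch for build_tree(num_leaf=4), assuming the code above (an
    # illustration only): the tree is a flat array of 2 * num_leaf nodes, with the
    # leaves stored in the back half and the prefix-sum internal nodes in front.
    #
    #   index:  1            2          3          4        5        6        7
    #   node:   (0, 3, h=2)  (0, 1, 1)  (2, 3, 1)  (0,0,0)  (1,1,0)  (2,2,0)  (3,3,0)
    #
    # Index 0 stays as the unused placeholder node.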
@classmethod
def compute_rdp(cls, alphas, epoch, steps, sigma):
alphas = np.array(alphas)
return alphas * epoch * np.ceil(np.log2(steps + 1e-6)) / (2 * sigma**2)
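    # Reading of the formula above: per Renyi order alpha, the RDP composes roughly
    # ceil(log2(steps)) Gaussian mechanisms (one per tree level) for each of the
    # `epoch` tree restarts, each contributing about alpha / (2 * sigma**2).
    # get_privacy_spent() below converts the resulting RDP curve into an
    # (epsilon, delta) pair.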
def get_privacy_spent(self, target_delta: Optional[float] = None) -> PrivacyBudget:
target_delta = (
self.setting.target_delta if target_delta is None else target_delta
)
rdp = TreePrivacyEngine.compute_rdp(
alphas=self.setting.alphas,
epoch=self.num_tree,
steps=self.num_leaf,
sigma=self.setting.noise_multiplier,
)
eps, opt_alpha = privacy_analysis.get_privacy_spent(
orders=self.setting.alphas, rdp=rdp, delta=target_delta
)
if opt_alpha == max(self.setting.alphas) or opt_alpha == min(
self.setting.alphas
):
self.logger.info(
"The privacy estimate is likely to be improved by expanding "
"the set of alpha orders."
)
return PrivacyBudget(eps, opt_alpha, self.setting.target_delta)
def attach(self, global_model: nn.Module, **kwargs) -> None:
"""
Reset the tree by incrementing num_tree and reset steps to 0
these will be used to do privacy calculations
"""
self.device = next(global_model.parameters()).device
self.num_tree += 1
self.steps = 0
def add_noise(self, model_diff: nn.Module, sensitivity: float) -> None:
"""
        Adds the cumulative tree noise to the model diff (operation is in-place)
"""
with torch.no_grad():
for parameter in model_diff.parameters():
noise = self.range_sum(
left=0,
right=self.users_per_round - 1,
size=parameter.shape,
sensitivity=sensitivity,
)
parameter.copy_(parameter + noise)
self.steps += 1
def range_sum(
self, left: int, right: int, size: torch.Size, sensitivity: float
) -> torch.Tensor:
left += self.num_leaf
right += self.num_leaf
sum_ = torch.zeros(size)
while left <= right:
noise_std = self.setting.noise_multiplier * sensitivity
if left % 2 == 1:
sum_ += self._generate_noise(size, noise_std) * self.tree[left].weight
left += 1
if right % 2 == 0:
sum_ += self._generate_noise(size, noise_std) * self.tree[right].weight
right -= 1
left = left // 2
right = right // 2
return sum_
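    # Traversal sketch for range_sum(left=0, right=3) with num_leaf=4, assuming the
    # build_tree layout above (an illustration only): the leaf indices become 4..7,
    # neither boundary is picked up on the first pass, both halve to 2..3 and then
    # to 1..1, where the root (index 1) contributes a single weighted noise draw --
    # i.e. the whole-range sum costs one node, as in a segment-tree range query.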
def _generate_noise(self, size: torch.Size, noise_std: float):
return torch.normal(
mean=0,
std=noise_std,
size=size,
device=self.device,
)
| canife-main | FLSim/flsim/privacy/privacy_engine.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from flsim.privacy.privacy_engine import (
GaussianPrivacyEngine,
IPrivacyEngine,
PrivacySetting,
TreePrivacyEngine,
)
class NoiseType(Enum):
TREE_NOISE = "tree"
GAUSSIAN = "guassian"
class PrivacyEngineFactory:
@classmethod
def create(
cls,
privacy_setting: PrivacySetting,
users_per_round: int,
num_total_users: int,
noise_type: NoiseType,
) -> IPrivacyEngine:
if noise_type == NoiseType.TREE_NOISE:
return TreePrivacyEngine(
privacy_setting=privacy_setting,
users_per_round=users_per_round,
num_total_users=num_total_users,
)
else:
return GaussianPrivacyEngine(
privacy_setting=privacy_setting,
users_per_round=users_per_round,
num_total_users=num_total_users,
)
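# Usage sketch (illustrative; the variable names are assumptions): the factory picks
# the engine implementation from the NoiseType flag and forwards everything else.
#
#   engine = PrivacyEngineFactory.create(
#       privacy_setting,
#       users_per_round=10,
#       num_total_users=1000,
#       noise_type=NoiseType.TREE_NOISE,
#   )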
| canife-main | FLSim/flsim/privacy/privacy_engine_factory.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/privacy/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
This file contains the functions for clipping clients' updates in
an FL simulation.
"""
from typing import Optional
import torch
from flsim.utils.fl.common import FLModelParamUtils
from torch import nn
__EPS__ = 1e-10
class UserUpdateClipper:
def __init__(self, precision: Optional[torch.dtype] = None):
self._cached_model_diff = None
self.precision = precision
def calc_model_diff(self, new_model: nn.Module, prev_model: nn.Module) -> nn.Module:
"""
Calculates the difference between the updated model and the old model
"""
if self._cached_model_diff is None: # for memory efficiency purposes
self._cached_model_diff = FLModelParamUtils.clone(new_model, self.precision)
FLModelParamUtils.linear_comb_models(
new_model, 1, prev_model, -1, self._cached_model_diff
)
return self._cached_model_diff
    def clip(self, model_diff: nn.Module, max_norm: float, enable_clipping: bool = True) -> float:
        """
        Clips the user update (stored in ``model_diff``) by computing a clip factor
        and using it to rescale the update in-place (``model_diff`` is modified).
        Returns the clip factor that was applied (1 when ``enable_clipping`` is False).
        """
max_norm = float(max_norm)
per_user_update_norm = self._calc_norm(model_diff.parameters())
clip_factor = self._calc_clip_factor(max_norm, per_user_update_norm) if enable_clipping else 1
with torch.no_grad():
for parameter in model_diff.parameters():
parameter.copy_(parameter * clip_factor)
return clip_factor
def _calc_clip_factor(self, max_norm: float, per_user_norm: float):
"""
        Calculates the clip factor that will be used to clip the user updates
"""
if max_norm < 0 or per_user_norm < 0:
raise ValueError("Error: max_norm and per_user_norm must be both positive.")
clip_factor = max_norm / (per_user_norm + __EPS__)
clip_factor = min(clip_factor, 1.0)
return clip_factor
def _calc_norm(self, params):
"""
Calculates the l-2 norm of the user updates
"""
norms = [param.norm(2) for param in params]
norm = torch.tensor(norms).norm(2)
return norm
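# Usage sketch (illustrative only; the variable names are assumptions): the clipper
# first materialises the update as new_model - prev_model, then rescales it in place
# so its L2 norm is at most max_norm, returning the factor that was applied.
#
#   clipper = UserUpdateClipper()
#   model_diff = clipper.calc_model_diff(new_model, prev_model)
#   clip_factor = clipper.clip(model_diff, max_norm=1.0)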
| canife-main | FLSim/flsim/privacy/user_update_clip.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass, field
from typing import List, NamedTuple, Optional
import numpy as np
@dataclass
class PrivacySetting:
"""
Contains setting related to Differential Privacy
"""
alphas: List[float] = field(
default_factory=lambda: np.arange(1.1, 100, 0.1).tolist()
) # Renyi privacy alpha range
noise_multiplier: float = 0.0 # Normalized Noise Variance
clipping_value: float = float("inf")
target_delta: float = 1e-5 # Maximum delta for (epsilon, delta) privacy
noise_seed: Optional[int] = None # [debug] Seed of the noise generation function
secure_rng: bool = False
class PrivacyBudget(NamedTuple):
"""
Encapsulates a privacy budget as (epsilon, delta)
"""
epsilon: float = float("inf")
alpha: float = float(-1)
delta: float = float("inf")
def __str__(self):
return f"eps = {self.epsilon}, delta = {self.delta}, alpha = {self.alpha}"
| canife-main | FLSim/flsim/privacy/common.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, Optional
import numpy as np
import torch
import torch.nn as nn
from flsim.active_user_selectors.simple_user_selector import (
SequentialActiveUserSelectorConfig,
)
from flsim.clients.base_client import ClientConfig
from flsim.clients.dp_client import DPClientConfig
from flsim.common.pytest_helper import assertEmpty, assertEqual, assertNotEmpty
from flsim.optimizers.async_aggregators import FedAvgWithLRFedBuffAggregatorConfig
from flsim.optimizers.local_optimizers import LocalOptimizerSGDConfig
from flsim.privacy.common import PrivacySetting
from flsim.reducers.base_round_reducer import ReductionType
from flsim.reducers.weighted_dp_round_reducer import WeightedDPRoundReducerConfig
from flsim.servers.aggregator import AggregationType
from flsim.servers.sync_dp_servers import SyncDPSGDServerConfig
from flsim.servers.sync_servers import SyncServerConfig
from flsim.trainers.sync_trainer import SyncTrainer, SyncTrainerConfig
from flsim.utils.async_trainer.async_staleness_weights import (
PolynomialStalenessWeightConfig,
)
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.sample_model import DummyAlphabetFLModel
from flsim.utils.test_utils import (
FakeMetricReporter,
MetricsReporterWithMockedChannels,
verify_models_equivalent_after_training,
)
from flsim.utils.tests.helpers.test_async_trainer_utils import (
get_fl_data_provider,
run_fl_training,
)
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from omegaconf import OmegaConf
from opacus import GradSampleModule
from opacus.optimizers import DPOptimizer
class TestDifferentialPrivacyIntegration:
def _get_weighted_dp_reducer_config(
self,
noise,
clipping,
reduction_type=ReductionType.WEIGHTED_SUM,
min_weight: float = 1e-6,
max_weight: float = 1.0,
):
return WeightedDPRoundReducerConfig(
reduction_type=reduction_type,
min_weight=min_weight,
max_weight=max_weight,
privacy_setting=PrivacySetting(
noise_multiplier=noise, clipping_value=clipping
),
)
def _create_optimizer(
self,
model: nn.Module,
data_provider,
lr: float,
momentum: float,
sample_level_dp: bool,
dp_config: Dict[str, Any],
train_batch_size: int,
):
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=momentum)
optimizer.zero_grad()
if sample_level_dp:
model = GradSampleModule(model)
optimizer = DPOptimizer(
optimizer=optimizer,
noise_multiplier=dp_config["sample_dp_noise_multiplier"],
max_grad_norm=dp_config["sample_dp_clipping_value"],
expected_batch_size=train_batch_size,
)
return optimizer
def _load_data(self, one_user: bool, data_size: int = 26):
"""
Loads the data, which is a Dummy alphabet, either for 1 user with
`data_size` samples, or for N (`data_size`) users, each with 1 sample.
"""
if one_user: # the single user gets the whole shard (which is the data)
shard_size = data_size
local_batch_size = data_size
else: # will have N (`data_size`) users, each with one sample
shard_size = 1
local_batch_size = 1
dummy_dataset = DummyAlphabetDataset(num_rows=data_size)
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, DummyAlphabetFLModel()
)
assertEqual(data_loader.num_total_users, data_size / shard_size)
assertEqual(data_loader.num_total_users, data_provider.num_train_users())
self.data_size = data_size # pyre-ignore
return data_provider, data_loader.train_batch_size
def _train_vanilla_pytorch_dp_model(
self,
lr: float,
momentum: float,
sample_level_dp: bool,
dp_config: Dict[str, Any],
):
"""
Trains a vanilla Pytorch DP model, without FL (there is 1 user)
"""
model_wrapper = DummyAlphabetFLModel()
data_provider, train_batch_size = self._load_data(one_user=True)
optimizer = self._create_optimizer(
model_wrapper.model,
data_provider,
lr,
momentum,
sample_level_dp,
dp_config,
train_batch_size,
)
model_wrapper.model.train()
for one_user_data in data_provider.train_users():
for batch in one_user_data.train_data():
batch_metrics = model_wrapper.fl_forward(batch)
loss = batch_metrics.loss
loss.backward()
optimizer.step()
return model_wrapper
def _train_fl_model(
self,
lr: float,
momentum: float,
one_user: bool,
dp_config: Optional[Dict[str, Any]] = None,
noise_func_seed: Optional[int] = None,
data_size: int = 26,
**kwargs
):
"""
Trains an FL model, with or without DP
"""
# create dummy FL model on alphabet
global_fl_model = DummyAlphabetFLModel()
data_provider, _ = kwargs.pop(
"data_provider", self._load_data(one_user, data_size)
)
world_size = 1
metrics_reporter = FakeMetricReporter()
if one_user:
users_per_round = 1
else: # otherwise, we have `data_size` users
users_per_round = data_size
epochs = kwargs.pop("epoch", 1)
users_per_round = kwargs.pop("users_per_round", users_per_round)
metrics_reporter = kwargs.pop("metrics_reporter", metrics_reporter)
eval_epoch_frequency = kwargs.pop("eval_epoch_frequency", 1.0)
train_metrics_reported_per_epoch = kwargs.pop(
"train_metrics_reported_per_epoch", 1
)
aggregation_type = kwargs.pop("aggregation_type", AggregationType.AVERAGE)
sync_trainer = SyncTrainer(
model=global_fl_model,
cuda_enabled=False,
**OmegaConf.structured(
SyncTrainerConfig(
users_per_round=users_per_round,
epochs=epochs,
always_keep_trained_model=False,
train_metrics_reported_per_epoch=train_metrics_reported_per_epoch,
report_train_metrics=True,
eval_epoch_frequency=eval_epoch_frequency,
do_eval=True,
report_train_metrics_after_aggregation=True,
client=DPClientConfig(
epochs=1,
optimizer=LocalOptimizerSGDConfig(lr=lr, momentum=momentum),
privacy_setting=PrivacySetting(
alphas=dp_config["alphas"],
noise_multiplier=dp_config["sample_dp_noise_multiplier"],
clipping_value=dp_config["sample_dp_clipping_value"],
target_delta=dp_config["delta"],
noise_seed=noise_func_seed,
),
)
if dp_config is not None
else ClientConfig(
epochs=1,
optimizer=LocalOptimizerSGDConfig(lr=lr, momentum=momentum),
),
server=SyncDPSGDServerConfig(
active_user_selector=SequentialActiveUserSelectorConfig(),
privacy_setting=PrivacySetting(
alphas=dp_config["alphas"],
noise_multiplier=dp_config["user_dp_noise_multiplier"],
clipping_value=dp_config["user_dp_clipping_value"],
target_delta=dp_config["delta"],
noise_seed=noise_func_seed,
),
aggregation_type=aggregation_type,
)
if dp_config is not None
else SyncServerConfig(),
)
),
)
global_fl_model, _eval_metric = sync_trainer.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
return global_fl_model
def test_dp_turned_off_by_params(self) -> None:
"""
Tests DP and no-DP produce the same exact model, when DP parameters are off.
Basically, tests the equivalence of calling SyncServer and SyncDPServer with noise = 0 and clip = inf
"""
lr = 0.1
momentum = 0.0
# first, call SyncTrainer (DP is off)
torch.manual_seed(1)
fl_model_with_vanilla_server = self._train_fl_model(
lr=lr, momentum=momentum, one_user=False, dp_config=None
)
# set DP parameters off.
off_dp_config = {
"alphas": [10, 100],
"sample_dp_noise_multiplier": 0.0,
"sample_dp_clipping_value": float("inf"),
"user_dp_noise_multiplier": 0.0,
"user_dp_clipping_value": float("inf"),
"delta": 0.00001,
}
torch.manual_seed(1)
fl_model_with_dp_server = self._train_fl_model(
lr=lr, momentum=momentum, one_user=False, dp_config=off_dp_config
)
error_msg = verify_models_equivalent_after_training(
fl_model_with_vanilla_server,
fl_model_with_dp_server,
model_init=None,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
def test_dp_ineffective(self) -> None:
"""
Tests DP and no-DP produce the same exact model, when DP parameters are ineffective.
Basically, tests the equivalence of the following 2 scenarios, with N users:
1. Calling SyncTrainer (this is "no DP")
2. Calling PrivateSyncTrainer, and DP (this is "DP") when parameters are ineffective
Note:
To make the dp_config ineffective, we need to set noise = 0 and clipping value to
        a large number. This is different from test_dp_turned_off_by_params(), which sets
        the clipping value to inf; this test actually applies DP, but because the parameter
        values make DP ineffective, the results are identical to when DP is OFF.
"""
lr = 0.1
momentum = 0.0
torch.manual_seed(1)
# Call vanilla sync server
fl_model_with_vanilla_server = self._train_fl_model(
lr=lr, momentum=momentum, one_user=False, dp_config=None
)
# Next call set DP parameters ineffective.
ineffective_dp_config = {
"alphas": [10, 100],
"sample_dp_noise_multiplier": 0.0,
"sample_dp_clipping_value": 9999.9,
"user_dp_noise_multiplier": 0.0,
"user_dp_clipping_value": 9999.9,
"delta": 0.00001,
}
torch.manual_seed(1)
fl_model_with_dp_server = self._train_fl_model(
lr=lr,
momentum=momentum,
one_user=False,
dp_config=ineffective_dp_config,
)
error_msg = verify_models_equivalent_after_training(
fl_model_with_vanilla_server,
fl_model_with_dp_server,
model_init=None,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
def test_frameworks_one_client_sample_dp_off(self) -> None:
"""
        Tests if Pytorch-DP and the FL simulator generate the same results, when
sample-level DP is off. Essentially, tests equivalence of the following
scenarios, when there is 1 user:
1. Using FL simulator, when sample-level DP is off
2. Using Pytorch DP, when sample-level DP is off
Note:
        For this test, user-level DP is off, since it is not relevant to Pytorch-DP's
provided functionality, which is sample-level DP.
"""
lr = 1e-2
momentum = 0.9
torch.manual_seed(1)
fl_model = self._train_fl_model(lr=lr, momentum=momentum, one_user=True)
torch.manual_seed(1)
vanilla_dp_model = self._train_vanilla_pytorch_dp_model(
lr=lr, momentum=momentum, sample_level_dp=False, dp_config={}
)
assertEqual(
FLModelParamUtils.get_mismatched_param(
[fl_model.fl_get_module(), vanilla_dp_model.fl_get_module()], 1e-6
),
"",
)
def test_frameworks_one_client_clipping_only(self) -> None:
"""
        Tests if Pytorch-DP and the FL simulator generate the same results, when there
is only 1 user and when sample-level DP is on (with noising off).
Essentially, tests equivalence of the following scenarios:
1. Using FL simulator, when there is one user and DP is on (with noise=0),
2. Using Pytorch DP (one user), when DP is on,
(DP parameters are the same for the 2 scenarios.)
Note:
        For this test, user-level DP is off, since it is not relevant to Pytorch-DP's
provided functionality, which is sample-level DP.
"""
lr = 0.25
momentum = 0.0
alphas = [1 + x / 10.0 for x in range(1, 100)] + [
float(y) for y in list(range(12, 64))
]
dp_config = {
"alphas": alphas,
"sample_dp_noise_multiplier": 0.0,
"sample_dp_clipping_value": 1.0,
"user_dp_noise_multiplier": 0.0,
"user_dp_clipping_value": float("inf"),
"delta": 0.00001,
}
torch.manual_seed(1)
fl_model = self._train_fl_model(
lr=lr, momentum=momentum, one_user=True, dp_config=dp_config
)
torch.manual_seed(1)
vanilla_dp_model = self._train_vanilla_pytorch_dp_model(
lr=lr, momentum=momentum, sample_level_dp=True, dp_config=dp_config
)
assertEqual(
FLModelParamUtils.get_mismatched_param(
[fl_model.fl_get_module(), vanilla_dp_model.fl_get_module()], 1e-6
),
"",
)
def test_user_dp_equivalent_sample_dp_when_one_client_one_example(
self,
) -> None:
"""
Tests if user-level DP is equivalent to sample-level DP, when we
have one user, and that user has one example (one epoch)
"""
lr = 1.0
momentum = 0.0
alphas = [1 + x / 10.0 for x in range(1, 100)] + [
float(y) for y in list(range(12, 64))
]
dp_config_user_dp_off = {
"alphas": alphas,
"sample_dp_noise_multiplier": 0.7,
"sample_dp_clipping_value": 1.0,
"user_dp_noise_multiplier": 0.0,
"user_dp_clipping_value": float("inf"),
"delta": 0.00001,
}
torch.manual_seed(1)
dp_model_sample_level_dp_on = self._train_fl_model(
lr=lr,
momentum=momentum,
one_user=True,
dp_config=dp_config_user_dp_off,
data_size=1,
noise_func_seed=1234,
)
dp_config_sample_dp_off = {
"alphas": alphas,
"sample_dp_noise_multiplier": 0,
"sample_dp_clipping_value": float("inf"),
"user_dp_noise_multiplier": 0.7,
"user_dp_clipping_value": 1.0,
"delta": 0.00001,
}
torch.manual_seed(1)
dp_model_user_level_dp_on = self._train_fl_model(
lr=lr,
momentum=momentum,
one_user=True,
dp_config=dp_config_sample_dp_off,
data_size=1,
noise_func_seed=1234,
)
error_msg = verify_models_equivalent_after_training(
dp_model_sample_level_dp_on,
dp_model_user_level_dp_on,
model_init=None,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
def test_user_dp_equivalent_sample_dp(self) -> None:
"""
Tests if user-level DP is equivalent to sample-level DP under a certain
degenerate condition. It tests the equivalence of the following:
1. User-level DP, with N users, 1 example each (can be N different examples),
2. Sample-level DP, with 1 user, N examples (can be N different examples),
under these Conditions:
Condition 1. [Just as a sanity check] when DP is off (user-level and sample-level)
Condition 2. When DP is on, noise_multiplier for both cases is 0 (i.e. clipping only)
        Condition 3. When DP is on, noise_multiplier for both cases is 1, manual seed is set
Note:
For both cases, we set lr = 1.0 and momentum = 0.0
"""
lr = 1.0
momentum = 0.0
# Condition 1
torch.manual_seed(1)
no_dp_model_one_user = self._train_fl_model(
lr=lr, momentum=momentum, one_user=True
)
torch.manual_seed(1)
no_dp_model = self._train_fl_model(lr=lr, momentum=momentum, one_user=False)
error_msg = verify_models_equivalent_after_training(
no_dp_model_one_user,
no_dp_model,
model_init=None,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
# Condition 2
alphas = [1 + x / 10.0 for x in range(1, 100)] + [
float(y) for y in list(range(12, 64))
]
dp_config_user_dp_off = {
"alphas": alphas,
"sample_dp_noise_multiplier": 0.0,
"sample_dp_clipping_value": 0.8,
"user_dp_noise_multiplier": 0,
"user_dp_clipping_value": 0.8,
"delta": 0.00001,
}
torch.manual_seed(1)
dp_model_one_user_sample_dp = self._train_fl_model(
lr=lr, momentum=momentum, one_user=True, dp_config=dp_config_user_dp_off
)
dp_config_sample_dp_off = {
"alphas": alphas,
"sample_dp_noise_multiplier": 0.0,
"sample_dp_clipping_value": 0.8,
"user_dp_noise_multiplier": 0.0,
"user_dp_clipping_value": 0.8,
"delta": 0.00001,
}
torch.manual_seed(1)
dp_model_user_dp = self._train_fl_model(
lr=lr,
momentum=momentum,
one_user=False,
dp_config=dp_config_sample_dp_off,
)
error_msg = verify_models_equivalent_after_training(
dp_model_one_user_sample_dp,
dp_model_user_dp,
model_init=None,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
# Condition 3
dp_config_user_dp_off = {
"alphas": alphas,
"sample_dp_noise_multiplier": 1,
"sample_dp_clipping_value": 0.5,
"user_dp_noise_multiplier": 0.0,
"user_dp_clipping_value": 0.5,
"delta": 0.00001,
}
torch.manual_seed(1)
dp_model_one_user_sample_dp = self._train_fl_model(
lr=lr,
momentum=momentum,
one_user=True,
dp_config=dp_config_user_dp_off,
noise_func_seed=1000,
)
dp_config_sample_dp_off = {
"alphas": alphas,
"sample_dp_noise_multiplier": 0.0,
"sample_dp_clipping_value": 0.5,
"user_dp_noise_multiplier": 1,
"user_dp_clipping_value": 0.5,
"delta": 0.00001,
}
torch.manual_seed(1)
dp_model_user_dp = self._train_fl_model(
lr=lr,
momentum=momentum,
one_user=False,
dp_config=dp_config_sample_dp_off,
noise_func_seed=1000,
)
error_msg = verify_models_equivalent_after_training(
dp_model_one_user_sample_dp,
dp_model_user_dp,
None,
rel_epsilon=1e-6,
abs_epsilon=1e-6,
)
assertEmpty(error_msg, msg=error_msg)
def test_private_trainer_reporting(self) -> None:
lr = 1.0
momentum = 0.0
alphas = [1 + x / 10.0 for x in range(1, 100)] + [
float(y) for y in list(range(12, 64))
]
dp_config = {
"alphas": alphas,
"sample_dp_noise_multiplier": 0.0,
"sample_dp_clipping_value": 2.0,
"user_dp_noise_multiplier": 1.0,
"user_dp_clipping_value": 2.0,
"delta": 0.00001,
}
torch.manual_seed(1)
metrics_reporter = MetricsReporterWithMockedChannels()
self._train_fl_model(
lr=lr,
momentum=momentum,
one_user=False,
dp_config=dp_config,
users_per_round=4,
epochs=5,
metrics_reporter=metrics_reporter,
train_metrics_reported_per_epoch=10,
eval_epoch_frequency=0.1,
)
        # we have 26 users, 4 users per round, which makes 7 rounds per epoch
# we are reporting train, aggregation, and eval metrics all
# at 10 rounds per epoch, so we should get a total of 21 reports
def count_word(result, word):
return str(result).count(word)
        # check for existence of sample_dp and user_dp in reports.
        # they are logged only during aggregation, and we aggregated 7 times.
        # the print to stdout reports sample_dp, which is a dict of four values,
        # once per aggregation, hence we should get 7 occurrences.
assertEqual(
count_word(metrics_reporter.stdout_results, "sample level dp"),
7,
metrics_reporter.stdout_results,
)
# summary writer breaks dict to 4 plots, hence we get 28 occurrences.
assertEqual(
count_word(metrics_reporter.tensorboard_results, "sample level dp"),
28,
metrics_reporter.tensorboard_results,
)
# for user_dp we only log one value per round.
assertEqual(
count_word(metrics_reporter.stdout_results, "user level dp"),
7,
metrics_reporter.stdout_results,
)
assertEqual(
count_word(metrics_reporter.tensorboard_results, "user level dp"),
7,
metrics_reporter.tensorboard_results,
)
def _test_dp_no_dp_same_weighted_async(
self,
noise,
clip_norm,
data_provider,
buffer_size,
epochs: int = 1,
):
local_lr = np.random.sample()
global_lr = np.random.sample()
dp_model = DummyAlphabetFLModel()
nondp_model = FLModelParamUtils.clone(dp_model)
staleness_config = PolynomialStalenessWeightConfig(
exponent=0.5, avg_staleness=0
)
aggregator_config_nondp = FedAvgWithLRFedBuffAggregatorConfig(
lr=global_lr, buffer_size=buffer_size
)
aggregator_config_dp = FedAvgWithLRFedBuffAggregatorConfig(
lr=global_lr, buffer_size=buffer_size
)
for reduction_type in [
ReductionType.WEIGHTED_SUM,
ReductionType.WEIGHTED_AVERAGE,
]:
nondp_fl_trained_model, _ = run_fl_training(
fl_model=nondp_model,
fl_data_provider=data_provider,
epochs=epochs,
local_lr=local_lr,
aggregator_config=aggregator_config_nondp,
training_duration_mean=1,
staleness_weight_config=staleness_config,
)
reducer_config = self._get_weighted_dp_reducer_config(
noise=noise,
clipping=clip_norm,
reduction_type=reduction_type,
)
aggregator_config_dp.reducer = reducer_config
dp_fl_trained_model, _ = run_fl_training(
fl_model=dp_model,
fl_data_provider=data_provider,
epochs=epochs,
local_lr=local_lr,
aggregator_config=aggregator_config_dp,
training_duration_mean=1,
staleness_weight_config=staleness_config,
)
return verify_models_equivalent_after_training(
nondp_fl_trained_model,
dp_fl_trained_model,
rel_epsilon=1e-4,
abs_epsilon=1e-6,
)
def test_user_dp_variable_weights_same(self) -> None:
"""
        There are two cases when DP and NonDP should be equal:
1) privacy is on, noise = 0, and clip norm is a large value
2) privacy is on, noise * clip_norm << abs_epsilon
Note:
The current implementation has privacy on when noise >= 0 or clip_norm < inf
"""
data_provider = get_fl_data_provider(
num_examples=100,
num_fl_users=10,
examples_per_user=10,
batch_size=5,
model=DummyAlphabetFLModel(),
)
buffer_size = np.random.randint(1, 10)
# sanity check DP == NonDP when privacy off
error_msg = self._test_dp_no_dp_same_weighted_async(
noise=-1,
clip_norm=float("inf"),
data_provider=data_provider,
buffer_size=buffer_size,
)
assertEmpty(error_msg, msg=error_msg)
# scenario 1
error_msg = self._test_dp_no_dp_same_weighted_async(
noise=0,
clip_norm=1e6,
data_provider=data_provider,
buffer_size=buffer_size,
)
assertEmpty(error_msg, msg=error_msg)
# scenario 2
error_msg = self._test_dp_no_dp_same_weighted_async(
noise=1e-14,
clip_norm=1e6,
data_provider=data_provider,
buffer_size=buffer_size,
)
assertEmpty(error_msg, msg=error_msg)
def test_user_dp_variable_weights_different(self) -> None:
"""
        Tests that when the noise is a non-trivial number, the
        DP model should not equal the NonDP model
"""
buffer_size = np.random.randint(1, 10)
data_provider = get_fl_data_provider(
num_examples=10,
num_fl_users=10,
examples_per_user=1,
batch_size=1,
model=DummyAlphabetFLModel(),
)
is_different_msg = self._test_dp_no_dp_same_weighted_async(
# noise between 0.1 and 1.0
noise=max(0.1, np.random.sample()),
clip_norm=10,
data_provider=data_provider,
buffer_size=buffer_size,
)
assertNotEmpty(is_different_msg, msg=is_different_msg)
| canife-main | FLSim/flsim/privacy/tests/test_dp_integration.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/privacy/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
import torch
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertFalse,
assertTrue,
)
from flsim.privacy.user_update_clip import UserUpdateClipper
from flsim.utils import test_utils as utils
@pytest.fixture(scope="class")
def prepare_user_update_clipper_test(request) -> None:
request.cls.user_update_clipper = UserUpdateClipper()
def _init_user_model(param_value):
user_model = utils.TwoFC()
user_model.fill_all(param_value) # fill all parameters with a number
return user_model
request.cls.previous_user_model = _init_user_model(param_value=6.0)
request.cls.updated_user_model = _init_user_model(param_value=7.0)
request.cls.original_model_diff = request.cls.user_update_clipper.calc_model_diff(
request.cls.updated_user_model, request.cls.previous_user_model
)
request.cls.original_model_diff_params = [
p for p in request.cls.original_model_diff.parameters() if p.requires_grad
]
@pytest.mark.usefixtures("prepare_user_update_clipper_test")
class TestUserUpdateClipper:
def _init_clipped_model_diff(self, max_norm):
clipped_model_diff = utils.TwoFC()
clipped_model_diff.load_state_dict(self.original_model_diff.state_dict())
self.user_update_clipper.clip(clipped_model_diff, max_norm)
return clipped_model_diff
def test_calc_clip_factor(self) -> None:
"""
Tests that the clip factor for user updates is calculated correctly.
"""
# pyre-fixme[16]: `TestUserUpdateClipper` has no attribute
# `user_update_clipper`.
clip_factor = self.user_update_clipper._calc_clip_factor(
max_norm=5, per_user_norm=10
)
assertAlmostEqual(clip_factor, 0.5, places=3)
clip_factor = self.user_update_clipper._calc_clip_factor(
max_norm=1, per_user_norm=1
)
assertAlmostEqual(clip_factor, 1, places=3)
clip_factor = self.user_update_clipper._calc_clip_factor(
max_norm=2, per_user_norm=0.8
)
assertAlmostEqual(clip_factor, 1, places=3)
def test_calc_user_update_norm(self) -> None:
"""
Tests that the user update l-2 norms are calculated correctly.
"""
model = utils.TwoFC()
model.fill_all(2.0)
model_params = model.parameters()
# norm = sqrt(21*2^2)=sqrt(84)=9.16515138991168
# pyre-fixme[16]: `TestUserUpdateClipper` has no attribute
# `user_update_clipper`.
norm = self.user_update_clipper._calc_norm(model_params)
assertTrue(torch.allclose(norm, torch.tensor(9.16515138991168), rtol=1e-06))
model.fill_all(1.0)
model_params = model.parameters()
# norm = sqrt(21*1^2)=sqrt(21)=4.58257569495584
norm = self.user_update_clipper._calc_norm(model_params)
assertTrue(torch.allclose(norm, torch.tensor(4.58257569495584), rtol=1e-06))
def test_clipped_updates_are_smaller(self) -> None:
"""
Tests that user updates are clipped and their value is smaller than
the original updates
"""
# assert the parameters of model_diff are all = (7 - 6 = 1)
# pyre-fixme[16]: `TestUserUpdateClipper` has no attribute
# `original_model_diff_params`.
for p in self.original_model_diff_params:
assertTrue(torch.allclose(p.float(), torch.tensor(1.0)))
clipped_model_diff = self._init_clipped_model_diff(0.0003)
clipped_model_diff_params = [
p for p in clipped_model_diff.parameters() if p.requires_grad
]
for original, clipped in zip(
self.original_model_diff_params, clipped_model_diff_params
):
assertTrue(torch.all(original.gt(clipped)))
def test_clipped_user_updates_non_zero(self) -> None:
"""
Tests that user updates are not zero by clipping
"""
clipped_model_diff = self._init_clipped_model_diff(0.0003)
clipped_model_diff_params = [
p for p in clipped_model_diff.parameters() if p.requires_grad
]
for clipped in clipped_model_diff_params:
allzeros = torch.zeros_like(clipped)
assertFalse(torch.allclose(clipped, allzeros))
def test_clipping_to_high_value_does_not_clip(self) -> None:
"""
Tests that when clip value is set too high, user
updates are not clipped
"""
clipped_model_diff = self._init_clipped_model_diff(9999)
mismatched = utils.verify_models_equivalent_after_training(
# pyre-fixme[16]: `TestUserUpdateClipper` has no attribute
# `original_model_diff`.
self.original_model_diff,
clipped_model_diff,
)
assertEqual(mismatched, "")
| canife-main | FLSim/flsim/privacy/tests/test_user_update_clipper.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch
import torch.nn as nn
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertFalse,
assertLessEqual,
assertNotEqual,
assertRaises,
assertTrue,
)
from flsim.privacy.common import PrivacySetting
from flsim.privacy.privacy_engine import (
GaussianPrivacyEngine,
PrivacyEngineNotAttachedException,
TreePrivacyEngine,
)
from flsim.privacy.privacy_engine_factory import NoiseType, PrivacyEngineFactory
from flsim.utils import test_utils as utils
from flsim.utils.fl.common import FLModelParamUtils
from opacus.accountants.analysis import rdp as privacy_analysis
class TestGaussianPrivacyEngine:
def _init_privacy_engine(
self,
alphas=[1 + x / 10.0 for x in range(1, 100)],
noise_multiplier=1.0,
target_delta=1e-5,
users_per_round=10,
num_total_users=10,
global_model_parameter_val=5.0,
noise_seed=0,
):
privacy_setting = PrivacySetting(
alphas=alphas,
noise_multiplier=noise_multiplier,
target_delta=target_delta,
noise_seed=noise_seed,
)
privacy_engine = GaussianPrivacyEngine(
privacy_setting=privacy_setting,
users_per_round=users_per_round,
num_total_users=num_total_users,
)
global_model = utils.TwoFC() # This model has 15 weights and 6 biases
global_model.fill_all(global_model_parameter_val)
privacy_engine.attach(global_model)
return privacy_engine
def _calc_eps(self, sample_rate, noise_multiplier, steps, alphas, delta):
rdp = privacy_analysis.compute_rdp(
q=sample_rate, noise_multiplier=noise_multiplier, steps=steps, orders=alphas
)
eps, _ = privacy_analysis.get_privacy_spent(orders=alphas, rdp=rdp, delta=delta)
return eps
def test_privacy_analysis_alpha_in_alphas(self):
"""
Tests that the optimal alphas of DP analysis is in the range
of the original alphas.
"""
privacy_engine = self._init_privacy_engine()
privacy_budget = privacy_engine.get_privacy_spent()
assertTrue(privacy_budget.alpha in privacy_engine.alphas)
def test_privacy_analysis_epsilon_reasonable(self):
"""
Tests that the epsilon is greater than 1 in normal settings.
Also, when we do not add any noise, the privacy loss should
be infinity.
"""
privacy_engine = self._init_privacy_engine()
privacy_budget = privacy_engine.get_privacy_spent()
assertTrue(privacy_budget.epsilon > 0)
privacy_engine.noise_multiplier = 0
privacy_budget = privacy_engine.get_privacy_spent()
assertTrue(privacy_budget.epsilon == float("inf"))
def test_privacy_analysis_epsilon(self):
"""
Tests that the epsilon calculations are correct
"""
alphas = [1 + x / 10.0 for x in range(1, 100)]
noise_multiplier = 1.5
target_delta = 1e-5
num_users = 1000
num_users_per_round = 50
steps = num_users // num_users_per_round
user_sampling_rate = num_users_per_round / num_users
privacy_engine = self._init_privacy_engine(
alphas=alphas,
noise_multiplier=noise_multiplier,
target_delta=target_delta,
num_total_users=num_users,
users_per_round=num_users_per_round,
)
model_diff = utils.TwoFC()
        for _ in range(steps):  # adding noise will increase the steps
privacy_engine.add_noise(model_diff, 1.0)
privacy_budget = privacy_engine.get_privacy_spent()
eps = self._calc_eps(
user_sampling_rate, noise_multiplier, steps, alphas, target_delta
)
assertEqual(privacy_budget.epsilon, eps)
def test_noise_added(self):
"""
Tests that noise is successfully added to a model update, by
checking that the model update after noise addition is different
from the original model update.
"""
model_diff = utils.TwoFC() # model update
model_diff.fill_all(1.0)
model_diff_before_noise = FLModelParamUtils.clone(model_diff)
privacy_engine = self._init_privacy_engine()
privacy_engine.add_noise(model_diff, sensitivity=0.5)
mismatched = utils.verify_models_equivalent_after_training(
model_diff_before_noise, model_diff
)
assertNotEqual(mismatched, "")
def test_deterministic_noise_addition(self):
"""
Tests when the noise seed is set to the same value, we get
the same (i.e. deterministic) noise-added model updates. It
also tests when the seed is set to different values, we will
get different noise-added model updates.
"""
model_diff = utils.TwoFC() # model update
model_diff.fill_all(1.0)
model_diff_another_seed = FLModelParamUtils.clone(model_diff)
model_diff_same_seed = FLModelParamUtils.clone(model_diff)
privacy_engine = self._init_privacy_engine(noise_seed=1003)
privacy_engine.add_noise(model_diff, sensitivity=0.5)
privacy_engine = self._init_privacy_engine(noise_seed=2000)
privacy_engine.add_noise(model_diff_another_seed, sensitivity=0.5)
mismatched = utils.verify_models_equivalent_after_training(
model_diff, model_diff_another_seed
)
assertNotEqual(mismatched, "")
privacy_engine = self._init_privacy_engine(noise_seed=1003)
privacy_engine.add_noise(model_diff_same_seed, sensitivity=0.5)
mismatched = utils.verify_models_equivalent_after_training(
model_diff, model_diff_same_seed
)
assertEqual(mismatched, "")
def test_not_attached_validator(self):
"""
Tests that the Privacy Engine throws a not attach
exception if it is not properly attached.
"""
model_diff = utils.TwoFC() # model update
model_diff.fill_all(1.0)
privacy_setting = PrivacySetting(
alphas=[1 + x / 10.0 for x in range(1, 100)],
noise_multiplier=1.0,
target_delta=1e-6,
)
privacy_engine = GaussianPrivacyEngine(
privacy_setting=privacy_setting, users_per_round=1, num_total_users=1
)
sensitivity = 0.5
with assertRaises(PrivacyEngineNotAttachedException):
privacy_engine.add_noise(model_diff, sensitivity)
raised_exception = False
global_model = utils.TwoFC()
global_model.fill_all(5.0)
privacy_engine.attach(global_model)
try:
privacy_engine.add_noise(model_diff, sensitivity)
except PrivacyEngineNotAttachedException:
raised_exception = True
assertFalse(raised_exception)
class TestTreePrivacyEngine:
def _create_delta(self, dim, value=0.0):
delta = nn.Linear(dim, 1)
delta.bias.data.fill_(value)
delta.weight.data.fill_(value)
return delta, FLModelParamUtils.clone(delta)
def _count_bits(self, n: int):
"""
        Returns the number of
        1 bits in the binary representation of n
"""
count = 0
while n:
n &= n - 1
count += 1
return count
@pytest.mark.parametrize("num_leaf, max_height", [(4, 2), (8, 3), (16, 4)])
def test_build_tree(self, num_leaf, max_height):
"""
Test that build tree logic is correct.
For any binary tree with n leaves, the tree's height
will be log2(n) tall
"""
tree = TreePrivacyEngine.build_tree(num_leaf)
for node in tree:
assertLessEqual(node.height, math.ceil(math.log2(num_leaf)))
def test_basic_tree_node_sensitivity(self):
"""
        This is a small test. If we set the noise in each node to 1,
        the returned noise should equal the number of
        1 bits in the binary representation of the i-th step
"""
def generate_noise(*args):
return 1
num_steps = 32
tree = TreePrivacyEngine(
PrivacySetting(noise_multiplier=1.0, noise_seed=0),
users_per_round=1,
num_total_users=num_steps,
efficient_tree=False,
)
tree._generate_noise = MagicMock(side_effect=generate_noise)
for i in range(num_steps):
cumsum = tree.range_sum(0, i, size=torch.Size([1]), sensitivity=1.0)
bits = float(self._count_bits(i + 1))
assertEqual(cumsum, bits)
@pytest.mark.parametrize(
"upr, n_users, noise_multiplier, exp_var",
[
(4, 4, 1, 0.57),
(7, 7, 1, 2.23),
(8, 8, 1, 0.53),
(8, 8, 2, 2.13),
(8, 8, 0.5, 0.13),
],
)
def test_tree_noise_sum_expected(self, upr, n_users, noise_multiplier, exp_var):
def test_one_trial():
delta, _ = self._create_delta(dim=1000, value=0)
setting = PrivacySetting(
noise_multiplier=noise_multiplier,
noise_seed=0,
)
privacy_engine = PrivacyEngineFactory.create(
setting,
users_per_round=upr,
num_total_users=n_users,
noise_type=NoiseType.TREE_NOISE,
)
privacy_engine.add_noise(delta, sensitivity=1.0)
noised_delta = torch.flatten(
torch.stack(
[p for name, p in delta.named_parameters() if "weight" in name]
)
)
return torch.var(noised_delta).item()
num_trials = 5
assertAlmostEqual(
np.mean([test_one_trial() for _ in range(num_trials)]), exp_var, delta=0.15
)
@pytest.mark.parametrize(
"steps, upr, n_users, sigma, exp_var",
[
(4, 4, 4, 1, 0.57),
(7, 7, 7, 1, 2.23),
(8, 8, 8, 1, 0.53),
(8, 8, 8, 2, 2.13),
(8, 8, 8, 0.5, 0.13),
],
)
def test_range_sum_noise_expected(self, steps, upr, n_users, sigma, exp_var):
def test_one_trial():
setting = PrivacySetting(
noise_multiplier=sigma,
noise_seed=2,
)
privacy_engine = PrivacyEngineFactory.create(
setting,
users_per_round=upr,
num_total_users=n_users,
noise_type=NoiseType.TREE_NOISE,
)
for i in range(steps):
sum_ = privacy_engine.range_sum(0, i, torch.Size((1000,)), 1.0)
return torch.var(sum_)
num_trials = 5
assertAlmostEqual(
np.mean([test_one_trial() for _ in range(num_trials)]),
exp_var,
delta=0.15,
)
@pytest.mark.parametrize(
"n_users, upr, sigma, epsilon",
[
(1600, 100, 4.03, 4.19),
(1600, 100, 6.21, 2.60),
(1600, 100, 8.83, 1.77),
],
)
def test_privacy_analysis_epsilon(self, n_users, upr, sigma, epsilon):
delta, _ = self._create_delta(dim=1000, value=0)
setting = PrivacySetting(
noise_multiplier=sigma,
noise_seed=1,
alphas=np.arange(1.01, 100, 0.01).tolist(),
target_delta=1e-6,
)
privacy_engine = PrivacyEngineFactory.create(
setting,
users_per_round=upr,
num_total_users=n_users,
noise_type=NoiseType.TREE_NOISE,
)
for _ in range(n_users // upr):
privacy_engine.add_noise(delta, sensitivity=1.0)
budget = privacy_engine.get_privacy_spent()
assertAlmostEqual(budget.epsilon, epsilon, delta=0.5)
| canife-main | FLSim/flsim/privacy/tests/test_privacy_engine.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
from dataclasses import dataclass
from typing import Dict, Iterator, Tuple
import torch
from flsim.common.logger import Logger
from flsim.utils.config_utils import fullclassname, init_self_cfg
from hydra.utils import instantiate
from omegaconf import MISSING
from torch import nn
class FixedPointConverter:
r"""
The main class that is responsible for conversion between
fixed point and floating point.
"""
MAX_WIDTH_BYTES = 8 # code handles up to 7 bytes, due to division in overflow calc
logger: logging.Logger = Logger.get_logger(__name__)
def __init__(self, **kwargs):
r"""
Args:
cfg: The config for FixedPointConverter
Raises:
ValueError: if the ``num_bytes`` is not between 1 and 8, or if
``config.scaling_factor`` is not greater than 0.
"""
init_self_cfg(
self,
component_class=__class__,
config_class=FixedPointConfig,
**kwargs,
)
if self.cfg.num_bytes < 1 or self.cfg.num_bytes > self.MAX_WIDTH_BYTES:
error_msg = (
f"Width {self.cfg.num_bytes} is not supported. "
f"Please enter a width between 1 and {self.MAX_WIDTH_BYTES}."
)
raise ValueError(error_msg)
if self.cfg.scaling_factor <= 0:
raise ValueError("scaling factor must be greater than 0.")
num_bits = self.cfg.num_bytes * 8
self.max_value = 2 ** (num_bits - 1) - 1
self.min_value = -(2 ** (num_bits - 1))
self.scaling_factor = self.cfg.scaling_factor
self._convert_overflows = 0 # during fixedpoint conversion
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def to_fixedpoint(self, numbers: torch.Tensor) -> torch.Tensor:
"""
Converts numbers in a tensor from floating point to fixed point.
During conversion, the floats are multiplied by ``scaling_factor``.
Now if some of these numbers are outside the range that can be represented by
``num_bytes`` bytes, they will be clamped to fit in the range.
Args:
numbers: the tensor containing the floating point numbers to convert
Returns:
A tensor containing the converted numbers to fixed point.
Notes:
It also updates the number of convert overflows (the number of underflows
are not yet considered)
"""
numbers = numbers.mul(self.scaling_factor)
overflow_matrix = torch.gt(numbers, self.max_value)
self._convert_overflows += int(torch.sum(overflow_matrix).item())
numbers = numbers.clamp(self.min_value, self.max_value)
return torch.round(numbers)
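    # Worked example for to_fixedpoint (an illustration only): with num_bytes=1 the
    # representable range is [-128, 127], so with scaling_factor=100 a float 0.53
    # maps to round(53.0) = 53, while 2.0 maps to 200, is counted as one convert
    # overflow, and is clamped to 127.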
def to_float(self, numbers: torch.Tensor) -> torch.Tensor:
"""
Converts numbers in a tensor from fixed point to floating point.
        Note that this method does not check whether the fixed point numbers
        are within the range of numbers that can be represented by
        ``num_bytes`` bytes.
Args:
numbers: the tensor containing the fixed point numbers to convert
Returns:
A tensor containing the converted number to floating point.
"""
return torch.true_divide(numbers, self.scaling_factor)
def get_convert_overflow(self, reset: bool = False):
"""
Reports the conversion overflow and if reset is set, it resets the
conversion overflow for the next call (i.e., the next round)
Args:
reset: whether to reset the conversion overflow
"""
overflow = self._convert_overflows
if reset:
self._convert_overflows = 0
return overflow
def utility_config_flatter(
model: nn.Module, flat_config: FixedPointConfig
) -> Dict[str, FixedPointConfig]:
"""
A utility function to use a "flat" (same config for all layers)
FixedPointConfig for all layers of a model.
Args:
model: the reference model to obtain the named parameters
flat_config: The flat config to use for all layers
Returns:
returns the flat fixedpoint_config_dict
"""
config: Dict[str, FixedPointConfig] = {}
for name, _ in model.named_parameters():
config[name] = flat_config
return config
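# Configuration sketch (illustrative; the num_bytes / scaling_factor values are
# assumptions, not recommendations): reuse one FixedPointConfig for every layer and
# hand the resulting dict to SecureAggregator below.
#
#   flat = FixedPointConfig(num_bytes=4, scaling_factor=10_000)
#   secure_aggregator = SecureAggregator(utility_config_flatter(model, flat))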
class SecureAggregator:
r"""
The main class that is responsible for secure aggregation.
Notes:
Since this is a simulation of secure aggregation, it is simplified and
not all details of secure aggregation are implemented. For instance, the
noise generation, sharing random seed, denoising for secure aggregation
are not implemented. Also, entities such as secure enclaves are not
implemented.
"""
def __init__(
self,
config: Dict[str, FixedPointConfig],
):
r"""
Args:
config: a dictionary of fixed-point configs for different layers of
neural network. If the utility ``utility_config_flatter`` is used,
same config will be used for all layers of the neural network.
"""
self.converters = {}
for key in config.keys():
self.converters[key] = instantiate(config[key])
self._aggregate_overflows = 0 # overflow during aggregation of model parameters
def _check_converter_dict_items(self, model: nn.Module) -> None:
"""
Checks if all layers of a model have their corresponding configs
Args:
model: the model
Raises:
ValueError: If some layers of the model do not have their
corresponding configs
"""
unset_configs = set(model.state_dict()) - set(self.converters)
if unset_configs:
error_msg = (
"Not all "
"layers have their corresponding fixed point config. "
f"The layers {unset_configs} do not have configs."
)
raise ValueError(error_msg)
def params_to_fixedpoint(self, model: nn.Module) -> None:
"""
Converts parameters of a model from floating point to fixed point.
Args:
model: the model whose parameters will be converted
Raises:
ValueError: If some layers of the model do not have their
corresponding configs
"""
self._check_converter_dict_items(model)
state_dict = model.state_dict()
for name in state_dict.keys():
converter = self.converters[name]
state_dict[name] = converter.to_fixedpoint(state_dict[name])
converter.logger.debug(
f"{name} has "
f"{converter.get_convert_overflow(reset=False)} overflow(s)"
f"during fixed point conversion"
)
model.load_state_dict(state_dict)
def params_to_float(self, model: nn.Module) -> None:
"""
Converts parameters of a model from fixed point to floating point.
Args:
model: the model whose parameters will be converted
Raises:
ValueError: If some layers of the model do not have their
corresponding configs
"""
self._check_converter_dict_items(model)
state_dict = model.state_dict()
for name in state_dict.keys():
state_dict[name] = self.converters[name].to_float(state_dict[name])
model.load_state_dict(state_dict)
def get_aggregate_overflow(self, reset: bool = False):
"""
        Reports the aggregation overflow and, if reset is set, resets the
        aggregation overflow for the next call (i.e., the next round)
Args:
reset: whether to reset the aggregatation overflow
"""
overflow = self._aggregate_overflows
if reset:
self._aggregate_overflows = 0
return overflow
def _generate_noise_mask(
self, update_params: Iterator[Tuple[str, nn.Parameter]]
) -> Iterator[Tuple[str, nn.Parameter]]:
"""
Generates noise mask, same shape as the update params
Args:
update_params: the parameters of the update sent from
clients. Used to infer the shape of the noise mask
Returns:
noise mask
"""
pass
def apply_noise_mask(
self, update_params: Iterator[Tuple[str, nn.Parameter]]
) -> None:
"""
Applies noise mask to the parameters of the update sent from
clients.
Args:
update_params: the parameters of the update sent from
clients.
Note:
To properly implement this method, call ``_generate_noise_mask()``
as ``noise_mask = self._generate_noise_mask(update_params)``. Then
add the ``noise_mask`` to ``update_params`` and return the new
``update_params``.
"""
pass
def _get_denoise_mask(self) -> Iterator[Tuple[str, nn.Parameter]]:
"""
Gets the aggregated denoised mask for all participating clients
from secure enclave.
Returns:
aggregated denoised mask for all participating clients
"""
pass
def apply_denoise_mask(
self, model_aggregate_params: Iterator[Tuple[str, nn.Parameter]]
) -> None:
"""
Applies denoise mask to the noised aggregated updates from clients
Args:
            model_aggregate_params: the parameters of the noised aggregated
client updates. Used to infer the shape of the denoise mask
Note:
To properly implement this method, call ``_get_denoise_mask()``
as ``denoise_mask = self._get_denoise_mask()``. Then add the
``denoise_mask`` to ``model_aggregate_params`` and return the
new ``model_aggregate_params``.
"""
pass
def update_aggr_overflow_and_model(
self,
model: nn.Module,
):
"""
This method is called every time after a delta (in fixedpoint format)
is received from a client. This method updates the overflow counter
due to overflows during aggregation. It also adjusts the values of the
``model`` based on max value related to the fixedpoint (see notes).
Args:
model: the buffered model that holds the current sum, in
fixedpoint format.
Notes:
This is an example to show how this method adjusts the input model
based on min and max values of fixedpoint. If we have one parameter,
and if num_bytes=1 (allowed range is -128 to +127), when in aggregation
we add delta=40 to model=90, the input model would be 130. This
method adjusts 130 to 2 (i.e. 130%128) since 130 is outside the range.
Currently we only keep track of overflows, hence underflows are not
monitored.
"""
state_dict = model.state_dict()
for name in state_dict.keys():
numbers = state_dict[name]
converter = self.converters[name]
overflow_matrix = torch.div( # FIXME: div blows up when MAX_WIDTH_BYTES >7
numbers, converter.max_value + 1, rounding_mode="floor"
)
overflow_matrix = torch.where(
overflow_matrix < 0,
torch.zeros(overflow_matrix.size()),
overflow_matrix,
) # zero out negative entries since we are only interested in overflows
self._aggregate_overflows += int(torch.sum(overflow_matrix).item())
converter.logger.debug(
f"{name} has "
f"{self._aggregate_overflows} overflow(s) during aggregation"
)
numbers = torch.where(
numbers >= 0, torch.remainder(numbers, converter.max_value + 1), numbers
)
numbers = torch.where(
numbers < 0, torch.remainder(numbers, converter.min_value), numbers
)
state_dict[name] = numbers
model.load_state_dict(state_dict)
def calc_avg_overflow_percentage(
self,
num_users: int,
model: nn.Module,
) -> Tuple[float, float]:
"""
        Calculates the percentage of average overflow over all model layers,
        with regard to the number of model parameters. Also resets the
overflow counters to make them ready for the next round.
Args:
num_users: the total number of users in the system
model: the global model
Notes:
            The assumption here is that the model is always the same across
clients and server, since we have one object of secure aggregator,
and this object assumes the model is same for all clients and server.
"""
num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
convert_overflow_perc = sum(
converter.get_convert_overflow(reset=True) * 100
for converter in self.converters.values()
) / (num_params * num_users)
aggregate_overflow_perc = (
self.get_aggregate_overflow(reset=True) * 100 / (num_params * num_users)
)
return convert_overflow_perc, aggregate_overflow_perc
@dataclass
class FixedPointConfig:
_target_: str = fullclassname(FixedPointConverter)
_recursive_: bool = False
# size in bytes of single fixed point number. 1 to 8 inclusive.
num_bytes: int = MISSING
# multiplier to convert from floating to fixed point
scaling_factor: int = MISSING
| canife-main | FLSim/flsim/secure_aggregation/secure_aggregator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .secure_aggregator import FixedPointConfig
ConfigStore.instance().store(
name="base_fixedpoint",
node=FixedPointConfig,
group="fixedpoint",
)
| canife-main | FLSim/flsim/secure_aggregation/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/secure_aggregation/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.common.pytest_helper import assertEqual, assertNotEqual
from flsim.secure_aggregation.secure_aggregator import FixedPointConfig
from flsim.servers.aggregator import AggregationType
from flsim.servers.sync_secagg_servers import SyncSecAggServerConfig
from flsim.servers.sync_servers import SyncServerConfig
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.sample_model import DummyAlphabetFLModel
from flsim.utils.test_utils import FakeMetricReporter, MetricsReporterWithMockedChannels
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from flsim.utils.tests.helpers.test_sync_trainer_utils import create_sync_trainer
class TestSecureAggregationIntegration:
def _load_data(self, num_users: int = 26):
"""
Loads the data, which is a Dummy alphabet for N (`num_users`) users,
each with 1 sample.
"""
shard_size = 1
local_batch_size = 1
dummy_dataset = DummyAlphabetDataset(num_rows=num_users)
(
data_provider,
data_loader,
) = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, shard_size, local_batch_size, DummyAlphabetFLModel()
)
assertEqual(data_loader.num_total_users, num_users / shard_size)
assertEqual(data_loader.num_total_users, data_provider.num_train_users())
return data_provider, data_loader.train_batch_size
def _train_fl_model(
self,
sec_agg_enable: bool = False,
fixedpoint=None,
num_users: int = 26,
users_per_round: int = 26,
epochs: int = 1,
metrics_reporter=None,
report_train_metrics: bool = False,
report_train_metrics_after_aggregation: bool = False,
train_metrics_reported_per_epoch: int = 1,
):
"""
Trains an FL model, with or without Secure Aggregation
"""
# create dummy FL model on alphabet
global_fl_model = DummyAlphabetFLModel()
data_provider, _ = self._load_data(num_users)
world_size = 1
sync_trainer = create_sync_trainer(
model=global_fl_model,
local_lr=0.1,
users_per_round=users_per_round,
epochs=epochs,
user_epochs_per_round=1,
do_eval=True,
server_config=SyncSecAggServerConfig(
aggregation_type=AggregationType.AVERAGE, fixedpoint=fixedpoint
)
if sec_agg_enable
else SyncServerConfig(
aggregation_type=AggregationType.AVERAGE,
),
)
sync_trainer.cfg.train_metrics_reported_per_epoch = (
train_metrics_reported_per_epoch
)
sync_trainer.cfg.report_train_metrics = report_train_metrics
sync_trainer.cfg.report_train_metrics_after_aggregation = (
report_train_metrics_after_aggregation
)
if metrics_reporter is None:
metrics_reporter = FakeMetricReporter()
global_fl_model, _eval_metric = sync_trainer.train(
data_provider,
metrics_reporter,
num_total_users=data_provider.num_train_users(),
distributed_world_size=world_size,
)
return global_fl_model
def test_secagg_not_equivalent_no_secagg(self) -> None:
"""
Tests that training with secure aggregation produces a different
model than training without secure aggregation
"""
# First, call secure trainer
fixedpoint = FixedPointConfig(num_bytes=1, scaling_factor=1000)
torch.manual_seed(1)
fl_model_with_secure_trainer = self._train_fl_model(
sec_agg_enable=True,
fixedpoint=fixedpoint,
)
# Next, call sync trainer
torch.manual_seed(1)
fl_model_with_trainer = self._train_fl_model()
assertNotEqual(
FLModelParamUtils.get_mismatched_param(
[
fl_model_with_trainer.fl_get_module(),
fl_model_with_secure_trainer.fl_get_module(),
],
1e-6,
),
"",
)
def test_secagg_not_equivalent_no_secagg_large_range(self) -> None:
"""
Tests that training with secure aggregation produces a different
model than training without secure aggregation, even when the
range of the fixedpoint numbers is very large (and the scaling factor is 1).
We still get a different model because, even with a large
fixedpoint range, rounding occurs during conversion, e.g., 127.1
(float) becomes 127 (in fixedpoint), no matter how big the range is.
"""
# First, call secure trainer
fixedpoint = FixedPointConfig(num_bytes=7, scaling_factor=1)
torch.manual_seed(1)
fl_model_with_secure_trainer = self._train_fl_model(
sec_agg_enable=True,
fixedpoint=fixedpoint,
)
# Next, call sync trainer
torch.manual_seed(1)
fl_model_with_trainer = self._train_fl_model()
assertNotEqual(
FLModelParamUtils.get_mismatched_param(
[
fl_model_with_trainer.fl_get_module(),
fl_model_with_secure_trainer.fl_get_module(),
],
1e-6,
),
"",
)
def test_overflow_reporting(self) -> None:
"""
Tests whether overflow metrics are reported the expected number of times
"""
fixedpoint = FixedPointConfig(num_bytes=1, scaling_factor=100)
metrics_reporter = MetricsReporterWithMockedChannels()
self._train_fl_model(
sec_agg_enable=True,
fixedpoint=fixedpoint,
users_per_round=2,
epochs=3,
metrics_reporter=metrics_reporter,
report_train_metrics=True,
report_train_metrics_after_aggregation=True,
train_metrics_reported_per_epoch=26,
)
def count_word(result, word):
return str(result).count(word)
# We have 26 users, 2 users_per_round, which makes 13 rounds per epoch
# we also have 3 epochs. So we should get 39 reports for overflow.
# (train_metrics_reported_per_epoch is large so we don't miss a report)
assertEqual(
count_word(metrics_reporter.stdout_results, "overflow per round"),
39,
metrics_reporter.stdout_results,
)
# for tensorboard results, we write 39*2 results related to overflow,
# as we report each {convert, aggregate} overflow once
assertEqual(
count_word(metrics_reporter.tensorboard_results, "overflow per round"),
78,
metrics_reporter.tensorboard_results,
)
| canife-main | FLSim/flsim/secure_aggregation/tests/test_secagg_integration.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.channels.message import Message
from flsim.common.pytest_helper import assertEqual, assertRaises, assertTrue
from flsim.secure_aggregation.secure_aggregator import (
FixedPointConfig,
FixedPointConverter,
SecureAggregator,
utility_config_flatter,
)
from flsim.servers.sync_secagg_servers import SyncSecAggServerConfig
from flsim.utils import test_utils as utils
from flsim.utils.test_utils import (
SampleNet,
create_model_with_value,
model_parameters_equal_to_value,
)
from hydra.utils import instantiate
from omegaconf import OmegaConf
class TestSecureAggregator:
def _create_model(self, model_param_value):
"""
Creates a two-layer model
"""
fl_model = utils.SampleNet(utils.TwoFC())
fl_model.fl_get_module().fill_all(model_param_value)
return fl_model.fl_get_module()
def _create_server(self, model, fixedpoint, channel=None):
return instantiate(
SyncSecAggServerConfig(fixedpoint=fixedpoint),
global_model=model,
channel=channel,
)
def test_fixedpoint_init(self) -> None:
"""
Tests that FixedPointConverter init works correctly
"""
converter = FixedPointConverter(
**OmegaConf.structured(FixedPointConfig(num_bytes=2, scaling_factor=1000))
)
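# With num_bytes=2 the signed fixedpoint range should be
# [-2^15, 2^15 - 1] = [-32768, 32767], which the assertions below check.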
assertEqual(converter.max_value, 32767)
assertEqual(converter.min_value, -32768)
with assertRaises(ValueError):
converter = FixedPointConverter(
**OmegaConf.structured(
FixedPointConfig(num_bytes=9, scaling_factor=1000)
)
)
with assertRaises(ValueError):
converter = FixedPointConverter(
**OmegaConf.structured(
FixedPointConfig(num_bytes=3, scaling_factor=-100)
)
)
def test_floating_to_fixedpoint(self) -> None:
"""
Tests whether conversion from floating point to fixed point works
"""
# hence minValue = -32768, maxValue = 32767
converter = FixedPointConverter(
**OmegaConf.structured(FixedPointConfig(num_bytes=2, scaling_factor=100))
)
x = torch.tensor(17.42)
y = converter.to_fixedpoint(x)
# y = x * scaling_factor = 1742.0 ==> round to 1742
assertEqual(y, torch.tensor(1742))
x = torch.tensor(17.4298)
y = converter.to_fixedpoint(x)
# y = x * scaling_factor = 1742.98 ==> round to 1743
assertEqual(y, torch.tensor(1743))
x = torch.tensor(-2.34)
y = converter.to_fixedpoint(x)
# y = x * scaling_factor = -234.0 ==> round to -234
assertEqual(y, torch.tensor(-234))
x = torch.tensor(-2.3456)
y = converter.to_fixedpoint(x)
# y = x * scaling_factor = -234.56 ==> round to -235
assertEqual(y, torch.tensor(-235))
x = torch.tensor(-2.3416)
y = converter.to_fixedpoint(x)
# y = x * scaling_factor = -234.16 ==> round to -234
assertEqual(y, torch.tensor(-234))
x = torch.tensor(12345.0167)
y = converter.to_fixedpoint(x)
# y = x * scaling_factor = 1234501.67 ==> adjust to maxValue 32767
assertEqual(y, torch.tensor(32767))
x = torch.tensor(-327.69)
y = converter.to_fixedpoint(x)
# y = x * scaling_factor = -32769 ==> adjust to minValue -32768
assertEqual(y, torch.tensor(-32768))
def test_fixed_to_floating_point(self) -> None:
"""
Tests whether conversion from fixed point to floating point works
"""
converter = FixedPointConverter(
**OmegaConf.structured(FixedPointConfig(num_bytes=1, scaling_factor=85))
)
x = torch.tensor(85)
y = converter.to_float(x)
# y = x / scaling_factor = 1.0
assertTrue(torch.allclose(y, torch.tensor(1.0), rtol=1e-10))
x = torch.tensor(157)
y = converter.to_float(x)
# y = x / scaling_factor = 1.847058823529412
assertTrue(torch.allclose(y, torch.tensor(1.847058823529412), rtol=1e-10))
def test_params_floating_to_fixedpoint(self) -> None:
"""
Tests whether the parameters of a model are converted correctly
from floating point to fixed point
"""
# hence minValue = -32768, maxValue = 32767
config = FixedPointConfig(num_bytes=2, scaling_factor=100)
model = self._create_model(6.328)
secure_aggregator = SecureAggregator(utility_config_flatter(model, config))
secure_aggregator.params_to_fixedpoint(model)
mismatched = utils.model_parameters_equal_to_value(model, 633.0)
assertEqual(mismatched, "", mismatched)
model = self._create_model(-3.8345)
secure_aggregator = SecureAggregator(utility_config_flatter(model, config))
secure_aggregator.params_to_fixedpoint(model)
mismatched = utils.model_parameters_equal_to_value(model, -383.0)
assertEqual(mismatched, "", mismatched)
def test_params_floating_to_fixedpoint_different_config_for_layers(self) -> None:
"""
Tests whether the parameters of a model are converted correctly
from floating point to fixed point, when we have different
FixedPointConverter configs for different layers
"""
config_layer1 = FixedPointConfig(num_bytes=2, scaling_factor=100)
# hence minValue = -32768, maxValue = 32767
config_layer2 = FixedPointConfig(num_bytes=1, scaling_factor=10)
# hence minValue = -128, maxValue = 127
config = {}
config["fc1.weight"] = config_layer1
config["fc1.bias"] = config_layer1
config["fc2.weight"] = config_layer2
config["fc2.bias"] = config_layer2
model = self._create_model(5.4728)
secure_aggregator = SecureAggregator(config)
secure_aggregator.params_to_fixedpoint(model)
for name, p in model.named_parameters():
if name == "fc1.weight" or name == "fc1.bias":
# round 547.28 to 547
assertTrue(torch.allclose(p, torch.tensor(547.0), rtol=1e-10))
if name == "fc2.weight" or name == "fc2.bias":
# round 54.728 to 55
assertTrue(torch.allclose(p, torch.tensor(55.0), rtol=1e-10))
def test_error_raised_per_layer_config_not_set(self) -> None:
"""
Tests whether all layers have their corresponding configs, when
per layer fixed point converter is used.
"""
config_layer1 = FixedPointConfig(num_bytes=8, scaling_factor=10000)
config = {}
config["fc1.weight"] = config_layer1
config["fc1.bias"] = config_layer1
model = self._create_model(600)
secure_aggregator = SecureAggregator(config)
with assertRaises(ValueError):
secure_aggregator.params_to_float(model)
with assertRaises(ValueError):
secure_aggregator.params_to_fixedpoint(model)
def test_params_fixed_to_floating_point(self) -> None:
"""
Tests whether the parameters of a model are converted correctly
from fixed point to floating point
"""
config = FixedPointConfig(num_bytes=3, scaling_factor=40)
model = self._create_model(880.0)
secure_aggregator = SecureAggregator(utility_config_flatter(model, config))
secure_aggregator.params_to_float(model)
mismatched = utils.model_parameters_equal_to_value(model, 22.0)
assertEqual(mismatched, "", mismatched)
def test_params_fixed_to_floating_point_different_config_for_layers(self) -> None:
"""
Tests whether the parameters of a model are converted correctly
from fixed point to floating point, when we have different
FixedPointConverter configs for different layers
"""
config_layer1 = FixedPointConfig(num_bytes=2, scaling_factor=30)
config_layer2 = FixedPointConfig(num_bytes=1, scaling_factor=80)
config = {}
config["fc1.weight"] = config_layer1
config["fc1.bias"] = config_layer1
config["fc2.weight"] = config_layer2
config["fc2.bias"] = config_layer2
model = self._create_model(832.8)
secure_aggregator = SecureAggregator(config)
secure_aggregator.params_to_float(model)
for name, p in model.named_parameters():
if name == "fc1.weight" or name == "fc1.bias":
# 832.8 / 30 = 27.76
assertTrue(torch.allclose(p, torch.tensor(27.76), rtol=1e-10))
if name == "fc2.weight" or name == "fc2.bias":
# 832.8 / 80 = 10.41
assertTrue(torch.allclose(p, torch.tensor(10.41), rtol=1e-10))
def test_conversion_overflow(self) -> None:
"""
Tests whether the secure aggregation conversion overflow
counters get updated correctly
"""
model = self._create_model(70.0)
config = FixedPointConfig(num_bytes=1, scaling_factor=10)
# hence minValue = -128, maxValue = 127
secure_aggregator = SecureAggregator(utility_config_flatter(model, config))
for name, _ in model.named_parameters():
assertEqual(secure_aggregator.converters[name].get_convert_overflow(), 0)
secure_aggregator.params_to_fixedpoint(model)
# 70 * 10 = 700. Overflow occurs for all parameters
# model : --[fc1=(2,5)]--[fc2=(5,1)]--
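# fc1.weight has 2*5=10 parameters, fc1.bias has 5, fc2.weight has 5 and
# fc2.bias has 1, which matches the per-layer overflow counts asserted below.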
assertEqual(
secure_aggregator.converters["fc1.weight"].get_convert_overflow(), 10
)
assertEqual(secure_aggregator.converters["fc1.bias"].get_convert_overflow(), 5)
assertEqual(
secure_aggregator.converters["fc2.weight"].get_convert_overflow(), 5
)
assertEqual(secure_aggregator.converters["fc2.bias"].get_convert_overflow(), 1)
# test reset conversion overflow
for name, _ in model.named_parameters():
secure_aggregator.converters[name].get_convert_overflow(reset=True)
assertEqual(secure_aggregator.converters[name].get_convert_overflow(), 0)
def test_secure_aggregator_step_large_range(self) -> None:
"""
Tests whether secure aggregation operations work correctly
when the step() method is called, and when the num_bytes is
big, so we do not have a possible fixedpoint overflow
"""
scaling_factor = 10
num_bytes = 4
global_param = 8.0
client_param = 2.123
num_clients = 10
fixedpoint = FixedPointConfig(
num_bytes=num_bytes, scaling_factor=scaling_factor
)
server = self._create_server(
SampleNet(create_model_with_value(global_param)), fixedpoint=fixedpoint
)
clients = [create_model_with_value(client_param) for _ in range(num_clients)]
server.init_round()
for client in clients:
server.receive_update_from_client(Message(SampleNet(client), weight=1.0))
expected_param = float(round(global_param - client_param, ndigits=1))
server.step()
mismatched = model_parameters_equal_to_value(
server.global_model.fl_get_module(), expected_param
)
assertEqual(mismatched, "", mismatched)
def test_secure_aggregator_step_small_range(self) -> None:
"""
Tests whether secure aggregation operations work correctly
when the step() method is called, and when the num_bytes is
small so we have possible fixedpoint conversion overflows
"""
scaling_factor = 100
num_bytes = 1
global_param = 8
client_param = 2.123
num_clients = 10
fixedpoint = FixedPointConfig(
num_bytes=num_bytes, scaling_factor=scaling_factor
)
server = self._create_server(
SampleNet(create_model_with_value(global_param)), fixedpoint=fixedpoint
)
clients = [create_model_with_value(client_param) for _ in range(num_clients)]
server.init_round()
for client in clients:
server.receive_update_from_client(Message(SampleNet(client), weight=1.0))
# when a client update is converted to fixedpoint: 2.123 -> 212.3 -> 127.
# when adding `num_clients` updates, the sum would actually get smaller, i.e.
# 127+127+..+127=128-num_clients in bit representation when `num_bytes=1`.
# So, the update is (128-10)/10 = 11.8 (in fixedpoint). Convert to float is 0.118
expected_param = float(global_param - (0.118 * num_clients) / num_clients)
server.step()
mismatched = model_parameters_equal_to_value(
server.global_model.fl_get_module(), expected_param
)
assertEqual(mismatched, "", mismatched)
client_param = 0.2
clients = [create_model_with_value(client_param) for _ in range(num_clients)]
server.init_round()
for client in clients:
server.receive_update_from_client(Message(SampleNet(client), weight=1.0))
# when a client update is converted to fixedpoint: 0.2 -> 20.
# when adding `num_clients` updates, the sum would actually get smaller, i.e.
# 20+20+..+20=(200%128)=72 in bit representation when `num_bytes=1`.
# So, the update is (72)/10 = 7.2 (in fixedpoint). Convert to float is 0.072
new_expected_param = float(expected_param - (0.072 * num_clients) / num_clients)
server.step()
mismatched = model_parameters_equal_to_value(
server.global_model.fl_get_module(), new_expected_param
)
assertEqual(mismatched, "", mismatched)
def test_aggregation_overflow(self) -> None:
"""
Tests whether the secure aggregation overflow
counters are updated correctly during aggregation
"""
scaling_factor = 10
num_bytes = 1
global_param = 6
client_param = 2.8
num_clients = 10
fixedpoint = FixedPointConfig(
num_bytes=num_bytes, scaling_factor=scaling_factor
)
server = self._create_server(
SampleNet(create_model_with_value(global_param)), fixedpoint=fixedpoint
)
clients = [create_model_with_value(client_param) for _ in range(num_clients)]
server.init_round()
# model : --[fc1=(2,5)]--[fc2=(5,1)]--
assertEqual(
server._secure_aggregator.get_aggregate_overflow(),
0,
)
for client in clients:
server.receive_update_from_client(Message(SampleNet(client), weight=1.0))
num_params = sum(
p.numel()
for p in server.global_model.fl_get_module().parameters()
if p.requires_grad
)
# Client update in fixedpoint is 28. When adding `num_clients` updates,
# the sum would overflow, i.e. 28+28+..+28=(280%128)=24 in bit representation
# when `num_bytes=1`. Hence [280/128]=2 aggregation overflows occur for any parameter.
assertEqual(
server._secure_aggregator.get_aggregate_overflow(),
2 * num_params,
)
# test reset aggregation overflow
server._secure_aggregator.get_aggregate_overflow(reset=True)
assertEqual(
server._secure_aggregator.get_aggregate_overflow(),
0,
)
| canife-main | FLSim/flsim/secure_aggregation/tests/test_secure_aggregation.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# Cifar-10 dataset specific utils for use in the tutorials
import random
from typing import Any, Dict, Generator, Iterable, Iterator, List, Optional, Tuple
import torch
import torch.nn.functional as F
from flsim.data.data_provider import IFLDataProvider, IFLUserData
from flsim.data.data_sharder import FLDataSharder, SequentialSharder
from flsim.interfaces.data_loader import IFLDataLoader
from flsim.interfaces.metrics_reporter import Channel
from flsim.interfaces.model import IFLModel
from flsim.metrics_reporter.tensorboard_metrics_reporter import FLMetricsReporter
from flsim.utils.data.data_utils import batchify
from flsim.utils.simple_batch_metrics import FLBatchMetrics
from opacus.validators import ModuleValidator
from torch import nn
from torch.utils.data import Dataset
from torchvision import models, transforms
from torchvision.datasets.cifar import CIFAR10
from torchvision.datasets.vision import VisionDataset
from tqdm import tqdm
def collate_fn(batch: Tuple) -> Dict[str, Any]:
feature, label = batch
return {"features": feature, "labels": label}
class DataLoader(IFLDataLoader):
SEED = 2137
random.seed(SEED)
def __init__(
self,
train_dataset: VisionDataset,
eval_dataset: VisionDataset,
test_dataset: VisionDataset,
sharder: FLDataSharder,
batch_size: int,
drop_last: bool = False,
collate_fn=collate_fn,
):
assert batch_size > 0, "Batch size should be a positive integer."
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.test_dataset = test_dataset
self.batch_size = batch_size
self.drop_last = drop_last
self.sharder = sharder
self.collate_fn = collate_fn
def fl_train_set(self, **kwargs) -> Iterable[Dict[str, Generator]]:
rank = kwargs.get("rank", 0)
world_size = kwargs.get("world_size", 1)
yield from self._batchify(self.train_dataset, self.drop_last, world_size, rank)
def fl_eval_set(self, **kwargs) -> Iterable[Dict[str, Generator]]:
yield from self._batchify(self.eval_dataset, drop_last=False)
def fl_test_set(self, **kwargs) -> Iterable[Dict[str, Generator]]:
yield from self._batchify(self.test_dataset, drop_last=False)
def _batchify(
self,
dataset: VisionDataset,
drop_last: bool = False,
world_size: int = 1,
rank: int = 0,
) -> Generator[Dict[str, Generator], None, None]:
# pyre-fixme[16]: `VisionDataset` has no attribute `__iter__`.
data_rows: List[Dict[str, Any]] = [self.collate_fn(batch) for batch in dataset]
for _, (_, user_data) in enumerate(self.sharder.shard_rows(data_rows)):
batch = {}
keys = user_data[0].keys()
for key in keys:
attribute = {
key: batchify(
[row[key] for row in user_data],
self.batch_size,
drop_last,
)
}
batch = {**batch, **attribute}
yield batch
class UserData(IFLUserData):
def __init__(self, user_data: Dict[str, Generator], eval_split=0.0):
self._user_batches = []
self._num_batches = 0
self._num_examples = 0
for features, labels in zip(user_data["features"], user_data["labels"]):
self._num_batches += 1
self._num_examples += UserData.get_num_examples(labels)
self._user_batches.append(UserData.fl_training_batch(features, labels))
def train_data(self) -> Iterator[Dict[str, torch.Tensor]]:
"""
Iterator that yields the user's batches of data
"""
for batch in self._user_batches:
yield batch
def eval_data(self):
return []
def num_train_examples(self) -> int:
"""
Returns the number of examples
"""
return self._num_examples
def num_train_batches(self) -> int:
"""
Returns the number of batches
"""
return self._num_batches
def num_eval_batches(self):
return 0
def num_eval_examples(self):
return 0
@staticmethod
def get_num_examples(batch: List) -> int:
return len(batch)
@staticmethod
def fl_training_batch(
features: List[torch.Tensor], labels: List[float]
) -> Dict[str, torch.Tensor]:
return {"features": torch.stack(features), "labels": torch.Tensor(labels)}
class LEAFUserData(IFLUserData):
def __init__(self, user_data: Dict[str, Generator], eval_split):
self._user_batches = []
self._eval_batches = []
self._num_train_batches = 0
self._num_train_examples = 0
self._eval_split = eval_split
self._num_eval_batches = 0
self._num_eval_examples = 0
user_features = list(user_data["features"])
user_labels = list(user_data["labels"])
total = sum(len(batch) for batch in user_labels)
for features, labels in zip(user_features, user_labels):
if self._num_eval_examples < int(total * self._eval_split):
self._num_eval_batches += 1
self._num_eval_examples += LEAFUserData.get_num_examples(labels)
self._eval_batches.append(
LEAFUserData.fl_training_batch(features, labels)
)
else:
self._num_train_batches += 1
self._num_train_examples += LEAFUserData.get_num_examples(labels)
self._user_batches.append(
LEAFUserData.fl_training_batch(features, labels)
)
def train_data(self) -> Iterator[Dict[str, torch.Tensor]]:
"""
Iterator that yields the user's batches of data
"""
for batch in self._user_batches:
yield batch
def eval_data(self):
for batch in self._eval_batches:
yield batch
def num_train_batches(self):
return self._num_train_batches
def num_eval_batches(self):
return self._num_eval_batches
def num_train_examples(self) -> int:
"""
Returns the number of examples
"""
return self._num_train_examples
def num_eval_examples(self):
return self._num_eval_examples
@staticmethod
def get_num_examples(batch: List) -> int:
return len(batch)
@staticmethod
def fl_training_batch(
features: List[torch.Tensor], labels: List[float]
) -> Dict[str, torch.Tensor]:
return {"features": torch.stack(features), "labels": torch.Tensor(labels)}
class LEAFDataLoader(IFLDataLoader):
SEED = 2137
random.seed(SEED)
def __init__(
self,
train_dataset: Dataset,
eval_dataset: Dataset,
test_dataset: Dataset,
batch_size: int,
drop_last: bool = False,
):
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.test_dataset = test_dataset
self.batch_size = batch_size
self.drop_last = drop_last
def fl_train_set(self, **kwargs) -> Iterable[Dict[str, Generator]]:
yield from self._batchify(self.train_dataset, self.drop_last)
def fl_eval_set(self, **kwargs) -> Iterable[Dict[str, Generator]]:
yield from self._batchify(self.eval_dataset, drop_last=False)
def fl_test_set(self, **kwargs) -> Iterable[Dict[str, Generator]]:
yield from self._batchify(self.test_dataset, drop_last=False)
def _batchify(
self, dataset: Dataset, drop_last=False
) -> Generator[Dict[str, Generator], None, None]:
# pyre-fixme[16]: `Dataset` has no attribute `__iter__`.
for one_user_inputs, one_user_labels in dataset:
data = list(zip(one_user_inputs, one_user_labels))
random.shuffle(data)
one_user_inputs, one_user_labels = zip(*data)
batch = {
"features": batchify(one_user_inputs, self.batch_size, drop_last),
"labels": batchify(one_user_labels, self.batch_size, drop_last),
}
yield batch
# TODO: Could just be a CanaryDataProvider that inherits DataProvider and adds the methods num_test_users() and get_test_user()
class DataProvider(IFLDataProvider):
def __init__(self, data_loader):
self.data_loader = data_loader
self._train_users = self._create_fl_users(data_loader.fl_train_set(), eval_split=0.0)
self._eval_users = self._create_fl_users(data_loader.fl_eval_set(), eval_split=1.0)
self._test_users = self._create_fl_users(data_loader.fl_test_set(), eval_split=1.0)
def train_user_ids(self) -> List[int]:
return list(self._train_users.keys())
def num_train_users(self) -> int:
return len(self._train_users)
def num_test_users(self) -> int: # TODO: Canary modification
return len(self._test_users)
def get_test_user(self, user_index: int) -> IFLUserData: # TODO: Canary modification
if user_index in self._test_users:
return self._test_users[user_index]
else:
raise IndexError(
f"Index {user_index} is out of bound for list with len {self.num_test_users()}"
)
def get_train_user(self, user_index: int) -> IFLUserData:
if user_index in self._train_users:
return self._train_users[user_index]
else:
raise IndexError(
f"Index {user_index} is out of bound for list with len {self.num_users()}"
)
def train_users(self) -> Iterable[IFLUserData]:
for user_data in self._train_users.values():
yield user_data
def eval_users(self) -> Iterable[IFLUserData]:
for user_data in self._eval_users.values():
yield user_data
def test_users(self) -> Iterable[IFLUserData]:
for user_data in self._test_users.values():
yield user_data
def _create_fl_users(self, iterator: Iterator, eval_split) -> Dict[int, IFLUserData]:
return {
user_index: LEAFUserData(user_data, eval_split)
for user_index, user_data in tqdm(
enumerate(iterator), desc="Creating FL User", unit="user"
)
}
def build_data_provider(
local_batch_size, examples_per_user, image_size
) -> DataProvider:
# 1. Create training, eval, and test datasets like in non-federated learning.
transform = transforms.Compose(
[
transforms.Resize(image_size),
transforms.CenterCrop(image_size),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
]
)
train_dataset = CIFAR10(
root="./cifar10", train=True, download=True, transform=transform
)
test_dataset = CIFAR10(
root="./cifar10", train=False, download=True, transform=transform
)
# 2. Create a sharder, which maps samples in the training data to clients.
sharder = SequentialSharder(examples_per_shard=examples_per_user)
# 3. Shard and batchify training, eval, and test data.
fl_data_loader = DataLoader(
train_dataset=train_dataset,
eval_dataset=test_dataset,
test_dataset=test_dataset,
sharder=sharder,
batch_size=local_batch_size,
drop_last=False,
)
# 4. Wrap the data loader with a data provider.
# pyre-fixme[45]: Cannot instantiate abstract class `DataProvider`.
data_provider = DataProvider(fl_data_loader)
print(f"Clients in total: {data_provider.num_train_users()}")
return data_provider
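# Illustrative usage (a sketch; the argument values are assumptions, not taken
# from a config in this repo):
# data_provider = build_data_provider(
#     local_batch_size=32, examples_per_user=500, image_size=32
# )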
class Resnet18(nn.Module):
def __init__(self, num_classes, in_channels=3, pretrained=False, *args, **kwargs):
super().__init__()
self.backbone = models.resnet18(pretrained=pretrained)
# Replace batch norm with group norm
self.backbone = ModuleValidator.fix(self.backbone)
self.backbone.fc = nn.Linear(self.backbone.fc.in_features, num_classes)
self.backbone.conv1 = nn.Conv2d(in_channels, 64, kernel_size=7, stride=2, padding=3,bias=False)
def forward(self, x):
return self.backbone(x)
# class SimpleConvNet(nn.Module):
# def __init__(self, num_classes, *args, **kwargs):
# super().__init__()
# self.conv1 = nn.Conv2d(3, 6, 5)
# self.pool = nn.MaxPool2d(2, 2)
# self.conv2 = nn.Conv2d(6, 16, 5)
# self.fc1 = nn.Linear(16 * 5 * 5, 120)
# self.fc2 = nn.Linear(120, 84)
# self.fc3 = nn.Linear(84, num_classes)
# def forward(self, x):
# x = self.pool(F.relu(self.conv1(x)))
# x = self.pool(F.relu(self.conv2(x)))
# x = torch.flatten(x, 1) # flatten all dimensions except batch
# x = F.relu(self.fc1(x))
# x = F.relu(self.fc2(x))
# x = self.fc3(x)
# return x
class SimpleConvNet(nn.Module):
r"""
Simple CNN model following architecture from
https://github.com/TalwalkarLab/leaf/blob/master/models/celeba/cnn.py#L19
and https://arxiv.org/pdf/1903.03934.pdf
"""
def __init__(self, num_classes, in_channels=3, dropout_rate=0, *args, **kwargs):
super(SimpleConvNet, self).__init__()
self.out_channels = 32
self.stride = 1
self.padding = 2
self.layers = []
in_dim = in_channels
for _ in range(4):
self.layers.append(
nn.Conv2d(in_dim, self.out_channels, 3, self.stride, self.padding)
)
in_dim = self.out_channels
self.layers = nn.ModuleList(self.layers)
self.gn_relu = nn.Sequential(
nn.GroupNorm(self.out_channels, self.out_channels, affine=True),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
)
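# Note: (stride + padding)**2 = 9 matches the 3x3 spatial size that four
# conv(3x3 kernel, padding=2) + 2x2 max-pool blocks produce from a 32x32
# input; for other input sizes this formula would presumably need adjusting.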
num_features = (
self.out_channels
* (self.stride + self.padding)
* (self.stride + self.padding)
)
self.dropout = nn.Dropout(dropout_rate)
self.fc = nn.Linear(num_features, num_classes)
def forward(self, x):
for conv in self.layers:
x = self.gn_relu(conv(x))
x = x.view(-1, self.num_flat_features(x))
x = self.fc(self.dropout(x))
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
# class SimpleConvNet(nn.Module):
# def __init__(self, in_channels, num_classes, dropout_rate=0):
# super(SimpleConvNet, self).__init__()
# self.out_channels = 32
# self.stride = 1
# self.padding = 2
# self.layers = []
# in_dim = in_channels
# for _ in range(4):
# self.layers.append(
# nn.Conv2d(in_dim, self.out_channels, 3, self.stride, self.padding)
# )
# in_dim = self.out_channels
# self.layers = nn.ModuleList(self.layers)
# self.gn_relu = nn.Sequential(
# nn.GroupNorm(self.out_channels, self.out_channels, affine=True),
# nn.ReLU(),
# nn.MaxPool2d(kernel_size=2, stride=2),
# )
# num_features = (
# self.out_channels
# * (self.stride + self.padding)
# * (self.stride + self.padding)
# )
# self.dropout = nn.Dropout(dropout_rate)
# self.fc = nn.Linear(num_features, num_classes)
# def forward(self, x):
# for conv in self.layers:
# x = self.gn_relu(conv(x))
# x = x.view(-1, self.num_flat_features(x))
# x = self.fc(self.dropout(x))
# return x
# def num_flat_features(self, x):
# size = x.size()[1:] # all dimensions except the batch dimension
# num_features = 1
# for s in size:
# num_features *= s
# return num_features
class FLModel(IFLModel):
def __init__(self, model: nn.Module, device: Optional[str] = None):
self.model = model
self.device = device
def fl_forward(self, batch) -> FLBatchMetrics:
features = batch["features"] # [B, C, 28, 28]
batch_label = batch["labels"]
stacked_label = batch_label.view(-1).long().clone().detach()
if self.device is not None:
features = features.to(self.device)
output = self.model(features)
if self.device is not None:
output, batch_label, stacked_label = (
output.to(self.device),
batch_label.to(self.device),
stacked_label.to(self.device),
)
loss = F.cross_entropy(output, stacked_label)
num_examples = self.get_num_examples(batch)
output = output.detach().cpu()
stacked_label = stacked_label.detach().cpu()
del features
return FLBatchMetrics(
loss=loss,
num_examples=num_examples,
predictions=output,
targets=stacked_label,
model_inputs=[],
)
def fl_create_training_batch(self, **kwargs):
features = kwargs.get("features", None)
labels = kwargs.get("labels", None)
return UserData.fl_training_batch(features, labels)
def fl_get_module(self) -> nn.Module:
return self.model
def fl_cuda(self) -> None:
self.model = self.model.to(self.device) # pyre-ignore
def get_eval_metrics(self, batch) -> FLBatchMetrics:
with torch.no_grad():
return self.fl_forward(batch)
def get_num_examples(self, batch) -> int:
return UserData.get_num_examples(batch["labels"])
class MetricsReporter(FLMetricsReporter):
ACCURACY = "Accuracy"
def __init__(
self,
channels: List[Channel],
target_eval: float = 0.0,
window_size: int = 5,
average_type: str = "sma",
log_dir: Optional[str] = None,
):
super().__init__(channels, log_dir)
self.set_summary_writer(log_dir=log_dir)
self._round_to_target = float(1e10)
def compare_metrics(self, eval_metrics, best_metrics):
print(f"Current eval accuracy: {eval_metrics}%, Best so far: {best_metrics}%")
if best_metrics is None:
return True
current_accuracy = eval_metrics.get(self.ACCURACY, float("-inf"))
best_accuracy = best_metrics.get(self.ACCURACY, float("-inf"))
return current_accuracy > best_accuracy
def compute_scores(self) -> Dict[str, Any]:
# compute accuracy
correct = torch.Tensor([0])
for i in range(len(self.predictions_list)):
all_preds = self.predictions_list[i]
pred = all_preds.data.max(1, keepdim=True)[1]
assert pred.device == self.targets_list[i].device, (
f"Pred and targets moved to different devices: "
f"pred >> {pred.device} vs. targets >> {self.targets_list[i].device}"
)
if i == 0:
correct = correct.to(pred.device)
correct += pred.eq(self.targets_list[i].data.view_as(pred)).sum()
# total number of data
total = sum(len(batch_targets) for batch_targets in self.targets_list)
accuracy = 100.0 * correct.item() / total
return {self.ACCURACY: accuracy}
def create_eval_metrics(
self, scores: Dict[str, Any], total_loss: float, **kwargs
) -> Any:
accuracy = scores[self.ACCURACY]
return {self.ACCURACY: accuracy}
class LEAFDataProvider(IFLDataProvider):
def __init__(self, data_loader):
self.data_loader = data_loader
self._train_users = self._create_fl_users(data_loader.fl_train_set(), eval_split=0.0)
self._eval_users = self._create_fl_users(data_loader.fl_eval_set(), eval_split=1.0)
self._test_users = self._create_fl_users(data_loader.fl_test_set(), eval_split=1.0)
def train_user_ids(self) -> List[int]:
return list(self._train_users.keys())
def num_train_users(self) -> int:
return len(self._train_users)
def num_test_users(self) -> int: # TODO: Canary modification
return len(self._test_users)
def get_test_user(self, user_index: int) -> IFLUserData: # TODO: Canary modification
if user_index in self._test_users:
return self._test_users[user_index]
else:
raise IndexError(
f"Index {user_index} is out of bound for list with len {self.num_test_users()}"
)
def get_train_user(self, user_index: int) -> IFLUserData:
if user_index in self._train_users:
return self._train_users[user_index]
else:
raise IndexError(
f"Index {user_index} is out of bound for list with len {self.num_train_users()}"
)
def train_users(self) -> Iterable[IFLUserData]:
for user_data in self._train_users.values():
yield user_data
def eval_users(self) -> Iterable[IFLUserData]:
for user_data in self._eval_users.values():
yield user_data
def test_users(self) -> Iterable[IFLUserData]:
for user_data in self._test_users.values():
yield user_data
def _create_fl_users(self, iterator: Iterator, eval_split) -> Dict[int, IFLUserData]:
return {
user_index: LEAFUserData(user_data, eval_split)
for user_index, user_data in tqdm(
enumerate(iterator), desc="Creating FL User", unit="user"
)
}
| canife-main | FLSim/flsim/utils/example_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Dict, List, NamedTuple, Optional, Tuple, Union
from unittest.mock import MagicMock
import numpy as np
import torch
import torch.nn as nn
from flsim.common.logger import Logger
from flsim.data.data_provider import IFLUserData
from flsim.interfaces.batch_metrics import IFLBatchMetrics
from flsim.interfaces.metrics_reporter import (
Channel,
IFLMetricsReporter,
Metric,
TrainingStage,
)
from flsim.interfaces.model import IFLModel
from flsim.metrics_reporter.tensorboard_metrics_reporter import FLMetricsReporter
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.simple_batch_metrics import FLBatchMetrics
from torch.utils.tensorboard import SummaryWriter
class DatasetFromList:
"""
Simple dataset from a list of tuples.
Each item in the outer list will be a batch and each
batch itself is a tuple of two lists, the raw_batch
and the batch.
"""
def __init__(self, list_dataset):
self.ds = list_dataset
def __len__(self):
_, batch = self.ds[0]
return len(self.ds) * len(batch)
def __iter__(self):
return iter(self.ds)
class DummyUserData(IFLUserData):
def __init__(self, data, model, from_data_provider=False):
self.data = data
self._num_examples: int = 0
self._num_batches: int = 0
self.model = model
self.from_data_provider = from_data_provider
for batch in self.data:
self._num_examples += (
batch["label"].shape[0] if self.from_data_provider else batch.shape[0]
)
self._num_batches += 1
def num_train_batches(self):
return self._num_batches
def num_train_examples(self):
return self._num_examples
def train_data(self):
for batch in self.data:
yield self.model.fl_create_training_batch(batch=batch)
def eval_data(self):
for batch in self.data:
yield self.model.fl_create_training_batch(batch=batch)
def num_eval_batches(self):
return self._num_batches
def num_eval_examples(self):
return self._num_examples
class Quadratic1D(nn.Module):
"""
a toy optimization example:
min f(x) = 100 x^2 - 1
the minimum is at x = 0.0; x is initialized to 1.0.
"""
def __init__(self):
super(Quadratic1D, self).__init__()
self.x = nn.Parameter(torch.ones(1))
self.y = torch.tensor([1.0])
def forward(self):
return 100 * torch.square(self.x) - self.y
class MockQuadratic1DFL(IFLModel):
"""
a dummy IFL wrapper for Quadratic1D
"""
def __init__(self, model):
self.model = model
def fl_forward(self, data=None):
loss = self.model()
return FLBatchMetrics(
loss=loss, num_examples=1, predictions=None, targets=None, model_inputs=None
)
def fl_create_training_batch(self):
pass
def fl_cuda(self):
pass
def fl_get_module(self):
return self.model
def get_eval_metrics(self):
pass
def get_num_examples(self):
pass
class TwoFC(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 5)
self.fc2 = nn.Linear(5, 1)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
return x
def fill_all(self, value):
def fill(layer):
if type(layer) == nn.Linear:
layer.bias.data.fill_(value)
layer.weight.data.fill_(value)
self.apply(fill)
class Linear(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(2, 1)
def forward(self, x):
x = self.fc1(x)
return x
def fill_all(self, value):
def fill(layer):
if type(layer) == nn.Linear:
layer.bias.data.fill_(value)
layer.weight.data.fill_(value)
self.apply(fill)
class Metrics(IFLBatchMetrics):
def __init__(self, num_examples, loss):
self._num_examples = num_examples
self._loss = loss
@property
def loss(self) -> torch.Tensor:
return self._loss
@property
def num_examples(self) -> int:
return self._num_examples
@property
def predictions(self):
pass
@property
def targets(self):
pass
@property
def model_inputs(self):
pass
class FakeMetricReporter(IFLMetricsReporter):
def add_batch_metrics(self, metrics: IFLBatchMetrics) -> None:
pass
def aggregate(self, one_user_metrics: IFLMetricsReporter):
pass
def report_metrics(
self,
reset: bool,
stage: TrainingStage,
extra_metrics: Optional[List[Metric]] = None,
**kwargs,
) -> Tuple[Any, bool]:
return (None, False)
def reset(self):
pass
def compare_metrics(self, eval_metrics, best_metrics):
pass
class SimpleMetricReporter(FakeMetricReporter):
def __init__(self):
self.batch_metrics = []
def add_batch_metrics(self, metrics: IFLBatchMetrics) -> None:
self.batch_metrics.append(metrics)
class SampleNet(IFLModel):
def __init__(self, model):
self.sample_nn = model
self._num_examples = None
self._out = None
def fl_forward(self, batch):
y = self.sample_nn(batch)
return Metrics(len(batch), y.mean())
def fl_create_training_batch(self, batch=None, **kwargs):
return batch
def fl_get_module(self):
return self.sample_nn
def fl_cuda(self):
pass
def get_eval_metrics(self, batch):
with torch.no_grad():
return self.fl_forward(batch)
def get_num_examples(self, batch):
return len(batch)
class SampleNetHive(SampleNet):
def __init__(self, value=None):
self.sample_nn = TwoFC()
if value is not None:
self.sample_nn.fill_all(value)
self._num_examples = None
self._out = None
def fl_forward(self, batch):
x, y = batch["user_n"], batch["label"]
x = x.flatten().repeat(2).float()
y = y.flatten().float()
preds = self.sample_nn(x)
loss = nn.BCEWithLogitsLoss()(preds, y)
return Metrics(self.get_num_examples(batch), loss=loss)
def get_num_examples(self, batch):
return len(batch["label"])
def verify_models_equivalent_after_training(
model1: Union[nn.Module, IFLModel],
model2: Union[nn.Module, IFLModel],
model_init: Optional[Union[nn.Module, IFLModel]] = None,
rel_epsilon: Optional[float] = None,
abs_epsilon: Optional[float] = None,
) -> str:
"""This function accepts either nn.Module or IFLModel and checks that:
a) Model training did something:
model1 & model2 are different from model_init
b) model1 and model2 have the same parameters
Return value: str. "" if both a) and b) are satisfied;
otherwise, an error message with the SAD (sum of absolute differences between
the mismatched model parameters).
"""
model1 = model1.fl_get_module() if isinstance(model1, IFLModel) else model1
model2 = model2.fl_get_module() if isinstance(model2, IFLModel) else model2
if model_init is not None:
model_init = (
model_init.fl_get_module()
if isinstance(model_init, IFLModel)
else model_init
)
# Ensure that training actually did something to model 1.
if (
FLModelParamUtils.get_mismatched_param(
[model1, model_init], rel_epsilon=rel_epsilon, abs_epsilon=abs_epsilon
)
== ""
):
return "Model 1 training did nothing"
# Ensure that training actually did something to model 2.
if (
FLModelParamUtils.get_mismatched_param(
[model2, model_init], rel_epsilon=rel_epsilon, abs_epsilon=abs_epsilon
)
== ""
):
return "Model 2 training did nothing"
# check models identical under both configs
mismatched_param = FLModelParamUtils.get_mismatched_param(
[model1, model2], rel_epsilon=rel_epsilon, abs_epsilon=abs_epsilon
)
if mismatched_param != "":
summed_absolute_difference = (
(
model1.state_dict()[mismatched_param]
- model2.state_dict()[mismatched_param]
)
.abs()
.sum()
)
return (
f"Model 1, Model 2 mismatch. Param: {mismatched_param},"
f" Summed Absolute Difference={summed_absolute_difference}"
)
else:
return ""
def model_parameters_equal_to_value(model, value) -> str:
if isinstance(model, IFLModel):
model = model.fl_get_module()
for n, p in model.named_parameters():
if not torch.allclose(p.float(), torch.tensor(value)):
summed_absolute_difference = (p - torch.tensor(value)).abs().sum()
return (
n
+ f"{p} did not match with {value}: Summed Absolute Difference={summed_absolute_difference}"
)
return ""
def check_inherit_logging_level(obj: Any, level: int) -> bool:
Logger.set_logging_level(level)
return obj.logger.getEffectiveLevel() == level
class MockRecord(NamedTuple):
tag: str = ""
value: Union[float, Dict[str, float]] = 0.0
global_step: int = 0
walltime: float = 0
class MetricsReporterWithMockedChannels(FLMetricsReporter):
"""Simulates an FL reporter with STDOUT and Tensorboard channels
STDOUT and Tensorboard channels are mocked
"""
def __init__(self):
super().__init__([Channel.STDOUT, Channel.TENSORBOARD])
self.tensorboard_results: List[MockRecord] = []
self.stdout_results: List[MockRecord] = []
def add_scalar(tag, scalar_value, global_step=None, walltime=None):
self.tensorboard_results.append(
MockRecord(tag, scalar_value, global_step, walltime)
)
return self
def add_scalars(main_tag, tag_scalar_dict, global_step=None, walltime=None):
for tag, value in tag_scalar_dict.items():
self.tensorboard_results.append(
MockRecord(f"{main_tag}/{tag}", value, global_step, walltime)
)
def printer(*args):
self.stdout_results.append(tuple(arg for arg in args))
SummaryWriter.add_scalar = MagicMock(side_effect=add_scalar)
SummaryWriter.add_scalars = MagicMock(side_effect=add_scalars)
self.print = MagicMock(side_effect=printer)
def compare_metrics(self, eval_metrics, best_metrics) -> bool:
return True
def compute_scores(self) -> Dict[str, Any]:
return {}
def create_eval_metrics(
self, scores: Dict[str, Any], total_loss: float, **kwargs
) -> Any:
return None
def set_summary_writer(self, log_dir: Optional[str]):
super().set_summary_writer("/tmp/")
class RandomEvalMetricsReporter(IFLMetricsReporter):
"""This metrics reporter is useful for unit testing. It does four things:
a) When report_metrics(stage=Eval) is called, it produced a random
eval result
b) It keeps track of the best eval result produced, and the model that
produced this result.
c) When report_metrics(stage=Test) is called, it returns the best eval result
Why? This is useful in testing the trainer.train() function, which returns
test eval results.
d) It can be queried to return the best_model, and the value of
the best result
"""
def __init__(self):
self._best_eval_result: float = -1
self._best_eval_model: IFLModel = None
def add_batch_metrics(self, metrics: IFLBatchMetrics) -> None:
pass
def aggregate(self, one_user_metrics: "IFLMetricsReporter"):
pass
def report_metrics(
self,
reset: bool,
stage: TrainingStage,
extra_metrics: Optional[List[Metric]] = None,
**kwargs,
) -> Tuple[Any, bool]:
if stage != TrainingStage.EVAL:
return self._best_eval_result, False
assert "model" in kwargs.keys(), f"Did not find model in kwargs: {kwargs}"
model: IFLModel = kwargs.get("model", None)
eval_result = np.random.random_sample()
if eval_result > self._best_eval_result:
print(
f"MetricReporter current_eval:{eval_result}, best_eval: {self._best_eval_result}"
)
self._best_eval_model = FLModelParamUtils.clone(model)
self._best_eval_result = eval_result
return (eval_result, True)
else:
return (eval_result, False)
def reset(self):
pass
@property
def best_eval_result(self) -> float:
return self._best_eval_result
@property
def best_eval_model(self) -> IFLModel:
return self._best_eval_model
def create_model_with_value(value) -> nn.Module:
model = TwoFC()
model.fill_all(value)
return model
| canife-main | FLSim/flsim/utils/test_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/utils/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import torch
from flsim.interfaces.model import IFLModel
def FloatTensor(cuda_enabled: bool, *args):
if cuda_enabled:
return torch.cuda.FloatTensor(*args)
else:
return torch.FloatTensor(*args)
def tensor(data, dtype, cuda_enabled: bool):
return torch.tensor(data, dtype=dtype, device=device(cuda_enabled))
def device(cuda_enabled: bool) -> str:
return "cuda:{}".format(torch.cuda.current_device()) if cuda_enabled else "cpu"
class ICudaStateManager(abc.ABC):
"""Sends model from CPU-->GPU, and from GPU-->CPU,
if required, at 3 different times:
a) When trainer is initialized
b) Before training or eval is done
c) After training or eval is done
Centralizes all CPU-->GPU/GPU-->CPU moves
"""
@abc.abstractmethod
def on_trainer_init(self, model: IFLModel):
pass
@abc.abstractmethod
def before_train_or_eval(self, model: IFLModel):
pass
@abc.abstractmethod
def after_train_or_eval(self, model: IFLModel):
pass
class NoopCudaStateManager(ICudaStateManager):
def __init__(self):
pass
def on_trainer_init(self, model: IFLModel):
pass
def before_train_or_eval(self, model: IFLModel):
pass
def after_train_or_eval(self, model: IFLModel):
pass
class CudaTransferMinimizer(ICudaStateManager):
"""Minimize CPU<-->GPU memory bandwidth,
at the cost of increasing GPU memory consumption
Model is moved to GPU right when trainer is initialized.
All model copies stay on the GPU.
E.g.: when the sync trainer creates clients, all of them
get models that are already on the GPU.
GPU<-->GPU memory bandwidth is very high, about two orders of magnitude
larger than CPU memory<-->GPU memory bandwidth.
THIS SHOULD BE THE DEFAULT UNLESS RUNNING OUT OF GPU MEMORY
"""
def __init__(self, cuda_enabled: bool):
self._cuda_enabled = cuda_enabled
def on_trainer_init(self, model: IFLModel):
"""When trainer is initialized, we move the model to GPU
Any furter copies of the model will stay on GPU
"""
if self._cuda_enabled:
model.fl_cuda()
def before_train_or_eval(self, model: IFLModel):
pass
def after_train_or_eval(self, model: IFLModel):
pass
class GPUMemoryMinimizer(ICudaStateManager):
"""Minimize GPU memory at the cost of increasing
CPU-->GPU and GPU-->CPU memory bandwidth consumption
Model is moved to GPU right before train/eval is called,
and moved out of GPU right after.
All operations other than training/eval happen on CPU.
E.g: global model aggregation happens on CPU
DONT USE THIS UNLESS YOU ARE RUNNING OUT OF GPU MEMORY
"""
def __init__(self, cuda_enabled: bool):
self._cuda_enabled = cuda_enabled
def on_trainer_init(self, model: IFLModel):
if self._cuda_enabled:
model.fl_get_module().to("cpu")
def before_train_or_eval(self, model: IFLModel):
if self._cuda_enabled:
model.fl_cuda()
def after_train_or_eval(self, model: IFLModel):
if self._cuda_enabled:
model.fl_get_module().to("cpu")
# default manager, does nothing
DEFAULT_CUDA_MANAGER: ICudaStateManager = NoopCudaStateManager()
# DEFAULT_CUDA_MANAGER: ICudaStateManager = GPUMemoryMinimizer() | canife-main | FLSim/flsim/utils/cuda.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
class ProcessState:
_instance: Optional["ProcessState"] = None
@staticmethod
def getInstance(**kwargs):
"""kwargs should specify:
rank: int, workflow_name: Optional[str], chronos_id: Optional[int]
"""
if ProcessState._instance is None:
ProcessState(**kwargs)
return ProcessState._instance
def __init__(self, rank: int):
"""
Virtually private constructor.
Handles logic for Singleton pattern.
"""
self._rank = rank
if ProcessState._instance is not None:
raise RuntimeError(
"ProcessState is a singleton. Cannot instantiate multiple times!"
)
else:
ProcessState._instance = self
@property
def rank(self):
return self._rank
@rank.setter
def rank(self, value):
if self._rank is not None:
raise RuntimeError("Shouldn't change 'rank' after initialized")
self._rank = value
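# Illustrative usage (a minimal sketch, using only the `rank` argument accepted
# by __init__):
# state = ProcessState.getInstance(rank=0)
# assert state.rank == 0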
| canife-main | FLSim/flsim/utils/process_state.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any
import torch
from flsim.interfaces.batch_metrics import IFLBatchMetrics
class FLBatchMetrics(IFLBatchMetrics):
def __init__(
self,
*,
loss: torch.Tensor,
num_examples: int,
predictions: torch.Tensor,
targets: torch.Tensor,
model_inputs: Any,
) -> None:
self._loss = loss
self._num_examples = num_examples
self._predictions = predictions
self._targets = targets
self._model_inputs = model_inputs
@property
def loss(self) -> torch.Tensor:
return self._loss
@property
def num_examples(self) -> int:
return self._num_examples
@property
def predictions(self) -> torch.Tensor:
return self._predictions
@property
def targets(self) -> torch.Tensor:
return self._targets
@property
def model_inputs(self) -> Any:
return self._model_inputs
| canife-main | FLSim/flsim/utils/simple_batch_metrics.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from collections import OrderedDict
from copy import deepcopy
from typing import Optional
import torch
from flsim.interfaces.model import IFLModel
# there are some pyre errors here related to torch.Tensor operations, please ignore
# them as they work fine despite the error saying otherwise.
class CountSketch:
"""
Implementation of the CountSketch data structure described here:
http://dimacs.rutgers.edu/~graham/ssbd/ssbd3.pdf
CountSketch is a data structure that can be used to compress a series of
numbers and then decompress them using a fixed-size matrix and a set
of pairwise independent hash functions.
This version is designed to compress IFLModels, where each weight
gets a unique id (the number of weights added before it), and the
model's parameter names and weight tensor shapes are stored for
decompression.
"""
def __init__(
self,
width: int = 10000,
depth: int = 11,
prime: int = 2**31 - 1,
independence: int = 2,
h: Optional[torch.Tensor] = None,
g: Optional[torch.Tensor] = None,
device="cpu",
):
self.width: int = width
self.depth: int = depth
self.prime = prime
self.buckets = torch.zeros((self.depth, self.width), device=device)
self.independence = 4 if independence == 4 else 2
self.device = device
if h is None:
self.h = torch.randint(
low=1,
high=self.prime,
size=(self.depth, self.independence),
device=self.device,
)
else:
if list(h.size()) != [self.depth, self.independence]:
raise AssertionError(
f"Hash function h should be of size {[self.depth, self.independence]}, but got {list(h.size())}"
)
self.h = h.to(device=self.device)
if g is None:
self.g = torch.randint(
low=1,
high=self.prime,
size=(self.depth, self.independence),
device=self.device,
)
else:
if list(g.size()) != [self.depth, self.independence]:
raise AssertionError(
f"Hash function g should be of size {[self.depth, self.independence]}, but got {list(g.size())}"
)
self.g = g.to(device=self.device)
self.n = 0
self.param_sizes = OrderedDict()
def compute_hash_vector(self, x: torch.Tensor, hash: torch.Tensor) -> torch.Tensor:
"""
Computes the hash of a vector x that represents the ids using the hash functions in the parameter hash.
Args:
x: the vector of ids, the expected shape is [num_ids]
hash: the set of the hash functions to use. Expected shape is [self.depth, self.independence]
Returns:
Hash values as a torch.Tensor in the size [num_ids, num_hash]
"""
def pairwise(x: torch.Tensor, a: torch.Tensor, b: torch.Tensor):
"""
Perform a pairwise hash function that takes advantage of the broadcasting
rules of tensor operations.
Args:
x: the vector of ids, expected shape is [num_ids]
a: the coefficients used in the hash function,
expected shape is [num_ids, self.depth] or [self.depth]
b: the offset used in the hash function,
expected shape is [num_ids, self.depth] or [self.depth]
Returns:
A torch.Tensor of size [num_ids, self.depth] that represents the hashes.
"""
# pyre-ignore
return (a * x.unsqueeze(-1) + b) % self.prime
# should actually work for any independence, but need more testing
# alternative way to do with list comprehension:
# torch.cat([x ** i for i in range(self.independence)].view(1, -1), dim = 0)
if self.independence == 4:
a = hash[:, 0]
for i in range(1, hash.size(1)):
b = hash[:, i]
a = pairwise(x, a, b)
return a
return pairwise(x, hash[:, 0], hash[:, 1])
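# h_hash maps ids to bucket indices in [0, width); g_hash maps ids to
# pseudo-random signs in {-1, +1}.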
def h_hash(self, x: torch.Tensor) -> torch.Tensor:
# pyre-ignore
return self.compute_hash_vector(x, self.h) % self.width
def g_hash(self, x: torch.Tensor) -> torch.Tensor:
# pyre-ignore
return 2 * (self.compute_hash_vector(x, self.g) % 2) - 1
def update(self, x: torch.Tensor, weights: torch.Tensor) -> None:
self.n += x.numel()
idx = self.h_hash(x) # [num_id, self.depth]
sign = self.g_hash(x) # [num_id, self.depth]
signed_weights = (sign * weights.view(-1, 1)).t() # [self.depth, num_id]
# offset is used because put_ treats self as a 1D tensor.
offset = (
torch.arange(0, self.depth, device=self.device).view(-1, 1) * self.width
)
# use put_ instead of index_put_ due to better performance on GPU (100x)
# see N795398 for empirical results.
self.buckets.put_(idx.t() + offset, signed_weights, accumulate=True)
def query(self, x: torch.Tensor) -> torch.Tensor:
idx = self.h_hash(x)
sign = self.g_hash(x)
sketched_weights = self.buckets[range(0, self.depth), idx]
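# Taking the median across the `depth` independent, sign-corrected estimates
# is the standard CountSketch point-query estimator.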
return torch.median(sketched_weights * sign, dim=1)[0]
def sketch_state_dict(self, state_dict: OrderedDict):
"""
Sketch a state_dict and all its weights while also resetting params and n.
Args:
state_dict: the dictionary containing parameter names and weights, usually
obtained by calling state_dict() on a nn.Module
"""
self.reset_buckets()
self.param_sizes = OrderedDict()
self.n = 0
for name, param in state_dict.items():
self.param_sizes[name] = param.size()
self.update(
torch.arange(self.n, self.n + param.numel(), device=self.device),
param.view(-1),
)
def reset_buckets(self):
self.buckets.fill_(0)
def set_params(self, state_dict: OrderedDict):
"""
Record the parameter names and sizes of a state_dict (and the total number of weights n) without sketching any values; also resets the buckets.
Args:
state_dict: the dictionary containing parameter names and weights, usually
obtained by calling state_dict() on a nn.Module
"""
self.reset_buckets()
self.param_sizes = OrderedDict()
self.n = 0
for name, param in state_dict.items():
self.param_sizes[name] = param.size()
self.n += param.numel()
def sketch_model(self, model: IFLModel) -> None:
# pyre-fixme[6]: Expected `OrderedDict[typing.Any, typing.Any]` for 1st
# param but got `Dict[str, typing.Any]`.
self.sketch_state_dict(model.fl_get_module().state_dict())
def unsketch_model(self, k: int = -1) -> OrderedDict:
"""
        Unsketches the model by reconstructing the OrderedDict of the
        parameters and their weights from self.buckets and self.param_sizes.
        Supports keeping only the top_k parameters with the largest weights
        and zeroing out all the other weights.
"""
if k == -1:
k = self.n
elif k > self.n:
raise AssertionError(
"Cannot unsketch with a top_k greater than the number of parameters"
)
weights = self.query(torch.arange(0, self.n, device=self.device))
top, indices = torch.topk(torch.abs(weights), k, sorted=True, largest=True)
mask = torch.zeros_like(weights, device=weights.device)
mask[indices] = 1
weights[mask != 1] = 0
count = 0
state_dict = OrderedDict()
for param_name, param_size in self.param_sizes.items():
state_dict[param_name] = weights[count : count + param_size.numel()].view(
param_size
)
count += param_size.numel()
return state_dict
def linear_comb(self, wt1: float, cs, wt2: float):
self.buckets *= wt1
self.buckets += cs.buckets * wt2
# from N778597
def approx_L1(self):
estimates = torch.sum(torch.abs(self.buckets), dim=1)
return torch.median(estimates)
def approx_L2(self):
        estimates = torch.sum(self.buckets**2, dim=1)  # per-row estimates of the squared L2 norm
return torch.sqrt(torch.median(estimates))
def get_size_in_bytes(self):
"""
Calculate CountSketch size in bytes.
"""
return self.buckets.numel() * self.buckets.element_size()
def to(self, device):
"""
Moves the CountSketch to device. Up to the user to make sure the device is valid.
"""
self.buckets = self.buckets.to(device)
self.h = self.h.to(device)
self.g = self.g.to(device)
self.device = device
def clone_count_sketch(copy: CountSketch) -> CountSketch:
cs = CountSketch(
copy.width,
copy.depth,
copy.prime,
copy.independence,
copy.h,
copy.g,
copy.device,
)
cs.param_sizes = deepcopy(copy.param_sizes)
cs.buckets = copy.buckets.detach().clone()
return cs
def linear_comb_count_sketch(
cs1: CountSketch, wt1: float, cs2: CountSketch, wt2: float
) -> CountSketch:
cs = clone_count_sketch(cs1)
cs.buckets = wt1 * cs.buckets + wt2 * cs2.buckets
return cs
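# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition, not part of the original module): how a
# CountSketch might compress and approximately recover a tiny model's weights.
# The positional constructor arguments (width, depth, prime, independence, h, g,
# device) are inferred from clone_count_sketch above; exact defaults and the
# quality of the recovery are assumptions, not guarantees.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch.nn as nn
    tiny_model = nn.Linear(4, 2, bias=False)  # 8 parameters in total
    cs = CountSketch(1024, 7, 2**31 - 1, 2, None, None, "cpu")
    cs.sketch_state_dict(tiny_model.state_dict())  # hash all 8 weights into the buckets
    recovered = cs.unsketch_model()  # median-of-signed-buckets estimate per weight
    print(recovered["weight"].shape)  # torch.Size([2, 4])
    print("sketch size in bytes:", cs.get_size_in_bytes())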
| canife-main | FLSim/flsim/utils/count_sketch.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from flsim.interfaces.model import IFLModel
from flsim.utils.simple_batch_metrics import FLBatchMetrics
from torch import Tensor
class TestDataSetting:
NUM_SHARDS: int = 10
SHARDING_COL_INDEX: int = 2
TEXT_COL_NAME: str = "text"
LABEL_COL_NAME: str = "label"
USER_ID_COL_NAME: str = "user_id"
class SimpleLinearNet(nn.Module):
def __init__(self, D_in: int, D_out: int) -> None:
"""
We create a simple linear model, with input dimension D_in and output dimension D_out
"""
super(SimpleLinearNet, self).__init__()
self.linear = nn.Linear(in_features=D_in, out_features=D_out, bias=False)
def forward(self, x) -> Tensor:
return self.linear(x)
class LinearFLModel(IFLModel):
def __init__(
self, D_in: int = 40, D_out: int = 1, use_cuda_if_available: bool = False
) -> None:
"""
create a sample dummy FL model for alphabet dataset
"""
self.model = SimpleLinearNet(D_in, D_out)
self.use_cuda_if_available = use_cuda_if_available
def fl_forward(self, batch) -> FLBatchMetrics:
text = batch[TestDataSetting.TEXT_COL_NAME]
batch_label = batch[TestDataSetting.LABEL_COL_NAME]
# stacked_label = torch.tensor(batch_label.view(-1), dtype=torch.long)
stacked_label = batch_label
if self.use_cuda_if_available:
text = text.cuda()
out = self.model(text)
if self.use_cuda_if_available:
out, batch_label, stacked_label = (
out.cuda(),
batch[TestDataSetting.LABEL_COL_NAME].cuda(),
stacked_label.cuda(),
)
loss = F.mse_loss(out, stacked_label)
# loss = F.mse_loss(out, batch_label)
num_examples = self.get_num_examples(batch)
return FLBatchMetrics(
loss=loss,
num_examples=num_examples,
predictions=out,
targets=batch_label,
model_inputs=batch,
)
def fl_create_training_batch(self, **kwargs) -> None:
return kwargs.get("batch", None)
def fl_get_module(self) -> nn.Module:
return self.model
def fl_cuda(self) -> None:
self.model = self.model.cuda()
def get_eval_metrics(self, batch) -> FLBatchMetrics:
with torch.no_grad():
return self.fl_forward(batch)
def get_num_examples(self, batch) -> int:
return len(batch[TestDataSetting.LABEL_COL_NAME])
class TwoLayerNet(nn.Module):
def __init__(self, D_in: int, H: int, D_out: int) -> None:
"""
In the constructor we instantiate two nn.Linear modules and assign them as
member variables.
D_in: input dimension
H: dimension of hidden layer
D_out: output dimension
"""
super(TwoLayerNet, self).__init__()
self.linear1 = nn.Linear(D_in, H)
self.linear2 = nn.Linear(H, D_out)
def forward(self, x) -> Tensor:
"""
In the forward function we accept a Variable of input data and we must
return a Variable of output data. We can use Modules defined in the
constructor as well as arbitrary operators on Variables.
"""
h_relu = F.relu(self.linear1(x))
y_pred = self.linear2(h_relu)
return F.log_softmax(y_pred, 1)
class DummyAlphabetFLModel(IFLModel):
def __init__(
self,
embedding_size: int = 10,
hidden_dim: int = 8,
use_cuda_if_available: bool = False,
) -> None:
"""
create a sample dummy FL model for alphabet dataset
"""
self.model = TwoLayerNet(embedding_size, hidden_dim, 2)
self.dummy_embedding = torch.rand(26, embedding_size)
self.use_cuda_if_available = use_cuda_if_available
def fl_forward(self, batch) -> FLBatchMetrics:
text = batch[TestDataSetting.TEXT_COL_NAME]
batch_label = batch[TestDataSetting.LABEL_COL_NAME]
stacked_label = torch.tensor(batch_label.view(-1), dtype=torch.long)
text_embeddings = self.dummy_embedding[text, :]
if self.use_cuda_if_available:
text_embeddings = text_embeddings.cuda()
out = self.model(text_embeddings)
if self.use_cuda_if_available:
out, batch_label, stacked_label = (
out.cuda(),
batch[TestDataSetting.LABEL_COL_NAME].cuda(),
stacked_label.cuda(),
)
loss = F.nll_loss(out, stacked_label)
# produce a large loss, so gradients are large
# this prevents unit tests from failing because of numerical issues
loss.mul_(100.0)
num_examples = self.get_num_examples(batch)
return FLBatchMetrics(
loss=loss,
num_examples=num_examples,
predictions=out,
targets=batch_label,
model_inputs=text_embeddings,
)
def fl_create_training_batch(self, **kwargs) -> None:
return kwargs.get("batch", None)
def fl_get_module(self) -> nn.Module:
return self.model
def fl_cuda(self) -> None:
self.model = self.model.cuda()
def get_eval_metrics(self, batch) -> FLBatchMetrics:
with torch.no_grad():
return self.fl_forward(batch)
def get_num_examples(self, batch) -> int:
return len(batch[TestDataSetting.LABEL_COL_NAME])
class MockFLModel(IFLModel):
r"""
Mock IFLModel for testing that will return
whatever the user pass into the constructor
"""
def __init__(
self,
num_examples_per_user: int = 1,
batch_labels: Optional[torch.Tensor] = None,
model_output: Optional[torch.Tensor] = None,
model_input: Optional[torch.Tensor] = None,
loss: Optional[torch.Tensor] = None,
) -> None:
self.model = TwoLayerNet(10, 8, 2)
self.num_examples_per_user = num_examples_per_user
self.batch_labels = self._get_or_return_dummy_tensor(batch_labels)
self.model_output = self._get_or_return_dummy_tensor(model_output)
self.model_input = self._get_or_return_dummy_tensor(model_input)
self.loss = self._get_or_return_dummy_tensor(loss)
def fl_forward(self, batch) -> FLBatchMetrics:
num_examples = self.get_num_examples(batch)
return FLBatchMetrics(
loss=self.loss,
num_examples=num_examples,
predictions=self.model_output,
targets=self.batch_labels,
model_inputs=self.model_input,
)
def fl_create_training_batch(self, **kwargs) -> None:
return kwargs.get("batch", None)
def fl_get_module(self) -> nn.Module:
return self.model
def fl_cuda(self) -> None:
pass
def get_eval_metrics(self, batch) -> FLBatchMetrics:
return FLBatchMetrics(
loss=self.loss,
num_examples=self.num_examples_per_user,
predictions=self.model_output,
targets=self.batch_labels,
model_inputs=self.model_input,
)
def get_num_examples(self, batch) -> int:
return self.num_examples_per_user
def _get_or_return_dummy_tensor(self, data: Optional[torch.Tensor]):
return data if data is not None else torch.Tensor([1])
class BiasOnly(nn.Module):
"""This module has only a bias term
It is useful for unit testing because the gradient will be constant.
"""
def __init__(self):
super(BiasOnly, self).__init__()
self.bias = nn.Parameter(torch.zeros(1))
def forward(self):
return self.bias
class ConstantGradientFLModel(IFLModel):
def __init__(
self,
) -> None:
"""A dummy model where the gradient is constant
This is useful in testing because the parameters of the model move by a
a constant amount in each optimizer.step()
Initial value of self.model.bias = 0
If using SGD, Final value of self.model.bias = lr * num_times_optimizer_step()_is_Called
"""
self.model = BiasOnly()
def fl_forward(self, batch) -> FLBatchMetrics:
# self.model() will return the value of bias
        # we want the gradient to be negative, so self.model.bias increases
        # with each optimizer.step()
loss = -1 * self.model()
return FLBatchMetrics(
loss=torch.Tensor(loss),
num_examples=1,
predictions=torch.Tensor(0),
targets=torch.Tensor(0),
model_inputs=torch.Tensor(0),
)
def fl_create_training_batch(self, **kwargs) -> None:
return kwargs.get("batch", None)
def fl_get_module(self) -> nn.Module:
return self.model
def fl_cuda(self) -> None:
self.model = self.model.cuda()
def get_eval_metrics(self, batch) -> FLBatchMetrics:
with torch.no_grad():
return self.fl_forward(batch)
def get_num_examples(self, batch) -> int:
return 1
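# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): running a hand-built batch through
# DummyAlphabetFLModel outside of a trainer. The batch layout (a plain dict keyed
# by the TestDataSetting column names, with integer letter ids and binary labels)
# and FLBatchMetrics exposing the loss it was constructed with are assumptions
# based on how fl_forward reads the batch above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    dummy_model = DummyAlphabetFLModel(embedding_size=10, hidden_dim=8)
    batch = {
        TestDataSetting.TEXT_COL_NAME: torch.tensor([0, 1, 2, 3]),  # letter ids in [0, 25]
        TestDataSetting.LABEL_COL_NAME: torch.tensor([0, 1, 0, 1]),  # binary labels
    }
    metrics = dummy_model.fl_forward(batch)
    # loss is the NLL loss scaled by 100; num examples equals the number of labels
    print(float(metrics.loss), dummy_model.get_num_examples(batch))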
| canife-main | FLSim/flsim/utils/sample_model.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import collections.abc as abc
import json
from typing import Any, Dict, List, Optional, Tuple, Type
from hydra import compose, initialize
from omegaconf import DictConfig, OmegaConf
def fullclassname(cls: Type[Any]) -> str:
"""
Returns the fully qualified class name of the input class.
"""
module = cls.__module__
name = cls.__qualname__
if module is not None and module != "__builtin__":
name = module + "." + name
return name
def _validate_cfg(component_class: Type[Any], cfg: Any) -> None:
"""
Validate that cfg doesn't have MISSING fields. This needs to be done only after
all defaults are set, typically in the base class.
We do this by making sure none of the parents have ``_set_defaults_in_cfg`` method.
"""
if not any(
hasattr(parent, "_set_defaults_in_cfg") for parent in component_class.__bases__
):
        # looping over the config fields throws in case of a missing field
for _ in cfg.items():
pass
def init_self_cfg(
component_obj: Any, *, component_class: Type, config_class: Type, **kwargs
) -> None:
"""
Initialize FL component config by constructing OmegaConf object,
setting defaults, and validating config.
"""
cfg = (
config_class(**kwargs)
if not hasattr(component_obj, "cfg")
else component_obj.cfg
)
cfg = OmegaConf.create(cfg) # convert any structure to OmegaConf
component_class._set_defaults_in_cfg(cfg) # set default cfg params for this class
# convert any structure to OmegaConf again, after setting defaults
cfg = OmegaConf.create(cfg) # pyre-ignore [6]
_validate_cfg(component_class, cfg) # validate the config
component_obj.cfg = cfg
# trainer config utils for consuming hydra configs
def _flatten_dict(
d: abc.MutableMapping, parent_key: str = "", sep: str = "."
) -> Dict[str, str]:
"""
Changes json of style
```
{
"trainer" : {
"_base_": "base_sync_trainer",
"aggregator": {
"_base_": "base_fed_avg_with_lr_sync_aggregator",
"lr": 0.1
}
}
}
```
to
```
{
"trainer._base_": "base_sync_trainer",
"trainer.aggregator._base_": "base_fed_avg_with_lr_sync_aggregator",
"trainer.aggregator.lr": 0.1,
}
```
"""
items = []
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
# if value is not a dict and is mutable, extend the items and flatten again.
# > hacky way of preserving dict values by checking if key has _dict as suffix.
if not new_key.endswith("_dict") and isinstance(v, abc.MutableMapping):
items.extend(_flatten_dict(v, new_key, sep=sep).items())
else:
# check if a number needs to be retained as a string
            # the replace with one dot is needed to handle floats
if type(v) is str and v.replace(".", "", 1).isdigit():
v = f'"{v}"' # enclose it with quotes if so.
items.append((new_key, v))
return dict(items)
def _handle_values_for_overrides_list(v: Any) -> Any:
"""
Handle the special massaging of some values of JSON need to for it to be supplied
to Hydra's overrides list.
"""
# python's None --> cmd line null for override list
v = "null" if v is None else v
# if value is a dict, convert it to string to work with override list.
# dump twice to escape quotes correctly.
v = json.dumps(json.dumps(v)) if type(v) is dict else v
# escape = char in value when present
v = v.replace(r"=", r"\=") if type(v) is str else v
return v
def _hydra_merge_order(dotlist_entry: str) -> Tuple:
"""
The override list needs to be ordered as the last one wins in case of
duplicates: https://hydra.cc/docs/advanced/defaults_list#composition-order
This function arranges the list so that _base_ is at the top, and we
proceed with overrides from top to bottom.
"""
key = dotlist_entry.split("=")[0]
# presence of "@" => it is a _base_ override
default_list_item_indicator = key.count("@") # 1 if true, 0 otherwise
# level in hierarchy; based on number of "."
hierarchy_level = key.count(".")
# multiply by -1 to keep the default list items on top
return (-1 * default_list_item_indicator, hierarchy_level, dotlist_entry)
def fl_json_to_dotlist(
json_config: Dict[str, Any], append_or_override: bool = True
) -> List[str]:
"""
Changes
```
{
"trainer._base_": "base_sync_trainer",
"trainer.aggregator._base_": "base_fed_avg_with_lr_sync_aggregator",
"trainer.aggregator.lr": 0.1,
}
```
to
```
[
"+trainer@trainer=base_sync_trainer",
"[email protected]=base_fed_avg_with_lr_sync_aggregator",
"trainer.aggregator.lr=0.1",
]
```
The override list grammar for reference:
https://hydra.cc/docs/advanced/override_grammar/basic
"""
dotlist_dict = _flatten_dict(json_config)
dotlist_list = []
for k, v in dotlist_dict.items():
if k.endswith("._base_"):
# trainer.aggregator._base_ --> trainer.aggregator
k = k.replace("._base_", "")
# extract aggregator from trainer.aggregator
config_group = k.split(".")[-1]
            # trainer.aggregator --> +aggregator@trainer.aggregator
k = f"+{config_group}@{k}"
            # +aggregator@trainer.aggregator=base_fed_avg_with_lr_sync_aggregator
dotlist_list.append(f"{k}={v}")
else:
v = _handle_values_for_overrides_list(v)
prefix = "++" if append_or_override else ""
dotlist_list.append(f"{prefix}{k}={v}")
sorted_dotlist_list = sorted(dotlist_list, key=_hydra_merge_order)
return sorted_dotlist_list
def fl_config_from_json(
json_config: Dict[str, Any], append_or_override: bool = True
) -> DictConfig:
"""
Accepts the FLSim config in json format and constructs a Hydra config object.
"""
with initialize(config_path=None, version_base="1.1"):
cfg = compose(
config_name=None,
overrides=fl_json_to_dotlist(json_config, append_or_override),
)
return cfg
def maybe_parse_json_config() -> Optional[DictConfig]:
"""
Parse the command line args and build a config object if json config is supplied.
    This comes in handy when we want to supply a JSON config file during a buck run.
This function will no longer be relevant once FLSim entirely moves to YAML configs.
"""
cfg = None
parser = argparse.ArgumentParser(description="Run training loop for FL example")
parser.add_argument("--config-file", type=str, default=None, help="JSON config")
args, _ = parser.parse_known_args()
# if JSON config is specified, build a DictConfig
if args.config_file is not None:
with open(args.config_file, "r") as config_file:
json_config = json.load(config_file)
cfg = fl_config_from_json(json_config["config"])
# else: assume yaml config, and let hydra handle config construction
return cfg
def is_target(config, cls) -> bool:
return config._target_ == cls._target_
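# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): the pure-Python JSON -> Hydra overrides
# conversion, shown without actually composing a config (composition would require
# the referenced config groups to be registered). The expected output in the
# comments mirrors the fl_json_to_dotlist docstring above.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    sample_json = {
        "trainer": {
            "_base_": "base_sync_trainer",
            "aggregator": {
                "_base_": "base_fed_avg_with_lr_sync_aggregator",
                "lr": 0.1,
            },
        }
    }
    for override in fl_json_to_dotlist(sample_json):
        print(override)
    # +trainer@trainer=base_sync_trainer
    # +aggregator@trainer.aggregator=base_fed_avg_with_lr_sync_aggregator
    # ++trainer.aggregator.lr=0.1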
| canife-main | FLSim/flsim/utils/config_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/utils/distributed/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
from enum import IntEnum
from itertools import chain
from typing import Iterable, List, Optional, Tuple
from warnings import warn
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.nn as nn
from flsim.common.logger import Logger
from flsim.utils.fl.common import FLModelParamUtils
class OperationType(IntEnum):
BROADCAST = 0
SUM_AND_BROADCAST = 1
SUM = 2
class FLDistributedUtils:
"""
More detailed note: https://fburl.com/5n8mnaf3. We cannot use PyTorch
DDP here, because DDP is tied to backward() and only provides
high-level APIs for reducing gradients, but in FL, after each round, we
need to perform all-reduce on the models (optionally with some pre and
post-processing) instead of just model gradients (e.g. FedAvg).
    We reduce the number of all-reduce operations by flattening an entire model
    into a 1D tensor, if the model size is less than a buffer limit of 256MB.
    For large models, we group the model parameters into flattened buckets of
256MB each and call all-reduce() in each bucket using async operations.
"""
logger: logging.Logger = Logger.get_logger(__name__)
# equivalent to 256 MB of floats, same buffer size as in PyTorch DDP
MAX_BUFFER_SIZE = 2**28
WORLD_SIZE = 1 # number of processes
NUM_WORKERS = 1 # number of CPUs or GPUs
# run distributed training on CPU, default False
DISTRIBUTED_TRAINING_ON_CPU = False
DISTRIBUTED_BACKEND = dist.Backend.NCCL
@classmethod
def distributed_training_on_cpu(cls):
cls.DISTRIBUTED_TRAINING_ON_CPU = True
cls.DISTRIBUTED_BACKEND = dist.Backend.GLOO
@classmethod
def distributed_training_on_cuda(cls):
"""
this is on by default, use if you have called
distributed_training_on_cpu and want to enable
cpu distributed again.
"""
cls.DISTRIBUTED_TRAINING_ON_CPU = False
cls.DISTRIBUTED_BACKEND = (
dist.Backend.GLOO
if (cls.WORLD_SIZE > cls.NUM_WORKERS or cls.NUM_WORKERS % cls.WORLD_SIZE)
else dist.Backend.NCCL
)
@classmethod
def calc_num_processes_and_workers(
cls, distributed_world_size: int, cuda: bool
) -> Tuple[int, int]:
"""
Checks resources on the machine and returns
the distributed world size and the number of workers.
        For CPU we do not allow more than one process per CPU;
        for CUDA, multiple processes per GPU are allowed.
"""
if cuda:
assert torch.cuda.is_available(), (
"distributed_world_size is greater than 1 "
"use only if cuda is supported or distributed_training_on_cuda"
"has been called!"
)
num_gpus = torch.cuda.device_count()
if distributed_world_size > num_gpus and distributed_world_size % num_gpus:
warn(
f"There are {num_gpus} physical cuda workers (i.e gpus), "
f"you are asking {distributed_world_size} workers, "
"we need equal number of workers per gpu"
)
return distributed_world_size, num_gpus
else:
num_cpus = mp.cpu_count()
if distributed_world_size > num_cpus:
raise Warning(
f"Only {num_cpus} CPUs are available, "
f"but {distributed_world_size} workers were requested."
)
return min(distributed_world_size, num_cpus), num_cpus
@classmethod
def setup_distributed_training(cls, distributed_world_size: int, use_cuda=True):
cls.WORLD_SIZE, cls.NUM_WORKERS = cls.calc_num_processes_and_workers(
distributed_world_size, use_cuda
)
if use_cuda:
cls.distributed_training_on_cuda()
else:
cls.distributed_training_on_cpu()
@classmethod
def distributed_operation(
cls,
params: Iterable[torch.Tensor],
op: OperationType = OperationType.SUM_AND_BROADCAST,
src: int = -1,
dst: int = -1,
):
"""
Group params into a list of flatten buffers and call the distributed
operation on each buffer asynchronously.
The actual async operation for each buffer is done in the helper function
`_distributed_operation`
        Starting with an empty buffer, loops over params and does one of the following:
        * appends the param to the current buffer if the buffer has space;
        * if the buffer cannot fit the param but the param would fit into a new buffer,
          sends the current buffer to `_distributed_operation` and starts a new buffer
          with the param;
        * otherwise (the param alone exceeds the buffer size), sends the param to
          `_distributed_operation` on its own and keeps the current buffer for the
          next param in the list.
        At the end, the function waits on all async ops and copies the processed values
        from each flattened buffer back into the corresponding params.
Note:
In all operations it is assumed that the master worker is the worker with rank 0.
"""
if cls.WORLD_SIZE == 1:
return
        # temp variables for grouping model params into flattened buffers
        operation_results = []  # list of (async handle, buffer) per dispatched operation
        param_references = []  # list of param-lists in each buffer
        buffered_params = []  # buffer to hold tensors until enough for a dist operation
offset = 0
with torch.no_grad():
for param in params:
sz = param.numel()
if sz + offset <= cls.MAX_BUFFER_SIZE:
# append the params and postpone the operation
buffered_params.append(param)
offset += sz
continue
# do the operation, the buffer cannot be appended anymore
process_independently = sz > cls.MAX_BUFFER_SIZE
tensor_list = [param] if process_independently else buffered_params
operation_result = cls._distributed_operation(
tensor_list, sz, op, src, dst
) # operation result is a tuple of (handle, buffer)
operation_results.append(operation_result)
param_references.append(tensor_list)
offset = offset if process_independently else sz
buffered_params = buffered_params if process_independently else [param]
if len(buffered_params) > 0:
operation_result = cls._distributed_operation(
buffered_params, offset, op, src, dst
) # operation result is a tuple of (handle, buffer)
operation_results.append(operation_result)
param_references.append(buffered_params)
# wait on the async handle
for handle, _ in operation_results:
handle.wait()
# copy data from flattened buffers to the actual tensors.
for params, (_, buffer) in zip(param_references, operation_results):
cls._get_params_from_buffer(params, buffer)
@classmethod
def _distributed_operation(
cls,
params: List[torch.Tensor],
numels: int,
op: OperationType,
src: int = -1,
dst: int = -1,
):
"""
        Returns a tuple of handle and buffer. Caller is RESPONSIBLE for awaiting
        on the handle and then using whatever is filled in the buffer.
        Creates a buffer of size 'numels'. Then, we loop over 'params', which is
        a list of tensors, and copy each tensor (which is a set of parameters
        from the model) into the buffer one by one. After that, we call the
        all_reduce() function in PyTorch distributed as an async operation
        across all processes in the group (and get an async handle to return
        after this).
Args:
params: List[torch.Tensor], a buffer group of parameters to perform
async operation at one time
numels: total number of scalar elements in params
Returns:
handle: an async handle
            buffer: within the distributed operation, params: List[torch.Tensor] is
                flattened into a buffer (1D Tensor) and sent to all_reduce. buffer will
                store the result of the distributed operation once it is finished.
Note:
            Sizes of the params are not required to be the same. params is first
            flattened into a 1D tensor. E.g.:
params = Tensor(
[1,2,3,4], [ [5,6], [7,8] ], [9,10]
)
then buffer is
[1,2,3,4,5,6,7,8,9,10]
Example:
if worker 1 has
params = [
Tensor([1,2,3,4]),
Tensor([ [5,6], [7,8] ]),
Tensor([9,10])
]
and worker 2 has
params = [
Tensor([10,20,30,40]),
Tensor([ [50,60], [70,80] ]),
Tensor([90,100])
]
and if the operation type is sum, the returned buffer will be:
Tensor([11, 22, 33, 44, 55, 66, 77, 88, 99, 110])
"""
# TODO: enable all_reduce on mixed dtypes with dtype-based bucketing
# currently the assumption is that there is at least one float tensor
# so all layers could be casted to float
# NOTE: seems to work for mixed int and float types
generic_type = torch.float
for p in params:
if p.dtype != generic_type:
cls.logger.warning("non float tensor types sent to all reduce")
buffer = params[0].new_empty(numels, dtype=generic_type)
offset = 0
for p in params:
sz = p.numel()
buffer[offset : offset + sz].copy_(p.data.view(-1))
offset += sz
if op == OperationType.SUM_AND_BROADCAST:
handle = dist.all_reduce(
buffer,
op=dist.ReduceOp.SUM,
group=cls._get_default_group(),
async_op=True,
)
elif op == OperationType.SUM:
if dst < 0:
cls.logger.debug("dst is not defined setting 0 as the default value")
dst = 0
cls.logger.warning("Operation reduce is not supported on CPU.")
if not (
cls.DISTRIBUTED_TRAINING_ON_CPU
or cls.DISTRIBUTED_BACKEND == dist.Backend.NCCL
):
# GLOO on GPU does not support reduce
cls.logger.warning("Changing reduce operation to reduce all.")
handle = dist.all_reduce(
buffer,
op=dist.ReduceOp.SUM,
group=cls._get_default_group(),
async_op=True,
)
else:
handle = dist.reduce(
buffer,
dst,
op=dist.ReduceOp.SUM,
group=cls._get_default_group(),
async_op=True,
)
elif op == OperationType.BROADCAST:
if src < 0:
cls.logger.debug(
"Distributed copy operation (broadcast) needs a source."
"Assigning 0 as the default source"
)
src = 0
handle = dist.broadcast(
buffer,
src,
group=cls._get_default_group(),
async_op=True,
)
else:
raise ValueError(f"Operation {op} not found. Please check the parameters.")
return (handle, buffer)
@classmethod
def _get_params_from_buffer(cls, params: List[torch.Tensor], buffer: torch.Tensor):
"""
        Inverts the buffering operation used in all_reduce and copies the data
        in buffer into each param in params.
        i.e. Copies all-reduced grads back into their original place. However,
        more generally speaking, this function treats 'buffer' (i.e. the 2nd param)
        as the flattened 1D concatenation of the list of params and copies its
        contents back into each param.
"""
# TODO: (jesikmin) T55869097 Check whether the size of buffer is same as
# the total number of elements of params
# copy all-reduced grads back into their original place
offset = 0
for p in params:
sz = p.numel()
p.data.copy_(buffer[offset : offset + sz].view_as(p))
offset += sz
@classmethod
def _get_default_group(cls):
return dist.group.WORLD
@classmethod
def is_master_worker(cls):
"""
We assume that worker 0 is the master worker.
"""
return (not dist.is_initialized()) or dist.get_rank() == 0
@classmethod
def suppress_output(cls):
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
# force print the result when kwargs contains force and value is True
if kwargs.pop("force", False):
builtin_print(*args, **kwargs)
__builtin__.print = print
@classmethod
def dist_init(
cls,
rank: int,
world_size: int,
init_method: str,
use_cuda: bool = True,
):
cls.setup_distributed_training(world_size, use_cuda)
if not cls.DISTRIBUTED_TRAINING_ON_CPU:
device = torch.device(f"cuda:{rank % cls.NUM_WORKERS}")
torch.cuda.set_device(device)
if world_size > 1:
dist.init_process_group(
backend=cls.DISTRIBUTED_BACKEND,
init_method=init_method,
world_size=world_size,
rank=rank,
)
@classmethod
def synchronize_model_across_workers(
cls,
operation: OperationType,
model: nn.Module,
weights: Optional[torch.Tensor] = None,
only_federated_params: bool = False,
):
state_dict = FLModelParamUtils.get_state_dict(
model, only_federated_params=only_federated_params
)
if weights is not None:
cls.distributed_operation(
params=chain([weights], state_dict.values()), op=operation
)
else:
cls.distributed_operation(params=state_dict.values(), op=operation)
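# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): summing a tensor across two CPU
# processes with FLDistributedUtils. The file:// rendezvous and the spawn entry
# point are assumptions about the environment (requires at least 2 CPUs); adjust
# the init method to your setup.
# ---------------------------------------------------------------------------
def _example_worker(rank: int, world_size: int, init_method: str) -> None:
    FLDistributedUtils.dist_init(rank, world_size, init_method, use_cuda=False)
    value = torch.ones(4) * (rank + 1)  # rank 0 holds 1s, rank 1 holds 2s
    FLDistributedUtils.distributed_operation(
        params=[value], op=OperationType.SUM_AND_BROADCAST
    )
    print(f"rank {rank}:", value)  # both ranks end up with tensor([3., 3., 3., 3.])
if __name__ == "__main__":
    import tempfile
    rendezvous_file = tempfile.NamedTemporaryFile(delete=False)
    mp.spawn(
        _example_worker,
        args=(2, f"file://{rendezvous_file.name}"),
        nprocs=2,
    )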
| canife-main | FLSim/flsim/utils/distributed/fl_distributed.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from enum import Enum
from flsim.utils.fl.stats import AverageType, RandomVariableStatsTrackerMA
class TargetMetricDirection(Enum):
MIN = "min"
MAX = "max"
class TargetMetricTracker:
"""
    Tracks a sliding window of an eval metric throughout the course of training
    and reports whether the target metric value has been reached
"""
def __init__(
self,
target_value: float,
window_size: int,
average_type: AverageType,
direction: TargetMetricDirection,
):
self._stats = RandomVariableStatsTrackerMA(
window_size=window_size, mode=average_type
)
self.target_value = target_value
self.window_size = window_size
self.direction = direction
def update_and_check_target(
self,
current_eval_metric: float,
) -> bool:
"""
Updates the stats tracker with latest eval metric
Return value:
True if target metric is reached.
"""
self._stats.update(current_eval_metric)
if self._stats.num_samples < self.window_size:
return False
return (
self._stats.mean() > self.target_value
if self.direction == TargetMetricDirection.MAX
else self._stats.mean() < self.target_value
)
@property
def mean(self):
return self._stats.mean()
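# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): deciding when a windowed eval accuracy
# has crossed a target. The accuracy stream below is made up for illustration.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    tracker = TargetMetricTracker(
        target_value=0.8,
        window_size=3,
        average_type=AverageType.SMA,
        direction=TargetMetricDirection.MAX,
    )
    for round_num, eval_acc in enumerate([0.70, 0.75, 0.82, 0.85, 0.90], start=1):
        if tracker.update_and_check_target(eval_acc):
            print(f"target reached at round {round_num}, window mean {tracker.mean:.3f}")
            break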
| canife-main | FLSim/flsim/utils/fl/target_metric.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/utils/fl/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
from typing import List, Optional, Union
import torch
from flsim.interfaces.model import IFLModel
from flsim.utils.fl.personalized_model import FLModelWithPrivateModules
from torch import nn
from torch.optim.optimizer import Optimizer
class FLModelParamUtils:
@classmethod
def get_state_dict(cls, model: nn.Module, only_federated_params: bool):
if only_federated_params and isinstance(model, FLModelWithPrivateModules):
state_dict = model.federated_state_dict()
else:
state_dict = model.state_dict()
return state_dict
@classmethod
def load_state_dict(cls, model: nn.Module, state_dict, only_federated_params: bool):
if only_federated_params and isinstance(model, FLModelWithPrivateModules):
model.load_federated_state_dict(state_dict)
else:
model.load_state_dict(state_dict)
@classmethod
def zero_weights(cls, model: nn.Module, only_federated_params=False) -> None:
state_dict = cls.get_state_dict(model, only_federated_params)
for _name, param in state_dict.items():
param.data.fill_(0.0)
@classmethod
def pseudo_random_weights(cls, model: nn.Module, seed: int = 1) -> None:
torch.manual_seed(seed)
for _name, param in model.state_dict().items():
param.data.uniform_()
@classmethod
def get_mismatched_param(
cls,
models: List[nn.Module],
rel_epsilon: Optional[float] = None,
abs_epsilon: Optional[float] = None,
) -> str:
"""Compare all the models in the given list of models.
It returns an empty string if all the models have the same parameters.
It returns the name of the first parameter that is different if any.
"""
if rel_epsilon is None and abs_epsilon is not None:
print("WARNING: rel_epsilon is not specified, abs_epsilon is ignored.")
if len(models) <= 1:
return ""
dicts = [aModel.state_dict() for aModel in models]
# verify new models have all params same
rtol_atol = {}
if rel_epsilon is not None:
rtol_atol["rtol"] = rel_epsilon
if abs_epsilon is not None:
rtol_atol["atol"] = abs_epsilon
for name, param in dicts[0].items():
for adict in dicts[1:]:
# if a parameter name does not exist in a model, return early
if name not in adict.keys():
return name
param_here = adict[name]
# if epsilon is specified, do approx comparison
if not torch.allclose(param.float(), param_here.float(), **rtol_atol):
return name
return ""
@classmethod
def linear_comb_models(
cls,
model1: nn.Module,
wt1: float,
model2: nn.Module,
wt2: float,
model_to_save: nn.Module,
only_federated_params: bool = False,
) -> None:
"""sets model_to_save = model1*wt1 + model2*wt2"""
global_params = cls.get_state_dict(model_to_save, only_federated_params)
params_model1 = cls.get_state_dict(model1, only_federated_params)
params_model2 = cls.get_state_dict(model2, only_federated_params)
assert (
global_params.keys() == params_model1.keys() == params_model2.keys()
), "Models should have the same set of parameters, including order."
with torch.no_grad():
for name, global_param in global_params.items():
global_param.data = (
params_model1[name].data * wt1 + params_model2[name].data * wt2
)
cls.load_state_dict(model_to_save, global_params, only_federated_params)
@classmethod
def average_models(
cls,
models: List[nn.Module],
model_to_save: nn.Module,
weights: Optional[List[float]] = None,
) -> None:
"""Averages parameters of input models. Saves the average model in model_to_save
Args:
models: collection of models. These will be changed in-place
model_to_save: update this model with the average
weights: (optional) use weighted average
Returns:
none
"""
assert weights is None or len(weights) == len(models), (
"Weights should have the same length as models. len(wts):"
+ str(len(weights))
+ ", len(models):"
+ str(len(models))
)
wts_divisor = len(models)
if weights is not None:
for w in weights:
assert w >= 0, "Weights must be non-negative. Found:" + str(w)
wts_divisor = sum(weights)
assert wts_divisor > 0, "Sum of weights must be positive:" + str(weights)
cls.zero_weights(model_to_save, only_federated_params=True)
for idx, aModel in enumerate(models):
wts_numerator = 1 if weights is None else weights[idx]
wt = wts_numerator / wts_divisor
cls.linear_comb_models(
aModel, wt, model_to_save, 1, model_to_save, only_federated_params=True
)
@classmethod
def copy_models(
cls,
from_model: nn.Module,
to_models: List[nn.Module],
only_federated_params: bool = False,
) -> None:
"""Copy from_model into every model in to_models
Args:
from_model: a model
to_models: collection of models. These will be changed in-place
only_federated_params: copy only federated params.
Returns:
none
"""
from_state_dict = cls.get_state_dict(from_model, only_federated_params)
for m in to_models:
cls.load_state_dict(m, from_state_dict, only_federated_params)
@classmethod
def clone(
cls, model: Union[nn.Module, IFLModel], dtype: Optional[torch.dtype] = None
):
"""
Clones a pytorch module, and allows for a change of precision.
TODO If needed we can also add device here.
"""
new_model = copy.deepcopy(model)
if isinstance(new_model, IFLModel):
if dtype == torch.float32:
new_model.fl_get_module().float()
elif dtype == torch.float64:
new_model.fl_get_module().double()
return new_model
else:
return (
new_model.float()
if dtype == torch.float32
else (new_model.double() if dtype == torch.float64 else new_model)
)
@classmethod
def set_gradient(cls, model: nn.Module, reference_gradient: nn.Module) -> None:
"""Set gradient of model to the parameters of reference_gradient
Args:
model: nn.Module
reference_gradient: nn.Module - gradient is the parameters of this model
"""
# Use parameters() since state_dict() may include non-learnable params.
for m, ref in zip(model.parameters(), reference_gradient.parameters()):
m.grad = ref.detach().clone().type(m.type())
@classmethod
def reconstruct_gradient(
cls, old_model: nn.Module, new_model: nn.Module, grads: nn.Module
) -> None:
# compute approximate gradient:
# grads = old_model - new_model
cls.subtract_model(old_model, new_model, grads)
@classmethod
def get_trainable_params(cls, model: nn.Module):
return filter(lambda p: p.requires_grad, model.parameters())
@classmethod
def get_gradient_l2_norm_raw(cls, model: nn.Module) -> float:
total_norm = 0
for p in cls.get_trainable_params(model):
if p.grad is None:
continue
param_norm = p.grad.data.norm(2)
total_norm += param_norm.item() ** 2
total_norm = total_norm ** (1.0 / 2)
return total_norm
@classmethod
def get_num_trainable_params(cls, model: nn.Module) -> int:
total_params = 0
for p in cls.get_trainable_params(model):
total_params += p.numel()
return total_params
@classmethod
def get_gradient_l2_norm_normalized(cls, model: nn.Module) -> float:
"""Compute l2-norm-of-gradient/sqrt(num-params)
If gradients are all independent, l2 norm grows as sqrt() of number
of parameters. Eg: in Xavier Initialization
"""
return cls.get_gradient_l2_norm_raw(model) / math.sqrt(
cls.get_num_trainable_params(model)
)
@classmethod
def debug_model_norm(cls, model: nn.Module):
norm = 0
for p in model.parameters():
norm += torch.sum(torch.abs(p))
return norm
@classmethod
def get_mismatched_param_max_difference(cls, models: List[nn.Module]):
if len(models) <= 1:
return 0.0
dicts = [aModel.state_dict() for aModel in models]
max_diff = 0
# compute maximum element-wise difference of model parameters
for name, param in dicts[0].items():
for adict in dicts[1:]:
param_here = adict[name]
param_diff = torch.max(torch.abs(param - param_here)).item()
# pyre-fixme[58]: `<` is not supported for operand types
# `Union[float, int]` and `int`.
max_diff = param_diff if (param_diff > max_diff) else max_diff
# if epsilon is specified, do approx comparison
return max_diff
@classmethod
def clip_gradients(cls, max_normalized_l2_norm: float, model: nn.Module) -> None:
"""Clip gradients in model parameters by maximum value for normalized
L2 norm (max_normalized_norm).
"""
max_unnormalized_l2_norm = max_normalized_l2_norm * math.sqrt(
cls.get_num_trainable_params(model)
)
torch.nn.utils.clip_grad_norm_(model.parameters(), max_unnormalized_l2_norm)
@classmethod
def step_with_modified_lr(
cls, optimizer: Optimizer, base_lr: float, lr_normalizer: float
) -> None:
for param_group in optimizer.param_groups:
param_group["lr"] = base_lr * lr_normalizer
# pyre-ignore[20] Call `Optimizer.step` expects argument `closure`
# but closure is optional in torch.optim.Optimizer
optimizer.step()
@classmethod
def multiply_model_by_weight(
cls,
model: nn.Module,
weight: float,
model_to_save: nn.Module,
only_federated_params: bool = False,
):
"""
Returns model_to_save = model * weight
"""
FLModelParamUtils.linear_comb_models(
model, weight, model, 0, model_to_save, only_federated_params
)
@classmethod
def subtract_model(
cls,
minuend: nn.Module,
subtrahend: nn.Module,
difference: nn.Module,
only_federated_params: bool = False,
):
"""
Returns difference = minuend - subtrahend
"""
FLModelParamUtils.linear_comb_models(
minuend, 1, subtrahend, -1, difference, only_federated_params
)
@classmethod
def add_model(
cls,
model1: nn.Module,
model2: nn.Module,
model_to_save: nn.Module,
only_federated_params: bool = False,
):
"""
Returns model_to_save = model1 + model2
"""
FLModelParamUtils.linear_comb_models(
model1, 1, model2, 1, model_to_save, only_federated_params
)
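# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): combining two small modules with
# FLModelParamUtils. nn.Linear stands in for a real FL model module here.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    model_a, model_b = nn.Linear(3, 2), nn.Linear(3, 2)
    averaged, difference = nn.Linear(3, 2), nn.Linear(3, 2)
    # averaged = 0.25 * model_a + 0.75 * model_b, parameter by parameter
    FLModelParamUtils.average_models([model_a, model_b], averaged, weights=[1.0, 3.0])
    # difference = model_a - model_b, e.g. to reconstruct a model delta
    FLModelParamUtils.subtract_model(model_a, model_b, difference)
    print(FLModelParamUtils.get_num_trainable_params(averaged))  # 8 = 3*2 weights + 2 biases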
| canife-main | FLSim/flsim/utils/fl/common.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import copy
from typing import Dict, Iterable
import torch
class FLModelWithPrivateModules(abc.ABC):
"""
    This class emulates the user-private modules in FL by making them reference
    class-level global attributes.
    The user of this class must maintain the invariant that the user-private
    modules reference class-level attributes.
    In federated learning, this emulation should be handled at the following points:
    when a new instance is created, when models are copied to each user, when doing
    forward propagation, during backprop, and when user models are averaged back to
    the server.
"""
USER_PRIVATE_MODULE_PREFIX = "USER_PRIVATE_MODULE"
user_private_module_dict: Dict[str, torch.nn.Module] = {}
@classmethod
def clear_user_private_module_dict(cls):
cls.user_private_module_dict.clear()
@classmethod
def get_user_private_parameters(cls) -> Iterable[torch.Tensor]:
"""Return emulated mapping that maps each user to her private params."""
for module in cls.user_private_module_dict.values():
for param in module.parameters():
yield param
def _get_user_private_module_attr_name(self, module_name):
return f"{self.USER_PRIVATE_MODULE_PREFIX}_{module_name}"
def _maybe_set_up_user_private_modules(self, forced: bool = False):
"""
Set an instance's private modules to class attributes to share among
all users. This function runs only when all user-private attributes
have been set.
"""
if not forced:
for module_name in self._get_user_private_module_names():
# The user-private modules may not be set during component creation.
if not hasattr(self, module_name) or getattr(self, module_name) is None:
return
# Initialize the class attributes if not exist.
for module_name in self._get_user_private_module_names():
if module_name not in self.user_private_module_dict:
self.user_private_module_dict[module_name] = getattr(self, module_name)
# Replace instance-based private attributes with the class attributes.
# for module_name in self._get_user_private_module_names():
# Remove instance version if not removed.
if hasattr(self, module_name):
delattr(self, module_name)
setattr(
self,
self._get_user_private_module_attr_name(module_name),
self.user_private_module_dict[module_name],
)
def _set_forward_hooks(self):
"""Set forward hooks to reuse the forward() of the parent class.
The pre-forward hook changes the name of the user-private parameters
back to the original ones to reuse the forward() function of the parent
class. The forward hook changes the name back to have the
USER_PRIVATE_MODULE_PREFIX.
"""
def set_user_private_modules(module, inputs):
for key in module._get_user_private_module_names():
setattr(module, key, module.user_private_module_dict[key])
def remove_user_private_modules(module, inputs, outputs):
for key in module._get_user_private_module_names():
delattr(module, key)
self.register_forward_pre_hook(set_user_private_modules)
self.register_forward_hook(remove_user_private_modules)
def __deepcopy__(self, memo):
orig_deepcopy_method = self.__deepcopy__
self.__deepcopy__ = None
# Don't want to copy the user-private modules which point to the
# class-level attributes.
for module_name in self._get_user_private_module_names():
delattr(self, self._get_user_private_module_attr_name(module_name))
cp = copy.deepcopy(self, memo)
# Re-set-up the user-private params to the class-level attributes.
self._maybe_set_up_user_private_modules(forced=True)
cp._maybe_set_up_user_private_modules(forced=True)
self.__deepcopy__ = orig_deepcopy_method
return cp
def get_user_private_attr(self, module_name):
return getattr(self, self._get_user_private_module_attr_name(module_name))
@classmethod
@abc.abstractmethod
def _get_user_private_module_names(cls) -> Iterable[str]:
"""Return an iterable of the modules of the class to be private."""
pass
def federated_state_dict(self):
"""Return a state dict of federated modules."""
state_dict = self.state_dict()
# Do not copy user private param modules.
        for key in list(state_dict.keys()):  # materialize keys so we can delete while iterating
if key.startswith(self.USER_PRIVATE_MODULE_PREFIX):
del state_dict[key]
return state_dict
def load_federated_state_dict(self, state_dict: Dict):
"""Load from a state dict of federated modules."""
# pyre-fixme[16]: `FLModelWithPrivateModules` has no attribute
# `load_state_dict`.
missing_keys, unexpected_keys = self.load_state_dict(
state_dict=state_dict, strict=False
)
assert len(unexpected_keys) == 0, "There should be no unexpected keys"
for key in missing_keys:
assert key.startswith(
self.USER_PRIVATE_MODULE_PREFIX
), f"Missing non-user-private parameter {key}"
return missing_keys, unexpected_keys
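# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): a minimal nn.Module mixed with
# FLModelWithPrivateModules, kept deliberately outside the IFLModel plumbing
# that FLSim normally uses. The class name, layer sizes, and the wiring of
# _maybe_set_up_user_private_modules / _set_forward_hooks in __init__ are
# illustrative assumptions, not the canonical FLSim pattern.
# ---------------------------------------------------------------------------
import torch.nn as nn
class _TinyPersonalizedNet(FLModelWithPrivateModules, nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.shared = nn.Linear(4, 4)  # federated across users
        self.private_head = nn.Linear(4, 2)  # emulated as user-private
        self._maybe_set_up_user_private_modules()
        self._set_forward_hooks()
    def forward(self, x):
        return self.private_head(self.shared(x))
    @classmethod
    def _get_user_private_module_names(cls):
        return ["private_head"]
if __name__ == "__main__":
    user_a, user_b = _TinyPersonalizedNet(), _TinyPersonalizedNet()
    # The private head is stored at class level, so both users share the same object.
    print(
        user_a.get_user_private_attr("private_head")
        is user_b.get_user_private_attr("private_head")
    )  # True
    print(list(user_a.federated_state_dict().keys()))  # only the shared layer's params
    print(user_a(torch.randn(1, 4)).shape)  # torch.Size([1, 2])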
| canife-main | FLSim/flsim/utils/fl/personalized_model.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
from collections import deque
from enum import Enum
from typing import List, Optional
import numpy as np
import pandas as pd
class AverageType(Enum):
SMA = "sma"
EMA = "ema"
@staticmethod
def from_str(name: str):
name_upper = name.upper()
names = [e.name for e in AverageType]
assert name_upper in names, "Unknown average type:" + name
return AverageType[name_upper]
class QuantilesTracker:
"""
Tracks the mean, standard deviation, and quantiles of a random variable
Note:
        We store the samples in memory, so be careful with a large number of
        samples
"""
def __init__(self):
self._samples: List = []
def update(self, val: float) -> None:
self._samples.append(val)
def quantile(self, p) -> float:
if len(self._samples) == 0:
return float("Inf")
return np.quantile(self._samples, p)
@property
def median_val(self) -> float:
return self.quantile(0.5)
@property
def lower_quartile_val(self) -> float:
return self.quantile(0.25)
@property
def upper_quartile_val(self) -> float:
return self.quantile(0.75)
class ModelSequenceNumberTracker:
r"""
Class to keep track of the current global sequence number and statistics.
Keeps track of "model_seqnum," which indicates the checkpoint version of global model.
Example:
For example, if global model_seqnum is 10 and the local model_seqnum
(i.e. model_seqnum of a particular device state) is 7, then we record
the diff of model_seqnum, which is 3 in this case.
"""
def __init__(self):
self._current_model_seqnum = 0
self.seqnum_diff_stats = RandomVariableStatsTracker()
def increment(self) -> int:
r"""
Increments the global model_seqnum
"""
self._current_model_seqnum += 1
return self._current_model_seqnum
def get_staleness_and_update_stats(self, client_seqnum: int) -> int:
r"""
Compares the current global model_seqnum with model_seqnum of
a particular client
"""
# Seqnum_diff will be 0 for sequential training.
seqnum_diff = self._current_model_seqnum - client_seqnum
self.seqnum_diff_stats.update(seqnum_diff)
return seqnum_diff
def print_stats(self) -> None:
print(f"ModelSeqNum: {self.current_seqnum}")
print(f"\tSeqnumDiff, {self.seqnum_diff_stats.as_str()}")
def mean(self) -> float:
r"""
Returns the mean difference between the global seq_num and local seq_num
"""
return self.seqnum_diff_stats.mean()
def standard_deviation(self) -> float:
r"""
Returns the SD difference between the global seq_num and local seq_num
"""
return self.seqnum_diff_stats.standard_deviation()
@property
def current_seqnum(self) -> int:
r"""
Current global model seq num
"""
return self._current_model_seqnum
class RandomVariableStatsTracker:
"""Keeps track of mean, variance, min and max values of a random variable"""
def __init__(self, tracks_quantiles: bool = False):
self._sum: float = 0
self._sum_squares: float = 0
self._min_val: float = float("Inf")
self._max_val: float = -float("Inf")
self._num_samples: int = 0
self._quant_tracker: Optional[QuantilesTracker] = (
QuantilesTracker() if tracks_quantiles else None
)
def update(self, val: float) -> None:
self._sum += val
self._sum_squares += val * val
self._min_val = min(self._min_val, val)
self._max_val = max(self._max_val, val)
self._num_samples += 1
if self._quant_tracker is not None:
self._quant_tracker.update(val)
def mean(self) -> float:
if not self._num_samples:
return float("Inf")
return self._sum / self._num_samples
def standard_deviation(self) -> float:
if not self._num_samples:
return float("Inf")
mean_sum_squares = self._sum_squares / self._num_samples
mean_squared = self.mean() * self.mean()
variance = mean_sum_squares - mean_squared
return math.sqrt(variance + 1e-6)
@property
def num_samples(self) -> int:
return self._num_samples
@property
def min_val(self) -> float:
return self._min_val
@property
def max_val(self) -> float:
return self._max_val
@property
def mean_val(self) -> float:
return self.mean()
@property
def standard_deviation_val(self) -> float:
return self.standard_deviation()
@property
def median_val(self) -> float:
if self._quant_tracker is None:
return float("inf")
return self._quant_tracker.median_val
@property
def lower_quartile_val(self) -> float:
if self._quant_tracker is None:
return float("inf")
return self._quant_tracker.lower_quartile_val
@property
def upper_quartile_val(self) -> float:
if self._quant_tracker is None:
return float("inf")
return self._quant_tracker.upper_quartile_val
def as_str(self) -> str:
return (
f"Mean:{self.mean():.3f}, "
f"SD:{self.standard_deviation():.3f}, "
f"Min:{self.min_val:.3f}, Max:{self.max_val:.3f}"
)
class RandomVariableStatsTrackerMA(RandomVariableStatsTracker):
"""
Tracks the simple moving or exponential moving mean and
standard deviation of a random variable
Note:
        We store window_size samples in memory, so please
        keep window_size to a reasonable number
"""
def __init__(self, window_size: int, mode=AverageType.SMA, decay_factor=0.5):
super().__init__()
self._samples = deque(maxlen=window_size)
self.window_size = window_size
self.decay_factor = decay_factor
self.mode = mode
def update(self, val: float) -> None:
super().update(val)
self._samples.append(val)
def mean(self) -> float:
if not self._num_samples:
raise ValueError("There are no samples in tracker.")
return (
np.mean(self._samples)
if self.mode == AverageType.SMA
else pd.Series(self._samples).ewm(alpha=self.decay_factor).mean().iloc[-1]
)
def standard_deviation(self) -> float:
if not self._num_samples:
raise ValueError("There are no samples in tracker.")
return (
np.std(self._samples)
if self.mode == AverageType.SMA
else pd.Series(self._samples).ewm(alpha=self.decay_factor).std().iloc[-1]
)
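# ---------------------------------------------------------------------------
# Hedged usage sketch (editor addition): the full-history tracker next to the
# moving-average tracker on the same stream of values.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    full_history = RandomVariableStatsTracker(tracks_quantiles=True)
    windowed = RandomVariableStatsTrackerMA(window_size=5, mode=AverageType.SMA)
    for value in range(1, 21):
        full_history.update(float(value))
        windowed.update(float(value))
    print(full_history.mean(), full_history.median_val)  # 10.5 10.5 over all 20 samples
    print(windowed.mean())  # 18.0, the mean of the last 5 samples only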
| canife-main | FLSim/flsim/utils/fl/stats.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest
from flsim.common.pytest_helper import assertEqual
from flsim.utils.async_trainer.async_example_weights import (
AsyncExampleWeightConfig,
ExampleWeight,
)
from flsim.utils.async_trainer.async_staleness_weights import (
AsyncStalenessWeightConfig,
ConstantStalenessWeightConfig,
PolynomialStalenessWeightConfig,
StalenessWeight,
ThresholdStalenessWeightConfig,
)
from flsim.utils.async_trainer.async_weights import AsyncWeightConfig
from flsim.utils.tests.helpers.test_async_weights_utils import (
AsyncExampleWeightsTestUtils,
AsyncStalenessWeightsTestUtils,
)
from hydra.utils import instantiate
class TestAsyncExampleWeights:
# two parametrize together produce a cartesian product
@pytest.mark.parametrize(
"example_weight_config, example_weight_class",
AsyncExampleWeightsTestUtils.EXAMPLE_WEIGHT_TEST_CONFIGS,
)
@pytest.mark.parametrize(
"staleness_weight_config, staleness_weight_class",
AsyncStalenessWeightsTestUtils.STALENESS_WEIGHT_TEST_CONFIGS,
)
def test_string_conversion(
self,
example_weight_config: AsyncExampleWeightConfig,
example_weight_class: ExampleWeight,
staleness_weight_config: AsyncStalenessWeightConfig,
staleness_weight_class: StalenessWeight,
) -> None:
"""Check that strings are correctly converted to AsyncWeight"""
obj = instantiate(
AsyncWeightConfig(
staleness_weight=staleness_weight_config,
example_weight=example_weight_config,
)
)
assertEqual(obj.example_weight.__class__, example_weight_class)
assertEqual(obj.staleness_weight.__class__, staleness_weight_class)
@pytest.mark.parametrize(
"example_weight_config, example_weight_class",
AsyncExampleWeightsTestUtils.EXAMPLE_WEIGHT_TEST_CONFIGS,
)
def test_weight_compute(
self,
example_weight_config: AsyncExampleWeightConfig,
example_weight_class: ExampleWeight,
avg_num_examples: int = 1,
avg_staleness: int = 1,
) -> None:
"""Test that all weight computation works as expected"""
max_num_examples = 10000
max_staleness = 10000
cutoff = 5000
value_after_cutoff = 0.001
exponent = 0.5
# dict below tells us how to initialize weight object for different
# staleness weight types
staleness_weight_configs = [
ConstantStalenessWeightConfig(),
ThresholdStalenessWeightConfig(
cutoff=cutoff, value_after_cutoff=value_after_cutoff
),
PolynomialStalenessWeightConfig(exponent=exponent),
]
for staleness_weight_config in staleness_weight_configs:
staleness_weight_obj = instantiate(staleness_weight_config)
# for 10 random integers
for _ in range(10):
num_examples = np.random.randint(1, max_num_examples)
staleness = np.random.randint(1, max_staleness)
staleness_weight = staleness_weight_obj.weight(staleness)
example_weight_config.avg_num_examples = avg_num_examples
example_weight_obj = instantiate(example_weight_config)
example_weight = example_weight_obj.weight(num_examples)
expected_combined_weight = example_weight * staleness_weight
combined_weight_object = instantiate(
AsyncWeightConfig(
example_weight=example_weight_config,
staleness_weight=staleness_weight_config,
)
)
combined_weight = combined_weight_object.weight(
num_examples=num_examples, staleness=staleness
)
assertEqual(expected_combined_weight, combined_weight)
| canife-main | FLSim/flsim/utils/tests/test_async_weights.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pandas as pd
import pytest
from flsim.common.pytest_helper import assertAlmostEqual, assertEqual
from flsim.utils.fl.stats import (
AverageType,
ModelSequenceNumberTracker,
RandomVariableStatsTracker,
RandomVariableStatsTrackerMA,
)
class TestStatsTracker:
def test_stats_tracker(self) -> None:
"""Test that we can accurately keep track of stats"""
np.random.seed(100)
# check mean and standard_deviation using a normal random
stats_tracker = RandomVariableStatsTracker()
for _ in range(1000):
# test with normal random
rv = np.random.normal(loc=0, scale=10)
stats_tracker.update(rv)
assertAlmostEqual(stats_tracker.mean(), 0.0, delta=0.5)
assertAlmostEqual(stats_tracker.standard_deviation(), 10.0, delta=1.0)
stats_tracker2 = RandomVariableStatsTracker()
for i in range(1000):
stats_tracker2.update(i - 10)
assertEqual(stats_tracker2.min_val, -10)
assertEqual(stats_tracker2.max_val, 989)
def test_sequence_tracker(self) -> None:
seqnum_tracker = ModelSequenceNumberTracker()
num_global_step = 5
for _ in range(num_global_step):
seqnum_tracker.increment()
assertEqual(num_global_step, seqnum_tracker.current_seqnum)
num_clients = 100
client_seqnums = np.random.randint(10000, size=num_clients)
staleness_weights = []
for client_seqnum in client_seqnums:
staleness = seqnum_tracker.get_staleness_and_update_stats(
client_seqnum=client_seqnum
)
assertEqual(num_global_step - client_seqnum, staleness)
staleness_weights.append(staleness)
expected_mean = np.mean(staleness_weights)
assertAlmostEqual(expected_mean, seqnum_tracker.mean(), delta=1e-6)
expected_sd = np.std(staleness_weights)
assertAlmostEqual(expected_sd, seqnum_tracker.standard_deviation(), delta=1e-6)
@pytest.mark.parametrize(
"max_val",
[10, 100, 1000],
)
@pytest.mark.parametrize(
"window_size",
[5, 50],
)
@pytest.mark.parametrize(
"average_type",
[AverageType.SMA, AverageType.EMA],
)
def test_moving_average(self, max_val, window_size, average_type) -> None:
decay_factor = 0.5
stats_tracker = RandomVariableStatsTrackerMA(
window_size=window_size, mode=average_type
)
values = np.arange(1, max_val, 1)
for i in values:
stats_tracker.update(i)
values = np.array(values[-window_size:])
if average_type == AverageType.SMA:
expected_mean = values.mean()
expected_std = values.std()
else:
v = pd.Series(values)
expected_mean = v.ewm(alpha=decay_factor).mean().iloc[-1]
expected_std = v.ewm(alpha=decay_factor).std().iloc[-1]
assertEqual(stats_tracker.mean(), expected_mean)
assertEqual(stats_tracker.standard_deviation(), expected_std)
def test_quantiles_tracker(self) -> None:
stats_tracker = RandomVariableStatsTracker(tracks_quantiles=True)
values = []
for i in range(100):
stats_tracker.update(i)
values.append(i)
assertEqual(stats_tracker.median_val, np.quantile(values, 0.5))
assertEqual(stats_tracker.lower_quartile_val, np.quantile(values, 0.25))
assertEqual(stats_tracker.upper_quartile_val, np.quantile(values, 0.75))
| canife-main | FLSim/flsim/utils/tests/test_stats.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Type
import numpy as np
import pytest
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertGreaterEqual,
assertTrue,
)
from flsim.utils.async_trainer.training_event_generator import (
AsyncTrainingEventGenerator,
AsyncTrainingEventGeneratorConfig,
AsyncTrainingEventGeneratorFromList,
AsyncTrainingEventGeneratorFromListConfig,
AsyncTrainingStartTimeDistrConfig,
ConstantAsyncTrainingStartTimeDistr,
ConstantAsyncTrainingStartTimeDistrConfig,
EventTimingInfo,
IEventGenerator,
PoissonAsyncTrainingStartTimeDistr,
PoissonAsyncTrainingStartTimeDistrConfig,
)
from flsim.utils.timing.training_duration_distribution import (
PerExampleGaussianDurationDistribution,
PerExampleGaussianDurationDistributionConfig,
PerUserGaussianDurationDistribution,
PerUserGaussianDurationDistributionConfig,
)
from omegaconf import OmegaConf
class TestEventDistributionsUtil:
def test_simulated_training_training_event_generator(self) -> None:
"""Check that EventDistributionFromList works correctly by inputing
a sample distribution, and confirming that the output is correct
"""
timing_info1 = EventTimingInfo(prev_event_start_to_current_start=1, duration=2)
timing_info2 = EventTimingInfo(prev_event_start_to_current_start=2, duration=1)
timing_info3 = EventTimingInfo(prev_event_start_to_current_start=2, duration=5)
random_list = [timing_info1, timing_info2, timing_info3]
distr = AsyncTrainingEventGeneratorFromList(
**OmegaConf.structured(
AsyncTrainingEventGeneratorFromListConfig(training_events=random_list)
)
)
assertTrue(
distr.time_to_next_event_start()
== timing_info1.prev_event_start_to_current_start
)
assertTrue(
distr.training_duration(num_training_examples=1) == timing_info1.duration
)
assertTrue(
distr.time_to_next_event_start()
== timing_info2.prev_event_start_to_current_start
)
assertTrue(
distr.training_duration(num_training_examples=1) == timing_info2.duration
)
assertTrue(
distr.time_to_next_event_start()
== timing_info3.prev_event_start_to_current_start
)
assertTrue(
distr.training_duration(num_training_examples=1) == timing_info3.duration
)
def _duration_normality_check(
self,
event_generator: IEventGenerator,
sample_count: int,
expected_mean: float,
expected_sd: float,
epsilon: float,
) -> None:
durations = []
for _ in range(sample_count):
durations.append(event_generator.training_duration(num_training_examples=1))
# note: this only verifies mean and standard deviation, not full normality
assertAlmostEqual(np.mean(durations), expected_mean, delta=epsilon)
assertAlmostEqual(np.std(durations), expected_sd, delta=epsilon)
def test_poisson_training_event_generator(self) -> None:
"""Check that TrainingEventDistribution makes sense by checking that
generated event durations follow a normal distribution.
"""
np.random.seed(1)
# follows Poisson
event_rate_per_sec, duration_mean, duration_sd = 10, 1, 5
# set training_duration_min to a very negative number, to not bound the distribution
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=duration_mean, training_duration_sd=duration_sd
)
training_start_time_distr = PoissonAsyncTrainingStartTimeDistrConfig(
training_rate=event_rate_per_sec
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
self._duration_normality_check(
distr,
1000, # sample_count
duration_mean,
duration_sd,
epsilon=1,
)
def test_constant_training_event_distribution(self) -> None:
"""Check that ConstantAsyncTrainingStartTimeDistr generates the right
next_event_time
"""
np.random.seed(1)
min_mean = 0.0001
max_mean = 10
max_sd = 1
event_rate_per_sec = np.random.uniform(min_mean, max_mean)
duration_mean = np.random.uniform(0, max_mean)
duration_sd = np.random.uniform(0, max_sd)
training_start_time_distr = ConstantAsyncTrainingStartTimeDistrConfig(
training_rate=event_rate_per_sec
)
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=duration_mean, training_duration_sd=duration_sd
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
assertEqual(distr.time_to_next_event_start(), 1 / event_rate_per_sec)
self._duration_normality_check(
distr,
1000, # sample_count
duration_mean,
duration_sd,
epsilon=1,
)
def test_constant_training_event_distribution_zero_sd(self) -> None:
"""Check that ConstantAsyncTrainingStartTimeDistr generates the right
constant training_duration and next_event_time when SD is zero
"""
np.random.seed(1)
min_mean = 0.0001
max_mean = 10
for _num_rand_vals in range(10):
event_rate_per_sec = np.random.uniform(min_mean, max_mean)
duration_mean = np.random.uniform(0, max_mean)
training_start_time_distr = ConstantAsyncTrainingStartTimeDistrConfig(
training_rate=event_rate_per_sec
)
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=duration_mean, training_duration_sd=0
)
distr = AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)
# generate duration and time_to_next_event_start two times
for _ in range(2):
assertAlmostEqual(
distr.training_duration(num_training_examples=1),
duration_mean,
delta=1e-4,
)
assertEqual(distr.time_to_next_event_start(), 1 / event_rate_per_sec)
def test_training_duration_min_bound(self) -> None:
"""Check that training_duration_min bound is followed"""
np.random.seed(1)
max_mean = 10
max_sd = 1
# generate 10 random event generators
for _num_rand_vals in range(10):
duration_mean = np.random.uniform(0, max_mean)
duration_sd = np.random.uniform(0, max_sd)
# choose a duration_min that is likely to be hit often
duration_min = duration_mean
per_user_duration_distr = PerUserGaussianDurationDistribution(
**OmegaConf.structured(
PerUserGaussianDurationDistributionConfig(
training_duration_mean=duration_mean,
training_duration_sd=duration_sd,
training_duration_min=duration_min,
)
)
)
per_example_duration_distr = PerExampleGaussianDurationDistribution(
**OmegaConf.structured(
PerExampleGaussianDurationDistributionConfig(
training_duration_mean=duration_mean,
training_duration_sd=duration_sd,
training_duration_min=duration_min,
)
)
)
# generate 100 random num_examples
for _ in range(100):
num_examples = np.random.randint(low=1, high=1e5)
per_user_gaussian_duration = per_user_duration_distr.training_duration(
num_training_examples=num_examples
)
assertGreaterEqual(per_user_gaussian_duration, duration_min)
# for per-example training duration, the duration_min bound applies to each example,
# while training_duration() returns the total time for the whole user,
# so the effective per-user bound is duration_min * num_examples
per_example_gaussian_duration = (
per_example_duration_distr.training_duration(
num_training_examples=num_examples
)
)
assertGreaterEqual(
per_example_gaussian_duration, duration_min * num_examples
)
@pytest.mark.parametrize(
"start_time_distr_config_class, start_time_distr_class",
[
(
PoissonAsyncTrainingStartTimeDistrConfig,
PoissonAsyncTrainingStartTimeDistr,
),
(
ConstantAsyncTrainingStartTimeDistrConfig,
ConstantAsyncTrainingStartTimeDistr,
),
],
)
def test_string_conversion(
self,
start_time_distr_config_class: AsyncTrainingStartTimeDistrConfig,
start_time_distr_class: Type,
) -> None:
"""Check that strings are correctly converted to TrainingEventGenerator"""
training_rate = 1
duration_mean_sec = 1
training_duration_sd = 1
training_start_time_distr = OmegaConf.structured(start_time_distr_config_class)
training_start_time_distr.training_rate = training_rate
duration_distr = PerExampleGaussianDurationDistributionConfig(
training_duration_mean=duration_mean_sec,
training_duration_sd=training_duration_sd,
)
assertEqual(
AsyncTrainingEventGenerator(
**OmegaConf.structured(
AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=training_start_time_distr,
duration_distribution_generator=duration_distr,
)
)
)._training_start_time_distr.__class__,
start_time_distr_class,
)
| canife-main | FLSim/flsim/utils/tests/test_training_event_generator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flsim.common.pytest_helper import (
assertFalse,
assertGreater,
assertLess,
assertTrue,
)
from flsim.utils.fl.stats import AverageType
from flsim.utils.fl.target_metric import TargetMetricDirection, TargetMetricTracker
class TestTargetMetricTest:
def test_target_metric_optimize_for_max(self) -> None:
"""
Test if target tracker returns true when the sliding window returns
true when optimizing for max value (e.g accuracy)
For example, target = 90, window_size = 3
eval accuracies = [80, 81, 89, 90, 95]
[80, 81, 89] -> false
[81, 89, 90] -> false
the window [89, 90, 95] -> true
"""
metrics = [80, 81, 89, 90, 95]
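# Worked example for the final window above (simple moving average case):
# mean([89, 90, 95]) == 91.33 > 90, so only the last update should return True.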
target_value = 90
for average_type in [AverageType.SMA, AverageType.EMA]:
target_tracker = TargetMetricTracker(
target_value=target_value,
window_size=3,
average_type=average_type,
direction=TargetMetricDirection.MAX,
)
for metric in metrics[:-1]:
assertFalse(target_tracker.update_and_check_target(metric))
assertLess(target_tracker.mean, target_value)
assertTrue(target_tracker.update_and_check_target(metrics[-1]))
assertGreater(target_tracker.mean, target_value)
def test_target_metric_optimize_for_min(self) -> None:
"""
Test if target tracker returns true when the sliding window returns
true when optimizing for min value (e.g loss)
For example, target = 0.1, window_size = 3
eval loss = [0.5, 0.4, 0.15, 0.04, 0.1]
[0.5, 0.4, 0.15] -> false
[0.4, 0.15, 0.04] -> false
the window [0.15, 0.04, 0.1] -> true
"""
metrics = [0.5, 0.4, 0.15, 0.04, 0.1]
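# Worked example for the final window above (simple moving average case):
# mean([0.15, 0.04, 0.1]) ~ 0.097 < 0.1, so only the last update should return True.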
target_value = 0.1
for average_type in [AverageType.SMA, AverageType.EMA]:
target_tracker = TargetMetricTracker(
target_value=target_value,
window_size=3,
average_type=average_type,
direction=TargetMetricDirection.MIN,
)
for metric in metrics[:-1]:
assertFalse(target_tracker.update_and_check_target(metric))
assertGreater(target_tracker.mean, target_value)
assertTrue(target_tracker.update_and_check_target(metrics[-1]))
assertLess(target_tracker.mean, target_value)
| canife-main | FLSim/flsim/utils/tests/test_target_metric.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flsim.common.pytest_helper import assertEqual
from flsim.utils.config_utils import _flatten_dict, fl_json_to_dotlist
class TestConfigUtils:
def test_flatten_dict(self) -> None:
assertEqual(_flatten_dict({}), {})
assertEqual(_flatten_dict({"a": 1}), {"a": 1})
assertEqual(_flatten_dict({"a": None}), {"a": None})
# checks nesting, exp notation
assertEqual(
_flatten_dict(
{
"a": {
"b": {"c": {"val": 3}, "val": 2, "_base_": "b"},
"_base_": "a",
"val": 1,
},
"d": "1e-4",
}
),
{
"a.b.c.val": 3,
"a.b.val": 2,
"a.b._base_": "b",
"a._base_": "a",
"a.val": 1,
"d": "1e-4",
},
)
# checks string floats
assertEqual(_flatten_dict({"e": "5.5"}), {"e": '"5.5"'})
# make sure json in list remains untouched
assertEqual(
_flatten_dict(
{
"a": {"b": 1},
"l": [1, 2, 3],
"ld": [{"a": 1, "b": {"bb": 2}, "c": [11, 22]}, {"z": "xyz"}],
}
),
{
"a.b": 1,
"l": [1, 2, 3],
"ld": [{"a": 1, "b": {"bb": 2}, "c": [11, 22]}, {"z": "xyz"}],
},
)
# make sure json in key with suffix _dict remains untouched
assertEqual(
_flatten_dict(
{
"a": {"b": 1},
"c": {"d_dict": {"A": 1, "B": "2.2", "C": {"key": "three"}}},
}
),
{
"a.b": 1,
"c.d_dict": {"A": 1, "B": "2.2", "C": {"key": "three"}},
},
)
# check with _base_
assertEqual(
_flatten_dict(
{
"_base_": {"_base_": "base1", "_base": "base2", "base_": "base3"},
"_base": {"_base_": "base1", "_base": "base2", "base_": "base3"},
"base_": {"_base_": "base1", "_base": "base2", "base_": "base3"},
}
),
{
"_base_._base_": "base1",
"_base_._base": "base2",
"_base_.base_": "base3",
"_base._base_": "base1",
"_base._base": "base2",
"_base.base_": "base3",
"base_._base_": "base1",
"base_._base": "base2",
"base_.base_": "base3",
},
)
def test_json_to_dotlist(self) -> None:
assertEqual(fl_json_to_dotlist({}, append_or_override=False), [])
assertEqual(fl_json_to_dotlist({"a": 1}, append_or_override=False), ["a=1"])
assertEqual(
fl_json_to_dotlist({"a": None}, append_or_override=False), ["a=null"]
)
# checks nesting, exp notation
assertEqual(
fl_json_to_dotlist(
{
"a": {
"b": {"c": {"val": 3}, "val": 2, "_base_": "b"},
"_base_": "a",
"val": 1,
},
"d": "1e-4",
},
append_or_override=False,
),
["+a@a=a", "+a.b@a.b=b", "d=1e-4", "a.val=1", "a.b.val=2", "a.b.c.val=3"],
)
# checks string floats
assertEqual(
fl_json_to_dotlist({"e": "5.5"}, append_or_override=False), ['e="5.5"']
)
# make sure json in list remains untouched
assertEqual(
fl_json_to_dotlist(
{
"a": {"b": 1},
"l": [1, 2, 3],
"ld": [{"a": 1, "b": {"bb": 2}, "c": [11, 22]}, {"z": "xyz"}],
},
append_or_override=False,
),
[
"l=[1, 2, 3]",
"ld=[{'a': 1, 'b': {'bb': 2}, 'c': [11, 22]}, {'z': 'xyz'}]",
"a.b=1",
],
)
# make sure json in key with suffix _dict is handled correctly
assertEqual(
fl_json_to_dotlist(
{
"a": {"b": 1},
"c": {"d_dict": {"A": 1, "B": "2.2", "C": {"3": "three"}}},
},
append_or_override=False,
),
[
"a.b=1",
'c.d_dict="{\\"A\\": 1, \\"B\\": \\"2.2\\", \\"C\\": {\\"3\\": \\"three\\"}}"',
],
)
# check with _base_
assertEqual(
fl_json_to_dotlist(
{
"_base_": {
"_base_": "base1",
"_base": "base2",
"base_": "base3",
"base": "just_base",
},
"_base": {
"_base_": "base1",
"_base": "base2",
"base_": "base3",
"base": "just_base",
},
"base_": {
"_base_": "base1",
"_base": "base2",
"base_": "base3",
"base": "just_base",
},
"base": "just_base",
},
append_or_override=False,
),
[
"+_base@_base=base1",
"+_base_@_base_=base1",
"+base_@base_=base1",
"base=just_base",
"_base._base=base2",
"_base.base=just_base",
"_base.base_=base3",
"_base_._base=base2",
"_base_.base=just_base",
"_base_.base_=base3",
"base_._base=base2",
"base_.base=just_base",
"base_.base_=base3",
],
)
def test_json_to_dotlist_append_or_override(self) -> None:
assertEqual(fl_json_to_dotlist({}), [])
assertEqual(fl_json_to_dotlist({"a": 1}), ["++a=1"])
assertEqual(fl_json_to_dotlist({"a": None}), ["++a=null"])
# checks nesting, exp notation
assertEqual(
fl_json_to_dotlist(
{
"a": {
"b": {"c": {"val": 3}, "val": 2, "_base_": "b"},
"_base_": "a",
"val": 1,
},
"d": "1e-4",
}
),
[
"+a@a=a",
"+a.b@a.b=b",
"++d=1e-4",
"++a.val=1",
"++a.b.val=2",
"++a.b.c.val=3",
],
)
# checks string floats
assertEqual(fl_json_to_dotlist({"e": "5.5"}), ['++e="5.5"'])
# make sure json in list remains untouched
assertEqual(
fl_json_to_dotlist(
{
"a": {"b": 1},
"l": [1, 2, 3],
"ld": [{"a": 1, "b": {"bb": 2}, "c": [11, 22]}, {"z": "xyz"}],
}
),
[
"++l=[1, 2, 3]",
"++ld=[{'a': 1, 'b': {'bb': 2}, 'c': [11, 22]}, {'z': 'xyz'}]",
"++a.b=1",
],
)
# make sure json in key with suffix _dict is handled correctly
assertEqual(
fl_json_to_dotlist(
{
"a": {"b": 1},
"c": {"d_dict": {"A": 1, "B": "2.2", "C": {"3": "three"}}},
}
),
[
"++a.b=1",
'++c.d_dict="{\\"A\\": 1, \\"B\\": \\"2.2\\", \\"C\\": {\\"3\\": \\"three\\"}}"',
],
)
# check with _base_
assertEqual(
fl_json_to_dotlist(
{
"_base_": {
"_base_": "base1",
"_base": "base2",
"base_": "base3",
"base": "just_base",
},
"_base": {
"_base_": "base1",
"_base": "base2",
"base_": "base3",
"base": "just_base",
},
"base_": {
"_base_": "base1",
"_base": "base2",
"base_": "base3",
"base": "just_base",
},
"base": "just_base",
}
),
[
"+_base@_base=base1",
"+_base_@_base_=base1",
"+base_@base_=base1",
"++base=just_base",
"++_base._base=base2",
"++_base.base=just_base",
"++_base.base_=base3",
"++_base_._base=base2",
"++_base_.base=just_base",
"++_base_.base_=base3",
"++base_._base=base2",
"++base_.base=just_base",
"++base_.base_=base3",
],
)
| canife-main | FLSim/flsim/utils/tests/test_config_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from flsim.common.pytest_helper import assertEqual, assertTrue
from flsim.data.data_provider import FLDataProviderFromList
from flsim.utils.async_trainer.async_user_selector import (
RandomAsyncUserSelector,
RoundRobinAsyncUserSelector,
)
from flsim.utils.sample_model import MockFLModel
@pytest.fixture
def num_users() -> int:
return 20
@pytest.fixture
def num_trials() -> int:
return 100
class TestAsyncUserSelectorUtils:
def test_random_user_selector(self, num_users, num_trials) -> None:
# users are 0....n-1
# number of examples per user: [1, 2, 3...., n-1, n]
num_examples_per_user = list(range(1, num_users + 1))
data = [
[1] * num_example
for num_example, _ in zip(num_examples_per_user, range(num_users))
]
data_provider = FLDataProviderFromList(
train_user_list=data,
eval_user_list=data,
test_user_list=data,
model=MockFLModel(),
)
random_user_selector = RandomAsyncUserSelector(data_provider=data_provider)
for _ in range(0, num_trials):
random_user_info = random_user_selector.get_random_user()
random_user, user_index = (
random_user_info.user_data,
random_user_info.user_index,
)
assertTrue(user_index >= 0 and user_index < num_users)
assertEqual(random_user.num_train_examples(), user_index + 1)
def test_round_robin_user_selector(self, num_users, num_trials) -> None:
# users are 0....n-1
# number of examples per user: [10, 20, 30, ..., 10*(n-1), 10*n]
multiplier = 10
num_examples_per_user = [multiplier * i for i in list(range(1, num_users + 1))]
data = [
[1] * num_example
for num_example, _ in zip(num_examples_per_user, range(num_users))
]
data_provider = FLDataProviderFromList(
train_user_list=data,
eval_user_list=data,
test_user_list=data,
model=MockFLModel(),
)
round_robin_user_selector = RoundRobinAsyncUserSelector(
data_provider=data_provider
)
for num_trial in range(0, num_trials):
random_user_info = round_robin_user_selector.get_random_user()
random_user, user_index = (
random_user_info.user_data,
random_user_info.user_index,
)
assertEqual(user_index, num_trial % num_users)
assertEqual(random_user.num_train_examples(), (user_index + 1) * multiplier)
| canife-main | FLSim/flsim/utils/tests/test_async_user_selector.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.common.pytest_helper import assertEqual, assertLessEqual, assertTrue
from flsim.utils.data.data_utils import batchify, merge_dicts
from flsim.utils.data.fake_data_utils import FakeDataProvider, FakeUserData
class TestDataUtils:
def test_batchify(self) -> None:
assertEqual(list(batchify([1, 2, 3, 4, 5], 2)), [[1, 2], [3, 4], [5]])
assertEqual(list(batchify([1, 2, 3, 4, 5], 3)), [[1, 2, 3], [4, 5]])
assertEqual(list(batchify([1, 2, 3, 4], 2)), [[1, 2], [3, 4]])
assertEqual(list(batchify([1, 2, 3, 4], 1)), [[1], [2], [3], [4]])
def test_merge_dicts(self) -> None:
expected = {"a": torch.Tensor([1.0, 2.0])}
for key, actual in merge_dicts(
[{"a": torch.Tensor([1])}, {"a": torch.Tensor([2])}]
).items():
assertTrue(key in expected)
assertTrue(torch.all(actual.eq(expected[key])))
expected = {"a": torch.Tensor([1.0]), "b": torch.Tensor([2.0])}
for key, actual in merge_dicts(
[{"a": torch.Tensor([1])}, {"b": torch.Tensor([2])}]
).items():
assertTrue(key in expected)
assertTrue(torch.all(actual.eq(expected[key])))
def user_data_test_util(
self,
user_dataset,
expected_num_examples,
expected_batch_size,
expected_num_batches,
) -> None:
assertEqual(user_dataset.num_train_examples(), expected_num_examples)
for i, batch in enumerate(user_dataset.train_data()):
assertLessEqual(len(batch["data"]), expected_batch_size)
last_batch = i
assertEqual(last_batch + 1, expected_num_batches)
def test_fake_user_data(self) -> None:
def gen_batch(n, value=None):
return {"data": [torch.ones(n, 10)], "label": [1] * n}
num_examples = 100
batch_size = 10
num_batches = num_examples // batch_size
user_dataset = FakeUserData(gen_batch, num_batches, batch_size)
self.user_data_test_util(user_dataset, num_examples, batch_size, num_batches)
def test_fake_data_provider(self) -> None:
def gen_batch(n, value=None):
return {"data": [torch.ones(n, 10)], "label": [1] * n}
num_batches = 2
batch_size = 10
num_users = 100
fl_data_provider = FakeDataProvider(
gen_batch, num_batches, batch_size, num_users
)
assertEqual(fl_data_provider.num_train_users(), num_users)
assertEqual(fl_data_provider.train_user_ids(), list(range(num_users)))
ad_hoc_users = [0, 3, 10, 50, 99]
num_examples = num_batches * batch_size
for user in ad_hoc_users:
user_dataset = fl_data_provider.get_train_user(user)
self.user_data_test_util(
user_dataset, num_examples, batch_size, num_batches
)
self.user_data_test_util(
# pyre-fixme[16]: `Iterable` has no attribute `__getitem__`.
fl_data_provider.test_users()[0],
num_examples,
batch_size,
num_batches,
)
| canife-main | FLSim/flsim/utils/tests/test_data_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import hydra
import numpy as np
import pytest
from flsim.common.pytest_helper import assertEqual, assertRaises
from flsim.utils.async_trainer.async_staleness_weights import (
AsyncStalenessWeightConfig,
ConstantStalenessWeightConfig,
PolynomialStalenessWeightConfig,
StalenessWeight,
ThresholdStalenessWeightConfig,
)
from flsim.utils.tests.helpers.test_async_weights_utils import (
AsyncStalenessWeightsTestUtils,
)
from hydra.utils import instantiate
class TestAsyncStalenessWeights:
@pytest.mark.parametrize(
"staleness_weight_config, staleness_weight_class",
AsyncStalenessWeightsTestUtils.STALENESS_WEIGHT_TEST_CONFIGS,
)
def test_string_conversion(
self,
staleness_weight_config: AsyncStalenessWeightConfig,
staleness_weight_class: StalenessWeight,
) -> None:
obj = instantiate(staleness_weight_config)
assertEqual(obj.__class__, staleness_weight_class)
@pytest.mark.parametrize(
"avg_staleness",
AsyncStalenessWeightsTestUtils.AVG_TEST_STALENESS,
)
def test_constant_weight_compute(self, avg_staleness) -> None:
"""Test that all constant weight computation works as expected"""
max_staleness = 10000
obj = instantiate(ConstantStalenessWeightConfig(avg_staleness=avg_staleness))
for _i in range(10):
staleness = np.random.randint(1, max_staleness)
numerator = AsyncStalenessWeightsTestUtils.get_constant_wt()
denom = AsyncStalenessWeightsTestUtils.get_constant_wt()
assertEqual(obj.weight(staleness), numerator / denom)
@pytest.mark.parametrize(
"avg_staleness",
AsyncStalenessWeightsTestUtils.AVG_TEST_STALENESS,
)
def test_threshold_weight_compute(self, avg_staleness) -> None:
"""Test that threshold weight computation works as expected"""
max_staleness = 10000
for _i in range(10):
cutoff = np.random.randint(1, max_staleness)
value_after_cutoff = np.random.uniform(low=0.0, high=1.0)
obj = instantiate(
ThresholdStalenessWeightConfig(
avg_staleness=avg_staleness,
cutoff=cutoff,
value_after_cutoff=value_after_cutoff,
)
)
staleness = np.random.randint(1, max_staleness)
numerator = AsyncStalenessWeightsTestUtils.get_threshold_wt(
staleness=staleness,
cutoff=cutoff,
value_after_cutoff=value_after_cutoff,
)
denom = AsyncStalenessWeightsTestUtils.get_threshold_wt(
staleness=avg_staleness,
cutoff=cutoff,
value_after_cutoff=value_after_cutoff,
)
assertEqual(obj.weight(staleness), numerator / denom)
@pytest.mark.parametrize(
"avg_staleness",
AsyncStalenessWeightsTestUtils.AVG_TEST_STALENESS,
)
def test_polynomial_weight_compute(self, avg_staleness) -> None:
"""Test that threshold weight computation works as expected"""
max_staleness = 10000
for _i in range(10):
exponent = np.random.uniform(low=0.0, high=1.0)
obj = instantiate(
PolynomialStalenessWeightConfig(
avg_staleness=avg_staleness, exponent=exponent
)
)
staleness = np.random.randint(1, max_staleness)
numerator = AsyncStalenessWeightsTestUtils.get_polynomial_wt(
staleness=staleness, exponent=exponent
)
denom = AsyncStalenessWeightsTestUtils.get_polynomial_wt(
staleness=avg_staleness, exponent=exponent
)
assertEqual(obj.weight(staleness), numerator / denom)
def test_polynomial_weight_zero_exponent(self) -> None:
"""For polynomial weight, if exponent is zero, wt=1 regardless of
staleness or average staleness
"""
max_staleness = 10000
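# Rough sanity check, assuming the polynomial weight has the form
# (1 + staleness) ** -exponent normalized by the same expression at avg_staleness
# (the convention used by get_polynomial_wt above): with exponent == 0 the
# numerator and denominator are both 1, so the weight is 1 for any staleness.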
# test for 10 random values of staleness and avg staleness
for _i in range(10):
staleness = np.random.randint(1, max_staleness)
avg_staleness = np.random.randint(1, max_staleness)
obj = instantiate(
PolynomialStalenessWeightConfig(
avg_staleness=avg_staleness, exponent=0.0
)
)
assertEqual(obj.weight(staleness), 1.0)
def test_polynomial_weight_bad_exponent(self) -> None:
"""For polynomial weight, exponent must be between 0 and 1, else error"""
cfg = PolynomialStalenessWeightConfig(avg_staleness=0, exponent=-0.1)
# negative exponent causes error
with assertRaises(
# pyre-fixme[6]: Expected `Type[typing.Any]` for 1st param but got
# `Tuple[typing.Type[AssertionError],
# typing.Type[hydra.errors.HydraException]]`.
(
AssertionError, # with Hydra 1.1
hydra.errors.HydraException, # with Hydra 1.0
),
):
cfg.exponent = -0.1
instantiate(cfg)
# exponent greater than 1.0 causes error
with assertRaises(
# pyre-fixme[6]: Expected `Type[typing.Any]` for 1st param but got
# `Tuple[typing.Type[AssertionError],
# typing.Type[hydra.errors.HydraException]]`.
(
AssertionError, # with Hydra 1.1
hydra.errors.HydraException, # with Hydra 1.0
),
):
cfg.exponent = 1.1
instantiate(cfg)
# exponent = 0.0 is fine
cfg.exponent = 0.0
instantiate(cfg)
# exponent = 1.0 is fine
cfg.exponent = 1.0
instantiate(cfg)
| canife-main | FLSim/flsim/utils/tests/test_async_staleness_weights.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/utils/tests/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flsim.common.pytest_helper import assertEqual, assertGreater, assertLess
from flsim.utils.async_trainer.device_state import DeviceState, TrainingSchedule
class TestDeviceStateUtil:
def test_next_event_time_update(self) -> None:
"""Check that next_event_time in DeviceState is updated correctly depending
on its state
"""
training_schedule = TrainingSchedule(
creation_time=0, start_time=12, end_time=16
)
device_state = DeviceState(training_schedule)
assertEqual(device_state.next_event_time(), training_schedule.start_time)
device_state.training_started()
assertEqual(device_state.next_event_time(), training_schedule.end_time)
device_state.training_ended()
assertEqual(device_state.next_event_time(), training_schedule.end_time)
def test_device_next_event_time_comparison(self) -> None:
"""Check whether comparison operator for DeviceState acts as expected"""
# create two devices, 1 & 2
# device1 has current, training_start and training_end times
# that are slightly before device2's respective times
# 1. verify initial ordering
# 2. advance training for device1 followed by device2,
# verify ordering is as expected in every step
training_schedule1 = TrainingSchedule(
creation_time=0, start_time=12, end_time=16
)
device_state_1 = DeviceState(training_schedule1)
training_schedule2 = TrainingSchedule(
creation_time=0, start_time=12.1, end_time=16.1
)
device_state_2 = DeviceState(training_schedule2)
assertLess(device_state_1, device_state_2)
device_state_1.training_started()
assertGreater(device_state_1, device_state_2)
device_state_2.training_started()
assertLess(device_state_1, device_state_2)
device_state_1.training_ended()
assertLess(device_state_1, device_state_2)
device_state_2.training_ended()
assertLess(device_state_1, device_state_2)
def test_device_next_event_time_comparison_equality(self) -> None:
"""Check whether comparison operator for DeviceState acts as expected
when next_event_time is equal
"""
# create two devices, 1 & 2
# device1.training_start = device2.current_time
# device1.training_end = device2.training_schedule
# 1. verify initial ordering
# 2. advance device training
# verify ordering is as expected in every step: when next_event_time is
# equal, device that is 'further along' in training is 'lesser'
training_schedule1 = TrainingSchedule(
creation_time=0, start_time=12, end_time=16
)
device_state_1 = DeviceState(training_schedule1)
training_schedule2 = TrainingSchedule(
creation_time=0, start_time=16, end_time=20
)
device_state_2 = DeviceState(training_schedule2)
assertLess(device_state_1, device_state_2)
device_state_1.training_started() # relevant time for both is 12
# device that is further along in training is 'Less'
assertLess(device_state_1, device_state_2)
# verify that ordering is independent of which device_state is the
# first parameter
assertGreater(device_state_2, device_state_1)
device_state_2.training_started()
assertLess(device_state_1, device_state_2)
device_state_1.training_ended()
assertLess(device_state_1, device_state_2)
device_state_2.training_ended()
assertLess(device_state_1, device_state_2)
| canife-main | FLSim/flsim/utils/tests/test_device_state.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from flsim.common.pytest_helper import assertEmpty, assertEqual, assertRaises
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.fl.personalized_model import FLModelWithPrivateModules
from flsim.utils.tests.helpers.test_models import (
FC_PRIVATE_MODULE_NAMES,
PersonalizedFCModel,
)
@pytest.fixture(scope="class")
def prepare_fl_model_with_private_modules(request) -> None:
request.cls.model_with_private_modules = PersonalizedFCModel()
@pytest.mark.usefixtures("prepare_fl_model_with_private_modules")
class TestFLModelWithPrivateModules:
def test_clear_dict_module(self) -> None:
# number of private parameters are number of fc layers in
# private_module_names times 2 (W and b in each layer)
assertEqual(
len(list(FLModelWithPrivateModules.get_user_private_parameters())),
len(FC_PRIVATE_MODULE_NAMES) * 2,
)
FLModelWithPrivateModules.clear_user_private_module_dict()
assertEmpty(list(FLModelWithPrivateModules.get_user_private_parameters()))
def test_get_user_private_attr(self) -> None:
# pyre-ignore[16]: for pytest fixture
fc1_layer = self.model_with_private_modules.get_user_private_attr("fc1")
assertEqual(FLModelParamUtils.get_num_trainable_params(fc1_layer), 10 * 5 + 5)
with assertRaises(AttributeError):
self.model_with_private_modules.get_user_private_attr("fc2")
| canife-main | FLSim/flsim/utils/tests/test_personalized_model.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import collections
import math
import torch
import torch.nn as nn
from flsim.common.pytest_helper import (
assertAlmostEqual,
assertEqual,
assertFalse,
assertRaises,
assertTrue,
)
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.fl.personalized_model import FLModelWithPrivateModules
from flsim.utils.tests.helpers.test_models import (
FCModel,
LinearRegression,
PersonalizedLinearRegression,
)
from flsim.utils.tests.helpers.test_utils import FLTestUtils
PRIVATE_SLOPE_MODULE_NAME: str = (
FLModelWithPrivateModules.USER_PRIVATE_MODULE_PREFIX + "_a"
)
class TestFLModelParamUtils:
def test_get_state_dict(self) -> None:
model = LinearRegression()
assertEqual(
set(FLModelParamUtils.get_state_dict(model, False).keys()), {"a", "b"}
)
assertEqual(
set(FLModelParamUtils.get_state_dict(model, True).keys()), {"a", "b"}
)
personalized_model = PersonalizedLinearRegression()
assertEqual(
set(FLModelParamUtils.get_state_dict(personalized_model, False).keys()),
{PRIVATE_SLOPE_MODULE_NAME, "b"},
)
assertEqual(
set(FLModelParamUtils.get_state_dict(personalized_model, True).keys()),
{"b"},
)
def test_load_state_dict(self) -> None:
personalized_model = PersonalizedLinearRegression()
state_dict = collections.OrderedDict()
state_dict[PRIVATE_SLOPE_MODULE_NAME] = torch.tensor([1.0])
state_dict["b"] = torch.tensor([0.5])
FLModelParamUtils.load_state_dict(personalized_model, state_dict, False)
assertEqual(
dict(FLModelParamUtils.get_state_dict(personalized_model, False)),
dict(state_dict),
)
# load_state_dict should work if non-private modules were given with
# only_federated_params set as True
state_dict_without_private_module = collections.OrderedDict()
state_dict_without_private_module["b"] = torch.tensor([0.3])
FLModelParamUtils.load_state_dict(
personalized_model, state_dict_without_private_module, True
)
assertEqual(
dict(FLModelParamUtils.get_state_dict(personalized_model, False)),
{PRIVATE_SLOPE_MODULE_NAME: torch.tensor([1.0]), "b": torch.tensor([0.3])},
)
# throws when unexpected key is provided
state_dict["c"] = torch.tensor([0.0])
with assertRaises(AssertionError):
FLModelParamUtils.load_state_dict(personalized_model, state_dict, True)
# throws when non-private (i.e. federated module) is missing
state_dict_with_missing_non_private_module = collections.OrderedDict()
state_dict_with_missing_non_private_module["a"] = torch.tensor([1.0])
with assertRaises(AssertionError):
FLModelParamUtils.load_state_dict(
personalized_model, state_dict_with_missing_non_private_module, True
)
def test_zero_weights(self) -> None:
personalized_model = PersonalizedLinearRegression()
FLModelParamUtils.load_state_dict(
personalized_model,
collections.OrderedDict(
[
(PRIVATE_SLOPE_MODULE_NAME, torch.tensor([2.0])),
("b", torch.tensor([1.0])),
]
),
False,
)
FLModelParamUtils.zero_weights(personalized_model, True)
assertEqual(
dict(FLModelParamUtils.get_state_dict(personalized_model, False)),
{PRIVATE_SLOPE_MODULE_NAME: torch.tensor([2.0]), "b": torch.tensor([0.0])},
)
FLModelParamUtils.zero_weights(personalized_model)
assertEqual(
dict(FLModelParamUtils.get_state_dict(personalized_model, False)),
{PRIVATE_SLOPE_MODULE_NAME: torch.tensor([0.0]), "b": torch.tensor([0.0])},
)
def test_get_trainable_params(self) -> None:
fc_model = FCModel()
assertEqual(len(list(FLModelParamUtils.get_trainable_params(fc_model))), 6)
def test_get_num_trainable_params(self) -> None:
fc_model = FCModel()
assertEqual(
FLModelParamUtils.get_num_trainable_params(fc_model),
10 * 5 + 5 * 3 + 3 * 1 + 5 + 3 + 1,
)
def test_get_gradient_l2_norm_raw(self) -> None:
fc_model = FCModel()
# set all gradients to 0, l2 norm should be zero
for p in FLModelParamUtils.get_trainable_params(fc_model):
p.grad = torch.zeros_like(p)
assertEqual(FLModelParamUtils.get_gradient_l2_norm_raw(fc_model), 0.0)
# set all gradients to 1, non-normalized l2 norm should be = sqrt(#params)
num_trainable_params = FLModelParamUtils.get_num_trainable_params(fc_model)
for p in FLModelParamUtils.get_trainable_params(fc_model):
p.grad = torch.ones_like(p)
assertAlmostEqual(
FLModelParamUtils.get_gradient_l2_norm_raw(fc_model),
math.sqrt(num_trainable_params),
delta=1e-4,
)
# all gradients are std-normal-random, normalized grad norm = 1
torch.manual_seed(1)
for p in FLModelParamUtils.get_trainable_params(fc_model):
p.grad = torch.randn_like(p)
assertAlmostEqual(
FLModelParamUtils.get_gradient_l2_norm_normalized(fc_model), 1, delta=1e-1
)
def test_model_linear_comb(self) -> None:
"""Test that computing linear comibination works for a model"""
FLTestUtils.compare_model_linear_comb(FCModel(), FCModel())
def test_gradient_reconstruction(self) -> None:
"""Test that gradient reconstruction works with a model.
Create model, run some operations on it.
"""
model, copy_model, reconstructed_grad = FCModel(), FCModel(), FCModel()
FLTestUtils.compare_gradient_reconstruction(
model, copy_model, reconstructed_grad
)
def test_fed_async_aggregation_with_weights(self) -> None:
"""Test that weights work for FedAsync aggregation"""
torch.manual_seed(1)
num_models = 4
models = [FCModel() for i in range(num_models)]
temp_model = FLModelParamUtils.clone(models[0])
# verify that 0 weights work as expected
# pyre-fixme[6]: Expected `List[nn.modules.module.Module]` for 1st param but
# got `List[FCModel]`.
FLModelParamUtils.average_models(models, temp_model, [0, 0, 0, 1])
assertTrue(
FLModelParamUtils.get_mismatched_param([temp_model, models[3]]) == ""
)
# verify that equal weights work as expected
# pyre-fixme[6]: Expected `List[nn.modules.module.Module]` for 1st param but
# got `List[FCModel]`.
FLModelParamUtils.average_models(models, temp_model, [1, 1, 1, 1])
temp_model_no_wts = FLModelParamUtils.clone(models[0])
# pyre-fixme[6]: Expected `List[nn.modules.module.Module]` for 1st param but
# got `List[FCModel]`.
FLModelParamUtils.average_models(models, temp_model_no_wts)
assertTrue(
FLModelParamUtils.get_mismatched_param([temp_model, temp_model_no_wts])
== ""
)
# verify that unequal weights work as expected
temp_model_1 = FLModelParamUtils.clone(models[0])
# pyre-fixme[6]: Expected `List[nn.modules.module.Module]` for 1st param but
# got `List[FCModel]`.
FLModelParamUtils.average_models(models, temp_model_1, [1, 1, 2, 2])
temp_model_2 = FLModelParamUtils.clone(models[0])
# pyre-fixme[6]: Expected `List[nn.modules.module.Module]` for 1st param but
# got `List[FCModel]`.
FLModelParamUtils.average_models(models, temp_model_2, [2, 2, 1, 1])
temp_model_3 = FLModelParamUtils.clone(models[0])
FLModelParamUtils.average_models([temp_model_1, temp_model_2], temp_model_3)
temp_model_4 = FLModelParamUtils.clone(models[0])
# pyre-fixme[6]: Expected `List[nn.modules.module.Module]` for 1st param but
# got `List[FCModel]`.
FLModelParamUtils.average_models(models, temp_model_4, [1, 1, 1, 1])
mismatched_param = FLModelParamUtils.get_mismatched_param(
[temp_model_3, temp_model_4], 1e-6
)
assertTrue(
mismatched_param == "",
(
f"Mismatched param name: {mismatched_param}\n"
f"temp_model_3:{temp_model_3}\n"
f"temp_model_4:{temp_model_4}\n",
f"total_difference:{self._compute_difference_in_norm(temp_model_3, temp_model_4)}",
),
)
def _compute_difference_in_norm(
self, model1: torch.nn.Module, model2: torch.nn.Module
) -> float:
total_difference = 0.0
for (parameter1, parameter2) in zip(model1.parameters(), model2.parameters()):
total_difference += torch.norm(parameter1.data - parameter2.data)
return total_difference
def test_simple_model_copy(self) -> None:
"""Test that FedAsync aggregation works for a simple Model"""
num_models = 4
orig_models = [FCModel() for i in range(num_models)]
# pyre-fixme[6]: Expected `List[nn.modules.module.Module]` for 1st param but
# got `List[FCModel]`.
FLTestUtils.average_and_verify_models(orig_models)
def test_debug_model_norm(self) -> None:
fc_model = FCModel()
for p in fc_model.parameters():
torch.nn.init.constant_(p, 0.0)
assertEqual(FLModelParamUtils.debug_model_norm(fc_model), 0)
for p in fc_model.parameters():
p.data.fill_(1.0)
assertEqual(
FLModelParamUtils.debug_model_norm(fc_model),
FLModelParamUtils.get_num_trainable_params(fc_model),
)
def test_set_gradient(self) -> None:
model = LinearRegression()
reconstructed_gradient = LinearRegression()
# pyre-fixme[41]: `data` cannot be reassigned. It is a read-only property.
reconstructed_gradient.a.data = torch.FloatTensor([0.5])
# pyre-fixme[41]: `data` cannot be reassigned. It is a read-only property.
reconstructed_gradient.b.data = torch.FloatTensor([1.0])
FLModelParamUtils.set_gradient(
model=model, reference_gradient=reconstructed_gradient
)
assertEqual(model.a.grad, reconstructed_gradient.a)
assertEqual(model.b.grad, reconstructed_gradient.b)
def test_get_mismatched_param(self) -> None:
a_val, b_val = 0.5, 1.0
class MismatchingLinearRegression(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.FloatTensor([a_val]))
self.c = nn.Parameter(torch.FloatTensor([b_val]))
def forward(self, x):
return self.a + self.c * x
model_1, model_2 = LinearRegression(), LinearRegression()
# pyre-fixme[41]: `data` cannot be reassigned. It is a read-only property.
model_1.a.data, model_1.b.data = (
torch.FloatTensor([a_val]),
torch.FloatTensor([b_val]),
)
# pyre-fixme[41]: `data` cannot be reassigned. It is a read-only property.
model_2.a.data, model_2.b.data = (
torch.FloatTensor([a_val]),
torch.FloatTensor([b_val]),
)
# 1) models have same params => return an empty string
assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), "")
# 2) only param 'a' is different => return 'a'
# pyre-fixme[41]: `data` cannot be reassigned. It is a read-only property.
model_2.a.data = torch.FloatTensor([b_val])
assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), "a")
# 3) only param 'b' is different => return 'b'
# pyre-fixme[41]: `data` cannot be reassigned. It is a read-only property.
model_2.a.data, model_2.b.data = (
torch.FloatTensor([a_val]),
torch.FloatTensor([a_val]),
)
assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), "b")
# 4) both param 'a' and 'b' are different
# => return the first mismatched param, which is 'a'
# pyre-fixme[41]: `data` cannot be reassigned. It is a read-only property.
model_2.a.data = torch.FloatTensor([b_val])
assertEqual(FLModelParamUtils.get_mismatched_param([model_1, model_2]), "a")
# 5) param 'b' in model_1 is missing in MismatchingLinearRegression
# => return 'b'
assertEqual(
FLModelParamUtils.get_mismatched_param(
[model_1, MismatchingLinearRegression()]
),
"b",
)
def test_copy_models(self) -> None:
torch.manual_seed(1)
fc_model = FCModel()
torch.manual_seed(2)
copied_fc_model = FCModel()
assertFalse(
FLTestUtils.do_two_models_have_same_weights(fc_model, copied_fc_model)
)
FLModelParamUtils.copy_models(fc_model, [copied_fc_model])
assertTrue(
FLTestUtils.do_two_models_have_same_weights(fc_model, copied_fc_model)
)
| canife-main | FLSim/flsim/utils/tests/test_model_param_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from flsim.common.pytest_helper import assertAlmostEqual, assertEqual
from flsim.utils.timing.training_duration_distribution import (
DurationDistributionFromList,
DurationDistributionFromListConfig,
DurationInfo,
PerUserHalfNormalDurationDistribution,
PerUserHalfNormalDurationDistributionConfig,
PerUserUniformDurationDistribution,
PerUserUniformDurationDistributionConfig,
)
from flsim.utils.timing.training_time_estimator import (
AsyncTrainingTimeEstimator,
SyncTrainingTimeEstimator,
get_training_time,
)
from omegaconf import OmegaConf
class TestTrainingTimeEstimator:
def test_time_from_list(self) -> None:
"""
Test training time from list
Assuming UPR = 2
Sync would be the sum of slowest user between rounds
round 1
user_1: duration = 4
user_2: duration = 3
round 2
user_3: duration = 2
user_4: duration = 1
total = 4 + 2 = 6
Async would be the
user_1: duration = 4, start_time = 1
user_2: duration = 3, start_time = 1
user_3: duration = 2, start_time = 2
user_4: duration = 1, start_time = 3
users training @ time 1: user 1, user 2
users training @ time 3: user 2, user 3
users training @ time 4: user 3, user 4
users training @ time 5: user 4 finishes training
"""
training_events = [
DurationInfo(duration=4),
DurationInfo(duration=3),
DurationInfo(duration=2),
DurationInfo(duration=1),
]
async_start_times = [1, 1, 2, 3]
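# Minimal sketch of the sync-time arithmetic from the docstring (users_per_round == 2):
# durations [4, 3, 2, 1] split into rounds [[4, 3], [2, 1]], so
# sync_time = max(4, 3) + max(2, 1) == 6, matching the assertion below.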
sync_training_dist = DurationDistributionFromList(
**OmegaConf.structured(
DurationDistributionFromListConfig(training_events=training_events)
)
)
async_training_dist = DurationDistributionFromList(
**OmegaConf.structured(
DurationDistributionFromListConfig(training_events=training_events)
)
)
num_users = len(training_events)
epochs = 1
users_per_round = 2
sync_estimator = SyncTrainingTimeEstimator(
total_users=len(training_events),
users_per_round=users_per_round,
epochs=epochs,
training_dist=sync_training_dist,
)
async_estimator = AsyncTrainingTimeEstimator(
total_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=async_training_dist,
start_times=async_start_times,
)
async_time = async_estimator.training_time()
sync_time = sync_estimator.training_time()
assertEqual(sync_time, 6)
assertEqual(async_time, 5)
def test_uniform_training_time(self) -> None:
"""
Test uniform training time
Sync and Async should have the same training time if
UPR = 1 and duration_min close to duration_mean
"""
torch.manual_seed(0)
num_users = 1000
epochs = 1
users_per_round = 1
duration_mean = 1.00
duration_min = 0.99999
training_dist = PerUserUniformDurationDistribution(
**OmegaConf.structured(
PerUserUniformDurationDistributionConfig(
training_duration_mean=duration_mean,
training_duration_min=duration_min,
)
)
)
sync_time, async_time = get_training_time(
num_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=training_dist,
)
assertAlmostEqual(sync_time, async_time, delta=1e-3)
def test_per_user_half_normal(self) -> None:
"""
Test half normal training time
Sync and Async should have the following training time
sync_training_time = async_training_time = num_users * duration_min
if UPR = 1 and duraton_std is close to 0
"""
torch.manual_seed(0)
num_users = 1000
epochs = 1
users_per_round = 1
duration_std = 1e-6
duration_min = 1.0
training_dist = PerUserHalfNormalDurationDistribution(
**OmegaConf.structured(
PerUserHalfNormalDurationDistributionConfig(
training_duration_sd=duration_std,
training_duration_min=duration_min,
)
)
)
sync_time, async_time = get_training_time(
num_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
training_dist=training_dist,
)
assertAlmostEqual(sync_time, async_time, delta=1e-3)
assertAlmostEqual(sync_time, num_users * duration_min, delta=1e-3)
assertAlmostEqual(async_time, num_users * duration_min, delta=1e-3)
| canife-main | FLSim/flsim/utils/tests/test_training_time_estimator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import pytest
from flsim.common.pytest_helper import assertEqual
from flsim.utils.async_trainer.async_example_weights import (
AsyncExampleWeightConfig,
ExampleWeight,
)
from flsim.utils.tests.helpers.test_async_weights_utils import ( # noqa
AsyncExampleWeightsTestUtils,
)
from hydra.utils import instantiate
class TestAsyncExampleWeights:
@pytest.mark.parametrize(
"example_weight_config, example_weight_class",
AsyncExampleWeightsTestUtils.EXAMPLE_WEIGHT_TEST_CONFIGS,
)
def test_string_conversion(
self,
example_weight_config: AsyncExampleWeightConfig,
example_weight_class: ExampleWeight,
) -> None:
"""Check that strings are correctly converted to ExampleWeight"""
obj = instantiate(example_weight_config)
assertEqual(obj.__class__, example_weight_class)
@pytest.mark.parametrize(
"example_weight_config, example_weight_class",
AsyncExampleWeightsTestUtils.EXAMPLE_WEIGHT_TEST_CONFIGS,
)
@pytest.mark.parametrize(
"avg_num_examples",
AsyncExampleWeightsTestUtils.AVG_NUMBER_OF_EXAMPLES,
)
def test_example_weight_compute(
self,
example_weight_config: AsyncExampleWeightConfig,
example_weight_class: ExampleWeight,
avg_num_examples: int,
) -> None:
"""Test that all weight computation works as expected"""
# generate 10 random integers
max_num_examples = 10000
for _ in range(10):
num_examples = np.random.randint(1, max_num_examples)
example_weight_config.avg_num_examples = avg_num_examples
obj = instantiate(example_weight_config)
assertEqual(
obj.weight(num_examples),
AsyncExampleWeightsTestUtils.expected_weight(
avg_num_examples=avg_num_examples,
num_examples=num_examples,
example_weight_class=example_weight_class,
),
)
| canife-main | FLSim/flsim/utils/tests/test_async_example_weights.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, List, Optional
import torch
import torch.nn as nn
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.metrics_reporter import IFLMetricsReporter
from flsim.interfaces.model import IFLModel
from flsim.utils.fl.common import FLModelParamUtils
class FLTestUtils:
@classmethod
def compare_model_linear_comb(cls, model1: nn.Module, model2: nn.Module):
temp_modelA = FLModelParamUtils.clone(model1)
temp_modelB = FLModelParamUtils.clone(model1)
temp_modelC = FLModelParamUtils.clone(model1)
# model1 + 0*model2 = model1
FLModelParamUtils.linear_comb_models(model1, 1, model2, 0, temp_modelA)
assert FLModelParamUtils.get_mismatched_param([model1, temp_modelA]) == ""
# model1 + model2 != model1
FLModelParamUtils.linear_comb_models(model1, 1, model2, 1, temp_modelA)
assert FLModelParamUtils.get_mismatched_param([model1, temp_modelA]) != ""
# (2*model1 + 3*model1 ) - 4*model1 = model1
FLModelParamUtils.linear_comb_models(model1, 2, model1, 3, temp_modelA)
FLModelParamUtils.linear_comb_models(model1, 4, model1, 0, temp_modelB)
FLModelParamUtils.linear_comb_models(
temp_modelA, 1, temp_modelB, -1, temp_modelC
)
assert FLModelParamUtils.get_mismatched_param([model1, temp_modelC], 1e-5) == ""
# test that reusing one of the input models as model_to_save also works
# model1 = model1 - model2, followed by model2 = model1 + model2
# model2 should be the same as original model1
temp_modelA = FLModelParamUtils.clone(model1)
FLModelParamUtils.linear_comb_models(model1, 1, model2, -1, model1)
FLModelParamUtils.linear_comb_models(model1, 1, model2, 1, model1)
assert FLModelParamUtils.get_mismatched_param([model1, temp_modelA], 1e-5) == ""
@classmethod
def random_grad(cls, model: nn.Module):
for param in model.parameters():
param.grad = torch.rand_like(param) # pyre-ignore
@classmethod
def compare_gradient_reconstruction(
cls, model0: nn.Module, copy_model0: nn.Module, reconstructed_grad: nn.Module
):
"""Test that gradient reconstruction post-optimization works
Moment-based optimizers for FL require approximate gradient reconstruction from
two models: original model, and new model after FL optmization step
approx_gradient = original_model - new_model
This test checks that gradient reconstruction works as expected
"""
# copy model.0 into copy_model.0
# create optimizerA for model.0, take 1 step of gradient descent on model.0,
# moving to model.1
# reconstruct original gradients by reconstructred_grad = model.1 - model.0
# set grad(copy_model.0) = reconstructed_grad
# create optimizerB for copy_model.0
# take 1 step of gradient descent on copy_model.0, moving to copy_model.1
# check model.1 = copy_model.1
learning_rate = 1.0
FLModelParamUtils.copy_models(model0, [copy_model0])
optimizer = torch.optim.SGD(model0.parameters(), lr=learning_rate)
# take a few steps of gradient descent
for _i in range(0, 10):
optimizer.zero_grad()
cls.random_grad(model0)
optimizer.step()
copy_optimizer = torch.optim.SGD(copy_model0.parameters(), lr=learning_rate)
copy_optimizer.zero_grad()
FLModelParamUtils.reconstruct_gradient(
old_model=copy_model0, new_model=model0, grads=reconstructed_grad
)
FLModelParamUtils.set_gradient(
model=copy_model0, reference_gradient=reconstructed_grad
)
copy_optimizer.step()
assert (
FLModelParamUtils.get_mismatched_param(
[model0, copy_model0], rel_epsilon=1e-4
)
== ""
)
@classmethod
def _verify_averaged_and_orig_models(
cls, orig_models: List[nn.Module], new_models: List[nn.Module]
) -> None:
"""Verify that:
a) Every model in new_models is the same
b) Every model in new_models is the 'average' of models in orig_models
"""
assert len(orig_models) == len(new_models)
if len(orig_models) == 0:
return
orig_dicts = [dict(aModel.named_parameters()) for aModel in orig_models]
new_dicts = [dict(aModel.named_parameters()) for aModel in new_models]
assert len(orig_dicts) == len(new_dicts)
if len(orig_dicts) == 0:
return
# verify new models have all params same
assert FLModelParamUtils.get_mismatched_param(new_models) == ""
# verify that new_models have average of old models
for name, param in new_dicts[0].items():
orig_tensors = torch.stack([thedict[name] for thedict in orig_dicts])
orig_shape = orig_tensors[0].shape
averaged = torch.mean(orig_tensors, 0, keepdim=True)
averaged_reshaped = averaged.view(orig_shape)
assert torch.allclose(averaged_reshaped, param, atol=1e-6)
@classmethod
def average_and_verify_models(cls, orig_models: List[nn.Module]) -> None:
"""Compute the average of models in orig_models, and verify the average"""
if len(orig_models) == 0:
return
models = [FLModelParamUtils.clone(orig_model) for orig_model in orig_models]
temp_model = FLModelParamUtils.clone(models[0])
FLModelParamUtils.average_models(models, temp_model)
FLModelParamUtils.copy_models(temp_model, models)
cls._verify_averaged_and_orig_models(orig_models, models)
@classmethod
def do_two_models_have_same_weights(cls, model1, model2) -> bool:
for p1, p2 in zip(model1.parameters(), model2.parameters()):
if p1.data.ne(p2.data).sum() > 0:
return False
return True
@classmethod
def train_non_fl(
cls,
data_provider: IFLDataProvider,
global_model: IFLModel,
optimizer: torch.optim.Optimizer,
metrics_reporter: Optional[IFLMetricsReporter] = None,
epochs: int = 1,
cuda_enabled: bool = False,
):
if cuda_enabled:
global_model.fl_cuda()
for _ in range(epochs):
for one_user_data in data_provider.train_users():
for batch in one_user_data.train_data():
optimizer.zero_grad()
batch_metrics = global_model.fl_forward(batch)
if metrics_reporter is not None:
metrics_reporter.add_batch_metrics(batch_metrics)
batch_metrics.loss.backward()
optimizer.step() # pyre-ignore
return global_model, metrics_reporter
@classmethod
def run_nonfl_training(
cls,
model: IFLModel,
optimizer: torch.optim.Optimizer,
data_loader: torch.utils.data.DataLoader,
epochs: int,
) -> IFLModel:
torch.manual_seed(1)
for _ in range(epochs):
for training_batch in data_loader:
FLTestUtils.run_nonfl_training_one_batch(
model=model, optimizer=optimizer, training_batch=training_batch
)
return model
@classmethod
def run_nonfl_training_one_batch(
cls, model: IFLModel, optimizer: torch.optim.Optimizer, training_batch: Any
):
optimizer.zero_grad()
batch_metrics = model.fl_forward(training_batch)
loss = batch_metrics.loss
loss.backward()
optimizer.step() # pyre-ignore
| canife-main | FLSim/flsim/utils/tests/helpers/test_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Type
import numpy as np
from flsim.utils.timing.training_duration_distribution import (
DurationDistributionConfig,
PerExampleGaussianDurationDistributionConfig,
PerUserGaussianDurationDistributionConfig,
)
class ConstantEventGenTestConfig:
"""This class specifies expected behavior for a training_simulator
that uses an event generator where:
a) Time between events is always 1 sec (or 1 unit of time)
b) Training duration per example is 1 sec. 4 examples per user
Thus, overall training duration is 4 secs
c) There are 4 users, trained over 1 epoch
This class is used both in a unit-test below, and in an integration
test for AsyncTrainer
In this case, training proceeds as follows:
t=0, user1 STARTS training. model_seqnum=0
t=1, user2 STARTS training. model_seqnum=0
t=2, user3 STARTS training. model_seqnum=0
t=3, user4 STARTS training. model_seqnum=0
t=4, user1 FINISHES trng. Jobs pending=4. seqnum=1. SeqnumDiff=0
t=5, user2 FINISHES trng. Jobs pending=3. seqnum=2. SeqnumDiff=1
t=6, user3 FINISHES trng. Jobs pending=2. seqnum=3. SeqnumDiff=2
t=7, user4 FINISHES trng. Jobs pending=1. seqnum=4. SeqnumDiff=3
So, average number of pending jobs: mean([4, 3, 2, 1]) = 10/4 = 2.5
    SeqnumDiffs: [0,1,2,3]. Mean=6/4=1.5, SD = 1.118
"""
num_examples_per_user = 4
training_duration_distribution_config: Type[
DurationDistributionConfig
] = PerExampleGaussianDurationDistributionConfig
training_rate = 1
training_duration_mean = 1
training_duration_sd = 0
num_users = 4
    # pending job counts at each FINISH event in the docstring timeline
    pending_jobs = [4, 3, 2, 1]
seqnum_diffs = [0, 1, 2, 3]
mean_pending_jobs = np.mean(pending_jobs)
mean_seqnum_diff = np.mean(seqnum_diffs)
sd_seqnum_diff = np.std(seqnum_diffs)
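# A worked check of the expected statistics above (illustration only): from the
# timeline in the docstring, the pending-job counts at the four FINISH events are
# [4, 3, 2, 1] and the seqnum diffs are [0, 1, 2, 3], so
#   np.mean([4, 3, 2, 1]) == 2.5
#   np.mean([0, 1, 2, 3]) == 1.5
#   np.std([0, 1, 2, 3]) ~= 1.118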
class ConstantEventGenTestConfigPerUserGaussian(ConstantEventGenTestConfig):
"""Same as ConstantEventGenTestConfig, but assumes that training duration
distribution is a Per-User gaussian
"""
    # In the parent class, training_duration_distribution was PerExampleGaussian,
    # so the parent's training time per user = #examples-per-user * training_duration_mean.
    # In this class, training_duration_distribution is PerUserGaussian,
    # so training time per user = training_duration_mean.
    # We therefore multiply training_duration_mean by #examples-per-user to keep
    # the overall training duration constant.
num_examples_per_user = 1
training_duration_mean = 4
training_duration_distribution_config: Type[
DurationDistributionConfig
] = PerUserGaussianDurationDistributionConfig
class PoissonEventGenTestConfig:
"""This class specifies expected behavior for a training_simulator
that uses an event generator where:
a) Time between events is 1 sec, Poisson distributed
b) Training duration per example is 1/4 sec. 4 examples per user
Thus, overall training duration is 1 sec
c) There are 4 users,
This class is used both in a unit-test below, and in an integration
test for AsyncTrainer
"""
num_examples_per_user = 4
training_duration_distribution_config: Type[
DurationDistributionConfig
] = PerExampleGaussianDurationDistributionConfig
training_rate = 1
training_duration_mean = 1 / 4
training_duration_sd = 0
num_users = 4
mean_pending_jobs = 1.25
mean_seqnum_diff = 0.25
sd_seqnum_diff = 0.433
class PoissonEventGenTestConfigPerUserGaussian(PoissonEventGenTestConfig):
"""Same as PoissonEventGenTestConfig, but assumes that training duration
distribution is a Per-User gaussian
"""
num_examples_per_user = 1
training_duration_distribution_config: Type[
DurationDistributionConfig
] = PerUserGaussianDurationDistributionConfig
training_duration_mean = 1
| canife-main | FLSim/flsim/utils/tests/helpers/test_training_simulator_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import string
from typing import Tuple
import torch
from flsim.data.data_provider import FLDataProviderFromList
from flsim.data.data_sharder import PowerLawSharder, SequentialSharder
from flsim.data.dataset_data_loader import FLDatasetDataLoaderWithBatch
from flsim.interfaces.model import IFLModel
from flsim.utils.sample_model import DummyAlphabetFLModel, TestDataSetting
from torch.utils.data import Dataset
class Utils:
@staticmethod
def get_label(character: str) -> torch.Tensor:
return torch.tensor(ord(character) % 2 == 0, dtype=torch.bool)
@staticmethod
def get_text(character: str) -> torch.Tensor:
return torch.tensor(ord(character) - ord("a"), dtype=torch.long)
@staticmethod
def get_characters(num: int):
"""Return [a,b,c,d,.....z,a,b,c,d....] till we get num total chars"""
characters = list(string.ascii_lowercase) * (1 + num // 26)
return characters[:num]
class DummyAlphabetDataset(Dataset):
"""
create a dummy PyTorch Dataset of k characters
"""
def __init__(self, num_rows: int = 26):
self.num_rows = num_rows
self._data_rows = DummyAlphabetDataset.provide_data(self.num_rows)
def __getitem__(self, index):
return self._data_rows[index]
def __len__(self):
return self.num_rows
@staticmethod
def provide_data(num_rows: int = 26):
"""Generate num_row rows of data. Each row:
"label":0 or 1, "text":int between 0 and 25
"""
characters = Utils.get_characters(num_rows)
return [
{
TestDataSetting.LABEL_COL_NAME: Utils.get_label(character),
TestDataSetting.TEXT_COL_NAME: Utils.get_text(character),
}
for character in characters
]
@staticmethod
def create_data_provider_and_loader(
dataset: Dataset, examples_per_user: int, batch_size: int, model
) -> Tuple[FLDataProviderFromList, FLDatasetDataLoaderWithBatch]:
"""
Creates a data provider and data loader of type IFLDataProvider for a dataset
"""
fl_data_sharder = SequentialSharder(examples_per_shard=examples_per_user)
fl_data_loader = FLDatasetDataLoaderWithBatch(
dataset,
dataset,
dataset,
fl_data_sharder,
batch_size,
batch_size,
batch_size,
)
fl_data_provider = FLDataProviderFromList(
fl_data_loader.fl_train_set(),
fl_data_loader.fl_eval_set(),
fl_data_loader.fl_test_set(),
model,
)
return fl_data_provider, fl_data_loader
@staticmethod
def create_data_provider_and_loader_uneven_split(
num_examples: int,
num_fl_users: int,
batch_size: int,
model: IFLModel,
alpha: float = 0.2,
) -> Tuple[FLDataProviderFromList, FLDatasetDataLoaderWithBatch]:
"""
        Creates a data provider and data loader with an uneven number of
examples per user following the power law distribution with order alpha
"""
dataset = DummyAlphabetDataset(num_examples)
fl_data_sharder = PowerLawSharder(num_shards=num_fl_users, alpha=alpha)
fl_data_loader = FLDatasetDataLoaderWithBatch(
dataset,
dataset,
dataset,
fl_data_sharder,
batch_size,
batch_size,
batch_size,
)
fl_data_provider = FLDataProviderFromList(
fl_data_loader.fl_train_set(),
fl_data_loader.fl_eval_set(),
fl_data_loader.fl_test_set(),
model,
)
return fl_data_provider, fl_data_loader
@staticmethod
def create_data_provider_and_loader_train_and_eval_users(
num_train_examples: int,
num_eval_examples: int,
examples_per_user: int,
train_batch_size: int,
eval_batch_size: int,
) -> Tuple[FLDataProviderFromList, FLDatasetDataLoaderWithBatch]:
"""
        Creates a data provider and data loader with different numbers of
train and eval clients
"""
model = DummyAlphabetFLModel()
train_dataset = DummyAlphabetDataset(num_train_examples)
eval_dataset = DummyAlphabetDataset(num_eval_examples)
fl_data_sharder = SequentialSharder(examples_per_shard=examples_per_user)
fl_data_loader = FLDatasetDataLoaderWithBatch(
train_dataset,
eval_dataset,
eval_dataset,
fl_data_sharder,
train_batch_size,
eval_batch_size,
eval_batch_size,
)
fl_data_provider = FLDataProviderFromList(
fl_data_loader.fl_train_set(),
fl_data_loader.fl_eval_set(),
fl_data_loader.fl_test_set(),
model,
)
return fl_data_provider, fl_data_loader
class NonOverlappingDataset(Dataset):
"""
Create a dataset with non-overlapping non-zero entries for users. This will be useful
for testing that clients will have orthogonal updates in linear regression problems.
"""
def __init__(
self,
num_users: int = 10,
num_nonzeros_per_user: int = 4,
num_data_per_user: int = 6,
):
self.num_users = num_users
self.num_nonzeros_per_user = num_nonzeros_per_user
self.num_data_per_user = num_data_per_user
self._data_rows = NonOverlappingDataset.provide_data(
num_users=self.num_users,
num_nonzeros_per_user=self.num_nonzeros_per_user,
num_data_per_user=self.num_data_per_user,
)
def __getitem__(self, index):
return self._data_rows[index]
def __len__(self):
return self.num_data_per_user * self.num_users
@staticmethod
def provide_data(
num_users: int = 10, num_nonzeros_per_user: int = 4, num_data_per_user: int = 6
):
"""
        Generate data. Each successive group of num_data_per_user rows has
        num_nonzeros_per_user non-overlapping non-zero entries
"""
num_rows = num_data_per_user * num_users
num_cols = num_nonzeros_per_user * num_users
non_overlap_data = torch.zeros(num_rows, num_cols)
for row in range(num_rows):
col_start = math.floor(row / num_data_per_user) * num_nonzeros_per_user
non_overlap_data[
row, col_start : col_start + num_nonzeros_per_user
] = torch.rand(1, num_nonzeros_per_user)
labels = torch.rand(num_rows, 1)
return [
{
TestDataSetting.LABEL_COL_NAME: labels[row, :],
TestDataSetting.TEXT_COL_NAME: non_overlap_data[row, :],
}
for row in range(num_rows)
]
@staticmethod
def create_data_provider_and_loader(
dataset: Dataset, examples_per_user: int, batch_size: int, model: IFLModel
) -> Tuple[FLDataProviderFromList, FLDatasetDataLoaderWithBatch]:
"""
Creates a data provider and data loader of type IFLDataProvider for a dataset
"""
fl_data_sharder = SequentialSharder(examples_per_shard=examples_per_user)
fl_data_loader = FLDatasetDataLoaderWithBatch(
dataset,
dataset,
dataset,
fl_data_sharder,
batch_size,
batch_size,
batch_size,
)
fl_data_provider = FLDataProviderFromList(
fl_data_loader.fl_train_set(),
fl_data_loader.fl_eval_set(),
fl_data_loader.fl_test_set(),
model,
)
return fl_data_provider, fl_data_loader
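# A minimal sketch (illustration only) of the block structure produced by
# NonOverlappingDataset.provide_data: with num_users=2, num_nonzeros_per_user=2 and
# num_data_per_user=1, row 0 is non-zero only in columns 0-1 and row 1 only in
# columns 2-3, so per-user updates in a linear regression touch disjoint weights.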
class RandomDataset(Dataset):
"""
Create a dataset with random entries and labels.
"""
def __init__(
self, num_users: int = 10, num_data_per_user: int = 6, dim_data: int = 40
):
self.num_users = num_users
self.num_data_per_user = num_data_per_user
self.dim_data = dim_data
self._data_rows = RandomDataset.provide_data(
num_users=self.num_users,
num_data_per_user=self.num_data_per_user,
dim_data=self.dim_data,
)
def __getitem__(self, index):
return self._data_rows[index]
def __len__(self):
return self.num_data_per_user * self.num_users
@staticmethod
def provide_data(
num_users: int = 10, num_data_per_user: int = 6, dim_data: int = 40
):
"""
Generate data which is a random matrix.
"""
num_rows = num_data_per_user * num_users
num_cols = dim_data
random_data = torch.randn(num_rows, num_cols)
labels = torch.rand(num_rows, 1)
return [
{
TestDataSetting.LABEL_COL_NAME: labels[row, :],
TestDataSetting.TEXT_COL_NAME: random_data[row, :],
}
for row in range(num_rows)
]
@staticmethod
def create_data_provider_and_loader(
dataset: Dataset, examples_per_user: int, batch_size: int, model: IFLModel
) -> Tuple[FLDataProviderFromList, FLDatasetDataLoaderWithBatch]:
"""
Creates a data provider and data loader of type IFLDataProvider for a dataset
"""
fl_data_sharder = SequentialSharder(examples_per_shard=examples_per_user)
fl_data_loader = FLDatasetDataLoaderWithBatch(
dataset,
dataset,
dataset,
fl_data_sharder,
batch_size,
batch_size,
batch_size,
)
fl_data_provider = FLDataProviderFromList(
fl_data_loader.fl_train_set(),
fl_data_loader.fl_eval_set(),
fl_data_loader.fl_test_set(),
model,
)
return fl_data_provider, fl_data_loader
| canife-main | FLSim/flsim/utils/tests/helpers/test_data_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from flsim.active_user_selectors.simple_user_selector import (
SequentialActiveUserSelectorConfig,
)
from flsim.channels.base_channel import FLChannelConfig
from flsim.clients.base_client import ClientConfig
from flsim.common.timeout_simulator import (
NeverTimeOutSimulatorConfig,
TimeOutSimulatorConfig,
)
from flsim.interfaces.model import IFLModel
from flsim.optimizers.local_optimizers import LocalOptimizerSGDConfig
from flsim.optimizers.optimizer_scheduler import (
ConstantLRSchedulerConfig,
OptimizerSchedulerConfig,
)
from flsim.servers.sync_servers import SyncServerConfig
from flsim.trainers.sync_trainer import SyncTrainer, SyncTrainerConfig
from omegaconf import OmegaConf
# have to create a variable because python linter doesn't like performing function calls
# in argument defaults (B008, https://github.com/PyCQA/flake8-bugbear#list-of-warnings)
NEVER_TIMEOUT_CONFIG = NeverTimeOutSimulatorConfig()
CONSTANT_LR_SCHEDULER_CONFIG = ConstantLRSchedulerConfig()
FED_AVG_SYNC_SERVER_CONFIG = SyncServerConfig(
active_user_selector=SequentialActiveUserSelectorConfig()
)
def create_sync_trainer(
model: IFLModel,
local_lr: float,
users_per_round: int,
epochs: int,
user_epochs_per_round: int = 1,
do_eval: bool = True,
server_config: SyncServerConfig = FED_AVG_SYNC_SERVER_CONFIG,
timeout_simulator_config: TimeOutSimulatorConfig = NEVER_TIMEOUT_CONFIG,
local_lr_scheduler: OptimizerSchedulerConfig = CONSTANT_LR_SCHEDULER_CONFIG,
report_train_metrics: bool = False,
report_train_metrics_after_aggregation: bool = False,
dropout_rate: float = 1.0,
train_metrics_reported_per_epoch: int = 1,
):
    # First disable report_train_metrics_after_aggregation; we will call it
    # outside of train(), otherwise the post-aggregation train metrics are
    # not returned.
sync_trainer = SyncTrainer(
model=model,
cuda_enabled=False,
**OmegaConf.structured(
SyncTrainerConfig(
epochs=epochs,
do_eval=do_eval,
always_keep_trained_model=False,
timeout_simulator=timeout_simulator_config,
train_metrics_reported_per_epoch=train_metrics_reported_per_epoch,
eval_epoch_frequency=1,
report_train_metrics=report_train_metrics,
report_train_metrics_after_aggregation=report_train_metrics_after_aggregation,
client=ClientConfig(
epochs=user_epochs_per_round,
optimizer=LocalOptimizerSGDConfig(
lr=local_lr,
),
lr_scheduler=local_lr_scheduler,
shuffle_batch_order=False,
),
channel=FLChannelConfig(),
server=server_config,
users_per_round=users_per_round,
dropout_rate=dropout_rate,
)
),
)
return sync_trainer
| canife-main | FLSim/flsim/utils/tests/helpers/test_sync_trainer_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/utils/tests/helpers/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from math import log10, sqrt
from flsim.utils.async_trainer.async_example_weights import (
EqualExampleWeight,
EqualExampleWeightConfig,
ExampleWeight,
LinearExampleWeight,
LinearExampleWeightConfig,
Log10ExampleWeight,
Log10ExampleWeightConfig,
SqrtExampleWeight,
SqrtExampleWeightConfig,
)
from flsim.utils.async_trainer.async_staleness_weights import (
ConstantStalenessWeight,
ConstantStalenessWeightConfig,
PolynomialStalenessWeight,
PolynomialStalenessWeightConfig,
ThresholdStalenessWeight,
ThresholdStalenessWeightConfig,
)
class AsyncExampleWeightsTestUtils:
EXAMPLE_WEIGHT_TEST_CONFIGS = [
(EqualExampleWeightConfig(), EqualExampleWeight),
(LinearExampleWeightConfig(), LinearExampleWeight),
(SqrtExampleWeightConfig(), SqrtExampleWeight),
(Log10ExampleWeightConfig(), Log10ExampleWeight),
]
AVG_NUMBER_OF_EXAMPLES = [1, 10000]
@classmethod
def expected_weight(
cls,
avg_num_examples: int,
num_examples: int,
example_weight_class: ExampleWeight,
) -> float:
if example_weight_class == EqualExampleWeight:
return 1.0
elif example_weight_class == LinearExampleWeight:
return num_examples / avg_num_examples
elif example_weight_class == SqrtExampleWeight:
return sqrt(num_examples) / sqrt(avg_num_examples)
elif example_weight_class == Log10ExampleWeight:
return log10(1 + num_examples) / log10(1 + avg_num_examples)
else:
raise AssertionError(f"Unknown example_weight type:{example_weight_class}")
class AsyncStalenessWeightsTestUtils:
STALENESS_WEIGHT_TEST_CONFIGS = [
(ConstantStalenessWeightConfig(), ConstantStalenessWeight),
(
ThresholdStalenessWeightConfig(cutoff=1, value_after_cutoff=0.1),
ThresholdStalenessWeight,
),
(PolynomialStalenessWeightConfig(exponent=0.5), PolynomialStalenessWeight),
]
AVG_TEST_STALENESS = [1, 10000]
@classmethod
def get_constant_wt(cls) -> float:
return 1.0
@classmethod
def get_threshold_wt(
cls, staleness: int, cutoff: int, value_after_cutoff: float
) -> float:
return 1.0 if staleness <= cutoff else value_after_cutoff
@classmethod
def get_polynomial_wt(cls, staleness: int, exponent: float) -> float:
return 1 / ((1 + staleness) ** exponent)
| canife-main | FLSim/flsim/utils/tests/helpers/test_async_weights_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Iterable
import torch
import torch.nn as nn
from flsim.utils.fl.personalized_model import FLModelWithPrivateModules
class LinearRegression(nn.Module):
def __init__(self):
super().__init__()
self.a = nn.Parameter(torch.randn(1, requires_grad=True, dtype=torch.float))
self.b = nn.Parameter(torch.randn(1, requires_grad=True, dtype=torch.float))
def forward(self, x):
return self.a + self.b * x
LR_PRIVATE_MODULE_NAMES = {"a"}
class PersonalizedLinearRegression(LinearRegression, FLModelWithPrivateModules):
@classmethod
def _get_user_private_module_names(cls) -> Iterable[str]:
return LR_PRIVATE_MODULE_NAMES
def __init__(self):
super().__init__()
# Set up user-private module attributes whenever we create a new
# instance.
self._maybe_set_up_user_private_modules()
# Set forward hooks to reuse the forward() of the parent class.
self._set_forward_hooks()
class FCModel(nn.Module):
def __init__(self):
super(FCModel, self).__init__()
self.fc1 = nn.Linear(10, 5)
self.fc2 = nn.Linear(5, 3)
self.fc3 = nn.Linear(3, 1)
def forward(self, x):
x = self.fc1(x)
x = self.fc2(x)
x = self.fc3(x)
return x
FC_PRIVATE_MODULE_NAMES = {"fc1", "fc3"}
class PersonalizedFCModel(FCModel, FLModelWithPrivateModules):
@classmethod
def _get_user_private_module_names(cls) -> Iterable[str]:
return FC_PRIVATE_MODULE_NAMES
def __init__(self):
super().__init__()
# Set up user-private module attributes whenever we create a new
# instance.
self._maybe_set_up_user_private_modules()
# Set forward hooks to reuse the forward() of the parent class.
self._set_forward_hooks()
| canife-main | FLSim/flsim/utils/tests/helpers/test_models.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Optional, Tuple
import numpy as np
import torch
from flsim.clients.base_client import ClientConfig
from flsim.common.timeout_simulator import (
NeverTimeOutSimulatorConfig,
TimeOutSimulatorConfig,
)
from flsim.data.data_provider import IFLDataProvider
from flsim.interfaces.metrics_reporter import IFLMetricsReporter
from flsim.interfaces.model import IFLModel
from flsim.optimizers.async_aggregators import AsyncAggregatorConfig
from flsim.optimizers.local_optimizers import LocalOptimizerSGDConfig
from flsim.optimizers.optimizer_scheduler import (
ConstantLRSchedulerConfig,
OptimizerSchedulerConfig,
)
from flsim.trainers.async_trainer import AsyncTrainer, AsyncTrainerConfig
from flsim.utils.async_trainer.async_example_weights import (
AsyncExampleWeightConfig,
EqualExampleWeightConfig,
)
from flsim.utils.async_trainer.async_staleness_weights import (
AsyncStalenessWeightConfig,
ConstantStalenessWeightConfig,
)
from flsim.utils.async_trainer.async_user_selector import AsyncUserSelectorType
from flsim.utils.async_trainer.async_weights import AsyncWeightConfig
from flsim.utils.async_trainer.training_event_generator import (
AsyncTrainingEventGeneratorConfig,
ConstantAsyncTrainingStartTimeDistrConfig,
EventGeneratorConfig,
)
from flsim.utils.fl.common import FLModelParamUtils
from flsim.utils.sample_model import DummyAlphabetFLModel
from flsim.utils.test_utils import (
FakeMetricReporter,
verify_models_equivalent_after_training,
)
from flsim.utils.tests.helpers.test_data_utils import DummyAlphabetDataset
from flsim.utils.tests.helpers.test_utils import FLTestUtils
from flsim.utils.timing.training_duration_distribution import (
PerExampleGaussianDurationDistributionConfig,
)
from omegaconf import OmegaConf
CONSTANT_LR_SCHEDULER_CONFIG = ConstantLRSchedulerConfig()
EQUAL_EXAMPLE_WEIGHT_CONFIG = EqualExampleWeightConfig()
CONSTANT_STALENESS_WEIGHT_CONFIG = ConstantStalenessWeightConfig()
def create_event_generator_config(
training_rate: int, training_duration_mean: float, training_duration_sd: float
) -> EventGeneratorConfig:
return AsyncTrainingEventGeneratorConfig(
training_start_time_distribution=ConstantAsyncTrainingStartTimeDistrConfig(
training_rate=training_rate
),
duration_distribution_generator=PerExampleGaussianDurationDistributionConfig(
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
),
)
def create_async_trainer(
model: IFLModel,
local_lr: float,
epochs: int,
event_generator_config: EventGeneratorConfig,
aggregator_config: AsyncAggregatorConfig,
example_weight_config: AsyncExampleWeightConfig = EQUAL_EXAMPLE_WEIGHT_CONFIG,
local_lr_scheduler_config: OptimizerSchedulerConfig = CONSTANT_LR_SCHEDULER_CONFIG,
staleness_weight_config: AsyncStalenessWeightConfig = CONSTANT_STALENESS_WEIGHT_CONFIG,
timeout_simulator_config: Optional[TimeOutSimulatorConfig] = None,
max_staleness: float = 1e10,
report_train_metrics_after_aggregation: bool = False,
eval_epoch_frequency: float = 1.0,
always_keep_trained_model: bool = False,
do_eval: bool = False,
train_metrics_reported_per_epoch: int = 1,
):
async_trainer = AsyncTrainer(
model=model,
cuda_enabled=False,
**OmegaConf.structured(
AsyncTrainerConfig(
aggregator=aggregator_config,
client=ClientConfig(
epochs=1,
optimizer=LocalOptimizerSGDConfig(
lr=local_lr,
),
lr_scheduler=local_lr_scheduler_config,
),
epochs=epochs,
training_event_generator=event_generator_config,
async_user_selector_type=AsyncUserSelectorType.ROUND_ROBIN,
async_weight=AsyncWeightConfig(
example_weight=example_weight_config,
staleness_weight=staleness_weight_config,
),
timeout_simulator=timeout_simulator_config
or NeverTimeOutSimulatorConfig(),
max_staleness=max_staleness,
do_eval=do_eval,
report_train_metrics_after_aggregation=report_train_metrics_after_aggregation,
eval_epoch_frequency=eval_epoch_frequency,
always_keep_trained_model=always_keep_trained_model,
train_metrics_reported_per_epoch=train_metrics_reported_per_epoch,
)
),
)
return async_trainer
def get_nonfl_optimizer(
nonfl_model: IFLModel,
fl_local_lr: float,
fl_aggregator_config: AsyncAggregatorConfig,
) -> torch.optim.Optimizer:
"""Given FL trainer settings (local_lr, aggregator config),
return a non-fl Optimizer that will produce equivalent behavior
"""
agg_type = fl_aggregator_config._target_
# if FL global optimizer was Adam, then FL local lr should be 1.0,
# otherwise we cannot produce same results between FL and non-FL
assert ("FedAdam" not in agg_type) or (
fl_local_lr == 1.0
), f"When using FedAdam, fl_local lr should be 1. Instead, its {fl_local_lr}"
# non_fl lr = fl_global_optimizer_lr*fl_local_optimizer_lr
# Example: fl_local_optimizer lr=1.0, then non_fl LR = fl_global_optimizer_lr
# Example: fl_local_optimizer lr=0.1, then non_fl LR = 0.1*fl_global_optimizer_lr
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `lr`.
nonfl_lr = fl_aggregator_config.lr * fl_local_lr
if "FedAvgWithLR" in agg_type:
optimizer = torch.optim.SGD(
nonfl_model.fl_get_module().parameters(),
lr=nonfl_lr,
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `momentum`.
momentum=fl_aggregator_config.momentum,
)
elif "FedAdam" in agg_type:
optimizer = torch.optim.Adam(
nonfl_model.fl_get_module().parameters(),
lr=nonfl_lr,
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `weight_decay`.
weight_decay=fl_aggregator_config.weight_decay,
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `beta1`.
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `beta2`.
betas=(fl_aggregator_config.beta1, fl_aggregator_config.beta2),
# pyre-fixme[16]: `AsyncAggregatorConfig` has no attribute `eps`.
eps=fl_aggregator_config.eps,
)
else:
raise AssertionError(f"Unknown aggregator {agg_type}")
return optimizer
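def _example_equivalent_nonfl_lr(fl_global_lr: float, fl_local_lr: float) -> float:
    """Minimal sketch (illustration only) of the learning-rate equivalence used above:
    the non-FL optimizer that matches FL training uses lr = global_lr * local_lr,
    because the client step scales the update by local_lr and the server step scales
    the aggregated update by global_lr.
    E.g. _example_equivalent_nonfl_lr(1.0, 0.1) == 0.1
    and _example_equivalent_nonfl_lr(0.5, 1.0) == 0.5.
    """
    return fl_global_lr * fl_local_lr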
def assert_fl_nonfl_same(
global_model: IFLModel,
fl_data_provider: IFLDataProvider,
nonfl_data_loader: torch.utils.data.DataLoader,
epochs: int,
local_lr: float,
aggregator_config: AsyncAggregatorConfig,
training_rate: int = 1,
training_duration_mean: int = 0,
training_duration_sd: int = 0,
) -> str:
"""
Given:
data_for_fl={user1:batch1, user2:batch2}
data_for_non_fl={batch1, batch2}
Check that the following produce the same trained model:
1. FL training, 1 user per round. global_opt=SGD, local_lr=x, global_lr=x
2. Non-FL training, opt=SGD, lr=x
Return value: model parameters that don't match between fl and non-fl training
"""
# will be used later to verify training indeed took place
reference_untrained_model = FLModelParamUtils.clone(global_model)
nonfl_model = FLModelParamUtils.clone(reference_untrained_model)
nonfl_optimizer = get_nonfl_optimizer(
nonfl_model=nonfl_model,
fl_local_lr=local_lr,
fl_aggregator_config=aggregator_config,
)
trained_fl_model, trained_nonfl_model = run_fl_nonfl_training(
fl_model=global_model,
nonfl_model=nonfl_model,
nonfl_optimizer=nonfl_optimizer,
fl_data_provider=fl_data_provider,
nonfl_data_loader=nonfl_data_loader,
epochs=epochs,
fl_local_lr=local_lr,
fl_aggregator_config=aggregator_config,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
)
return verify_models_equivalent_after_training(
trained_fl_model,
trained_nonfl_model,
reference_untrained_model,
rel_epsilon=1e-4,
abs_epsilon=1e-4,
)
def run_fl_nonfl_training(
fl_model: IFLModel,
nonfl_model: IFLModel,
nonfl_optimizer: torch.optim.Optimizer,
fl_data_provider: IFLDataProvider,
nonfl_data_loader: torch.utils.data.DataLoader,
epochs: int,
fl_local_lr: float,
fl_aggregator_config: AsyncAggregatorConfig,
training_rate: int = 1,
training_duration_mean: int = 0,
training_duration_sd: int = 0,
example_weight_config: AsyncExampleWeightConfig = EQUAL_EXAMPLE_WEIGHT_CONFIG,
staleness_weight_config: AsyncStalenessWeightConfig = CONSTANT_STALENESS_WEIGHT_CONFIG,
) -> Tuple[IFLModel, IFLModel]:
"""Run the following training
1. FL training: train fl_model with fl_data_provider, fl_aggregator_config and fl_local_lr,
    2. Non-FL training: train nonfl_model with nonfl_data_loader and nonfl_optimizer
"""
fl_trained_model, _ = run_fl_training(
fl_model=fl_model,
fl_data_provider=fl_data_provider,
epochs=epochs,
local_lr=fl_local_lr,
aggregator_config=fl_aggregator_config,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
example_weight_config=example_weight_config,
staleness_weight_config=staleness_weight_config,
always_keep_trained_model=True,
)
nonfl_trained_model = FLTestUtils.run_nonfl_training(
model=nonfl_model,
optimizer=nonfl_optimizer,
data_loader=nonfl_data_loader,
epochs=epochs,
)
return fl_trained_model, nonfl_trained_model
def run_fl_training(
fl_model: IFLModel,
fl_data_provider: IFLDataProvider,
epochs: int,
local_lr: float,
aggregator_config: AsyncAggregatorConfig,
training_rate: int = 1,
training_duration_mean: float = 0,
training_duration_sd: float = 0,
example_weight_config: AsyncExampleWeightConfig = EQUAL_EXAMPLE_WEIGHT_CONFIG,
staleness_weight_config: AsyncStalenessWeightConfig = CONSTANT_STALENESS_WEIGHT_CONFIG,
metrics_reporter: Optional[IFLMetricsReporter] = None,
do_eval: bool = False,
report_train_metrics_after_aggregation: bool = False,
eval_epoch_frequency: float = 1.0,
    always_keep_trained_model: bool = False,
) -> Tuple[IFLModel, Any]:
torch.manual_seed(1)
async_trainer = create_async_trainer(
model=fl_model,
local_lr=local_lr,
epochs=epochs,
aggregator_config=aggregator_config,
event_generator_config=create_event_generator_config(
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
),
example_weight_config=example_weight_config,
staleness_weight_config=staleness_weight_config,
do_eval=do_eval,
report_train_metrics_after_aggregation=report_train_metrics_after_aggregation,
eval_epoch_frequency=eval_epoch_frequency,
)
if metrics_reporter is None:
metrics_reporter = FakeMetricReporter()
fl_trained_model, test_metrics = async_trainer.train(
data_provider=fl_data_provider,
metrics_reporter=metrics_reporter,
num_total_users=fl_data_provider.num_train_users(),
distributed_world_size=1,
)
return fl_trained_model, test_metrics
def get_data(
num_examples: int,
num_fl_users: int,
examples_per_user: int,
fl_batch_size: int,
nonfl_batch_size: int,
model: IFLModel,
) -> Tuple[IFLDataProvider, torch.utils.data.DataLoader]:
fl_data_provider = get_fl_data_provider(
num_examples=num_examples,
num_fl_users=num_fl_users,
examples_per_user=examples_per_user,
batch_size=fl_batch_size,
model=model,
)
# non-FL data
dummy_dataset = DummyAlphabetDataset(num_examples)
nonfl_data_loader = torch.utils.data.DataLoader(
dummy_dataset, batch_size=nonfl_batch_size, shuffle=False
)
return fl_data_provider, nonfl_data_loader
def get_fl_data_provider(
num_examples: int,
num_fl_users: int,
examples_per_user: int,
batch_size: int,
model: IFLModel,
) -> IFLDataProvider:
dummy_dataset = DummyAlphabetDataset(num_examples)
data_provider, data_loader = DummyAlphabetDataset.create_data_provider_and_loader(
dummy_dataset, examples_per_user, batch_size, model
)
    assert data_loader.num_total_users == num_fl_users, "Error in data sharding"
    return data_provider
def get_nonfl_batch_size(fl_batch_size: int, min_examples_per_user: int) -> int:
    # How to choose the batch size for non-FL training?
    # If fl_batch_size is smaller than min_examples_per_user, use fl_batch_size.
    # However, if fl_batch_size is *bigger* than min_examples_per_user, we must
    # use min_examples_per_user as the batch size so that non-FL training takes
    # the same number of optimizer steps as FL training.
    # Example: fl_batch_size=128, 2 users, each with 4 data points.
    # Non-FL training must run 2 batches of training to match FL training,
    # so the batch size must be 4.
    # Example: fl_batch_size=8, min_examples_per_user=1 => nonfl_batch_size=1.
return min(fl_batch_size, min_examples_per_user)
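def _example_nonfl_batch_sizes() -> None:
    """Minimal usage sketch (illustration only) for get_nonfl_batch_size."""
    # FL batch larger than a user's data: non-FL must step once per user batch of 4.
    assert get_nonfl_batch_size(fl_batch_size=128, min_examples_per_user=4) == 4
    # FL batch smaller than a user's data: the FL batch size is reused as-is.
    assert get_nonfl_batch_size(fl_batch_size=8, min_examples_per_user=32) == 8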
def _equal_data_split_params(
num_examples: int,
num_fl_users: int,
fl_batch_size: int,
one_batch_per_user_only: bool,
) -> Tuple[int, int]:
"""Assume FL data is equally split among users
Find examples_per_user and nonfl_batch_size
"""
assert (
num_examples % num_fl_users == 0
), f"Expect num_examples({num_examples}) to be multiple of num_fl_users({num_fl_users})"
examples_per_user = num_examples // num_fl_users
if one_batch_per_user_only:
assert (
fl_batch_size >= examples_per_user
), f"Expected each user to have max 1 batch. Batch_size:{fl_batch_size}, num_examples_per_user:{examples_per_user}"
nonfl_batch_size = get_nonfl_batch_size(
fl_batch_size=fl_batch_size, min_examples_per_user=examples_per_user
)
    assert nonfl_batch_size == min(fl_batch_size, examples_per_user), (
        f"fl_batch_size:{fl_batch_size}, nonfl_batch_size:{nonfl_batch_size} "
        f"examples_per_user:{examples_per_user}. To ensure FL and non-FL take the same number of local SGD steps, "
        f"nonfl_batch_size should be the same as the lower of (examples_per_user, fl_batch_size)"
    )
return examples_per_user, nonfl_batch_size
def _unequal_data_split_params(
num_examples: int,
num_fl_users: int,
max_examples_per_user: int,
fl_batch_size: int,
one_batch_per_user_only: bool,
) -> Tuple[int, int]:
r"""
FL data may be unequally split among users with sequential sharding
E.g: max_num_examples_per_user = 8. total examples = 12. num_users=2
user1: 8 examples. user2: 4 examples
"""
# if equal split. i.e, if every user can get max_examples_per_user
if num_examples / num_fl_users == max_examples_per_user:
return _equal_data_split_params(
num_examples=num_examples,
num_fl_users=num_fl_users,
fl_batch_size=fl_batch_size,
one_batch_per_user_only=one_batch_per_user_only,
)
# we must have enough examples such that at least one user gets max_examples_per_user
assert max_examples_per_user > num_fl_users
# last user gets leftover examples
examples_with_last_user = num_examples % max_examples_per_user
if one_batch_per_user_only:
assert fl_batch_size >= max_examples_per_user, (
f"Expected each user to have max 1 batch. Batch_size:{fl_batch_size},"
f" num_examples_per_user:{max_examples_per_user}"
)
nonfl_batch_size = get_nonfl_batch_size(
fl_batch_size=fl_batch_size, min_examples_per_user=examples_with_last_user
)
assert nonfl_batch_size == min(fl_batch_size, max_examples_per_user), (
f"fl_batch_size:{fl_batch_size}, nonfl_batch_size:{nonfl_batch_size} "
f"max_examples_per_user:{max_examples_per_user}. "
f"To ensure FL and non-FL take the same number of local SGD steps,"
f"nonfl_batch_size should be the same as the lower of (max_examples_per_user, fl_batch_size)",
)
return max_examples_per_user, nonfl_batch_size
def get_unequal_split_data(
num_examples: int,
num_fl_users: int,
max_examples_per_user: int,
fl_batch_size: int,
model: IFLModel,
one_batch_per_user_only: bool = False,
):
r"""
If FL data is unequally split among users with sequential sharding
E.g: max_num_examples_per_user = 8. total examples = 12. num_users=2
user1: 8 examples. user2: 4 examples
Return FLDataProvider and non-FL DataLoader
"""
examples_per_user, nonfl_batch_size = _unequal_data_split_params(
num_examples=num_examples,
num_fl_users=num_fl_users,
max_examples_per_user=max_examples_per_user,
fl_batch_size=fl_batch_size,
one_batch_per_user_only=one_batch_per_user_only,
)
return get_data(
num_examples=num_examples,
num_fl_users=num_fl_users,
examples_per_user=examples_per_user,
fl_batch_size=fl_batch_size,
nonfl_batch_size=nonfl_batch_size,
model=model,
)
def get_equal_split_data(
num_examples: int,
num_fl_users: int,
fl_batch_size: int,
model: IFLModel,
one_batch_per_user_only: bool = False,
) -> Tuple[IFLDataProvider, torch.utils.data.DataLoader]:
"""Assume FL data is equally split among users
Return FLDataProvider and non-fl DataLoader
"""
examples_per_user, nonfl_batch_size = _equal_data_split_params(
num_examples=num_examples,
num_fl_users=num_fl_users,
fl_batch_size=fl_batch_size,
one_batch_per_user_only=one_batch_per_user_only,
)
return get_data(
num_examples=num_examples,
num_fl_users=num_fl_users,
examples_per_user=examples_per_user,
fl_batch_size=fl_batch_size,
nonfl_batch_size=nonfl_batch_size,
model=model,
)
def assert_fl_nonfl_same_equal_data_split(
fl_batch_size: int,
num_examples: int,
num_fl_users: int,
epochs: int,
local_lr: float,
aggregator_config: AsyncAggregatorConfig,
training_rate: int = 1,
training_duration_mean: int = 0,
training_duration_sd: int = 0,
) -> str:
# TODO: can this test share common code (eg, nonFL training) with
# test_trainer._test_fl_nonfl_same_equal_data_split?
torch.manual_seed(1)
# create dummy FL model on alphabet
global_model = DummyAlphabetFLModel()
# if aggregator is FedAdam, each user should have only one batch
# otherwise non-FL and FL training won't be the same
one_batch_per_user_only = "FedAdam" in aggregator_config._target_
fl_data_provider, nonfl_data_loader = get_equal_split_data(
num_examples=num_examples,
num_fl_users=num_fl_users,
fl_batch_size=fl_batch_size,
model=global_model,
one_batch_per_user_only=one_batch_per_user_only,
)
return assert_fl_nonfl_same(
global_model=global_model,
fl_data_provider=fl_data_provider,
nonfl_data_loader=nonfl_data_loader,
epochs=epochs,
local_lr=local_lr,
aggregator_config=aggregator_config,
training_rate=training_rate,
training_duration_mean=training_duration_mean,
training_duration_sd=training_duration_sd,
)
def async_train_one_user(
global_model_at_training_start: IFLModel,
global_model_at_training_end: IFLModel,
batches,
local_lr: float,
) -> IFLModel:
local_model = FLModelParamUtils.clone(global_model_at_training_start)
local_optimizer = torch.optim.SGD(
local_model.fl_get_module().parameters(), lr=local_lr
)
for batch in batches:
FLTestUtils.run_nonfl_training_one_batch(
model=local_model, optimizer=local_optimizer, training_batch=batch
)
simulate_async_global_model_update(
global_model=global_model_at_training_end,
local_model_before_training=global_model_at_training_start,
local_model_after_training=local_model,
)
return global_model_at_training_end
def simulate_async_global_model_update(
global_model: IFLModel,
local_model_before_training: IFLModel,
local_model_after_training: IFLModel,
) -> None:
# TODO: use AsyncAggregator._update_global_model, after John's refactoring
reconstructed_grad = FLModelParamUtils.clone(global_model)
FLModelParamUtils.reconstruct_gradient(
old_model=local_model_before_training.fl_get_module(),
new_model=local_model_after_training.fl_get_module(),
grads=reconstructed_grad.fl_get_module(),
)
FLModelParamUtils.set_gradient(
model=global_model.fl_get_module(),
reference_gradient=reconstructed_grad.fl_get_module(),
)
optimizer = torch.optim.SGD(global_model.fl_get_module().parameters(), lr=1.0)
optimizer.step()
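# A minimal sketch (illustration only) of the update performed above, assuming
# FLModelParamUtils.reconstruct_gradient computes grads = old_model - new_model:
#   global_model <- global_model - 1.0 * (local_before - local_after)
#                == global_model + (local_after - local_before)
# i.e. the client's local model delta is applied directly to the global model.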
def run_fl_training_with_event_generator(
fl_model: IFLModel,
fl_data_provider: IFLDataProvider,
epochs: int,
local_lr: float,
aggregator_config: AsyncAggregatorConfig,
training_event_generator_config: EventGeneratorConfig,
example_weight_config: AsyncExampleWeightConfig = EQUAL_EXAMPLE_WEIGHT_CONFIG,
staleness_weight_config: AsyncStalenessWeightConfig = CONSTANT_STALENESS_WEIGHT_CONFIG,
) -> IFLModel:
torch.manual_seed(1)
async_trainer = create_async_trainer(
model=fl_model,
local_lr=local_lr,
epochs=epochs,
aggregator_config=aggregator_config,
event_generator_config=training_event_generator_config,
example_weight_config=example_weight_config,
staleness_weight_config=staleness_weight_config,
)
fl_trained_model, _ = async_trainer.train(
data_provider=fl_data_provider,
metrics_reporter=FakeMetricReporter(),
num_total_users=fl_data_provider.num_train_users(),
distributed_world_size=1,
)
return fl_trained_model
def get_safe_global_lr(fl_batch_size: int, max_examples_per_user: int) -> float:
"""Return a global_lr to use in FL, that can produce equivalent training
results as non-fl training.
Return value: either 1.0, or a random float between 0 and 10
global_lr can be anything if each user has exactly 1 batch.
otherwise, it must be 1.0
why? because FL will take exactly one global step for each user
while non-FL will take num_examples/batch_size global steps
"""
if fl_batch_size >= max_examples_per_user:
return np.random.random_sample() * 10
else:
return 1.0
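def _example_safe_global_lrs() -> None:
    """Minimal usage sketch (illustration only) for get_safe_global_lr."""
    # Multiple batches per user: only a global lr of 1.0 keeps FL and non-FL equivalent.
    assert get_safe_global_lr(fl_batch_size=4, max_examples_per_user=8) == 1.0
    # At most one batch per user: any global lr works, so a random value in [0, 10) is returned.
    assert 0.0 <= get_safe_global_lr(fl_batch_size=8, max_examples_per_user=4) < 10.0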
| canife-main | FLSim/flsim/utils/tests/helpers/test_async_trainer_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .training_duration_distribution import (
DurationDistributionFromListConfig,
PerExampleGaussianDurationDistributionConfig,
PerUserExponentialDurationDistributionConfig,
PerUserGaussianDurationDistributionConfig,
PerUserHalfNormalDurationDistributionConfig,
PerUserUniformDurationDistributionConfig,
)
ConfigStore.instance().store(
name="base_per_example_gaussian_duration_distribution",
node=PerExampleGaussianDurationDistributionConfig,
group="duration_distribution_generator",
)
ConfigStore.instance().store(
name="base_per_user_gaussian_duration_distribution",
node=PerUserGaussianDurationDistributionConfig,
group="duration_distribution_generator",
)
ConfigStore.instance().store(
name="base_per_user_half_normal_duration_distribution",
node=PerUserHalfNormalDurationDistributionConfig,
group="duration_distribution_generator",
)
ConfigStore.instance().store(
name="base_per_user_uniform_duration_distribution",
node=PerUserUniformDurationDistributionConfig,
group="duration_distribution_generator",
)
ConfigStore.instance().store(
name="base_per_user_exponential_duration_distribution",
node=PerUserExponentialDurationDistributionConfig,
group="duration_distribution_generator",
)
ConfigStore.instance().store(
name="base_duration_distribution_from_list",
node=DurationDistributionFromListConfig,
group="duration_distribution_generator",
)
| canife-main | FLSim/flsim/utils/timing/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import copy
from dataclasses import dataclass, field
from typing import List
import torch
from flsim.utils.config_utils import fullclassname, init_self_cfg
from omegaconf import MISSING
from torch.distributions.exponential import Exponential
from torch.distributions.half_normal import HalfNormal
from torch.distributions.normal import Normal
from torch.distributions.uniform import Uniform
@dataclass
class DurationInfo:
duration: float = 0
class IDurationDistribution(abc.ABC):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=DurationDistributionConfig,
**kwargs,
)
validate_args = False if self.cfg.training_duration_sd == 0 else True
self.gaussian_generator: Normal = Normal(
torch.tensor([self.cfg.training_duration_mean], dtype=torch.float),
torch.tensor([self.cfg.training_duration_sd], dtype=torch.float),
validate_args=validate_args,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def bounded_gaussian_sample(self) -> float:
raw_sample = self.gaussian_generator.sample().item()
# pyre-fixme[16]: `IDurationDistribution` has no attribute `cfg`.
return max(raw_sample, self.cfg.training_duration_min)
@abc.abstractmethod
def training_duration(self, num_training_examples: int) -> float:
pass
class PerExampleGaussianDurationDistribution(IDurationDistribution):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=PerExampleGaussianDurationDistributionConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def training_duration(self, num_training_examples: int) -> float:
one_example_duration = self.bounded_gaussian_sample()
return num_training_examples * one_example_duration
class PerUserGaussianDurationDistribution(IDurationDistribution):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=PerUserGaussianDurationDistributionConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def training_duration(self, num_training_examples: int) -> float:
return self.bounded_gaussian_sample()
class PerUserHalfNormalDurationDistribution(IDurationDistribution):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=PerUserHalfNormalDurationDistributionConfig,
**kwargs,
)
super().__init__(**kwargs)
self.generator: HalfNormal = HalfNormal(scale=self.cfg.training_duration_sd)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def training_duration(self, num_training_examples: int) -> float:
# pyre-fixme[16]: `PerUserHalfNormalDurationDistribution` has no attribute
# `cfg`.
return self.cfg.training_duration_min + self.generator.sample()
class PerUserUniformDurationDistribution(IDurationDistribution):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=PerUserUniformDurationDistributionConfig,
**kwargs,
)
super().__init__(**kwargs)
assert (
self.cfg.training_duration_sd == 0.0
), "Cannot set training duration sd for uniform distribution"
self.generator: Uniform = Uniform(
low=self.cfg.training_duration_min,
high=2 * self.cfg.training_duration_mean - self.cfg.training_duration_min,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def training_duration(self, num_training_examples: int) -> float:
return self.generator.sample().item()
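# Note on the Uniform parameters above (illustration only):
# Uniform(low=training_duration_min, high=2 * training_duration_mean - training_duration_min)
# has mean (low + high) / 2 == training_duration_mean, so the configured mean is
# preserved while samples never fall below training_duration_min.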
class PerUserExponentialDurationDistribution(IDurationDistribution):
"""
    Exponential duration whose rate parameter is 1 / training_duration_mean
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=PerUserExponentialDurationDistributionConfig,
**kwargs,
)
super().__init__(**kwargs)
assert (
self.cfg.training_duration_sd == 0.0
), "Cannot set training duration sd for exponetial"
self.generator: Exponential = Exponential(
rate=1 / self.cfg.training_duration_mean
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def training_duration(self, num_training_examples: int) -> float:
return self.generator.sample().item()
class DurationDistributionFromList(IDurationDistribution):
"""
This class simulates IDurationDistribution
    It returns training durations from a fixed list
Useful for writing unit tests for components that use TrainingEventGenerator
"""
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=DurationDistributionFromListConfig,
**kwargs,
)
super().__init__(**kwargs)
self.training_events = list(self.cfg.training_events)
self.distr: List[DurationInfo] = copy.deepcopy(self.training_events)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def training_duration(self, num_training_examples: int) -> float:
return self.distr.pop(0).duration
@dataclass
class DurationDistributionConfig:
_target_: str = MISSING
_recursive_: bool = False
training_duration_mean: float = 0.0
training_duration_sd: float = 0.0
training_duration_min: float = float("-inf")
@dataclass
class PerExampleGaussianDurationDistributionConfig(DurationDistributionConfig):
_target_: str = fullclassname(PerExampleGaussianDurationDistribution)
@dataclass
class PerUserGaussianDurationDistributionConfig(DurationDistributionConfig):
_target_: str = fullclassname(PerUserGaussianDurationDistribution)
@dataclass
class PerUserHalfNormalDurationDistributionConfig(DurationDistributionConfig):
_target_: str = fullclassname(PerUserHalfNormalDurationDistribution)
@dataclass
class PerUserUniformDurationDistributionConfig(DurationDistributionConfig):
_target_: str = fullclassname(PerUserUniformDurationDistribution)
@dataclass
class PerUserExponentialDurationDistributionConfig(DurationDistributionConfig):
_target_: str = fullclassname(PerUserExponentialDurationDistribution)
@dataclass
class DurationDistributionFromListConfig(DurationDistributionConfig):
_target_: str = fullclassname(DurationDistributionFromList)
training_events: List[DurationInfo] = field(default_factory=list)
| canife-main | FLSim/flsim/utils/timing/training_duration_distribution.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
from typing import List, Optional
import numpy as np
from flsim.utils.timing.training_duration_distribution import IDurationDistribution
class TrainingTimeEstimator:
def __init__(
self,
total_users: int,
users_per_round: int,
epochs: int,
training_dist: IDurationDistribution,
num_examples: Optional[List[int]] = None,
):
self.total_users = total_users
self.users_per_round = users_per_round
self.epochs = epochs
self.rounds = int(self.epochs * self.total_users / self.users_per_round)
self.num_examples: Optional[List[int]] = num_examples
self.training_dist = training_dist
def random_select(self):
"""
Simulate user random selection to return the number of examples for that user
"""
return 1 if self.num_examples is None else random.choice(self.num_examples)
class SyncTrainingTimeEstimator(TrainingTimeEstimator):
def __init__(
self,
total_users: int,
users_per_round: int,
epochs: int,
training_dist: IDurationDistribution,
num_examples: Optional[List[int]] = None,
):
super().__init__(
total_users=total_users,
users_per_round=users_per_round,
epochs=epochs,
num_examples=num_examples,
training_dist=training_dist,
)
def training_time(self):
"""
Returns training time for SyncFL
"""
round_completion_time = [
self.round_completion_time(
users_per_round=self.users_per_round,
num_examples=self.num_examples,
training_dist=self.training_dist,
)
for x in range(self.rounds)
]
return sum(round_completion_time)
def round_completion_time(
self,
users_per_round: int,
        num_examples: Optional[List[int]],
training_dist: IDurationDistribution,
):
"""
Return the max completion time: straggler effect
"""
training_times = [
training_dist.training_duration(self.random_select())
for _ in range(users_per_round)
]
return max(training_times)
class AsyncTrainingTimeEstimator(TrainingTimeEstimator):
def __init__(
self,
total_users: int,
users_per_round: int,
epochs: int,
training_dist: IDurationDistribution,
num_examples: Optional[List[int]] = None,
start_times: Optional[List[int]] = None,
):
super().__init__(
total_users=total_users,
users_per_round=users_per_round,
epochs=epochs,
num_examples=num_examples,
training_dist=training_dist,
)
self.start_times = start_times
def training_time(self):
"""
Returns the training time for AsyncFL
        Assuming clients start training at a linear rate
"""
user_training_events = self.total_users * self.epochs
training_durations = [
self.training_dist.training_duration(self.random_select())
for _ in range(user_training_events)
]
training_start_times = self.training_start_times(
training_durations, user_training_events
)
training_end_times = self.list_sum(training_start_times, training_durations)
return max(training_end_times)
def training_start_times(self, training_durations, user_training_events):
if self.start_times is None:
training_start_delta = np.mean(training_durations) / self.users_per_round
return [
user_index * training_start_delta
for user_index in range(user_training_events)
]
else:
return self.start_times
@classmethod
def list_sum(cls, listA: List[float], listB: List[float]):
ret_val = map(lambda x, y: x + y, listA, listB)
return list(ret_val)
def get_training_time(
num_users: int,
users_per_round: int,
epochs: int,
training_dist: IDurationDistribution,
num_examples: Optional[List[int]] = None,
):
"""
    Returns the estimated training times for SyncFL and AsyncFL
"""
sync_estimator = SyncTrainingTimeEstimator(
total_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
num_examples=num_examples,
training_dist=training_dist,
)
async_estimator = AsyncTrainingTimeEstimator(
total_users=num_users,
users_per_round=users_per_round,
epochs=epochs,
num_examples=num_examples,
training_dist=training_dist,
)
async_time = async_estimator.training_time()
sync_time = sync_estimator.training_time()
print(f"Sync {sync_time} Async {async_time}")
return sync_time, async_time
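# Minimal usage sketch (illustration only), assuming the duration distribution is
# built with hydra's instantiate() as for other FLSim components:
#   from hydra.utils import instantiate
#   from flsim.utils.timing.training_duration_distribution import (
#       PerUserGaussianDurationDistributionConfig,
#   )
#   dist = instantiate(PerUserGaussianDurationDistributionConfig(training_duration_mean=1.0))
#   sync_time, async_time = get_training_time(
#       num_users=100, users_per_round=10, epochs=1, training_dist=dist
#   )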
| canife-main | FLSim/flsim/utils/timing/training_time_estimator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
from dataclasses import dataclass
from flsim.utils.config_utils import fullclassname, init_self_cfg
from omegaconf import MISSING
class StalenessWeight(abc.ABC):
def __init__(self, **kwargs):
"""avg_staleness is used to 'normalize' the weight, such that
weight=1 when staleness=avg_staleness
"""
init_self_cfg(
self,
component_class=__class__,
config_class=AsyncStalenessWeightConfig,
**kwargs,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def _raw_weight(self, staleness: int) -> float:
pass
def weight(self, staleness: int) -> float:
assert staleness >= 0, "Staleness must be non-negative"
# pyre-fixme[16]: `StalenessWeight` has no attribute `cfg`.
return self._raw_weight(staleness) / self._raw_weight(self.cfg.avg_staleness)
class ConstantStalenessWeight(StalenessWeight):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=ConstantStalenessWeightConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _raw_weight(self, staleness: int) -> float:
return 1.0
class ThresholdStalenessWeight(StalenessWeight):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=ThresholdStalenessWeightConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _raw_weight(self, staleness: int) -> float:
# pyre-fixme[16]: `ThresholdStalenessWeight` has no attribute `cfg`.
if staleness <= self.cfg.cutoff:
return 1.0
else:
return self.cfg.value_after_cutoff
class PolynomialStalenessWeight(StalenessWeight):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=PolynomialStalenessWeightConfig,
**kwargs,
)
super().__init__(**kwargs)
assert (
self.cfg.exponent <= 1 and self.cfg.exponent >= 0
), f"PolynomialExponent must be between 0 and 1, inclusive. Got {self.cfg.exponent}"
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _raw_weight(self, staleness: int) -> float:
# pyre-fixme[16]: `PolynomialStalenessWeight` has no attribute `cfg`.
denom = (1 + staleness) ** self.cfg.exponent
return 1 / denom
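# A minimal sketch (illustration only) of the normalized weight produced by
# StalenessWeight.weight() for the polynomial case:
#   weight(staleness) = _raw_weight(staleness) / _raw_weight(avg_staleness)
#                     = ((1 + avg_staleness) / (1 + staleness)) ** exponent
# so weight == 1.0 exactly when staleness == avg_staleness and decays for staler updates.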
@dataclass
class AsyncStalenessWeightConfig:
_target_: str = MISSING
_recursive_: bool = False
avg_staleness: int = 1
@dataclass
class ConstantStalenessWeightConfig(AsyncStalenessWeightConfig):
_target_: str = fullclassname(ConstantStalenessWeight)
@dataclass
class ThresholdStalenessWeightConfig(AsyncStalenessWeightConfig):
_target_: str = fullclassname(ThresholdStalenessWeight)
cutoff: int = MISSING
value_after_cutoff: float = MISSING
@dataclass
class PolynomialStalenessWeightConfig(AsyncStalenessWeightConfig):
_target_: str = fullclassname(PolynomialStalenessWeight)
exponent: float = MISSING
| canife-main | FLSim/flsim/utils/async_trainer/async_staleness_weights.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
from dataclasses import dataclass
from enum import Enum, auto
import numpy as np
from flsim.data.data_provider import IFLDataProvider, IFLUserData
@dataclass
class AsyncUserSelectorInfo:
r"""
Dataclass to encapsulate a selected user for async training
    user_data (IFLUserData): selected user data in the dataset
user_index (int): the index for user_data assuming IFLDataProvider.train_data is a List
"""
user_data: IFLUserData
user_index: int
class AsyncUserSelector(abc.ABC):
def __init__(self, data_provider: IFLDataProvider):
self.data_provider: IFLDataProvider = data_provider
@abc.abstractmethod
def get_random_user(self) -> AsyncUserSelectorInfo:
r"""
Returns a random IFLUserData from the dataset and the user index (for testing)
"""
pass
class RandomAsyncUserSelector(AsyncUserSelector):
def __init__(self, data_provider: IFLDataProvider):
super().__init__(data_provider)
def get_random_user(self) -> AsyncUserSelectorInfo:
user_index = np.random.randint(0, self.data_provider.num_train_users())
return AsyncUserSelectorInfo(
user_data=self.data_provider.get_train_user(user_index),
user_index=user_index,
)
class RoundRobinAsyncUserSelector(AsyncUserSelector):
r"""
Chooses users in round-robin order, starting from user=0.
Particularly useful for testing.
"""
def __init__(self, data_provider: IFLDataProvider):
super().__init__(data_provider)
self.current_user_index: int = 0
def get_random_user(self) -> AsyncUserSelectorInfo:
user_index = self.current_user_index
self.current_user_index = (
self.current_user_index + 1
) % self.data_provider.num_train_users()
return AsyncUserSelectorInfo(
user_data=self.data_provider.get_train_user(user_index),
user_index=user_index,
)
class AsyncUserSelectorType(Enum):
RANDOM = auto()
ROUND_ROBIN = auto()
class AsyncUserSelectorFactory:
@classmethod
def create_users_selector(
cls, type: AsyncUserSelectorType, data_provider: IFLDataProvider
):
if type == AsyncUserSelectorType.RANDOM:
return RandomAsyncUserSelector(data_provider)
elif type == AsyncUserSelectorType.ROUND_ROBIN:
return RoundRobinAsyncUserSelector(data_provider)
else:
raise AssertionError(f"Unknown user selector type: {type}")
| canife-main | FLSim/flsim/utils/async_trainer/async_user_selector.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
Note: Ignore pyre errors here, because we are dynamically instantiating
class attributes for our Configs and Pyre just complains that it cannot
figure out where the attributes are getting initialized. Putting Optional here
is not a choice for us, because we want to differentiate between Optional and
strictly required attributes in our Config classes.
Reference: https://fburl.com/4cdf3akr
"""
from __future__ import annotations
from dataclasses import dataclass
from flsim.utils.async_trainer.async_example_weights import (
AsyncExampleWeightConfig,
EqualExampleWeightConfig,
)
from flsim.utils.async_trainer.async_staleness_weights import (
AsyncStalenessWeightConfig,
ConstantStalenessWeightConfig,
)
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.fl.stats import RandomVariableStatsTracker
from hydra.utils import instantiate
from omegaconf import OmegaConf
class AsyncWeight:
def __init__(self, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=AsyncWeightConfig,
**kwargs,
)
# pyre-fixme[16]: `AsyncWeight` has no attribute `cfg`.
self.example_weight = instantiate(self.cfg.example_weight)
self.staleness_weight = instantiate(self.cfg.staleness_weight)
self.stats = RandomVariableStatsTracker()
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.staleness_weight, "_target_"):
cfg.staleness_weight = ConstantStalenessWeightConfig()
if OmegaConf.is_missing(cfg.example_weight, "_target_"):
cfg.example_weight = EqualExampleWeightConfig()
def weight(self, num_examples: float, staleness: int) -> float:
weight = self.example_weight.weight(
num_examples
) * self.staleness_weight.weight(staleness)
self.stats.update(weight)
return weight
@dataclass
class AsyncWeightConfig:
_target_: str = fullclassname(AsyncWeight)
_recursive_: bool = False
staleness_weight: AsyncStalenessWeightConfig = AsyncStalenessWeightConfig()
example_weight: AsyncExampleWeightConfig = AsyncExampleWeightConfig()
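# Usage sketch (an assumption: AsyncWeight is built through hydra's `instantiate`, the
# same pattern this class uses for its own sub-weights; `_example_async_weight` is
# illustrative, not part of FLSim). The final weight is the product of the example-count
# factor and the staleness factor; with EqualExampleWeight the example factor is 1.0
# regardless of the number of examples.
def _example_async_weight() -> float:
    cfg = AsyncWeightConfig(
        staleness_weight=ConstantStalenessWeightConfig(),
        example_weight=EqualExampleWeightConfig(),
    )
    async_weight = instantiate(cfg)
    return async_weight.weight(num_examples=10, staleness=5)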
| canife-main | FLSim/flsim/utils/async_trainer/async_weights.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .async_example_weights import (
EqualExampleWeightConfig,
LinearExampleWeightConfig,
Log10ExampleWeightConfig,
SqrtExampleWeightConfig,
)
from .async_staleness_weights import (
ConstantStalenessWeightConfig,
PolynomialStalenessWeightConfig,
ThresholdStalenessWeightConfig,
)
from .async_weights import AsyncWeightConfig
from .training_event_generator import (
AsyncTrainingEventGeneratorConfig,
AsyncTrainingEventGeneratorFromListConfig,
ConstantAsyncTrainingStartTimeDistrConfig,
PoissonAsyncTrainingStartTimeDistrConfig,
)
ConfigStore.instance().store(
name="base_log10_example_weight",
node=Log10ExampleWeightConfig,
group="example_weight",
)
ConfigStore.instance().store(
name="base_sqrt_example_weight",
node=SqrtExampleWeightConfig,
group="example_weight",
)
ConfigStore.instance().store(
name="base_linear_example_weight",
node=LinearExampleWeightConfig,
group="example_weight",
)
ConfigStore.instance().store(
name="base_equal_example_weight",
node=EqualExampleWeightConfig,
group="example_weight",
)
ConfigStore.instance().store(
name="base_constant_staleness_weight",
node=ConstantStalenessWeightConfig,
group="staleness_weight",
)
ConfigStore.instance().store(
name="base_threshold_staleness_weight",
node=ThresholdStalenessWeightConfig,
group="staleness_weight",
)
ConfigStore.instance().store(
name="base_polynomial_staleness_weight",
node=PolynomialStalenessWeightConfig,
group="staleness_weight",
)
ConfigStore.instance().store(
name="base_async_weight",
node=AsyncWeightConfig,
group="async_weight",
)
ConfigStore.instance().store(
name="base_poisson_training_start_time_distribution",
node=PoissonAsyncTrainingStartTimeDistrConfig,
group="training_start_time_distribution",
)
ConfigStore.instance().store(
name="base_constant_training_start_time_distribution",
node=ConstantAsyncTrainingStartTimeDistrConfig,
group="training_start_time_distribution",
)
ConfigStore.instance().store(
name="base_async_training_event_generator_from_list",
node=AsyncTrainingEventGeneratorFromListConfig,
group="training_event_generator",
)
ConfigStore.instance().store(
name="base_async_training_event_generator",
node=AsyncTrainingEventGeneratorConfig,
group="training_event_generator",
)
| canife-main | FLSim/flsim/utils/async_trainer/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""Pyre complains about non-default class members in config being not initialized
"""
from __future__ import annotations
import abc
from dataclasses import dataclass
from math import log10, sqrt
from flsim.utils.config_utils import fullclassname, init_self_cfg
from omegaconf import MISSING
class ExampleWeight(abc.ABC):
def __init__(self, **kwargs):
"""avg_num_examples is used to 'normalize' the weight, such that
weight=1 for the average user
"""
init_self_cfg(
self,
component_class=__class__,
config_class=AsyncExampleWeightConfig,
**kwargs,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def _raw_weight(self, num_examples: float) -> float:
pass
def weight(self, num_examples: float) -> float:
assert num_examples > 0, "Num examples must be positive"
return self._raw_weight(num_examples) / self._raw_weight(
# pyre-fixme[16]: `ExampleWeight` has no attribute `cfg`.
self.cfg.avg_num_examples
)
class EqualExampleWeight(ExampleWeight):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=EqualExampleWeightConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _raw_weight(self, num_examples: float) -> float:
return 1.0
class LinearExampleWeight(ExampleWeight):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=LinearExampleWeightConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _raw_weight(self, num_examples: float) -> float:
return num_examples
class SqrtExampleWeight(ExampleWeight):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=SqrtExampleWeightConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _raw_weight(self, num_examples: float) -> float:
return sqrt(num_examples)
class Log10ExampleWeight(ExampleWeight):
def __init__(self, **kwargs):
init_self_cfg(
self,
component_class=__class__,
config_class=Log10ExampleWeightConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def _raw_weight(self, num_examples: float) -> float:
return log10(1 + num_examples)
@dataclass
class AsyncExampleWeightConfig:
_target_: str = MISSING
_recursive_: bool = False
avg_num_examples: int = 1
@dataclass
class EqualExampleWeightConfig(AsyncExampleWeightConfig):
_target_: str = fullclassname(EqualExampleWeight)
@dataclass
class LinearExampleWeightConfig(AsyncExampleWeightConfig):
_target_: str = fullclassname(LinearExampleWeight)
@dataclass
class SqrtExampleWeightConfig(AsyncExampleWeightConfig):
_target_: str = fullclassname(SqrtExampleWeight)
@dataclass
class Log10ExampleWeightConfig(AsyncExampleWeightConfig):
_target_: str = fullclassname(Log10ExampleWeight)
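# Worked sketch (an assumption: weights are built through hydra's `instantiate`, as FLSim
# does elsewhere; `_example_sqrt_weight` is illustrative and not part of FLSim). Every
# weight is normalized by the raw weight of `avg_num_examples`, so a user with exactly
# the average number of examples always gets weight 1.0.
def _example_sqrt_weight() -> float:
    from hydra.utils import instantiate  # not imported at module level in this file

    weight = instantiate(SqrtExampleWeightConfig(avg_num_examples=4))
    # sqrt(16) / sqrt(4) = 4 / 2 = 2.0
    return weight.weight(num_examples=16)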
| canife-main | FLSim/flsim/utils/async_trainer/async_example_weights.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from dataclasses import dataclass
from enum import Enum, auto
from flsim.utils.async_trainer.training_event_generator import IEventGenerator
class TrainingState(Enum):
    # Ordering is important
    # For devices that have the same next_event_time(), we want devices that
    # are "further along" in training to be chosen first
    # hence, TRAINING_FINISHED < TRAINING < WAITING_FOR_START
TRAINING_FINISHED = auto()
TRAINING = auto()
WAITING_FOR_START = auto()
# https://docs.python.org/3/library/enum.html#orderedenum
def __lt__(self, other):
if self.__class__ is other.__class__:
return self.value < other.value
return NotImplemented
@dataclass
class TrainingSchedule:
r"""
Class to represent a client training time duration
"""
creation_time: float
start_time: float
end_time: float
class TrainingScheduleFactory:
@classmethod
def create(
cls,
current_time: float,
event_generator: IEventGenerator,
num_examples: int,
):
creation_time = current_time
start_time = creation_time + event_generator.time_to_next_event_start()
duration = event_generator.training_duration(num_examples)
end_time = start_time + duration
return TrainingSchedule(creation_time, start_time, end_time)
class DeviceState:
r"""
Represents the state of a device that's either waiting to start training,
or in the middle of training
"""
def __init__(self, training_schedule: TrainingSchedule):
self.training_schedule: TrainingSchedule = training_schedule
self.training_state: TrainingState = TrainingState.WAITING_FOR_START
def get_training_state(self):
return self.training_state
    # when we start training, we get the initial model as input
def training_started(self) -> None:
self.training_state = TrainingState.TRAINING
def training_ended(self) -> None:
self.training_state = TrainingState.TRAINING_FINISHED
def next_event_time(self):
if self.training_state == TrainingState.WAITING_FOR_START:
return self.training_schedule.start_time
else:
return self.training_schedule.end_time
def __lt__(self, other):
        # if two device states have the same 'next_event_time', choose the one
# that has 'smaller' training_state
# smaller training state => further along in training
if self.next_event_time() == other.next_event_time():
return self.training_state < other.training_state
return self.next_event_time() < other.next_event_time()
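# Small sketch (illustrative only, not part of the original module): because DeviceState
# defines __lt__ on (next_event_time, training_state), a heap pops the device whose next
# event fires first, breaking ties in favor of devices further along in training.
def _example_device_ordering() -> bool:
    import heapq

    early = DeviceState(TrainingSchedule(creation_time=0.0, start_time=1.0, end_time=5.0))
    late = DeviceState(TrainingSchedule(creation_time=0.0, start_time=2.0, end_time=3.0))
    heap = [late, early]
    heapq.heapify(heap)
    # `early` starts training first (start_time 1.0 < 2.0), so it is popped first.
    return heapq.heappop(heap) is early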
| canife-main | FLSim/flsim/utils/async_trainer/device_state.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import abc
import copy
import math
from dataclasses import dataclass
from typing import List
import numpy as np
from flsim.utils.config_utils import fullclassname, init_self_cfg
from flsim.utils.timing.training_duration_distribution import (
DurationDistributionConfig,
PerExampleGaussianDurationDistributionConfig,
)
from hydra.utils import instantiate
from omegaconf import MISSING, OmegaConf
@dataclass
class EventTimingInfo:
r"""
Used only for testing
"""
prev_event_start_to_current_start: int
duration: int
class IAsyncTrainingStartTimeDistr(abc.ABC):
"""Abstract class for generating training-start events in AsyncFL"""
def __init__(self, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=AsyncTrainingStartTimeDistrConfig,
**kwargs,
)
assert (
# pyre-fixme[16]: `IAsyncTrainingStartTimeDistr` has no attribute `cfg`.
self.cfg.training_rate
> 0
), f"Event rate must be positive, got {self.cfg.training_rate}"
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
def time_to_next_event_start(self) -> float:
pass
class PoissonAsyncTrainingStartTimeDistr(IAsyncTrainingStartTimeDistr):
"""Training start times are poisson distributed"""
def __init__(self, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=PoissonAsyncTrainingStartTimeDistrConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def time_to_next_event_start(self) -> float:
# if events are poisson distributed,
# time to next event is exponentially distributed
# -ln(U)/lambda = time to next event (from CDF of exponential distribution)
u = np.random.random()
# pyre-fixme[16]: `PoissonAsyncTrainingStartTimeDistr` has no attribute `cfg`.
return -(math.log(u)) / self.cfg.training_rate
class ConstantAsyncTrainingStartTimeDistr(IAsyncTrainingStartTimeDistr):
"""Gap between training start times is constant"""
def __init__(self, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=ConstantAsyncTrainingStartTimeDistrConfig,
**kwargs,
)
super().__init__(**kwargs)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def time_to_next_event_start(self) -> float:
# pyre-fixme[16]: `ConstantAsyncTrainingStartTimeDistr` has no attribute `cfg`.
return 1.0 / self.cfg.training_rate
class IEventGenerator(abc.ABC):
"""Class that generates both training_start and training_duration events"""
@abc.abstractmethod
def time_to_next_event_start(self) -> float:
pass
@abc.abstractmethod
def training_duration(self, num_training_examples: int) -> float:
pass
class AsyncTrainingEventGenerator(IEventGenerator):
"""Class that generates both training_start and training_duration events"""
def __init__(self, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=AsyncTrainingEventGeneratorConfig,
**kwargs,
)
self._validate_cfg()
self._training_start_time_distr = instantiate(
# pyre-fixme[16]: `AsyncTrainingEventGenerator` has no attribute `cfg`.
self.cfg.training_start_time_distribution
)
self._training_duration_distr = instantiate(
self.cfg.duration_distribution_generator
)
def _validate_cfg(self):
        # looping over the config fields raises in case of a missing field
for _ in self.cfg.items():
pass
@classmethod
def _set_defaults_in_cfg(cls, cfg):
if OmegaConf.is_missing(cfg.training_start_time_distribution, "_target_"):
cfg.training_start_time_distribution = (
ConstantAsyncTrainingStartTimeDistrConfig()
)
if OmegaConf.is_missing(cfg.duration_distribution_generator, "_target_"):
cfg.duration_distribution_generator = (
PerExampleGaussianDurationDistributionConfig()
)
def time_to_next_event_start(self) -> float:
return self._training_start_time_distr.time_to_next_event_start()
def training_duration(self, num_training_examples: int) -> float:
return self._training_duration_distr.training_duration(num_training_examples)
class AsyncTrainingEventGeneratorFromList(IEventGenerator):
"""This class simulates TrainingEventGenerator
It returns time-to-next-event and event-duration from a fixed list
Useful for writing unit tests for components that use TrainingEventGenerator
"""
def __init__(self, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=AsyncTrainingEventGeneratorFromListConfig,
**kwargs,
)
self._validate_cfg()
self.distr: List[EventTimingInfo] = copy.deepcopy(
# pyre-fixme[16]: `AsyncTrainingEventGeneratorFromList` has no attribute
# `cfg`.
list(self.cfg.training_events)
)
self.training_events = list(self.cfg.training_events)
self.current_event: EventTimingInfo = EventTimingInfo(
prev_event_start_to_current_start=0, duration=0
)
def _validate_cfg(self):
        # looping over the config fields raises in case of a missing field
for _ in self.cfg.items():
pass
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
def time_to_next_event_start(self) -> float:
self.set_next_event()
return self.current_event.prev_event_start_to_current_start
def training_duration(self, num_training_examples: int) -> float:
return self.current_event.duration
def set_next_event(self):
if len(self.distr) == 0:
self.distr = copy.deepcopy(self.training_events)
self.current_event = self.distr.pop(0)
@dataclass
class AsyncTrainingStartTimeDistrConfig:
_target_: str = MISSING
_recursive_: bool = False
# Average number of devices training per unit time
training_rate: float = 1.0
@dataclass
class PoissonAsyncTrainingStartTimeDistrConfig(AsyncTrainingStartTimeDistrConfig):
_target_: str = fullclassname(PoissonAsyncTrainingStartTimeDistr)
@dataclass
class ConstantAsyncTrainingStartTimeDistrConfig(AsyncTrainingStartTimeDistrConfig):
_target_: str = fullclassname(ConstantAsyncTrainingStartTimeDistr)
@dataclass
class EventGeneratorConfig:
_target_: str = MISSING
_recursive_: bool = False
@dataclass
class AsyncTrainingEventGeneratorFromListConfig(EventGeneratorConfig):
_target_: str = fullclassname(AsyncTrainingEventGeneratorFromList)
# list of (time-to-next-event-start, event-duration) tuples
training_events: List[EventTimingInfo] = MISSING
@dataclass
class AsyncTrainingEventGeneratorConfig(EventGeneratorConfig):
_target_: str = fullclassname(AsyncTrainingEventGenerator)
training_start_time_distribution: AsyncTrainingStartTimeDistrConfig = (
AsyncTrainingStartTimeDistrConfig()
)
duration_distribution_generator: DurationDistributionConfig = (
DurationDistributionConfig()
)
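# Usage sketch (an assumption: the generator is built via hydra's `instantiate`, as
# elsewhere in FLSim; `_example_event_generator` is illustrative only). The list-backed
# generator replays fixed (gap, duration) pairs and wraps around once the list is
# exhausted, which makes event timing deterministic in unit tests.
def _example_event_generator():
    events = [
        EventTimingInfo(prev_event_start_to_current_start=1, duration=2),
        EventTimingInfo(prev_event_start_to_current_start=3, duration=4),
    ]
    gen = instantiate(AsyncTrainingEventGeneratorFromListConfig(training_events=events))
    # Gaps come out as 1, 3, then wrap back to 1.
    return [gen.time_to_next_event_start() for _ in range(3)]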
| canife-main | FLSim/flsim/utils/async_trainer/training_event_generator.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/utils/data/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import hashlib
import time
from collections import defaultdict
from itertools import zip_longest
from typing import Any, Dict, Generator, Iterable, List, Optional
import torch
def batchify(
iterable: Iterable[Any], batch_size: int, drop_last: Optional[bool] = False
) -> Generator:
"""
Groups list into batches
Example:
>>> batchify([1, 2, 3, 4, 5], 2)
>>> [[1, 2], [3, 4], [5]]
"""
iterators = [iter(iterable)] * batch_size
for batch in zip_longest(*iterators, fillvalue=None):
batch = [ex for ex in batch if ex is not None]
if drop_last and len(batch) != batch_size:
break
yield batch
def merge_dicts(batch: List[Dict[str, torch.Tensor]]) -> Dict[str, torch.Tensor]:
"""
Merge a list of dictionaries into one dictionary
Example:
>>> merge_dicts([{"a": torch.Tensor([1])}, {"a": torch.Tensor([2])}])
>>> {"a": torch.Tensor([1.0, 2.0])},
"""
res = defaultdict(list)
for ex in batch:
for key, value in ex.items():
res[key].append(value)
return {k: torch.cat(v) for k, v in res.items()}
def stable_hash(base: int = 100000) -> int:
md5 = hashlib.md5(str(time.time()).encode("utf-8"))
return int.from_bytes(md5.digest(), byteorder="little") % base
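# Quick sketch (illustrative only, not part of the original module): batchify and
# merge_dicts are typically used together to turn per-example dicts into batched tensors.
def _example_batchify_and_merge():
    examples = [{"x": torch.tensor([float(i)])} for i in range(5)]
    batches = list(batchify(examples, batch_size=2))  # batch sizes 2, 2, 1
    # Each merged batch stacks the per-example "x" tensors into one tensor.
    return [merge_dicts(batch)["x"] for batch in batches]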
| canife-main | FLSim/flsim/utils/data/data_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Iterable, Iterator, List, Optional
from flsim.data.data_provider import (
FLDataProviderFromList,
IFLDataProvider,
IFLUserData,
)
from flsim.utils.sample_model import MockFLModel
def create_mock_data_provider(
num_users: int, examples_per_user: int
) -> IFLDataProvider:
# one_user_data has 1 batch of len = examples_per_user
one_user_data = [list(range(examples_per_user))]
data = [one_user_data] * num_users
return FLDataProviderFromList(
train_user_list=data,
eval_user_list=data,
test_user_list=data,
model=MockFLModel(num_examples_per_user=examples_per_user),
)
class FakeUserData(IFLUserData):
"""
    Fake data for a single user.
"""
def __init__(
self,
gen_batch: Callable[[int, Any], Any],
num_batches: int = 1,
batch_size: int = 2,
val: Optional[float] = None,
):
"""
gen_batch is a callable that gets a batch_size
and generates a simulated batch with the same size
"""
self.gen_batch = gen_batch
self._num_batches = num_batches
self.batch_size = batch_size
self.val = val
def train_data(self) -> Iterator[Any]:
# TODO add flag for a final batch being incomplete
for _ in range(self._num_batches):
yield self.gen_batch(self.batch_size, self.val)
def num_train_examples(self) -> int:
return self._num_batches * self.batch_size
def num_batches(self) -> int:
return self._num_batches
def eval_data(self):
# TODO add flag for a final batch being incomplete
for _ in range(self._num_batches):
yield self.gen_batch(self.batch_size, self.val)
def num_eval_batches(self):
return 0
def num_train_batches(self):
return self._num_batches
def num_eval_examples(self):
return 0
class FakeDataProvider(IFLDataProvider):
def __init__(
self,
gen_batch: Callable[[int, Any], Any],
num_batches: int = 1,
batch_size: int = 2,
num_users: int = 10,
random: bool = False,
rank: int = 0,
world_size: int = 1,
):
self.user_data = [
FakeUserData(
gen_batch, num_batches, batch_size, None if random else i / num_users
)
for i in range(rank, num_users, world_size)
]
self.eval = FakeUserData(
gen_batch, num_batches, batch_size, None if random else 1
)
self._num_users = num_users // world_size
self._num_total_users = num_users
def __iter__(self) -> Iterable[IFLUserData]:
yield from self.user_data
def __getitem__(self, index) -> IFLUserData:
return self.user_data[index]
def train_user_ids(self) -> List[int]:
return list(range(self._num_users))
def num_train_users(self) -> int:
return self._num_users
def num_total_users(self) -> int:
return self._num_total_users
def get_train_user(self, user_index: int) -> IFLUserData:
return self.user_data[user_index]
def train_users(self) -> Iterable[IFLUserData]:
return self.user_data
def eval_users(self) -> Iterable[IFLUserData]:
return [self.eval]
def test_users(self) -> Iterable[IFLUserData]:
return [self.eval]
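# Quick sketch (illustrative only, not part of the original module): a FakeDataProvider
# built from a trivial batch generator, handy for exercising trainer code paths in tests.
def _example_fake_provider():
    def gen_batch(batch_size, val):
        # The generator just repeats the per-user value batch_size times.
        return [val] * batch_size

    provider = FakeDataProvider(gen_batch, num_batches=2, batch_size=3, num_users=4)
    first_user = provider.get_train_user(0)
    # 4 users, each with num_batches * batch_size = 6 training examples.
    return provider.num_train_users(), first_user.num_train_examples()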
| canife-main | FLSim/flsim/utils/data/fake_data_utils.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from io import BytesIO
import numpy as np
import torch
import torch.utils.data as data
class DummyImageDataset(data.Dataset):
def __init__(
self,
num_classes=10,
num_images_per_class=10,
num_channels=1,
image_dim=(28, 28),
):
self.num_classes = num_classes
self.num_images_per_class = num_images_per_class
if num_channels == 1:
data_dim = (
self.num_classes * self.num_images_per_class,
image_dim[0],
image_dim[1],
)
else:
data_dim = (
self.num_classes * self.num_images_per_class,
num_channels,
image_dim[0],
image_dim[1],
)
self.data = torch.from_numpy(
np.random.uniform(low=0.0, high=1.0, size=data_dim)
)
self.targets = list(range(self.num_classes))
self.labels = torch.LongTensor(self.targets * self.num_images_per_class)
def get_dataset(self):
stream = BytesIO()
torch.save((self.data, self.labels), stream)
return stream
def __len__(self):
return self.num_classes * self.num_images_per_class
def __getitem__(self, idx):
return self.data[idx], self.labels[idx].item()
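# Usage sketch (illustrative only, not part of the original module): the dataset plugs
# directly into a torch DataLoader, which is handy for smoke-testing image models.
def _example_dataloader():
    dataset = DummyImageDataset(num_classes=2, num_images_per_class=3)
    loader = data.DataLoader(dataset, batch_size=3, shuffle=False)
    images, labels = next(iter(loader))
    # Shapes are (3, 28, 28) for images and (3,) for labels with the defaults above.
    return images.shape, labels.shape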
| canife-main | FLSim/flsim/utils/data/dummy_image_dataset.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
| canife-main | FLSim/flsim/metrics_reporter/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import abc
import copy
from typing import Any, Dict, List, Optional, Tuple
from flsim.common.timeline import Timeline
from flsim.interfaces.batch_metrics import IFLBatchMetrics
from flsim.interfaces.metrics_reporter import (
Channel,
IFLMetricsReporter,
Metric,
TrainingStage,
)
from torch.utils.tensorboard import SummaryWriter
class FLMetricsReporter(IFLMetricsReporter, abc.ABC):
"""
This is a MetricsReporter with Tensorboard support.
"""
def __init__(self, channels: List[Channel], log_dir: Optional[str] = None):
self.channels = channels
self.log_dir = log_dir
if Channel.TENSORBOARD in channels:
self.set_summary_writer(log_dir)
if Channel.STDOUT in channels:
self.print = print
self.losses = []
self.num_examples_list = []
self.predictions_list = []
self.targets_list = []
self.model_inputs_list = []
self.latest_scores: Dict[str, Any] = {}
self.best_eval_metrics = None
def set_summary_writer(self, log_dir: Optional[str]):
self.writer = SummaryWriter(log_dir=log_dir)
def add_batch_metrics(self, metrics: IFLBatchMetrics) -> None:
self.losses.append(metrics.loss.item())
self.num_examples_list.append(metrics.num_examples)
self.predictions_list.append(metrics.predictions)
self.targets_list.append(metrics.targets)
self.model_inputs_list.append(metrics.model_inputs)
def aggregate(self, one_user_metrics):
pass
def report_metrics(
self,
reset: bool,
stage: TrainingStage,
extra_metrics: Optional[List[Metric]] = None,
**kwargs,
) -> Tuple[Any, bool]:
metrics = self._report_metrics(
reset=reset, stage=stage, extra_metrics=extra_metrics, **kwargs
)
if stage != TrainingStage.EVAL:
return (metrics, False)
if self.best_eval_metrics is None or self.compare_metrics(
metrics, self.best_eval_metrics
):
self.best_eval_metrics = copy.deepcopy(metrics)
return (metrics, True)
else:
return (metrics, False)
def _report_metrics(
self,
reset: bool,
stage: TrainingStage,
extra_metrics: Optional[List[Metric]] = None,
**kwargs,
) -> Any:
timeline: Timeline = kwargs.get("timeline", Timeline(global_round=1))
# handle legacy case when epoch was provided
epoch = kwargs.get("epoch", 0)
if epoch > 0 and timeline.global_round == 1:
timeline = Timeline(epoch=epoch, round=1)
eval_metrics = None
training_stage_in_str = TrainingStage(stage).name.title()
if len(self.losses) > 0:
mean_loss = sum(self.losses) / len(self.losses)
if Channel.STDOUT in self.channels:
self.print(f"{timeline}, Loss/{training_stage_in_str}: {mean_loss}")
if Channel.TENSORBOARD in self.channels:
self.writer.add_scalar(
f"Loss/{training_stage_in_str}",
mean_loss,
timeline.global_round_num(),
)
scores = self.compute_scores()
self.latest_scores = scores
for score_name, score in scores.items():
if Channel.STDOUT in self.channels:
self.print(
f"{timeline}, {score_name}/{training_stage_in_str}: {score}"
)
if Channel.TENSORBOARD in self.channels:
self.writer.add_scalar(
f"{score_name}/{training_stage_in_str}",
score,
timeline.global_round_num(),
)
eval_metrics = self.create_eval_metrics(
scores, mean_loss, timeline=timeline, stage=stage
)
# handle misc reporting values
metrics = extra_metrics or []
for metric in metrics:
value = Metric.to_dict(metric.value) if metric.is_compund else metric.value
if Channel.STDOUT in self.channels:
self.print(
f"{timeline}, {metric.name}/{training_stage_in_str}: {value}"
)
if Channel.TENSORBOARD in self.channels:
self.writer.add_scalars(
f"{metric.name}/{training_stage_in_str}",
value,
timeline.global_round_num(),
) if metric.is_compund else self.writer.add_scalar(
f"{metric.name}/{training_stage_in_str}",
value,
timeline.global_round_num(),
)
if reset:
self.reset()
return eval_metrics
def reset(self):
self.losses = []
self.num_examples_list = []
self.predictions_list = []
self.targets_list = []
self.model_inputs_list = []
def get_latest_scores(self) -> Dict[str, Any]:
return self.latest_scores
@abc.abstractmethod
def compare_metrics(self, eval_metrics, best_metrics) -> bool:
"""One should provide concrete implementation of how to compare
eval_metrics and best_metrics.
Return True if eval_metrics is better than best_metrics
"""
pass
@abc.abstractmethod
def compute_scores(self) -> Dict[str, Any]:
"""One should override this method to specify how to compute scores
(e.g. accuracy) of the model based on metrics.
Return dictionary where key is name of the scores and value is
score.
"""
pass
@abc.abstractmethod
def create_eval_metrics(
self, scores: Dict[str, Any], total_loss: float, **kwargs
) -> Any:
"""One should provide concrete implementation of how to construct
object that represents evaluation metrics based on scores and total
loss. Most of the case, one would just pick one of the scores or
total loss as the evaluation metrics to pick the better model, but
this interface also allows s/he to make evaluation metrics more
complex and use it in conjunction with compare_metrics() function
to determine which metrics is the better one.
"""
pass
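# Minimal concrete subclass sketch (an illustration, not part of FLSim): it scores
# accuracy from the buffered predictions/targets and treats higher accuracy as better.
# It assumes predictions are per-class logits; construct it with e.g. [Channel.STDOUT].
class _ExampleAccuracyReporter(FLMetricsReporter):
    def compare_metrics(self, eval_metrics, best_metrics) -> bool:
        return eval_metrics > best_metrics

    def compute_scores(self) -> Dict[str, Any]:
        correct, total = 0, 0
        for preds, targets in zip(self.predictions_list, self.targets_list):
            correct += (preds.argmax(dim=1) == targets).sum().item()
            total += targets.numel()
        return {"Accuracy": correct / max(total, 1)}

    def create_eval_metrics(self, scores, total_loss, **kwargs):
        return scores["Accuracy"]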
| canife-main | FLSim/flsim/metrics_reporter/tensorboard_metrics_reporter.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""This file contains optimizers for the server.
The server expects an IServerOptimizer with two abstract methods: step and zero_grad.
This interface is similar to torch.optim.Optimizer.
Typical usage example:
optimizer = OptimizerType.create_optimizer(model=model, config=config)
optimizer.step()
"""
from __future__ import annotations
import abc
from dataclasses import dataclass
from enum import Enum
import torch
import torch.nn as nn
from flsim.optimizers.layerwise_optimizers import LAMB, LARS
from flsim.utils.config_utils import fullclassname, init_self_cfg, is_target
from omegaconf import MISSING
class IServerOptimizer(abc.ABC):
def __init__(self, *, model: nn.Module, **kwargs):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=ServerOptimizerConfig,
**kwargs,
)
self.model = model
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@abc.abstractmethod
@torch.no_grad()
def step(self, closure):
r"""Performs a single optimization step (parameter update).
Args:
closure (callable): A closure that reevaluates the model and
returns the loss. Optional for most optimizers.
.. note::
Unless otherwise specified, this function should not modify the
``.grad`` field of the parameters.
"""
raise NotImplementedError
@abc.abstractmethod
def zero_grad(self, set_to_none: bool = False):
r"""Sets the gradients of all optimized :class:`torch.Tensor` s to zero.
Args:
set_to_none (bool): instead of setting to zero, set the grads to None.
This will in general have lower memory footprint, and can modestly improve performance.
However, it changes certain behaviors. For example:
1. When the user tries to access a gradient and perform manual ops on it,
a None attribute or a Tensor full of 0s will behave differently.
2. If the user requests ``zero_grad(set_to_none=True)`` followed by a backward pass, ``.grad``\ s
are guaranteed to be None for params that did not receive a gradient.
3. ``torch.optim`` optimizers have a different behavior if the gradient is 0 or None
(in one case it does the step with a gradient of 0 and in the other it skips
the step altogether).
"""
raise NotImplementedError
class FedAvgWithLROptimizer(IServerOptimizer, torch.optim.SGD):
def __init__(self, *, model: nn.Module, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedAvgWithLROptimizerConfig,
**kwargs,
)
IServerOptimizer.__init__(self, model=model, **kwargs)
torch.optim.SGD.__init__(
self,
params=self.model.parameters(),
# pyre-ignore[16] Undefined attribute
lr=self.cfg.lr,
momentum=self.cfg.momentum,
)
def step(self, closure=None):
return torch.optim.SGD.step(self, closure)
def zero_grad(self, set_to_none: bool = False):
return torch.optim.SGD.zero_grad(self, set_to_none)
class FedAvgOptimizer(IServerOptimizer, torch.optim.SGD):
def __init__(self, *, model: nn.Module, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedAvgOptimizerConfig,
**kwargs,
)
IServerOptimizer.__init__(self, model=model, **kwargs)
torch.optim.SGD.__init__(
self,
params=self.model.parameters(),
lr=1.0,
momentum=0,
)
def step(self, closure=None):
return torch.optim.SGD.step(self, closure)
def zero_grad(self, set_to_none: bool = False):
return torch.optim.SGD.zero_grad(self, set_to_none)
class FedAdamOptimizer(IServerOptimizer, torch.optim.Adam):
def __init__(self, *, model: nn.Module, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedAdamOptimizerConfig,
**kwargs,
)
IServerOptimizer.__init__(self, model=model, **kwargs)
torch.optim.Adam.__init__(
self,
params=self.model.parameters(),
# pyre-ignore[16] Undefined attribute
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
betas=(self.cfg.beta1, self.cfg.beta2),
eps=self.cfg.eps,
)
def step(self, closure=None):
return torch.optim.Adam.step(self, closure)
def zero_grad(self, set_to_none: bool = False):
return torch.optim.Adam.zero_grad(self, set_to_none)
class FedLARSOptimizer(IServerOptimizer, LARS):
def __init__(self, *, model: nn.Module, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=FedLARSOptimizerConfig,
**kwargs,
)
IServerOptimizer.__init__(self, model=model, **kwargs)
LARS.__init__(
self,
params=self.model.parameters(),
# pyre-ignore[16] Undefined attribute
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
beta=self.cfg.beta,
)
def step(self, closure=None):
return LARS.step(self, closure)
def zero_grad(self, set_to_none: bool = False):
return LARS.zero_grad(self, set_to_none)
class FedLAMBOptimizer(IServerOptimizer, LAMB):
def __init__(self, *, model: nn.Module, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
            config_class=FedLAMBOptimizerConfig,
**kwargs,
)
IServerOptimizer.__init__(self, model=model, **kwargs)
LAMB.__init__(
self,
params=self.model.parameters(),
# pyre-ignore[16] Undefined attribute
lr=self.cfg.lr,
weight_decay=self.cfg.weight_decay,
beta1=self.cfg.beta1,
beta2=self.cfg.beta2,
eps=self.cfg.eps,
)
def step(self, closure=None):
return LAMB.step(self, closure)
def zero_grad(self, set_to_none: bool = False):
return LAMB.zero_grad(self, set_to_none)
@dataclass
class ServerOptimizerConfig:
_target_: str = MISSING
_recursive_: bool = False
@dataclass
class FedAvgOptimizerConfig(ServerOptimizerConfig):
_target_: str = fullclassname(FedAvgOptimizer)
@dataclass
class FedAvgWithLROptimizerConfig(ServerOptimizerConfig):
_target_: str = fullclassname(FedAvgWithLROptimizer)
lr: float = 0.001
momentum: float = 0.0
@dataclass
class FedAdamOptimizerConfig(ServerOptimizerConfig):
_target_: str = fullclassname(FedAdamOptimizer)
lr: float = 0.001
weight_decay: float = 0.00001
beta1: float = 0.9
beta2: float = 0.999
eps: float = 1e-8
@dataclass
class FedLARSOptimizerConfig(ServerOptimizerConfig):
_target_: str = fullclassname(FedLARSOptimizer)
lr: float = 0.001
weight_decay: float = 0.00001
beta: float = 0.9
@dataclass
class FedLAMBOptimizerConfig(ServerOptimizerConfig):
_target_: str = fullclassname(FedLAMBOptimizer)
lr: float = 0.001
weight_decay: float = 0.00001
beta1: float = 0.9
beta2: float = 0.999
eps: float = 1e-8
class OptimizerType(Enum):
fed_avg: str = FedAvgOptimizerConfig._target_
fed_avg_with_lr: str = FedAvgWithLROptimizerConfig._target_
fed_adam: str = FedAdamOptimizerConfig._target_
fed_lamb: str = FedLAMBOptimizerConfig._target_
fed_lars: str = FedLARSOptimizerConfig._target_
@staticmethod
def create_optimizer(
model: nn.Module, config: ServerOptimizerConfig
) -> IServerOptimizer:
if is_target(config, FedAvgWithLROptimizerConfig):
return torch.optim.SGD(
model.parameters(),
# pyre-ignore[16] Undefined attribute
lr=config.lr,
# pyre-ignore[16] Undefined attribute
momentum=config.momentum,
)
elif is_target(config, FedAdamOptimizerConfig):
return torch.optim.Adam(
model.parameters(),
lr=config.lr,
# pyre-ignore[16] Undefined attribute
weight_decay=config.weight_decay,
# pyre-ignore[16] Undefined attribute
betas=(config.beta1, config.beta2),
# pyre-ignore[16] Undefined attribute
eps=config.eps,
)
elif is_target(config, FedLARSOptimizerConfig):
# pyre-ignore[7]
return LARS(
model.parameters(),
lr=config.lr,
# pyre-ignore[16] Undefined attribute
beta=config.beta,
weight_decay=config.weight_decay,
)
elif is_target(config, FedLAMBOptimizerConfig):
# pyre-ignore[7]
return LAMB(
model.parameters(),
lr=config.lr,
beta1=config.beta1,
beta2=config.beta2,
weight_decay=config.weight_decay,
eps=config.eps,
)
elif is_target(config, FedAvgOptimizerConfig):
return torch.optim.SGD(
model.parameters(),
lr=1.0,
momentum=0,
)
else:
raise ValueError(
f"Optimizer type {config._target_} not found. Please update OptimizerType.create_optimizer"
)
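# Server-side usage sketch (an illustration; `_example_server_step` and the way the
# aggregated update is written into `.grad` are assumptions about how the surrounding
# server code feeds updates in, not FLSim's exact wiring).
def _example_server_step(model: nn.Module, aggregated_update):
    config = FedAvgWithLROptimizerConfig(lr=0.1, momentum=0.9)
    optimizer = OptimizerType.create_optimizer(model=model, config=config)
    optimizer.zero_grad()
    # Assume aggregated_update is an iterable of tensors shaped like model.parameters().
    for param, delta in zip(model.parameters(), aggregated_update):
        param.grad = delta
    optimizer.step()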
| canife-main | FLSim/flsim/optimizers/server_optimizers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch.optim.optimizer import Optimizer
class LARS(Optimizer):
r"""Implements LARS algorithm.
It has been proposed in `Large Batch Training of Convolutional Networks`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
beta (float, optional): coefficient used for computing
running averages of gradient. (default: 0.9)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(self, params, lr=1e-3, beta=0.9, weight_decay=0):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= beta < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(beta))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = {"lr": lr, "beta": beta, "weight_decay": weight_decay}
super(LARS, self).__init__(params, defaults)
@torch.no_grad()
def get_update(self, p, grad, state, group):
if group["weight_decay"] != 0:
grad.add_(p.data, alpha=group["weight_decay"])
# State initialization
if len(state) == 0:
state["step"] = 0
# Moving averages will be updated _in place_
# Exponential moving average of gradient values
state["exp_avg"] = torch.clone(grad).detach()
# m_{t-1}
exp_avg = state["exp_avg"]
beta = group["beta"]
state["step"] += 1
# Decay the first moment running average coefficient
exp_avg.mul_(beta).add_(grad, alpha=1 - beta)
return exp_avg
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError("LARS does not support sparse gradients")
state = self.state[p]
update = self.get_update(p, grad, state, group)
update_norm = update.pow(2).sum().sqrt()
weight_norm = p.data.pow(2).sum().sqrt()
# The LAMB paper suggests bounding the weight norm by some
# hyperparameters but we choose to eliminate unnecessary
# hyperparameters
scaling_function = weight_norm
assert update_norm != 0
update.mul_(scaling_function / update_norm)
p.data.add_(update, alpha=-group["lr"])
return loss
class LAMB(LARS):
r"""Implements LAMB algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
beta1 (float, optional): coefficient used for computing
running averages of gradient (default 0.9)
beta2 (float, optional): coefficient used for computing
running average of gradient squared (default 0.999)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(
self, params, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8, weight_decay=0
):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= beta1 < 1.0:
raise ValueError("Invalid beta1: {}".format(beta1))
if not 0.0 <= beta2 < 1.0:
raise ValueError("Invalid beta2: {}".format(beta2))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = {
"lr": lr,
"beta1": beta1,
"beta2": beta2,
"eps": eps,
"weight_decay": weight_decay,
}
Optimizer.__init__(self, params, defaults)
@torch.no_grad()
def get_update(self, p, grad, state, group):
# State initialization
if len(state) == 0:
state["step"] = 0
# Moving averages will be updated _in place_
# Exponential moving average of gradient values
state["exp_avg"] = torch.zeros_like(p, memory_format=torch.preserve_format)
# Exponential moving average of squared gradient values
state["exp_avg_sq"] = torch.zeros_like(
p, memory_format=torch.preserve_format
)
# m_{t-1} and v_{t-1}
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
beta1 = group["beta1"]
beta2 = group["beta2"]
state["step"] += 1
bias_correction1 = 1 - beta1 ** state["step"]
bias_correction2 = 1 - beta2 ** state["step"]
# m_t = (beta1 * m_{t-1} + (1-beta1)*g_t)
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
# v_t = (beta2 * v_{t-1} + (1-beta2)*g_t^2)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)
m_update = torch.div(exp_avg, bias_correction1)
v_update = torch.div(exp_avg_sq, bias_correction2)
# denom = sqrt(v_t) + eps
denom = torch.add(v_update.sqrt(), group["eps"])
# update = l2-penalty + m_{t} / denom
m_update.div_(denom)
if group["weight_decay"] != 0:
m_update.add_(p.data, alpha=group["weight_decay"])
return m_update
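# Usage sketch (illustrative only, not part of the original module): LARS and LAMB follow
# the standard torch.optim interface, so they drop into an ordinary training step.
def _example_lars_step() -> float:
    model = torch.nn.Linear(4, 2)
    optimizer = LARS(model.parameters(), lr=0.01, beta=0.9, weight_decay=1e-4)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()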
| canife-main | FLSim/flsim/optimizers/layerwise_optimizers.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from hydra.core.config_store import ConfigStore # @manual
from .async_aggregators import (
FedAdamAsyncAggregatorConfig,
FedAdamFedBuffAggregatorConfig,
FedAvgWithLRAsyncAggregatorConfig,
FedAvgWithLRFedBuffAggregatorConfig,
FedAvgWithLRWithMomentumAsyncAggregatorConfig,
)
from .local_optimizers import LocalOptimizerFedProxConfig, LocalOptimizerSGDConfig
from .optimizer_scheduler import (
ArmijoLineSearchSchedulerConfig,
ConstantLRSchedulerConfig,
LRBatchSizeNormalizerSchedulerConfig,
)
from .server_optimizers import (
FedAdamOptimizerConfig,
FedAvgOptimizerConfig,
FedAvgWithLROptimizerConfig,
FedLAMBOptimizerConfig,
FedLARSOptimizerConfig,
)
from .sync_aggregators import (
FedAdamSyncAggregatorConfig,
FedAvgSyncAggregatorConfig,
FedAvgWithLRSyncAggregatorConfig,
FedLAMBSyncAggregatorConfig,
FedLARSSyncAggregatorConfig,
)
ConfigStore.instance().store(
name="base_optimizer_sgd",
node=LocalOptimizerSGDConfig,
group="optimizer",
)
ConfigStore.instance().store(
name="base_optimizer_fedprox",
node=LocalOptimizerFedProxConfig,
group="optimizer",
)
ConfigStore.instance().store(
name="base_constant_lr_scheduler",
node=ConstantLRSchedulerConfig,
group="lr_scheduler",
)
ConfigStore.instance().store(
name="base_lr_batch_size_normalizer_scheduler",
node=LRBatchSizeNormalizerSchedulerConfig,
group="lr_scheduler",
)
ConfigStore.instance().store(
name="base_armijo_line_search_lr_scheduer",
node=ArmijoLineSearchSchedulerConfig,
group="lr_scheduler",
)
ConfigStore.instance().store(
name="base_fed_avg_sync_aggregator",
node=FedAvgSyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_avg_with_lr_sync_aggregator",
node=FedAvgWithLRSyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_adam_sync_aggregator",
node=FedAdamSyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_lars_sync_aggregator",
node=FedLARSSyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_lamb_sync_aggregator",
node=FedLAMBSyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_avg_with_lr_async_aggregator",
node=FedAvgWithLRAsyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_avg_with_lr_with_momentum_async_aggregator",
node=FedAvgWithLRWithMomentumAsyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_adam_async_aggregator",
node=FedAdamAsyncAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_avg_with_lr_hybrid_aggregator",
node=FedAvgWithLRFedBuffAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_adam_hybrid_aggregator",
node=FedAdamFedBuffAggregatorConfig,
group="aggregator",
)
ConfigStore.instance().store(
name="base_fed_adam",
node=FedAdamOptimizerConfig,
group="server_optimizer",
)
ConfigStore.instance().store(
name="base_fed_avg",
node=FedAvgOptimizerConfig,
group="server_optimizer",
)
ConfigStore.instance().store(
name="base_fed_avg_with_lr",
node=FedAvgWithLROptimizerConfig,
group="server_optimizer",
)
ConfigStore.instance().store(
name="base_fed_lars",
node=FedLARSOptimizerConfig,
group="server_optimizer",
)
ConfigStore.instance().store(
name="base_fed_lamb",
node=FedLAMBOptimizerConfig,
group="server_optimizer",
)
| canife-main | FLSim/flsim/optimizers/__init__.py |
#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Any, Dict
import torch
from flsim.utils.config_utils import fullclassname, init_self_cfg
from omegaconf import MISSING
from torch.nn import Module as Model # @manual
class LocalOptimizer:
def __init__(self, *, model: Model, **kwargs):
init_self_cfg(
self,
component_class=__class__, # pyre-fixme[10]: Name `__class__` is used but not defined.
config_class=LocalOptimizerConfig,
**kwargs,
)
self.model = model
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
class LocalOptimizerSGD(LocalOptimizer, torch.optim.SGD):
def __init__(self, *, model: Model, **kwargs) -> None:
init_self_cfg(
self,
component_class=__class__,
config_class=LocalOptimizerSGDConfig,
**kwargs,
)
super().__init__(model=model, **kwargs)
torch.optim.SGD.__init__(
self=self,
params=self.model.parameters(),
# pyre-fixme[16]: `LocalOptimizerSGD` has no attribute `cfg`.
lr=self.cfg.lr,
momentum=self.cfg.momentum,
weight_decay=self.cfg.weight_decay,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@staticmethod
def dict_config(
lr: float = 0.001, momentum: float = 0.0, weight_decay: float = 0.0
) -> Dict[str, Any]:
"""Allows downstream functions to get configs given lr and momentum
With this function, we can change implementation of
LocalSGDOptimizer.dict_config without changing downstream code
"""
return {
"_target_": LocalOptimizerSGDConfig._target_,
"lr": lr,
"momentum": momentum,
"weight_decay": weight_decay,
}
class LocalOptimizerFedProx(LocalOptimizer, torch.optim.SGD):
def __init__(
self,
*,
model: Model,
**kwargs,
):
init_self_cfg(
self,
component_class=__class__,
config_class=LocalOptimizerFedProxConfig,
**kwargs,
)
super().__init__(model=model, **kwargs)
torch.optim.SGD.__init__(
self=self,
params=self.model.parameters(),
# pyre-fixme[16]: `LocalOptimizerFedProx` has no attribute `cfg`.
lr=self.cfg.lr,
momentum=self.cfg.momentum,
weight_decay=self.cfg.weight_decay,
)
@classmethod
def _set_defaults_in_cfg(cls, cfg):
pass
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
weight_decay = group["weight_decay"]
momentum = group["momentum"]
dampening = group["dampening"]
nesterov = group["nesterov"]
for p in group["params"]:
if p.grad is None:
continue
d_p = p.grad
param_state = self.state[p]
if "global_model" not in param_state:
param_state["global_model"] = torch.clone(p.data).detach()
if weight_decay != 0:
d_p = d_p.add(p, alpha=weight_decay)
if momentum != 0:
if "momentum_buffer" not in param_state:
buf = param_state["momentum_buffer"] = torch.clone(d_p).detach()
else:
buf = param_state["momentum_buffer"]
buf.mul_(momentum).add_(d_p, alpha=1 - dampening)
if nesterov:
d_p = d_p.add(buf, alpha=momentum)
else:
d_p = buf
d_p.add_(p.data - param_state["global_model"], alpha=self.cfg.mu)
p.add_(d_p, alpha=-group["lr"])
return loss
@dataclass
class LocalOptimizerConfig:
_target_: str = MISSING
_recursive_: bool = False
lr: float = 0.001
momentum: float = 0.0
weight_decay: float = 0.0
@dataclass
class LocalOptimizerSGDConfig(LocalOptimizerConfig):
_target_: str = fullclassname(LocalOptimizerSGD)
@dataclass
class LocalOptimizerFedProxConfig(LocalOptimizerConfig):
_target_: str = fullclassname(LocalOptimizerFedProx)
mu: float = 0.0
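# Usage sketch (an assumption: the local optimizer is built through hydra's `instantiate`,
# the standard FLSim pattern for config-driven components; `_example_local_sgd_step` is
# illustrative only). The local optimizer wraps torch's SGD, so the client training loop
# uses the familiar zero_grad/backward/step cycle.
def _example_local_sgd_step() -> float:
    from hydra.utils import instantiate  # not imported at module level in this file

    model = torch.nn.Linear(4, 2)
    optimizer = instantiate(LocalOptimizerSGDConfig(lr=0.05), model=model)
    loss = model(torch.randn(8, 4)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()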
| canife-main | FLSim/flsim/optimizers/local_optimizers.py |