Dataset Viewer
| python_code (string, 0–4.04M chars) | repo_name (string, 7–58 chars) | file_path (string, 5–147 chars) |
|---|---|---|
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import copy
import json
import sys
sys.path.append("./FLSim")
from FLSim.flsim.utils.config_utils import fl_config_from_json
def bool_flag(s):
    """
    Parse boolean arguments from the command line.
    """
    if s.lower() in ["False", "false"]:
        return False
    elif s.lower() in ["True", "true"]:
        return True
    else:
        raise argparse.ArgumentTypeError("invalid value for a boolean flag")
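# Usage sketch (illustrative): any flag declared with type=bool_flag accepts
# "true"/"True" and "false"/"False" strings on the command line, e.g.
#   parser = argparse.ArgumentParser()
#   parser.add_argument("--checkpoint", type=bool_flag, default=True)
#   parser.parse_args(["--checkpoint", "false"]).checkpoint  # -> False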
def flsim_args(parser):
    parser.add_argument(
        "--dp-level",
        default="user_level",
        type=str,
        help="FLSim DP level (User or item level DP). Defaults to user_level.",
    )
    parser.add_argument(
        "--gpu-mem-minimiser",
        default="False",
        type=bool_flag,
        help="FLSim, whether to use the GPUMemoryMinimiser",
    )
    parser.add_argument(
        "--debug-config",
        default="False",
        type=bool_flag,
        help="For debugging: Whether to use FLSim debug configs (without CanarySyncTrainer)",
    )
    parser.add_argument(
        "--users-per-round",
        default=1,
        type=int,
        help="FLSim, Sets the number of users per round for training + attacking FL models",
    )
    parser.add_argument(
        "--client-epochs",
        default=1,
        type=int,
        help="FLSim, number of local epochs per user",
    )
    parser.add_argument(
        "--num-local-updates",
        default=-1,
        type=int,
        help="FLSim, number of local updates made by a user. -1 if users have varying number of local batches (default)",
    )
    parser.add_argument(
        "--server-clip-const",
        default=1,
        type=int,
        help="Sets the FLSim 'clipping_value' parameter. This is the clipping constant of model updates.",
    )
    parser.add_argument(
        "--canary-design-reverse-server-clip",
        default=False,
        type=bool_flag,
        help="For debugging: If True, will design and test on unclipped server updates, but will still train the model on clipped server updates",
    )
    parser.add_argument(
        "--insert-canary-as-batch",
        default=False,
        type=bool_flag,
        help="Whether to insert the canary as a sample or an entire batch. Does not need to be set, will be updated based on canary-insert-batch-index",
    )
    parser.add_argument(
        "--canary-insert-global-round",
        default=-1,
        type=int,
        help="FLSim, the global round to insert the canary into, overrides canary-insert-epoch",
    )
    parser.add_argument(
        "--canary-insert-offset",
        default=1,
        type=int,
        help="FLSim, used in train_and_freeze and continuous testing and is the round period between attacks",
    )
    parser.add_argument(
        "--canary-insert-batch-index",
        default="batch",
        type=str,
        help="FLSim, the batch index to insert the canary. Options: 0,-1, 'batch', Default: batch (i.e inserts canary on its own)",
    )
    parser.add_argument(
        "--canary-design-local-models",
        type=bool_flag,
        default=False,
        help="For debugging: If True and canary_insert_batch_index=-1, then design canaries on the (num_local_updates-1)th model",
    )
    
    parser.add_argument(
        "--canary-insert-train-acc",
        default=-1,
        type=int,
        help="In FLSim, inserts canary after model achieves train acc >= canary-insert-train-acc, overrides canary-insert-epoch and canary-insert-global-round",
    )
    parser.add_argument(
        "--canary-insert-test-acc",
        default=-1,
        type=int,
        help="In FLSim, inserts canary after model achieves given test acc, overrides canary-insert-epoch, canary-insert-global-round and canary-insert-train-acc",
    )
    parser.add_argument(
        "--canary-insert-type",
        default="",
        type=str,
        help="Types: train (acc), test (acc)",
    )
    parser.add_argument(
        "--canary-test-type",
        default="freeze",
        type=str,
        help="Takes values: 'freeze', 'train_and_freeze', 'continuous'",
    )
    parser.add_argument(
        "--canary-insert-acc-threshold",
        default=-1,
        type=int,
        help="FLSim, Round or accuracy to design canary at and begin CANIFE attack",
    )
    parser.add_argument(
        "--canary-insert-epsilon",
        default=-1,
        type=float,
        help="FLSim, train model to target epsilon before inserting canary, Default: -1 (disabled)",
    )
    parser.add_argument(
        "--epsilon",
        default=-1,
        type=float,
        help="FLSim, will calibrate noise_multiplier to guarantee epsilon over fl-epochs Default -1 (disabled)",
    )
    parser.add_argument(
        "--fl-server-lr",
        default=-1,
        type=float,
        help="FLSim server lr, Default: -1 (uses FLSim config default)",
    )
    parser.add_argument(
        "--fl-client-lr",
        default=-1,
        type=float,
        help="FLSim client lr, Default: -1 (uses FLSim config default)",
    )
    parser.add_argument(
        "--fl-dropout",
        default=0,
        type=float,
        help="FLSim, model dropout if using simpleconv, Default: 0 (no dropout)",
    )
    parser.add_argument(
        "--fl-checkpoint-only",
        default=False,
        type=bool_flag,
        help="FLSim, Train until canary insertion, save checkpoint and then exit",
    )
    parser.add_argument(
        "--fl-load-checkpoint",
        default=False,
        type=bool_flag,
        help="FLSim, Attempt to load the checkpoint of the experiments parameters if possible, otherwise train from scratch",
    )
    parser.add_argument(
        "--fl-epochs",
        default=-1,
        type=int,
        help="FLSim number of epochs Default: -1 (uses FLSim config epochs)",
    )
    parser.add_argument(
        "--local-batch-size",
        default="",
        type=str,
        help="FLSim, Local batch size of FLSim clients",
    )
    
    parser.add_argument(
        "--override-noise-multiplier",
        default="False",
        type=bool_flag,
        help="FLSim, If True, will override noise multiplier with epsilon/sigma even when loading a DP checkpoint",
    )
def canary_args(parser):
    parser.add_argument(
        "--canary-normalize-optim-grad",
        default="True",
        type=bool_flag,
        help="Normalize grad",
    )
    
    # Takes values: Random, Image, Text
    parser.add_argument(
        "--canary-init",
        default="random",
        type=str,
        help="CANIFE, Method for initialising the canary sample. Default: Randomly initialised (from token space or image space)",
    )
    parser.add_argument(
        "--canary-epochs",
        default=5000,
        type=int,
        help="CANIFE, number of canary design iterations",
    )
    parser.add_argument(
        "--canary-iters",
        default=1,
        type=int,
        help="How many times to repeat the canary experiment. Default: 1",
    )
    parser.add_argument(
        "--canary-clip-const",
        default=1,
        type=float,
        help="CANIFE, Canary sample-grad clip factor. Only used for debugging.",
    )
    # loss1 - Square dot product with batch mean
    # loss2 - Square dot product with per sample gradients
    parser.add_argument(
        "--canary-loss",
        default="loss2",
        type=str,
        help="CANIFE, Canary loss to use. Defaults to loss2 (First term of Eq1 in paper)",
    )
    parser.add_argument(
        "--canary-norm-matching",
        default="True",
        type=bool_flag,
        help="CANIFE, If True, will optimise canary sample to have gradient matched to canary-norm-constant",
    )
    
    parser.add_argument(
        "--canary-norm-loss",
        default="hinge_squared",
        type=str,
        help="For debugging: hinge vs hinge_squared",
    )
    
    parser.add_argument(
        "--canary-norm-constant",
        default=1,
        type=int,
        help="CANIFE, If canary_norm_matching=True, will optimise canary to have norm >= canary-norm-consant",
    )
    # sample_grads = Orthogonal to sample grads
    # model_updates = Orthogonal to model updates
    parser.add_argument(
        "--canary-design-type",
        default="model_updates",
        type=str,
        help="CANIFE, whether to design on clipped model updates or on clipped sample grads. Default: model_updates",
    )
    # freeze / holdout
    # exact
    parser.add_argument(
        "--canary-setup",
        default="exact",
        type=str,
        help="CANIFE, Whether to form the design pool of mock clients from a holdout (test) set or 'exact' (design on current rounds clients)",
    )
    parser.add_argument(
        "--canary-insert-epoch",
        default="1",
        type=str,
        help="FLSim, Epoch to design canary from and carry out CANIFE attack",
    )
    parser.add_argument(
        "--canary-num-test-batches",
        default=50,
        type=int,
        help="Number of batches (from the training set) to test canary against",
    )
    parser.add_argument(
        "--canary-design-sample-size",
        default="",
        type=str,
        help="CANIFE, Design pool sample size. If empty will be inferred from canary-design-minibatch-size",
    )
    parser.add_argument(
        "--canary-design-pool-size",
        default="",
        type=str,
        help="CANIFE, Design pools size. If not empty and using model updates, will override sample size",
    )
    parser.add_argument(
        "--canary-design-minibatch-size",
        default="",
        type=str,
        help="CANIFE, Design optimisation minibatch size. If empty will be set to canary_design_sample_size or users_per_round",
    )
    
    parser.add_argument(
        "--benchmark-design",
        default="False",
        type=bool_flag,
        help="CANIFE, Whether to track canary design time or not. Default: False",
    )
    
    parser.add_argument(
        "--scale-canary-test",
        default="False",
        type=bool_flag,
        help="CANIFE, Debugging"
    )
def parse_args():
    parser = argparse.ArgumentParser(description="PyTorch CIFAR10 Mad Canaries")
    canary_args(parser)
    flsim_args(parser)
    parser.add_argument(
        "--task",
        default="FLSim",
        type=str,
        help="Task",
    )
    parser.add_argument(
        "--model-arch",
        default="simpleconv",
        type=str,
        help="Model arch options: lstm, resnet, simpleconv, shakes_lstm",
    )
    parser.add_argument(
        "--num-classes",
        default=10,
        type=int,
        help="",
    )
    
    parser.add_argument(
        "--sigma",
        type=float,
        default=0,
        metavar="S",
        help="Noise multiplier for DP (default 0)",
    )
    
    parser.add_argument(
        "--delta",
        type=float,
        default=1e-5,
        metavar="D",
        help="Target DP delta (default: 1e-5)",
    )
    parser.add_argument(
        "--disable-dp",
        type=bool_flag,
        default=False,
        help="Not used in FLSim/CANIFE. Disable privacy training and just train with vanilla SGD.",
    )
    parser.add_argument(
        "--skip-acc",
        type=bool_flag,
        default=False,
        help="If True, does not benchmark accuracy when loading a checkpointed model in central canary attack",
    )
    parser.add_argument(
        "--checkpoint",
        type=bool_flag,
        default=True,
        help="Save checkpoints every checkpoint_round during training",
    )
    parser.add_argument(
        "--checkpoint-path",
        type=str,
        default="./local_checkpoints",
        help="path of checkpoints (saving/loading)",
    )
    parser.add_argument(
        "--plot-path",
        type=str,
        default="",
        help="Will output experiment results to DUMP_PATH/PLOT_PATH. Default: '' ",
    )
    parser.add_argument(
        "--dump-path",
        type=str,
        default="./local_checkpoints",
        help="Output path of experiment run.",
    )
    parser.add_argument(
        "--checkpoint-round",
        type=int,
        default=5,
        metavar="k",
        help="Not used. FLSim, Checkpoint every k rounds",
    )
    parser.add_argument(
        "--dataset",
        type=str,
        default="CIFAR10",
        help="Options: CIFAR10, celeba, shakespeare, sent140",
    )
    parser.add_argument(
        "--data-root",
        type=str,
        default="../cifar10",
        help="Location of LEAF datsets or CIFAR10",
    )
    parser.add_argument(
        "--device", type=str, default="cpu", help="Device on which to run the code. Values: cpu or gpu"
    )
    parser.add_argument(
        "--master-port",
        default="12568",
        type=str,
        help="Slurm master port",
    )
    
    parser.add_argument(
        "--debug",
        type=int,
        default=0,
        help="debug level (default: 0)",
    )
    
    parser.add_argument(
        "--prettify-samples",
        type=bool_flag,
        default="False",
        help="CANIFE, For debugging. Disables data augmentation + outputs canary samples",
    )
    return parser.parse_args()
def create_flsim_cfg(args, base_config="./FLSim/examples/configs/"):    
    config_map = {
        "CIFAR10_True": "cifar10_resnet_canary_sample_level.json",
        "CIFAR10_False": "cifar10_resnet_canary_user_level.json",
        "celeba_True": "celeba_example.json",
        "celeba_False": "celeba_resnet_canary_user_level.json",
        "sent140_True": "sent140_config.json",
        "sent140_False": "sent140_canary_user_level.json",
        "femnist_False": "femnist_config.json",
        "shakespeare_False": "shakespeare_config.json"
    }
    config_key = f"{args.dataset}_{args.debug_config}"
    config_name = config_map.get(config_key, None)
    if config_name is None:
        raise Exception("No viable config provided")
    base_config += config_name
    with open(base_config, "r") as config_file:
        json_config = json.load(config_file)
        if args.dp_level == "server_level":
            json_config["config"]["trainer"]["server"]["privacy_setting"]["clipping_value"] = args.flsim_server_clip_const 
        cfg = fl_config_from_json(json_config["config"])
    if args.canary_insert_type != "":
        if args.canary_insert_type == "train":
            args.canary_insert_train_acc = args.canary_insert_acc_threshold
        elif args.canary_insert_type == "test":
            args.canary_insert_test_acc = args.canary_insert_acc_threshold
    if args.canary_insert_batch_index == "batch":
        args.insert_canary_as_batch = True
    else:
        args.canary_insert_batch_index = int(args.canary_insert_batch_index)
    # Data args
    if args.local_batch_size != "":
        cfg["data"]["local_batch_size"] = int(args.local_batch_size)
    if args.dataset == "CIFAR10":
        cfg["data"]["examples_per_user"] = max(args.local_batch_size, 1)*max(args.num_local_updates,1)
    cfg["data"]["data_root"] = args.data_root
    cfg["data"]["canary_iters"] = args.canary_iters
    cfg["data"]["debug_config"] = args.debug_config
    # Model args
    cfg["model"]["model_arch"] = args.model_arch
    cfg["model"]["dropout"] = args.fl_dropout
    # Trainer args
    cfg["trainer"]["checkpoint_only"] = args.fl_checkpoint_only
    cfg["trainer"]["load_checkpoint"] = args.fl_load_checkpoint
    if not args.debug_config:
        args.canary_insert_epoch = int(args.canary_insert_epoch)
        dict_args = copy.deepcopy(vars(args))
        cfg["trainer"]["users_per_round"] = args.users_per_round
        cfg["trainer"]["args"] = dict_args
    cfg["trainer"]["client"]["epochs"] = args.client_epochs
    if args.fl_server_lr != -1:
        cfg["trainer"]["server"]["server_optimizer"]["lr"] = args.fl_server_lr
    if args.fl_client_lr != -1:
        cfg["trainer"]["client"]["optimizer"]["lr"] = args.fl_client_lr
    if "privacy_setting" in cfg["trainer"]["server"]:
        cfg["trainer"]["server"]["privacy_setting"]["clipping_value"] = args.server_clip_const
        cfg["trainer"]["server"]["privacy_setting"]["target_delta"] = args.delta
        cfg["trainer"]["server"]["privacy_setting"]["noise_multiplier"] = args.sigma
    if args.fl_epochs != -1:
        cfg["trainer"]["epochs"] = args.fl_epochs
        
    if args.canary_test_type == "train_and_freeze" and args.epsilon > 0:
        cfg["trainer"]["always_keep_trained_model"] = True
    return cfg
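# Usage sketch (illustrative; mirrors what launcher.py does): parse the CLI args,
# then resolve the base JSON config from args.dataset + args.debug_config:
#   args = parse_args()
#   cfg = create_flsim_cfg(args)  # raises ValueError if no base config matches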
 | 
	canife-main | 
	arg_handler.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import logging
import sys
import matplotlib.pyplot as plt
sys.path.append("./FLSim")
from arg_handler import create_flsim_cfg, parse_args
from FLSim.examples.canary_example import run
from FLSim.flsim.common.logger import Logger
plt.rcParams.update({
    # "text.usetex": True,
    "font.family": "sans-serif",
    "font.sans-serif": ["Helvetica"]})
logging.basicConfig(
    format="%(asctime)s:%(levelname)s:%(message)s",
    datefmt="%m/%d/%Y %H:%M:%S",
    stream=sys.stdout,
)
logger = logging.getLogger("ddp")
logger.setLevel(level=logging.INFO)
num_class_map = {"CIFAR10": 10, "imagenet": 1000, "sent140": 2, "femnist": 62, "celeba": 2, "shakespeare": 80}
# ----------------- Args + Main -----------------
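# Example invocation (illustrative; all flags are defined in arg_handler.py):
#   python launcher.py --task FLSim --dataset CIFAR10 --model-arch resnet \
#       --users-per-round 100 --canary-design-type model_updates --epsilon 10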
if __name__ == "__main__":
    args = parse_args()
    if not args.debug_config:
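        # Resolve the interdependent design-size defaults: "num_users" aliases
        # users_per_round, and empty strings are filled in below based on the
        # design type (sample_grads vs model_updates).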
        args.canary_design_minibatch_size = int(args.users_per_round) if args.canary_design_minibatch_size == "num_users" else args.canary_design_minibatch_size
        args.canary_design_pool_size = int(args.users_per_round) if args.canary_design_pool_size == "num_users" else args.canary_design_pool_size
        if args.canary_design_type == "sample_grads": # Defaults for sample grads
            if args.canary_design_pool_size != "": # Design pool size overrides design sample size
                args.canary_design_sample_size = args.canary_design_pool_size
            else:
                args.canary_design_sample_size = 32 if args.canary_design_minibatch_size == "" else args.canary_design_minibatch_size
                args.canary_design_pool_size = args.canary_design_sample_size
            args.canary_design_minibatch_size = args.canary_design_sample_size if args.canary_design_minibatch_size == "" else args.canary_design_minibatch_size
            args.local_batch_size = 128 if args.local_batch_size == "" else args.local_batch_size
        else: # Defaults for model_updates
            args.local_batch_size = 128 if args.local_batch_size == "" else args.local_batch_size
            if args.canary_design_minibatch_size == "":
                args.canary_design_minibatch_size = int(args.users_per_round) if args.canary_design_type == "model_updates" else int(args.local_batch_size)
            args.canary_design_sample_size = int(args.local_batch_size) * abs(args.num_local_updates) * int(args.canary_design_minibatch_size) if args.canary_design_sample_size == "" else args.canary_design_sample_size
            if args.canary_design_pool_size != "":
                args.canary_design_sample_size = int(args.canary_design_pool_size) * abs(args.num_local_updates) * int(args.local_batch_size)
        
        args.canary_design_sample_size = int(args.canary_design_sample_size)
        args.canary_design_minibatch_size = int(args.canary_design_minibatch_size)
        args.local_batch_size = int(args.local_batch_size)
        args.canary_design_pool_size = int(args.canary_design_pool_size) if args.canary_design_pool_size != "" else -1
            
    args.num_classes = num_class_map[args.dataset]
    if args.task == "FLSim": # Run FLSim with a canary attack
        # Load config and run flsim
        if args.debug == 1:
            Logger.set_logging_level(logging.DEBUG)
        cfg = create_flsim_cfg(args)
        logger.info(f"Running FLSim task on dataset={args.dataset}")
        run(cfg) | 
	canife-main | 
	launcher.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
class Canary():
    def __init__(self, data, init_data, class_label, init_loss=0, init_grad=None, canary_grad=None, final_loss=0, health=0) -> None:
        """Canary class
        Args:
            data: Tensor of final optimised canary
            init_data: Tensor of initial canary (before optimisation)
            class_label: Canary class
            init_loss (float, optional): Initial canary loss. Defaults to 0.
            init_grad (tensor, optional): Initial canary gradient. Defaults to None.
            canary_grad (tensor, optional): Final canary gradient. Defaults to None.
            final_loss (float, optional): Final loss after optimisation. Defaults to 0.
            health (float, optional): Canary health, between 0 and 1. Defaults to 0.
        """
        self.data = data
        self.init_data = init_data
        self.final_loss = final_loss
        self.init_loss = init_loss 
        self.class_label = class_label
        self.health = health
        self.grad = canary_grad
        self.init_grad = init_grad | 
	canife-main | 
	canife/canary.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
# flake8: noqa
from .canary import Canary
from .canary_analyser import CanaryAnalyser
from .canary_designer import CanaryDesigner
from .canary_designer_nlp import CanaryDesignerNLP | 
	canife-main | 
	canife/__init__.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import random
from collections import defaultdict
from timeit import default_timer as timer
import numpy as np
import torch
import torch.autograd as autograd
import torch.optim as optim
from hydra.utils import instantiate
from canife import Canary
from canife.utils import (
    clip_grad,
    compute_batch_grad,
    compute_local_update,
    compute_sample_grads,
    count_params,
    display_gpu_mem,
)
class CanaryDesigner():
    def __init__(self, grad_sample_module, canary_class=None, canary_loss="loss1", canary_norm_loss="hinge_squared", canary_design_type="sample_grads", canary_epochs=1000, 
                     canary_init="random", canary_preprocess=None, canary_clip_const=1, local_batch_size=128, canary_insert_batch_index=0, canary_design_local_models=False, 
                     server_clip_const=1, client_lr=1, num_classes=10, logger=None, local_updates=1, local_epochs=1, optimizer_config=None, dp_level="sample_level", 
                     gpu_mem_minimiser=False, canary_norm_matching=False, canary_norm_constant=50, canary_normalize_optim_grad=True,
                     in_channels=3, image_size=32, benchmark_design=False, **kwargs) -> None:
        self.canary_init = canary_init
        self.canary_loss = canary_loss
        self.canary_norm_loss = canary_norm_loss
        self.canary_norm_matching = canary_norm_matching
        self.canary_norm_constant = canary_norm_constant
        self.canary_normalize_optim_grad = canary_normalize_optim_grad
        
        self.canary_design_type = canary_design_type
        self.canary_class = canary_class
        self.canary_epochs = canary_epochs
        self.canary_clip_const = canary_clip_const
        self.canary_preprocess = canary_preprocess
        self.local_batch_size = local_batch_size
        self.canary_insert_batch_index = canary_insert_batch_index
        self.canary_design_local_models = canary_design_local_models
        self.canary_design_bias = 0
        self.canary_losses = canary_loss # Placeholder; replaced by the per-iteration loss list in _optimise()
        self.canary_type = "image"
        
        self.local_updates = local_updates
        self.local_epochs = local_epochs
        self.server_clip_const = server_clip_const
        self.client_lr = client_lr
        self.dp_level = dp_level
        self.num_classes = num_classes
        self.gpu_mem_minimiser = gpu_mem_minimiser
        
        self.logger = logger
        self.grad_sample_module = grad_sample_module
        self.optimizer_config = optimizer_config
        
        self.in_channels = in_channels
        self.image_size = image_size
        
        self.benchmark_design = benchmark_design
        self.benchmark_times = []
        # If user-level, design canary on unclipped gradients
        if self.dp_level == "user_level":
            self.canary_clip_const = float('inf')
    
    def get_analyser_args(self):
        """Returns attributes of CanaryDesigner which can be used to populate args when creating a CanaryAnalyser
        Returns:
            dict: attributes of CanaryDesigner
        """
        return self.__dict__
    def set_grad_sample_module(self, grad_sample_module):
        """
        Args:
            grad_sample_module (GradSampleModule): GradSampleModule to be used to compute per-sample gradients when designing the canary
        """
        self.grad_sample_module = grad_sample_module
    def _compute_clipped_grad(self, model, criterion, batch, device="cpu"):
        """Computes the clipped gradients of a batch
        Args:
            model: nn.Module to compute clipped grad
            criterion: Loss function
            batch: Batch to compute gradients from
            device (optional): Torch device. Defaults to "cpu".
        Returns:
            Clipped gradient of batch 
        """
        grad = compute_batch_grad(model, criterion, batch, device=device)
        # clip canary grad
        return clip_grad(grad, self.canary_clip_const)
    def _init_canary(self, canary_design_loader):
        """Initialises canary
        Args:
            canary_design_loader: Canary design pool, required for image initialisation
        Returns:
            canary: Canary as a tensor
        """
        if self.canary_init == "random":
            canary = torch.rand(size=(1,self.in_channels,self.image_size,self.image_size))
            canary = canary if self.canary_preprocess is None else self.canary_preprocess(canary)
            self.canary_class = random.randint(0, self.num_classes-1)
        else:
            if self.canary_design_type == "sample_grads": # The specific shapes of design loaders depends on sample_grads vs model_updates
                canary = next(iter(canary_design_loader))[0][0].clone().view(1,self.in_channels,self.image_size,self.image_size)
                self.canary_class = next(iter(canary_design_loader))[1][0].clone().item()
            else:
                canary = next(iter(canary_design_loader))[0][0][0].clone().view(1,self.in_channels,self.image_size,self.image_size)
                self.canary_class = next(iter(canary_design_loader))[0][1][0].clone().item()
        return canary.clone()
    
    def _compute_local_update(self, model, criterion, local_batches, device):
        """Computes a model update from a mock client who has local_batches
        Args:
            model: nn.Module
            criterion: Loss function
            local_batches: Clients local batches
            device: torch device
        Returns:
            model_update: The clipped model update (the 1/lr * batch-size rescaling below is left commented out)
        """
        initial_model_state = copy.deepcopy(model.state_dict())
        model_optimizer = instantiate(self.optimizer_config, model=model)
        local_model_state, local_model_before_insert, _ = compute_local_update(model, criterion, model_optimizer, local_batches, expected_batch_size=self.local_batch_size, local_epochs=self.local_epochs, reverse_batch_scaling=False, device=device)
        # Difference original and local model
        local_update = torch.tensor([]).to(device)
        for name, param in model.named_parameters():
            if param.requires_grad:
                local_update = torch.cat([local_update, (initial_model_state[name].data-local_model_state[name].data).flatten().detach().clone()])
        model.load_state_dict(initial_model_state) # Revert changes made by multiple local updates
        self.logger.debug(f"Mock client local update {local_update}, server clip const {self.server_clip_const}")
        # (1/lr)*B*clip(local update)
        # return (1/self.client_lr)*self.local_batch_size*clip_grad(local_update.cpu(), self.server_clip_const), local_model_before_insert
        return clip_grad(local_update.cpu(), self.server_clip_const), local_model_before_insert
    
    def _compute_aggregated_design_vectors(self, model, grad_dim, canary_design_loader, criterion, device):
        """Computes aggregated design vectors to craft canary on
        Args:
            model: nn.Module
            grad_dim: Gradient dimension of model
            canary_design_loader: Design loader
            criterion: Loss function
            device: torch device
        Returns:
            aggregated_design_vec: Either the aggregated sum of per-sample-gradients (if canary_design_type == sample_grads) or aggregated model updates (if canary_design_type == model_updates)
            batch_design_vecs: Individual per-sample gradients or individual model updates from mock design clients
            local_model_states: The final states of local models if canary_design_type=="model_updates"
        """
        aggregated_design_vec = torch.zeros(size=(grad_dim,))      
        batch_design_vecs = torch.tensor([])  
        local_model_states = []
        if self.canary_design_type == "sample_grads":
            batch_design_vecs = torch.zeros((grad_dim, ))
        elif self.canary_design_type == "model_updates":
            batch_design_vecs = torch.zeros((len(canary_design_loader), grad_dim))
        self.logger.info(" Computing sample grads/model updates from canary design pool...")
        for i, design_batch in enumerate(canary_design_loader):
            if i % 10 == 0:
                self.logger.debug(f" Computing sample grads/model updates of canary design batch={i+1}")
            if self.canary_design_type == "model_updates": # Scaled and clipped model updates
                local_update, before_insert_model_state = self._compute_local_update(model, criterion, design_batch, device) # The design batch is a mock client's local data
                batch_design_vecs[i] = local_update
                aggregated_design_vec += local_update
                local_model_states.append(before_insert_model_state)
                self.logger.debug(f"Mock client {i} scaled local update {local_update}")
                if i == 0:
                    self.logger.info(f"Local design updates are scaled by B={self.local_batch_size}, lr={self.client_lr}, clip const={self.server_clip_const}")
            elif self.canary_design_type == "gradient_pool":
                global_state = copy.deepcopy(self.grad_sample_module.state_dict())
                model_optimizer = instantiate(self.optimizer_config, model=self.grad_sample_module)
                _, _, local_step_sample_grads = compute_local_update(self.grad_sample_module, criterion, model_optimizer, design_batch, device=device, compute_sample_grads=True)
                self.grad_sample_module.load_state_dict(global_state) # Revert changes made by multiple local updates
                batch_design_vecs = torch.cat([batch_design_vecs, local_step_sample_grads], dim=0)
                aggregated_design_vec += local_step_sample_grads.sum(axis=0)
            else: 
                batch_design_vecs, _ = compute_sample_grads(self.grad_sample_module, criterion, design_batch, device=device, clipping_const=self.canary_clip_const)
                aggregated_design_vec += batch_design_vecs.sum(axis=0)
        return aggregated_design_vec, batch_design_vecs, local_model_states
    # Will be overriden for NLP
    def _init_canary_optimisation(self, canary_design_loader, device):
        """Initialises canaries for optimisation
        Args:
            canary_design_loader: Design pool
            device: Torch device
        Returns:
            init_canary: Initial Canary for metrics
            canary: Tensor canary to optimise
            canary_class: Tensor class of canary 
            canary_optimizer: Optimizer over canary
        """
        init_canary = self._init_canary(canary_design_loader)
        canary = init_canary.clone().to(device) # Clone because we keep the initial canary for statistics
        canary.requires_grad = True
        canary_class = torch.tensor([self.canary_class]).to(device)
        
        base_lr = 1
        canary_optimizer = optim.Adam([canary], lr=base_lr)
        return init_canary, canary, canary_class, canary_optimizer
    
    # Will be overriden for NLP
    def _forward_pass_canary(self, model, canary):
        """Runs a forward pass on a canary given a model
        Args:
            model: nn.Module
            canary: canary tensor
        Returns:
            output: Output of model(canary)
        """
        model.train()
        model.zero_grad()
        output = model(canary)
        return output
    # Will be overriden for NLP
    def _post_process_canary(self, model, criterion, canary, canary_class, device="cpu"):
        """Computes final gradient from the canary
        Args:
            model: nn.Module
            criterion: Loss function
            canary: tensor
            canary_class: tensor
            device (optional): torch device, defaults to "cpu".
        Returns:
            canary: Final canary after post-processing
            canary_grad: Final canary gradient
        """
        canary_grad = self._compute_clipped_grad(model, criterion, [canary, canary_class], device=device).detach().cpu()
        return canary, canary_grad
    def _optimise(self, model, criterion, canary_design_loader, device="cpu"):
        """ Optimise over model and design loader to craft a canary
        Args:
            model: nn.Module
            criterion: Loss function
            canary_design_loader: DataLoader or list of tensors that mimics the batch structure of a DataLoader
            device (str, optional): Torch device, defaults to "cpu".
        Returns:
            canary: Canary object
        """
        display_gpu_mem(prefix="Start of optim", device=device, logger=self.logger)
        init_canary, canary, canary_class, canary_optimizer = self._init_canary_optimisation(canary_design_loader, device)
        model = model.to(device)
        model.zero_grad()
        
        # Init optim
        grad_dim = count_params(model)
        self.logger.info(f" Grad Dim {grad_dim}")
        canary_loss = torch.tensor(float("inf"))
        initial_model_state = copy.deepcopy(model.state_dict())
        local_model_states = []
        t, initial_canary_loss = 0,0
        optim_stats = defaultdict(list)
        best_canary = [float("inf"), None, 0]
        optim_improving = True
        aggregated_design_vec = torch.tensor([])
        x_grad_norm = 0
        display_gpu_mem(prefix="After moving model", device=device, logger=self.logger)
        # Compute the aggregated (sum or mean) grads of the canary design set and batch sample grads (if it fits into memory)
        if self.canary_loss == "loss1" or self.canary_design_sample_size <= self.canary_design_minibatch_size or self.canary_design_type != "sample_grads":
            aggregated_design_vec, batch_design_vecs, local_model_states = self._compute_aggregated_design_vectors(model, grad_dim, canary_design_loader, criterion, device)
        display_gpu_mem(prefix="After grad sample comp", device=device, logger=self.logger)
        self.logger.info("\n ===== Beginning canary optimization... =====")
        self.logger.info(f"Canary optimizer {canary_optimizer}")
        if self.canary_loss != "loss1" and (self.canary_design_sample_size <= self.canary_design_minibatch_size or self.canary_design_type != "sample_grads"): # i.e no minibatches
            target = batch_design_vecs # loss2 when sample grads fit into memory or when designing against model updates
            gradient_norms = torch.norm(target, dim=1)
            self.logger.info(f"Design norms {gradient_norms}") # Model updates or sample gradients
            self.logger.info(f"Average design norm {torch.mean(gradient_norms)}")
        else:
            target = aggregated_design_vec # loss1, optimisation target is the aggregated gradients or model updates
        display_gpu_mem(prefix="After target comp", device=device, logger=self.logger)
        parameters = []
        for p in model.parameters():
            if p.requires_grad:
                parameters.append(p)
        loss1_target = target.to(device)    
        epoch_target = loss1_target
        self.logger.debug(f"Pre-optim model arch {model}, {sum([p.flatten().sum() for p in model.parameters()])}")
        display_gpu_mem(prefix="Target moved to gpu", device=device, logger=self.logger)
        # grad_dim = 1
        model.zero_grad()
        while (t<=self.canary_epochs) and optim_improving:
            t += 1
            if (t+1) % 100 == 0:
                loss_mean = np.mean(optim_stats["canary_loss"][-100:])
                self.logger.info(f" Canary optimisation, epoch={t}, initial loss={initial_canary_loss.item()}, average loss (last 100 iters)={loss_mean}, last loss={canary_loss.item()}")
            self.logger.debug(f" Canary grad (w.r.t canary loss) t={t}, {x_grad_norm}")
            if self.benchmark_design:
                start = timer()
                
            # Calculate loss of canary
            canary_optimizer.zero_grad()
            if (t+1) % 100 == 0 or t==1:
                display_gpu_mem(prefix=f"Start of optim t={t}", device=device, logger=self.logger)
            if len(local_model_states) > 0 and self.canary_insert_batch_index == -1 and self.canary_design_local_models:
                model.load_state_dict(local_model_states[random.randint(0, len(local_model_states)-1)]) # Randomly sample a local model to compute canary grad from
            if self.canary_loss == "loss2" and self.canary_design_sample_size > self.canary_design_minibatch_size or self.canary_loss == "loss_both": # minibatching
                if self.canary_design_type == "sample_grads": # Minibatch sample grads 
                    design_batch = next(iter(canary_design_loader))
                    epoch_target, _ = compute_sample_grads(self.grad_sample_module, criterion, design_batch, device, move_grads_to_cpu=False, clipping_const=self.canary_clip_const)  
                else: # Minibatch model updates 
                    idx = torch.ones(target.shape[0]).multinomial(num_samples=self.canary_design_minibatch_size, replacement=False).to(device)
                    epoch_target = target[idx]
            if (t+1) % 100 == 0 or t==1:
                display_gpu_mem(prefix=f"Minibatch optim t={t}", device=device, logger=self.logger)
            output = self._forward_pass_canary(model, canary) 
            loss = criterion(output, canary_class)
            self.logger.debug(f"Model canary {canary}, norm={torch.norm(canary)}")
            self.logger.debug(f"Model output {output}")
            self.logger.debug(f" Model loss t={t}, {loss}")
            canary_loss = torch.zeros(1, requires_grad=True).to(device)
            # HVP-style differentiable gradient: create_graph=True keeps the graph so the canary loss can backprop through grad_f
            grad_f = autograd.grad(loss, parameters, create_graph=True, retain_graph=True)
            grad_f = torch.cat([g.flatten() for g in grad_f])
            self.logger.debug(f" Autograd grad_f t={t}, {grad_f}\n")
            self.logger.debug(f" Sum grad_f t={t}, {torch.sum(grad_f)}\n")
            temp_grad = grad_f.clone().detach().cpu()
            
            # Norm loss
            if self.canary_norm_matching and self.canary_norm_constant-torch.norm(grad_f) > 0:
                if self.canary_norm_loss == "hinge_squared":
                    canary_loss = canary_loss + grad_dim*((self.canary_norm_constant-torch.norm(grad_f)))**2
                else:
                    canary_loss = canary_loss + grad_dim*((self.canary_norm_constant-torch.norm(grad_f)))
            
            # Normalise canary grad
            if self.canary_normalize_optim_grad:
                grad_f = torch.nn.functional.normalize(grad_f, dim=0)*self.server_clip_const
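            # Squared-dot-product objective (the first term of Eq. 1 in the paper, per the
            # --canary-loss help text): penalises alignment between the canary gradient and
            # each design vector, scaled by grad_dim and averaged over the design minibatch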
                
            canary_loss = canary_loss + (grad_dim*(torch.sum(grad_f.view(1,-1) * epoch_target, dim=(1))**2).sum()/epoch_target.shape[0]) # Loss 1/2 term
            if self.canary_loss == "loss_both":
                canary_loss += (grad_dim*(torch.sum(grad_f.view(1,-1) * loss1_target, dim=(1))**2).sum()/loss1_target.shape[0])
                
            self.logger.debug(f" Canary loss t={t}, {canary_loss}\n")
            canary_loss.backward()
            canary_loss = canary_loss.detach().cpu()
            initial_canary_loss = canary_loss if t==1 else initial_canary_loss
            optim_stats["canary_loss"].append(canary_loss.item())
            optim_stats["canary_norm"].append(torch.norm(temp_grad).norm().item())
            x_grad_norm = torch.norm(canary.grad.detach()).cpu()
            if (t+1) % 100 == 0 or t==1:
                display_gpu_mem(prefix=f"Pre-end of optim t={t}", device=device, logger=self.logger)
            if t < self.canary_epochs:
                canary_optimizer.step()
            model.zero_grad()
            # if canary_loss < best_canary[0]:
            if t == self.canary_epochs:
                best_canary = [canary_loss.detach().cpu(), canary.detach().clone().cpu(), t]
            
            if (t+1) % 100 == 0 or t==1:
                display_gpu_mem(prefix=f"End of optim t={t}", device=device, logger=self.logger)
            if self.benchmark_design:
                end = timer()
                self.benchmark_times.append(end-start)
                
        best_canary_loss, canary, best_t = best_canary
        # Computes grad of canary from the model 
        # For NLP this will sample the canary and compute the exact gradient 
        canary, canary_grad = self._post_process_canary(model, criterion, canary, canary_class, device=device)
        init_canary, init_canary_grad = self._post_process_canary(model, criterion, init_canary, canary_class, device=device)
        self.logger.debug(f"Clipped gradient computed {torch.sum(canary_grad)}, {canary_grad}")
        self.logger.info(f" Grad Descent for canary...t={t}")
        self.logger.info(f" Best canary at t={best_t}, {best_canary_loss}")
        canary_health = ((initial_canary_loss-best_canary_loss) / initial_canary_loss).item()
        self.logger.info(f" Canary Norm {torch.norm(canary_grad).item()}")
        self.logger.info(f" Canary Health {canary_health}")
        if self.canary_loss == "loss1" or self.canary_design_sample_size <= self.canary_design_minibatch_size or self.canary_design_type == "model_updates":
            aggregated_design_vec = aggregated_design_vec/self.canary_design_pool_size
            self.canary_design_bias = -torch.dot(canary_grad/torch.norm(canary_grad), aggregated_design_vec).cpu().detach().item()
            self.logger.info(f"Canary grad {canary_grad}")
            self.logger.info(f"Canary grad normalised {canary_grad/torch.norm(canary_grad)}")
            self.logger.info(f"Dot Product <Canary/||grad(can)||, S> {-self.canary_design_bias}")
            self.logger.info(f"Dot Product <Canary/||grad(can)||, S+canary> {torch.dot(canary_grad/torch.norm(canary_grad), aggregated_design_vec + (canary_grad/torch.norm(canary_grad))).cpu().detach().item()}")
            self.logger.info(f"Canary batch gradients {aggregated_design_vec + canary_grad/torch.norm(canary_grad)}")
        self.logger.info(f" x.grad Norm {x_grad_norm}\n\n")
        
        self.canary_losses = optim_stats["canary_loss"]
        self.canary_norms = optim_stats["canary_norm"]
        model.load_state_dict(initial_model_state) 
        return Canary(canary, init_canary, canary_class.item(), init_loss=initial_canary_loss.item(), init_grad=init_canary_grad, 
                        final_loss=best_canary_loss.item(), canary_grad=canary_grad, health=canary_health)
    def _update_design_params(self, canary_design_loader, clients_per_round, design_minibatch_size=None, varying_local_batches=False):
        """Updates relevant design params (canary_design_sample_size, canary_design_pool_size, canary_design_minibatch_size) 
            will infer this from the canary_design_loader and other provided args
        Args:
            canary_design_loader: Design loader
            clients_per_round: Number of clients per FL round; used as the default design minibatch size
            design_minibatch_size (optional): Overrides the inferred minibatch size. Defaults to None, which falls back to clients_per_round
            varying_local_batches (bool): If True then clients have varying batch sizes. Defaults to False.
        """
        example_design_batch = next(iter(canary_design_loader))[0] if self.canary_design_type == "sample_grads" else canary_design_loader[0] # Either a batch of sample gradients or a mock client
        num_local_updates = -1
        
        if self.canary_design_type == "sample_grads":
            self.canary_design_minibatch_size = example_design_batch.shape[0]
            self.local_batch_size = self.canary_design_minibatch_size
            self.canary_design_sample_size = len(canary_design_loader) * self.canary_design_minibatch_size
            self.canary_design_pool_size = self.canary_design_sample_size
        else:
            if not varying_local_batches:
                self.local_batch_size = example_design_batch[0][0].shape[0]
                num_local_updates = len(example_design_batch)
            self.canary_design_minibatch_size = design_minibatch_size if design_minibatch_size else clients_per_round
            self.canary_design_sample_size = sum([sum([batch[0].shape[0] for batch in mock_client]) for mock_client in canary_design_loader])
            self.canary_design_pool_size = len(canary_design_loader)
            if self.canary_design_type == "gradient_pool":
                self.canary_design_pool_size = self.canary_design_sample_size
            if self.canary_design_type == "model_updates" and self.canary_design_minibatch_size > self.canary_design_pool_size:
                self.canary_design_minibatch_size = self.canary_design_pool_size
        self.logger.info(f"Designer inferred design sample size={self.canary_design_sample_size}, design pool={self.canary_design_pool_size}, minibatch size={self.canary_design_minibatch_size}, local updates={num_local_updates}, local client batch size={self.local_batch_size}")
    def design(self, model, criterion, canary_design_loader, clients_per_round=100, varying_local_batches=False, canary_design_minibatch_size=None, device="cpu"):
        """Designs a canary from a given model and design pool (canary_design_loader)
        Args:
            model: nn.Module
            criterion: Loss function
            canary_design_loader: Design loader
            clients_per_round (int, optional): Clients per FL round, used to infer design parameters. Defaults to 100.
            varying_local_batches (bool, optional): If True, design clients contain varying batch sizes. Defaults to False.
            canary_design_minibatch_size (optional): Minibatch size for designing. Defaults to None.
            device (optional): Torch device to design on, defaults to "cpu".
        Returns:
            canary: Canary object
        """
        assert self.grad_sample_module is not None, "Must set_grad_sample_module before designing a canary"
                
        display_gpu_mem(prefix="Start of design", device=device, logger=self.logger) # For debugging
        self.grad_sample_module.to(device)
        display_gpu_mem(prefix="Grad sample module moved", device=device, logger=self.logger) # For debugging
        self.logger.debug(f"Design model arch {model}, {sum([p.flatten().sum() for p in model.parameters()])}")
        # Infer design parameters such as the design pool + sample size from the canary_design_loader
        self._update_design_params(canary_design_loader, clients_per_round, design_minibatch_size=canary_design_minibatch_size, varying_local_batches=varying_local_batches)
        # Optimise and find canary
        canary = self._optimise(model, criterion, canary_design_loader, device)
        # To avoid GPU mem issues with FLSim if using GPUMemoryMinimiser
        if self.gpu_mem_minimiser:
            self.grad_sample_module.to("cpu") 
            model.to("cpu")
        return canary | 
	canife-main | 
	canife/canary_designer.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import itertools
import re
import string
import unicodedata
from typing import List
import torch
# Sent140 character embeddings
class TextProcessorSent140():
    def __init__(self):
        self.all_letters = {c: i for i, c in enumerate(string.printable)}
        self.reverse_map_all_letters = {i: c for i, c in enumerate(string.printable)}
        self.num_letters = len(self.all_letters)
        self.vocab_size = self.num_letters+1
        self.UNK: int = self.num_letters
    
    def unicodeToAscii(self, s):
        return "".join(
            c
            for c in unicodedata.normalize("NFD", s)
            if unicodedata.category(c) != "Mn" and c in self.all_letters
        )
    def split_line(self, line):
        """split given line/phrase into list of words
        Args:
            line: string representing phrase to be split
        Return:
            list of strings, with each string representing a word
        """
        return re.findall(r"[\w']+|[.,!?;]", line)
    def flatten_list(self, nested_list):
        return list(itertools.chain.from_iterable(nested_list))
    def line_to_indices(self, line: str, max_seq_len: int):
        line_list = self.split_line(line)  # split phrase into words
        chars = self.flatten_list([list(word) for word in line_list])
        # padding
        indices: List[int] = [
            self.all_letters.get(letter, self.UNK)
            for i, letter in enumerate(chars)
            if i < max_seq_len
        ]
        indices = indices + ([self.UNK] * (max_seq_len - len(indices)))
        return indices
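    # Example (illustrative): with max_seq_len=5, "hi!" is split into words,
    # flattened to characters, indexed into string.printable and padded with UNK:
    #   TextProcessorSent140().line_to_indices("hi!", 5)
    #   -> [17, 18, 62, 100, 100]  # 'h', 'i', '!', then UNK padding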
    # Assume input is a tensor of indices
    def index_sequence_to_text(self, indices):
        line = ""
        for i in indices:
            line += self.reverse_map_all_letters.get(i.item(), "�")
        return line
    def text_to_index_sequence(self, text):
        return torch.tensor([self.all_letters.get(c, self.UNK) for c in text])
# Preprocessing for Shakespeare
class TextProcessorShakes():
    def __init__(self) -> None:
        self.all_letters = (
            "\n !\"&'(),-.0123456789:;>?ABCDEFGHIJKLMNOPQRSTUVWXYZ[]abcdefghijklmnopqrstuvwxyz}"
        )
        self.vocab_size = len(self.all_letters)
        
    def word_to_indices(self, word):
        """returns a list of character indices
        Args:
            word: string
        Return:
            indices: int list with length len(word)
        """
        indices = []
        for c in word:
            indices.append(self.all_letters.find(c))
        return indices
    def index_sequence_to_text(self, indices):
        line = ""
        for i in indices:
            line += self.all_letters[i]
        return line
    
    def _one_hot(self, index, size):
        """returns one-hot vector with given size and value 1 at given index"""
        vec = [0 for _ in range(size)]
        vec[int(index)] = 1
        return vec
    def letter_to_vec(self, letter):
        """returns one-hot representation of given letter"""
        index = self.all_letters.find(letter)
        return index  # _one_hot(index, NUM_LETTERS)
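# Example (illustrative): TextProcessorShakes maps characters to indices in its
# fixed 80-character vocabulary and back:
#   tp = TextProcessorShakes()
#   tp.word_to_indices("ab")              # -> [53, 54]
#   tp.index_sequence_to_text([53, 54])   # -> "ab"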
def get_plot_path(args, exp_num=1, file_suffix=".png"):
    plot_name = args.model_arch + "_" + args.canary_loss + "_B=" + str(args.local_batch_size)
    if args.canary_setup == "holdout":
        plot_name += "_CanaryDesign=" + str(args.canary_design_sample_size) + "_" + str(args.canary_design_minibatch_size)
    plot_name += "_" + args.canary_setup + "_checkpoint_epoch=" + str(args.canary_insert_epoch) + "_iter=" + str(exp_num)
    plot_path = args.dump_path + args.plot_path + "/" + plot_name + file_suffix
    return plot_path
def state_dict_to_cpu(state_dict):
    """Moves a state dict from GPU to CPU
    Args:
        state_dict: model state dict (on GPU)
    Returns:
        state_dict: model state dict (on CPU)
    """
    for k,v in state_dict.items():
        state_dict[k] = v.detach().clone().cpu()
    return state_dict
def display_gpu_mem(device, logger=None, prefix=""):
    """Debug function - displays device GPU memory statistics
    Args:
        device: GPU device
        logger (_type_, optional): Optional logger. Defaults to None.
        prefix (str, optional): Add prefix to debug output. Defaults to "".
    """
    if str(device) != "cpu":
        mem = torch.cuda.mem_get_info(device=device)
        if logger is None:
            print(prefix, torch.cuda.mem_get_info(device=device))
        else:
            logger.debug(f"{prefix} {mem} {round((mem[1] - mem[0]) / 1024**3, 4)}Gb used")
def count_params(model):
    """Counts number of parameters (that require grad) in a model
    Args:
        model: Model to count params
    Returns:
        Total number of parameters (that require grad)
    """
    return sum(p.numel() for p in model.parameters() if p.requires_grad)
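# Example (illustrative): a torch.nn.Linear(10, 2) layer with bias has
# 10*2 + 2 = 22 trainable parameters:
#   count_params(torch.nn.Linear(10, 2))  # -> 22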
def clip_grad(grad, clip_const=1):
    """Clip gradient
    Args:
        grad (tensor): Gradient to clip
        clip_const (int, optional): Clipping constant. Defaults to 1.
    Returns:
        Clipped gradient tensor
    """
    if torch.norm(grad) > clip_const:
        grad = grad*clip_const / torch.norm(grad)
    return grad
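# Worked example (illustrative): a gradient of norm 2 clipped with clip_const=1
# is rescaled by 1/2, so its norm becomes exactly the clipping constant:
#   g = torch.tensor([2.0, 0.0])   # norm = 2
#   clip_grad(g, clip_const=1)     # -> tensor([1., 0.]), norm = 1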
def compute_batch_grad(model, criterion, batch, device="cpu"):
    """Computes average gradients of a batch
    Args:
        model: nn.Module
        criterion: Loss function
        batch: Batch to compute average gradients 
        device (str, optional): Torch device. Defaults to "cpu".
    Returns:
        Batch gradients, moved to cpu
    """
    model.to(device)
    model.zero_grad()
    img = batch[0].to(device)
    target = batch[1].to(device)
    outputs = model(img)
    batch_losses = criterion(outputs, target)
    batch_losses.backward()
    batch_grads = torch.tensor([]).to(device)
    for p in model.parameters():
        if p.requires_grad:
            batch_grads = torch.cat([batch_grads, p.grad.detach().clone().flatten()])
    model.zero_grad()
    return batch_grads.cpu()
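# Usage sketch (illustrative): returns the mean batch gradient flattened into a
# single 1-D CPU tensor over all trainable parameters:
#   model = torch.nn.Linear(4, 2)                           # 4*2 + 2 = 10 params
#   batch = (torch.randn(8, 4), torch.randint(0, 2, (8,)))
#   g = compute_batch_grad(model, torch.nn.CrossEntropyLoss(), batch)  # shape (10,)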
def compute_sample_grads(grad_sample_module, criterion, batch, device="cpu", move_grads_to_cpu=True, clipping=True, clipping_const=1):
    """Computes per-sample gradients given a GSM and a batch
    Args:
        grad_sample_module: GradSampleModule
        criterion: Loss function
        batch: Batch to compute per-sample grads of
        device (str, optional): Defaults to "cpu".
        move_grads_to_cpu (bool, optional): If True will move all sample grads to cpu. Defaults to True.
        clipping (bool, optional): Whether to clip per-sample-gradients. Defaults to True.
        clipping_const (int, optional): Clipping const. Defaults to 1.
    Returns:
        batch_grads: Per-sample gradients of batch
        clip_count: Number of per-sample gradients that were clipped
    """
    grad_sample_module.to(device)
    grad_sample_module.zero_grad()
    img = batch[0].to(device)
    target = batch[1].to(device)
    outputs = grad_sample_module(img)
    batch_losses = criterion(outputs, target)
    batch_losses.backward()
    batch_grads = torch.hstack([p.grad_sample.detach().clone().view(img.shape[0], -1) for p in grad_sample_module.parameters()])
    
    clip_count = 0
    if clipping:
        for i, grad in enumerate(batch_grads):
            grad_norm = torch.norm(grad)
            if grad_norm > clipping_const:
                clip_count += 1
                batch_grads[i] = batch_grads[i]*clipping_const / grad_norm
    
    # zero_grad() on a GradSampleModule does not clear per-sample gradients unless a DPOptimizer is attached, so clear them explicitly
    grad_sample_module.zero_grad()
    for p in grad_sample_module.parameters():
        p.grad_sample = None
    if move_grads_to_cpu:
        return batch_grads.cpu(), clip_count
    else:
        return batch_grads, clip_count
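# Illustrative usage sketch (assumes Opacus is installed; not part of the
# original file). Wrapping a model in a GradSampleModule populates
# p.grad_sample with one gradient per sample, which compute_sample_grads
# flattens into a (batch_size x num_params) matrix and optionally clips.
def _demo_compute_sample_grads():
    from opacus import GradSampleModule
    gsm = GradSampleModule(torch.nn.Linear(4, 2))
    criterion = torch.nn.CrossEntropyLoss()
    batch = (torch.randn(8, 4), torch.randint(0, 2, (8,)))
    sample_grads, clip_count = compute_sample_grads(gsm, criterion, batch, clipping_const=1)
    assert sample_grads.shape[0] == 8  # one clipped gradient row per sample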
def compute_local_update(model, criterion, optimizer, batches, reverse_batch_scaling=True, expected_batch_size=1, compute_sample_grads=False, local_epochs=1, device="cpu"):
    """Computes a model update given a set of local batches 
    Args:
        model: nn.Module
        criterion: Loss function
        optimizer: Model optimizer
        batches: Mock client local batches
        reverse_batch_scaling (bool, optional): Reverse 1/B averaging, multiplies gradients by B/expected B. Defaults to True.
        expected_batch_size (int, optional): The expected batch size. Defaults to 1.
        compute_sample_grads (bool, optional): Whether to also compute per-sample gradients. If True expects model to be a GSM. Defaults to False.
        local_epochs (int, optional): Number of local epochs to perform. Defaults to 1.
        device (str, optional): Defaults to "cpu".
    Returns:
        local_model_state: The model state dict after the local training. Can be used to compute a model update by differencing with global model.
        local_model_before_insert: Local model at step n-1 where n is the number of local batches
        sample_grads: The per-sample grads, defaults to an empty tensor if compute_sample_grads=False
    """
    model.to(device)
    sample_grads = torch.tensor([])
    local_model_before_insert = None
    for _ in range(local_epochs):
        for i, batch in enumerate(batches):
            img, target = batch
            model.zero_grad()
            if i == len(batches)-1:
                local_model_before_insert = state_dict_to_cpu(copy.deepcopy(model.state_dict()))
            img = img.to(device)
            target = target.to(device)
            outputs = model(img)
            batch_losses = criterion(outputs, target)
            batch_losses.backward()
            if reverse_batch_scaling:
                for p in model.parameters():
                    p.grad *= (img.shape[0]/expected_batch_size)
            if compute_sample_grads:
                sample_grads = torch.cat((sample_grads, torch.hstack([p.grad_sample.clone().cpu().view(img.shape[0], -1) for p in model.parameters()])), dim=0)
            optimizer.step()
    model.zero_grad()
    return model.state_dict(), local_model_before_insert, sample_grads | 
	canife-main | 
	canife/utils.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import random
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import torch
import torch.nn.functional as F
from canife import CanaryDesigner
from canife.utils import TextProcessorSent140, TextProcessorShakes
class CanaryDesignerNLP(CanaryDesigner):
    def __init__(self, grad_sample_module, canary_class=None, canary_loss="loss1", canary_norm_loss="hinge_squared", canary_design_type="sample_grads", canary_epochs=1000, canary_init="random", canary_preprocess=None, canary_clip_const=1, 
                     local_batch_size=128, canary_insert_batch_index=0, canary_design_local_models=False, server_clip_const=1, client_lr=1, 
                     num_classes=10, logger=None, local_updates=1, local_epochs=1, optimizer_config=None, dp_level="sample_level", gpu_mem_minimiser=False,
                     canary_norm_matching=False, canary_norm_constant=50, canary_normalize_optim_grad=True,
                     benchmark_design=False, **kwargs) -> None:
        super().__init__(grad_sample_module=grad_sample_module, canary_class=canary_class, canary_loss=canary_loss, canary_norm_loss=canary_norm_loss, canary_design_type=canary_design_type, canary_epochs=canary_epochs, 
                        canary_init=canary_init, canary_preprocess=canary_preprocess, canary_clip_const=canary_clip_const, local_batch_size=local_batch_size, canary_insert_batch_index=canary_insert_batch_index, 
                        canary_design_local_models=canary_design_local_models, server_clip_const=server_clip_const, client_lr=client_lr, 
                        num_classes = num_classes, logger=logger, local_updates=local_updates, local_epochs=local_epochs, optimizer_config=optimizer_config, dp_level=dp_level, gpu_mem_minimiser=gpu_mem_minimiser, canary_norm_matching=canary_norm_matching, 
                        canary_norm_constant=canary_norm_constant, canary_normalize_optim_grad=canary_normalize_optim_grad, benchmark_design=benchmark_design, **kwargs)
        self.text_processor = TextProcessorShakes() if kwargs["dataset"] == "shakespeare" else TextProcessorSent140()
        self.canary_type = "nlp"
    def _init_canary_optimisation(self, canary_design_loader, device):
        """Initialises canaries for optimisation
        Args:
            canary_design_loader: Design pool
            device: Torch device
        Returns:
            init_canary: Initial Canary for metrics
            canary: Tensor canary to optimise
            canary_class: Tensor class of canary 
            canary_optimizer: Optimizer over canary
        """
        init_canary = self._init_canary(canary_design_loader)
        canary = init_canary.clone().to(device) # Clone because we keep the initial canary for statistics
        canary.requires_grad = True
        canary_class = torch.tensor([self.canary_class]).to(device)
        canary_optimizer = torch.optim.Adam([canary], lr=0.1)
        return init_canary, canary, canary_class, canary_optimizer
        
    def _init_canary(self, canary_design_loader):
        """Initialises canary
        Args:
            canary_design_loader: Canary design pool, required to infer sequence length for text initialisation
        Returns:
            canary: Canary as a tensor
        """
        # Initialise log coeffs
        if self.canary_design_type == "sample_grads":
            example_seq = next(iter(canary_design_loader))[0][0].clone()
            self.canary_class = next(iter(canary_design_loader))[1][0].clone().item()
        else:
            example_seq = next(iter(canary_design_loader))[0][0][0].clone()
            self.canary_class = next(iter(canary_design_loader))[0][1][0].clone().item()
        if self.canary_init == "random":
            log_coeffs = torch.rand(len(example_seq), self.text_processor.vocab_size)
            self.canary_class = random.randint(0, self.num_classes-1)
        else:
            log_coeffs = torch.zeros(len(example_seq), self.text_processor.vocab_size)
            indices = torch.arange(log_coeffs.size(0)).long()
            log_coeffs[indices, example_seq] = 12
        self.logger.info(f"Log coeffs initialised shape={log_coeffs.shape}")
        return log_coeffs
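    # Worked example of the initialisation above (illustrative): for the token
    # sequence [5, 2] over a vocabulary of size V, log_coeffs is a 2 x V zero
    # matrix with log_coeffs[0, 5] = log_coeffs[1, 2] = 12, so a softmax puts
    # almost all mass on the original tokens; "random" init instead draws
    # uniform coefficients and picks a random canary class.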
        
    def _forward_pass_canary(self, model, canary):
        """Runs a forward pass on a canary given a model
        
        Uses the Gumbel softmax method of Guo et al. (2021) (https://arxiv.org/abs/2104.13733)
        Args:
            model: nn.Module
            canary: canary tensor
        Returns:
            output: Output of model(canary)
        """
        model.train()
        model.zero_grad()
        # Gumbel softmax the log coeffs
        coeffs = F.gumbel_softmax(canary, hard=False) # T x V
        # Form soft embeddings
        embedding_weights = model.__dict__["_modules"]["embedding"].weight
        inputs_embeds = (coeffs @ embedding_weights) # T x D
        # Forward pass through model (using soft embeddings as input)
        pred = model(None, input_embeds=inputs_embeds.unsqueeze(0))
        return pred
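    # Sketch of the relaxation above (illustrative): the discrete token
    # sequence is replaced by soft one-hot coefficients so gradients can flow
    # back into the canary,
    #   coeffs = F.gumbel_softmax(log_coeffs, hard=False)  # (T, V), rows on the simplex
    #   inputs_embeds = coeffs @ embedding.weight          # (T, V) @ (V, D) -> (T, D)
    # With hard=True (as in _post_process_canary) the rows are discretised back
    # to one-hot vectors, recovering concrete token indices via argmax.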
    def _post_process_canary(self, model, criterion, canary, canary_class, device="cpu"):
        """Computes final gradient from the canary. Converts token distribution to text sample
        Args:
            model: nn.Module
            criterion: Loss function
            canary: tensor
            canary_class: tensor
            device (optional): torch device, defaults to "cpu".
        Returns:
            canary: Final canary after post-processing
            canary_grad: Final canary gradient
        """
        # self._plot_canary_dist(canary)
        canary = F.gumbel_softmax(canary, hard=True).argmax(1).unsqueeze(0).long()
        canary_grad = self._compute_clipped_grad(model, criterion, [canary, canary_class], device=device).detach().cpu()
        return canary, canary_grad
    def _plot_canary_dist(self, canary):
        """
        For debugging. Plots the token distribution of the canary.
        
        Args:
            canary: canary token distribution to plot
        """
        coeffs = F.gumbel_softmax(canary, hard=False)
        for row in coeffs:
            row = np.array(row)
            sns.barplot(x=list(range(0, len(row))), y=row)
            plt.plot()
            plt.pause(1)
            plt.clf()
     | 
	canife-main | 
	canife/canary_designer_nlp.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
from opacus import GradSampleModule
from scipy.stats import binomtest
from canife.utils import TextProcessorSent140, TextProcessorShakes, compute_sample_grads
from privacy_lint.privacy_lint.attack_results import AttackResults
class CanaryAnalyser():
    def __init__(self, plot_path, result_path, grad_sample_module=None, canary_epochs=1000, canary_loss="loss1", canary_norm_matching=None, canary_design_type="sample_grads", canary_setup="holdout", canary_init="random",
                    canary_design_minibatch_size=0, canary_design_sample_size = 0, canary_design_pool_size=0, local_batch_size=128, canary_clip_const=1, canary_insert_train_acc=0, canary_insert_test_acc=0, canary_losses=None, canary_norms=None,
                    canary_design_reverse_server_clip=False, canary_design_bias=0, canary_insert_epoch="unknown", canary_insert_global_round=-1, canary_insert_batch_index=-1, canary_insert_acc_threshold=-1, canary_normalize_optim_grad=True,
                    canary_design_local_models=False, local_updates=1, local_epochs=1, canary_type="image", delta=1e-5, sigma=0, epsilon=float('inf'), sample_rate=1, checkpoint_train_acc = 0, checkpoint_test_acc = 0,
                    model_arch="unknown", dataset="unknown", task="canary_attack", dp_level="sample_level", logger=None, benchmark_times=None, server_clip_const=1,
                    actual_sample_size=0, actual_pool_size=0 , actual_minibatch_size=0, canary_norm_constant=1, canary_norm_loss="hinge_squared", scale_canary_test=False, **kwargs) -> None:
        self.reset()
        self.epsilon = epsilon
        self.delta = delta
        self.sigma = sigma
        self.sample_rate = sample_rate
        
        self.global_round = 0
        self.canary_type = canary_type
        self.canary_loss = canary_loss
        self.canary_losses = canary_losses
        self.canary_norms = canary_norms
        self.canary_epochs = canary_epochs
        self.canary_init = canary_init
        self.canary_design_type = canary_design_type
        self.canary_setup = canary_setup
        self.canary_clip_const = canary_clip_const
        self.canary_design_minibatch_size = canary_design_minibatch_size
        self.canary_design_sample_size = canary_design_sample_size
        self.canary_design_pool_size = canary_design_pool_size
        self.scale_canary_test = scale_canary_test
        
        self.actual_sample_size = actual_sample_size
        self.actual_pool_size = actual_pool_size
        self.actual_minibatch_size =  actual_minibatch_size
        
        self.canary_design_reverse_server_clip = canary_design_reverse_server_clip
        self.canary_design_bias = canary_design_bias
        self.local_batch_size = local_batch_size
        self.canary_norm_matching = canary_norm_matching
        self.canary_norm_constant = canary_norm_constant
        self.canary_norm_loss = canary_norm_loss
        self.canary_normalize_optim_grad = canary_normalize_optim_grad
        
        self.model_arch = model_arch
        self.dataset = dataset
        self.canary_insert_epoch = canary_insert_epoch
        self.canary_insert_global_round = canary_insert_global_round
        self.canary_insert_batch_index = canary_insert_batch_index
        self.canary_insert_train_acc = canary_insert_train_acc
        self.canary_insert_test_acc = canary_insert_test_acc
        self.canary_insert_acc_threshold = canary_insert_acc_threshold
        self.canary_design_local_models = canary_design_local_models
        self.local_updates = local_updates
        self.local_epochs = local_epochs
        self.num_clients = "N/A"
        self.server_clip_const = server_clip_const
        
        self.accuracy_metrics = {"train": [], "eval": [], "test": []} # Used to track model accuracies
        self.checkpoint_train_acc = checkpoint_train_acc
        self.checkpoint_test_acc = checkpoint_test_acc
        self.empirical_eps_tracker = []
        self.logger = logger
        
        self.dp_level = dp_level
        self.task = task 
        self.base_plot_path = plot_path
        self.base_result_path = result_path
        self.grad_sample_module = grad_sample_module
        
        self.benchmark_times = benchmark_times if benchmark_times else []
        self.text_processor = TextProcessorSent140() if dataset == "sent140" else TextProcessorShakes()
    def reset(self):
        """ Resets attributes that track a canary attack
        
        """
        self.canary_healths = []
        self.canaries = []
        self.canary_dot_prods = {"with_canary": [], "without_canary": []}
        self.init_canary_dot_prods = {"with_canary": [], "without_canary": []}
        self.batch_clip_percs = []
        self.clip_rates = []
        self.num_tests = 0
    def _plot_canary_hist(self, canary_metrics, suffix=""):
        """ Plots canary histogram and associated attack metrics for a canary that is being analysed
        Args:
            canary_metrics (dict): Dict of canary metrics
            suffix (str, optional): Plot name suffix. Defaults to "".
        """
        if np.isnan(np.sum(canary_metrics["dot_prods_without_canary"])) or np.isnan(np.sum(canary_metrics["dot_prods_with_canary"])):
            self.logger.info("WARNING - Some dot products are NaN, these are being removed for plotting...")
            canary_metrics["dot_prods_without_canary"] = np.array(canary_metrics["dot_prods_without_canary"])[~np.isnan(canary_metrics["dot_prods_without_canary"])]
            canary_metrics["dot_prods_with_canary"] =  np.array(canary_metrics["dot_prods_with_canary"])[~np.isnan( canary_metrics["dot_prods_with_canary"])]
        if len(canary_metrics["dot_prods_without_canary"]) == 0 or len(canary_metrics["dot_prods_with_canary"]) == 0 :
            self.logger.info("Dot products were empty, likely all nans, optimisation has failed. Canary norm is likely 0...")
            return 
        
        bins = None  # default matplotlib binning; set e.g. bins=25 for a fixed bin count
        plt.hist(canary_metrics["dot_prods_without_canary"], bins=bins, label="Without canary (" + self.canary_design_type + "), m=" + str(round(canary_metrics["without_canary_mean"], 5)) + " std=" + str(round(canary_metrics["without_canary_sd"], 5)))
        plt.hist(canary_metrics["dot_prods_with_canary"], bins=bins, label="W/ canary (" + self.canary_design_type + ") m=" + str(round(canary_metrics["with_canary_mean"], 5)) + " std=" + str(round(canary_metrics["with_canary_sd"], 5)))
        plt.vlines(canary_metrics["mia_threshold"], ymin=0, ymax=10, color="red")
        plot_title = self.task + " " + self.dp_level +  " num_clients=" + str(self.num_clients) + " local_steps=" + str(self.local_updates) + " init=" + self.canary_init + "\n"
        plot_title += "Design: naive" if  self.canary_design_type == "naive" else f"Design: {self.canary_design_type } {self.canary_loss}"
        plot_title += f" Local Batch Size={self.local_batch_size} epoch={self.canary_insert_epoch}, round={self.canary_insert_global_round}"
        
        if len(self.accuracy_metrics["train"]) > 0 and len(self.accuracy_metrics["test"]) > 0:
            plot_title += f" (Train, Test): {round(self.accuracy_metrics['train'][-1],2)}, {round(self.accuracy_metrics['test'][-1],2)}"
        if self.canary_setup == "holdout" and self.canary_design_type != "naive":
            plot_title += f"\n Design Sample={self.canary_design_sample_size} Design Pool={self.canary_design_pool_size}"
            if self.canary_loss != "loss1":
                plot_title += f" Minibatch= {self.canary_design_minibatch_size}"
        if self.canary_setup == "exact" and self.canary_design_type != "naive":
            plot_title += "\n Canary Health (min, max, mean): {min}, {max}, {mean}".format(min=str(round(np.min(canary_metrics["canary_health_list"]), 4)), 
                                                                                        max=str(np.round(max(canary_metrics["canary_health_list"]),4)), mean=str(round(np.mean(canary_metrics["canary_health_list"]), 4)))
        else:
            plot_title += f"\n  Canary norm={round(canary_metrics['canary_norm'],3)} Canary Health: {round(canary_metrics['canary_health_list'][0],5)}"
        plot_title += f" (Acc, Max Acc, AUC): {round(canary_metrics['mia_acc'], 4)}, {round(canary_metrics['mia_max_acc'],4)}, {round(canary_metrics['mia_auc'],4)}"
        plot_title += f"\n (eps, delta)=({round(canary_metrics['initial_epsilon'],4)}, {canary_metrics['initial_delta']}), sigma={round(canary_metrics['final_sigma'],4)}, empirical= {round(canary_metrics['empirical_eps'],4)}, ({round(canary_metrics['empirical_eps_lower'],4)}, {round(canary_metrics['empirical_eps_upper'],4)})"
        plt.title(plot_title, fontdict={"fontsize": 10})
        plt.ylabel("Freq")
        plt.xlabel(r'<S, grad(canary)>')
        plt.legend()
        plt.tight_layout()
        
        full_path = self.plot_path + suffix + ".png"
        plt.savefig(full_path, bbox_inches='tight')
        self.logger.info(f" Plot Saved: {full_path}")
        plt.clf()
    def _plot_canary_losses(self):
        """Plots the optimisation loss of an analysed canary.
        """
        smoothed_loss = np.mean(np.array(self.canary_losses)[:(len(self.canary_losses)//100)*100].reshape(-1,100), axis=1)
        data_list = [("canary_norms", self.canary_norms), ("canary_loss_full", self.canary_losses), 
                     ("canary_loss_last_epochs", self.canary_losses[-1000:]), ("canary_loss_smoothed", smoothed_loss)]
        
        for item in data_list:
            name, data = item
            plt.plot(range(0, len(data)), data)
            plt.title(name)
            plt.ylabel(name)
            plt.xlabel("Epoch")
            plt.tight_layout()
            full_path = self.plot_path + f"_{name}.png"
            plt.savefig(full_path)
            self.logger.info(f" Plot Saved: {full_path}")
            plt.clf()
    def _plot_pr_curve(self, precision, recall, auprc=0, suffix=""):
        """Plots pr curves of an analysed canary
        Args:
            precision (list): Precision values
            recall (list): Recall values
            auprc (int, optional): Optional AUPRC to display in the plot title. Defaults to 0.
            suffix (str, optional): Plot name suffix. Defaults to "".
        """
        for i in range(recall.shape[0]-1):
            plt.plot((recall[i],recall[i]),(precision[i],precision[i+1]),'b-') 
            plt.plot((recall[i],recall[i+1]),(precision[i+1],precision[i+1]),'b-') 
        plt.title(f"PR Curve - MAP={auprc}")
        plt.xlabel("Recall")
        plt.ylabel("Precision")
        plot_name = self.plot_path + "_pr_curve_" + suffix
        plt.savefig(plot_name)
        self.logger.info(f" Plot Saved: {plot_name}")
        plt.clf()
    def _save_results(self, canary_metrics, additional_args):
        """Checkpoint analysed canary attack
        Args:
            canary_metrics (dict): All canary metrics to checkpoint
            additional_args (dict): Additional args i.e, from a CanaryDesigner
        """
        all_args = canary_metrics
        all_args.update(self.__dict__)
        remove_list = ["grad_sample_module", "canaries", "logger", "canary_losses", "text_processor", "canary_dot_prods", "init_canary_dot_prods", "canary_norms"]
        for attr in remove_list:
            all_args.pop(attr)
            
        if additional_args is not None:
            all_args.update(vars(additional_args)) 
        experiment_dict = {}
        all_args["canary_health"] = all_args["canary_health_list"][0] if len(all_args["canary_health_list"]) == 1 else np.mean(all_args["canary_health_list"])
        columns = list(canary_metrics.keys())
        row = [all_args[col] for col in columns]
        experiment_dict["row"] = row
        experiment_dict["columns"] = columns
        
        torch.save(experiment_dict, self.result_path + ".tar")
        self.logger.info(f" Experiment metrics saved {self.result_path}")
        self.logger.info(f"Saved columns {columns}")
        self.logger.info(f"Canary insert epoch={all_args['canary_insert_epoch']}, global round={all_args['canary_insert_global_round']}")
        
    def _save_canary(self, batched_canary, title):
        """Saves an output of the designed canary. Either as an image of a .txt for NLP
        Args:
            batched_canary: Batch with a single canary
            title: Title of the canary output file
        """
        if self.canary_type == "image":
            if self.dataset == "femnist":
                plt.imshow(np.transpose(batched_canary[0]).numpy(), cmap="gray")
            else:
                plt.imshow(np.transpose(batched_canary[0].numpy(), (1, 2, 0)))
            plt.title(title)
            plt.axis("off")
            plt.savefig(self.plot_path + "_" + title + ".png")
            plt.clf()
        elif self.canary_type == "nlp":
            try:
                with open(self.plot_path + "_" + title + ".txt", 'w') as f:
                    f.write(self.text_processor.index_sequence_to_text(batched_canary[0]))
            except Exception:
                plt.clf()
                self.logger.info("Saving nlp error...")
    def ci_eps(self, fp, fn, n_pos, n_neg, delta=1e-5, bound="lower"):
        """Calculate the 95% CI for empirical epsilon via the Clopper-Pearson method
        Args:
            fp (int): False positives
            fn (int): False negatives
            n_pos (int): Number of positive examples
            n_neg (int): Number of negative examples
            delta (float, optional): DP delta. Defaults to 1e-5.
            bound (str, optional): "upper" or "lower" CI bounds. Defaults to "lower".
        Returns:
            empirical eps
        """
        fp = int(fp)
        fn = int(fn)
        
        fp_result = binomtest(k=fp, n=n_pos)
        fn_result = binomtest(k=fn, n=n_neg)
        
        if bound == "lower":
            fp_hi = fp_result.proportion_ci().high
            fn_hi = fn_result.proportion_ci().high
        else:
            fp_hi = fp_result.proportion_ci().low
            fn_hi = fn_result.proportion_ci().low
            
        return self.empirical_eps(1-fn_hi,fp_hi, delta=delta, type=bound)
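    # Illustrative example: with fp=2 of n_pos=100 and fn=3 of n_neg=100,
    # binomtest(k=2, n=100) and binomtest(k=3, n=100) give exact
    # (Clopper-Pearson) 95% CIs on the FP/FN rates; the "lower" bound on
    # empirical eps then plugs the pessimistic CI *upper* limits of both rates
    # into empirical_eps below.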
    def empirical_eps(self, tpr, fpr, delta=1e-5, type=""):
        """Calculate empirical epsilon
        Args:
            tpr: True Positive Rate (TPR)
            fpr: False Positive Rate (FPR)
            delta: DP delta. Defaults to 1e-5.
            type (str, optional): "lower" or "upper" for CI calculations. Defaults to "".
        Returns:
            empirical eps
        """
        x = []        
        if 1-tpr > 0:
            x.append((1-delta-fpr)/(1-tpr))
        if fpr > 0:
            x.append((1-delta-(1-tpr))/fpr)
        if len(x) <= 1 or max(x) < 0:
            print(f"Warning empirical eps=inf, type={type} - {fpr}, {1-tpr}")
            x = [float("inf")]
        return math.log(max(x))
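    # Worked example (illustrative): tpr=0.95, fpr=0.05, delta=1e-5 gives
    #   (1 - delta - fpr) / (1 - tpr) = 0.94999 / 0.05 ~= 19.0
    #   (1 - delta - (1 - tpr)) / fpr = 0.94999 / 0.05 ~= 19.0
    # so empirical eps = log(19.0) ~= 2.94; a one-sided or perfect attack
    # (tpr=1 or fpr=0) leaves at most one ratio and yields eps = inf.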
    
    def _compute_empirical_eps(self, attack_results: AttackResults, use_max_acc_threshold=False):
        """Computes empirical epsilon and its Clopper-Pearson CIs from attack results
        Args:
            attack_results (AttackResults): Attack scores with and without the canary
            use_max_acc_threshold (bool, optional): If True, computes empirical eps at the max-accuracy
                threshold; otherwise maximises it over all TPR/FPR thresholds. Defaults to False.
        Returns:
            max_empirical_eps, its lower/upper CI bounds and the associated fp, fn, tp, tn counts
        """
        n_pos, n_neg = len(attack_results.scores_train), len(attack_results.scores_test)
        delta = 1/(n_pos + n_neg)
       
        max_empirical_eps = 0
        _, scores = attack_results._get_scores_and_labels_ordered()
        tpr_fpr = attack_results.get_tpr_fpr()
        
        if use_max_acc_threshold: # Calculate empirical eps from max acc threshold
            max_acc_thresh = attack_results.get_max_accuracy_threshold()[0]
            tp = int((attack_results.scores_train >= max_acc_thresh).sum().item())
            fp = int((attack_results.scores_test >= max_acc_thresh).sum().item())
            max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
            max_tpr, max_fpr = max_tp / (max_tp + max_fn), max_fp/(max_fp+max_tn)
            max_empirical_eps = self.empirical_eps(max_tpr, max_fpr, delta=delta)
        else: # Maximise empirical eps over TPR/FPR
            for i, t in enumerate(scores):
                tpr, fpr = tpr_fpr[0][i], tpr_fpr[1][i]
                empirical_eps = self.empirical_eps(tpr, fpr, delta=delta)
                acc = attack_results.get_accuracy(t)
                
                if empirical_eps > max_empirical_eps and (empirical_eps != float("inf") or acc == 1):
                    tp = int((attack_results.scores_train >= t).sum().item())
                    fp = int((attack_results.scores_test >= t).sum().item())
                    max_empirical_eps = empirical_eps
                    max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
                    max_tpr, max_fpr = tpr, fpr
        empirical_eps_lower = self.ci_eps(max_fp, max_fn, n_pos=n_pos, n_neg=n_neg, delta=delta)
        empirical_eps_upper = self.ci_eps(max_fp, max_fn, bound="upper", n_pos=n_pos, n_neg=n_neg, delta=delta)
        return max_empirical_eps, empirical_eps_lower, empirical_eps_upper, max_fp, max_fn, max_tp, max_tn
    
    def _compute_canary_metrics(self, initial_privacy_budget, final_privacy_budget, type="canary", correct_bias=False, plot_prc=True, **kwargs): 
        """Computes canary and attack metrics for checkpointing
        Args:
            initial_privacy_budget (dict): Initial privacy budget of the model
            final_privacy_budget (dict): Final privacy budget at the attack round
            type (str, optional): Type of canary metrics, either "init" or "canary". Defaults to "canary".
            correct_bias (bool, optional): Debugging, if True computes corrected bias metrics. Defaults to False.
            plot_prc (bool, optional): If True will plot PR curves. Defaults to True.
        Returns:
            canary_metrics: dict of canary metrics to checkpoint
        """
        canary_metrics = {}
        canary_metrics.update(kwargs)
        bias = self.canary_design_bias if correct_bias else 0
        canary_metrics["with_canary_mean"] = np.round(np.mean(canary_metrics["dot_prods_with_canary"], axis=0)+bias,10)
        canary_metrics["with_canary_var"] = np.round(np.var(canary_metrics["dot_prods_with_canary"], axis=0),10)
        canary_metrics["without_canary_mean"] = np.round(np.mean(canary_metrics["dot_prods_without_canary"], axis=0)+bias,10)
        canary_metrics["without_canary_var"] = np.round(np.var(canary_metrics["dot_prods_without_canary"], axis=0),10)
        results = AttackResults(torch.tensor(canary_metrics["dot_prods_with_canary"])+bias, torch.tensor(canary_metrics["dot_prods_without_canary"])+bias)
        max_accuracy_threshold, max_accuracy = results.get_max_accuracy_threshold()
        tpr, fpr = results.get_tpr_fpr()
        precision, recall = results.get_precision_recall()
        auprc = results.get_map()
        canary_metrics["mia_auc"] = results.get_auc()
        canary_metrics["mia_threshold"] = max_accuracy_threshold
        canary_metrics["mia_max_acc"] = max_accuracy 
        canary_metrics["mia_acc"] = results.get_accuracy(threshold=0.5).item()
        if plot_prc:
            self._plot_pr_curve(precision, recall, auprc=auprc, suffix=type)
        n_pos = len(results.scores_test)
        n_neg = len(results.scores_train)
        n = n_pos + n_neg
        self.logger.info(f"=== Computing metrics for type={type}")
        self.logger.info(f"Number of tests={self.num_tests}, without={len(results.scores_train)}, with={len(results.scores_test)}, n={n}")
        
        empirical_eps, empirical_eps_lower, empirical_eps_upper, fp, fn, tp, tn = self._compute_empirical_eps(attack_results=results, use_max_acc_threshold=False)
        self.logger.info(f"n={n}, tp={tp}, fp={fp}, tn={tn}, fn={fn}")
        
        fpr = fp/(fp+tn) 
        fnr = fn/(fn+tp)
        tpr = 1-fnr
        self.logger.info(f"FPR={fpr}, TPR={tpr}, FNR={fnr}")
        self.logger.info(f"Type={type}, Acc= {canary_metrics['mia_acc']}, empirical eps={empirical_eps}, lower, upper =({empirical_eps_lower},{empirical_eps_upper})\n")
            
        canary_metrics["fp"] = fp
        canary_metrics["fn"] = fn
        canary_metrics["tp"] = tp
        canary_metrics["tn"] = tn
        canary_metrics["empirical_eps_lower"] = empirical_eps_lower
        canary_metrics["empirical_eps_upper"] = empirical_eps_upper
        canary_metrics["empirical_eps"] = empirical_eps
        canary_metrics["without_canary_sd"] = math.sqrt(canary_metrics["without_canary_var"])
        canary_metrics["with_canary_sd"] = math.sqrt(canary_metrics["with_canary_var"])
        canary_metrics["sd_gap"] = abs(canary_metrics["without_canary_sd"] - canary_metrics["with_canary_sd"])
        canary_metrics["loss_gap"] = np.min(canary_metrics["dot_prods_with_canary"])+bias - np.max(canary_metrics["dot_prods_without_canary"])+bias
        canary_metrics["batch_clip_percs"] = kwargs["batch_clip_percs"]
        if type == 'canary':
            self.empirical_eps_tracker.append((canary_metrics["empirical_eps_lower"],  canary_metrics["empirical_eps"],  canary_metrics["empirical_eps_upper"]))
        self._add_privacy_metrics(canary_metrics, initial_privacy_budget, type="initial")
        self._add_privacy_metrics(canary_metrics, final_privacy_budget, type="final")
        return canary_metrics 
    def _add_privacy_metrics(self, metrics, privacy_budget, type="final"):
        """Adds privacy budget to canary metrics 
        Args:
            metrics (Canary metrics): Canary metrics
            privacy_budget (dict): Privacy budget
            type (str, optional): Type. Defaults to "final".
        """
        metrics[f"{type}_epsilon"] = privacy_budget["epsilon"]
        metrics[f"{type}_delta"] = privacy_budget["delta"]
        metrics[f"{type}_sigma"] = privacy_budget["sigma"]
        
    def add_clip_rate(self, clip_rate):
        """Add a clip rate e.g. a % of model updates that were clipped in the current test round
        Args:
            clip_rate (float): clip percentage
        """
        self.clip_rates.append(clip_rate)
    def add_canary(self, canary):
        """Add a canary to be analysed
        Args:
            canary (Canary): canary
        """
        self.canaries.append(canary)
    def set_canary(self, canary):
        """Set a canary, replacing all old canaries being tracked
        Args:
            canary
        """
        self.canaries = [canary]
    def reset_canaries(self):
        """Reset all tracked canaries
        """
        self.canaries = []
        
    def set_grad_sample_module(self, model):
        """Set GradSampleModule, not used in FLSim
        Args:
            model (GSM)
        """
        self.grad_sample_module = GradSampleModule(copy.deepcopy(model))
    def set_accuracy_metrics(self, accuracy_metrics):
        """Set accuracy metrics of model to checkpoint in canary_metrics
        Args:
            accuracy_metrics: FLSim accuracy metrics
        """
        self.accuracy_metrics = accuracy_metrics
        self.current_train_acc = accuracy_metrics["train"][-1] if len(accuracy_metrics["train"]) > 0 else 0
        self.current_test_acc = accuracy_metrics["test"][-1] if len(accuracy_metrics["test"]) > 0 else 0
    def test_against_batch(self, criterion, batch, canary, device="cpu"):
        """Debugging only, not used in FLSim.
        Args:
            criterion: torch criterion
            batch: Batch to test canary presence
            canary (Canary): canary
            device (str, optional): torch device. Defaults to "cpu".
        """
        assert self.grad_sample_module is not None, "Must set_grad_sample_module() before testing with a batch"
        
        if canary not in self.canaries:
            self.add_canary(canary)
        # Compute required gradients
        batch_sample_grads, clip_count = compute_sample_grads(self.grad_sample_module, criterion, batch, device=device, clipping_const=self.canary_clip_const)
        clip_perc = round(clip_count / self.local_batch_size, 8)*100
        self.batch_clip_percs.append(clip_perc)
        self.logger.info(f" Clip Percentage {clip_perc}")
        aggregated_batch_grad = torch.sum(batch_sample_grads, axis=0)
        canary_norm = torch.norm(canary.grad).item()
        self.logger.info(f" Canary grad norm: {canary_norm}\n")
    
        self.canary_dot_prods["without_canary"].append(torch.dot(canary.grad, aggregated_batch_grad).item())
        self.init_canary_dot_prods["without_canary"].append(torch.dot(canary.init_grad, aggregated_batch_grad).item())
        self.canary_dot_prods["with_canary"].append(torch.dot(canary.grad, aggregated_batch_grad + canary.grad).item())
        self.init_canary_dot_prods["with_canary"].append(torch.dot(canary.init_grad, aggregated_batch_grad+canary.init_grad).item())
        self.num_tests += 1
    def test_against_agg_grad(self, canary, aggregated_model, lr, num_clients, clip_factor=1, type="with"):
        """Tests canary against aggregated model udpates by computing a dot-product score.
        Args:
            canary (Canary): canary to test
            aggregated_model (tensor): Aggregated clipped (noisy) model updates
            lr: Client lr
            num_clients: Number of clients in the current round
            clip_factor (int, optional): Clip factor (not used). Defaults to 1.
            type (str, optional): Type of the test, either "with" or "without" canary. Defaults to "with".
        """
        self.num_clients = num_clients
        aggregated_batch_grad = torch.tensor([])
        for p in aggregated_model.parameters():
            aggregated_batch_grad = torch.cat([aggregated_batch_grad, p.detach().clone().cpu().flatten()])
        aggregated_batch_grad =  num_clients * aggregated_batch_grad * 1/clip_factor
        
        self.logger.debug(f"Aggregated grads {aggregated_batch_grad}")
        self.logger.debug(f"Norm of aggregated grads {torch.norm(aggregated_batch_grad)}")
        self.logger.debug(f"Clip factor {clip_factor}, num clients {num_clients}, lr {lr}, Batch size {self.local_batch_size}")
        self.logger.debug(f"Aggregated scaled grads {aggregated_batch_grad}")
        self.logger.info(f"Canary grad norm {torch.norm(canary.grad)}, Canary clip const {self.canary_clip_const}")
        
        # Note: when designing against unclipped updates (canary_design_type == "sample_grads") the
        # aggregate would need unscaling by (1/lr) * num_clients * local_batch_size * 1/clip_factor;
        # against clipped and scaled updates no unscaling is needed.
        # Aggregate attack scores
        if type == "with" or type == "without":
            if self.canary_design_reverse_server_clip: 
                canary_dot_prod = torch.dot(canary.grad/torch.norm(canary.grad)**2, aggregated_batch_grad).item()
            else: 
                if self.scale_canary_test and torch.norm(canary.grad) < self.server_clip_const:
                    canary_dot_prod = torch.dot(canary.grad/(torch.norm(canary.grad)**2)*self.server_clip_const, aggregated_batch_grad).item()
                else:
                    canary_dot_prod = torch.dot((canary.grad/torch.norm(canary.grad)*self.server_clip_const)/self.server_clip_const**2, aggregated_batch_grad).item()
            self.canary_dot_prods[type+"_canary"].append(canary_dot_prod)
        
        # Aggregate canary init scores
        init_dot_prod = torch.dot(canary.init_grad/torch.norm(canary.init_grad), aggregated_batch_grad).item()
        if type == "without":
            self.init_canary_dot_prods[type+"_canary"].append(init_dot_prod)
        elif type == "with_init":
            self.init_canary_dot_prods["with_canary"].append(init_dot_prod)
        self.num_tests += 1
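    # Scoring sketch (illustrative): in the default branch the test statistic is
    #   <g / (||g|| * C), S>
    # where g is the canary gradient, C the server clip constant and S the
    # rescaled sum of clipped client updates. A client carrying the canary
    # contributes roughly C in the direction g / ||g||, so the score
    # concentrates near 1 when the canary is present and near 0 when absent,
    # up to DP noise and interference from other updates.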
    def analyse(self, global_round=0, initial_privacy_budget=None, final_privacy_budget=None, one_step_budget=None,
                    disable_init_metrics=False, disable_bias_metrics=False, 
                    plot_hists=True, plot_canaries=True, plot_losses=True, plot_prc=True, args=None):
        """Analyse current set canary and checkpoint associated attack metrics and plots
        Args:
            global_round (int, optional): Global FL round of the attack. Defaults to 0.
            initial_privacy_budget (dict, optional): Initial model privacy budget. Defaults to None.
            final_privacy_budget (dict, optional): Current model privacy budget. Defaults to None.
            one_step_budget (dict, optional): Model one-step budget. Defaults to None.
            disable_init_metrics (bool, optional): If True will not compute canary init metrics. Defaults to False.
            disable_bias_metrics (bool, optional): If True will not compute bias corrected metrics. Defaults to False.
            plot_hists (bool, optional): If False will not plot attack histograms. Defaults to True.
            plot_canaries (bool, optional): If False will not output canaries. Defaults to True.
            plot_losses (bool, optional): If False will not output canary optimisation loss plots. Defaults to True.
            plot_prc (bool, optional): If False will not plot PR curves. Defaults to True.
            args (dict, optional): Additional args for checkpointing. Defaults to None.
        """
        assert len(self.canaries) > 0, "Cannot anaylse() before test_against_agg_grad() or test_against_batch() at least once"
        
        if final_privacy_budget is None:
            final_privacy_budget = {"epsilon": float('inf'), "delta": 0, "sigma": 0}
        if initial_privacy_budget is None:
            initial_privacy_budget = {"epsilon": 0, "delta": 0, "sigma": 0}
        if one_step_budget is None:
            one_step_budget = {"epsilon": 0, "delta": 0, "sigma": 0}
        self.global_round = global_round
        self.plot_path = self.base_plot_path + f"_global_round={global_round}"
        self.result_path = self.base_result_path + f"_global_round={global_round}"
        self.canary_healths = [canary.health for canary in self.canaries]
        canary_norm = np.mean([torch.norm(canary.grad).item() for canary in self.canaries])
        init_norm = np.mean([torch.norm(canary.init_grad).item() for canary in self.canaries])
        final_loss = np.mean([canary.final_loss for canary in self.canaries])
        init_loss = np.mean([canary.init_loss for canary in self.canaries])
        
        # Save initial and final canaries
        if plot_canaries:
            self._save_canary(self.canaries[0].data, "final_canary class " + str(self.canaries[0].class_label))
            self._save_canary(self.canaries[0].init_data, "init_canary class " + str(self.canaries[0].class_label))
        # Compute and save canary metrics
        canary_metrics = self._compute_canary_metrics(initial_privacy_budget, final_privacy_budget, type="canary", plot_prc=plot_prc, dot_prods_with_canary=self.canary_dot_prods["with_canary"], dot_prods_without_canary=self.canary_dot_prods["without_canary"], 
                                                canary_norm=canary_norm, 
                                                canary_health_list=self.canary_healths, batch_clip_percs=self.batch_clip_percs, final_loss=final_loss)
        if not disable_bias_metrics:
            canary_bias_corrected_metrics = self._compute_canary_metrics(initial_privacy_budget, final_privacy_budget, type="bias_canary", plot_prc=False, correct_bias=True, dot_prods_with_canary=self.canary_dot_prods["with_canary"], dot_prods_without_canary=self.canary_dot_prods["without_canary"], 
                                                    canary_norm=canary_norm, 
                                                    canary_health_list=self.canary_healths, batch_clip_percs=self.batch_clip_percs, final_loss=final_loss)
            canary_bias_corrected_metrics["mia_threshold"] = 0.5
            canary_metrics["bias_corrected_acc"] = canary_bias_corrected_metrics["mia_acc"]
        
        if not disable_init_metrics:
            init_canary_metrics = self._compute_canary_metrics(initial_privacy_budget, final_privacy_budget, type="init", plot_prc=plot_prc, dot_prods_with_canary=self.init_canary_dot_prods["with_canary"], dot_prods_without_canary=self.init_canary_dot_prods["without_canary"], 
                                                canary_norm=init_norm, 
                                                canary_health_list=[0], batch_clip_percs=self.batch_clip_percs, final_loss=init_loss)
        canary_metrics["sd_improvement"] = "n/a" if disable_init_metrics else init_canary_metrics["without_canary_sd"] - canary_metrics["without_canary_sd"]
        canary_metrics["init_without_canary_sd"] =  "n/a" if disable_init_metrics else init_canary_metrics["without_canary_sd"]
        canary_metrics["init_with_canary_sd"] =  "n/a" if disable_init_metrics else init_canary_metrics["with_canary_sd"]
        canary_metrics["mia_acc_improvement"] =  "n/a" if disable_init_metrics else canary_metrics["mia_max_acc"] - init_canary_metrics["mia_max_acc"]
        canary_metrics["dot_prods_with_init_canary"] =  "n/a" if disable_init_metrics else self.init_canary_dot_prods["with_canary"]
        canary_metrics["dot_prods_without_init_canary"] =  "n/a" if disable_init_metrics else self.init_canary_dot_prods["without_canary"]
        canary_metrics["one_step_eps"] = one_step_budget["epsilon"]
        self.logger.info(f"One step privacy metrics (no sampling) (eps,delta)={one_step_budget['epsilon']}, {one_step_budget['delta']}, sigma={one_step_budget['sigma']}")
        self.logger.info(f"Initial privacy metrics (eps,delta)={canary_metrics['initial_epsilon']}, {canary_metrics['initial_delta']}, sigma={canary_metrics['initial_sigma']}")
        self.logger.info(f"Final privacy metrics (eps,delta)={canary_metrics['final_epsilon']}, {canary_metrics['final_delta']}, sigma={canary_metrics['final_sigma']}, sample rate={self.sample_rate}")
        self.logger.info(f"Empirical epsilon tracker {self.empirical_eps_tracker}\n")
        self.logger.info(f"Checkpoint train acc {self.checkpoint_train_acc} Checkpoint test acc {self.checkpoint_test_acc}")
        self.logger.info(f"Current train acc {self.current_train_acc} Current test acc {self.current_test_acc}")
        self.logger.info(f"All accuracy metrics {self.accuracy_metrics}\n")
        if not disable_init_metrics:
            self.logger.info(f" SD Improvement: {canary_metrics['sd_improvement']}")
            self.logger.info(f" MIA Acc Improvement: {canary_metrics['mia_acc_improvement']}\n")
        # Save canary metrics
        self._save_results(canary_metrics, args)
        # Plot and save dot product histograms
        if plot_hists:
            self._plot_canary_hist(canary_metrics, suffix="_canary") # Final canary
            if not disable_bias_metrics:
                self._plot_canary_hist(canary_bias_corrected_metrics, suffix="_bias_corrected_canary") # Final canary (bias-corrected)
            if not disable_init_metrics:
                self._plot_canary_hist(init_canary_metrics, suffix="_init") # Initial canary
        # Save canary optim losses
        if plot_losses:
            self._plot_canary_losses()
            
        self.logger.info(f"Minibatch, sample size, pool size, {self.canary_design_minibatch_size, self.canary_design_sample_size, self.canary_design_pool_size}")
        self.logger.info(f"Actual minibatch, sample size, pool size, {self.actual_minibatch_size, self.actual_sample_size, self.actual_pool_size}") | 
	canife-main | 
	canife/canary_analyser.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import pandas as pd
import torch
def extract_sweep(root_dir="local_checkpoints", csv_name=""):
    rows = []
    full_path = root_dir 
    tar_path = full_path + "/**/*.tar"
    print("Full path", full_path)
    for file in glob.glob(tar_path, recursive=True):
        exp_checkpoint = torch.load(file)
        row = exp_checkpoint["row"]
        # row.append(exp_checkpoint["batch_clip_percs"])
        columns = exp_checkpoint["columns"]
        columns.extend(["train_acc", "test_acc"])
        row.extend([-1,-1])
        
        if "accuracy_metrics" in columns:
            metrics = row[columns.index("accuracy_metrics")]
            if len(metrics["train"]) > 0:
                train_acc = metrics["train"][-1]
                row[-2] = train_acc
            if len(metrics["test"]) > 0:
                test_acc = metrics["test"][-1]
                row[-1] = test_acc
            
        rows.append(row)
    df = pd.DataFrame(rows, columns=columns)
    print(df.info(memory_usage="deep"))
    save_path = f"{args.path}{args.csv_name}"
    df.to_csv(save_path)
    print(f"Sweep extracted saved to {save_path}...")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Extract canife experiment")
    parser.add_argument("--path", type=str, help= "Path to location of experiment output")
    parser.add_argument("--csv-name", type=str, help= "Name of output .csv")
    args = parser.parse_args()
    extract_sweep(csv_name=args.csv_name, root_dir=args.path) | 
	canife-main | 
	plotting/extract_exp.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import ast
import pathlib
import sys
from pathlib import Path
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import torch
sys.path.append("../canife")
sys.path.append("../privacy_lint")
from collections import defaultdict
from opacus.accountants import RDPAccountant
from opacus.accountants.utils import get_noise_multiplier
from canife import CanaryAnalyser
from privacy_lint.privacy_lint.attack_results import AttackResults
BASE_PATH = str(pathlib.Path(__file__).parent.resolve())
sns.set_theme(style="whitegrid")
def set_fontsize(size=14):
    usetex = matplotlib.checkdep_usetex(True)  # NB: checkdep_usetex was deprecated in matplotlib 3.6
    tex_fonts = {
        "text.usetex": usetex,
        "font.family": "serif",
        "axes.labelsize": size,
        "font.size": size,
        "legend.fontsize": size,
        "xtick.labelsize": size,
        "ytick.labelsize": size
    }
    plt.rcParams.update(tex_fonts)
    
FONT_SIZE = 20
set_fontsize(FONT_SIZE)
# convert pandas col names to readable plot labels
column_map = {
    "global_round": r"Global Round ($r$)",
    "empirical_eps_upper": r"$\hat{\varepsilon}_U$",
    "empirical_eps_lower": r"$\hat{\varepsilon}_L$",
    "empirical_eps": r"$\hat{\varepsilon}$",
    "current_test_acc": r"Model Test Accuracy",
    "current_train_acc": r"Model Train Accuracy",
    "canary_health": r"Canary Health",
    "mia_acc": r"Attack Accuracy ($\gamma = 0.5$)",
    "mia_max_acc": r"Attack Accuracy",
    "mia_max_acc_rolling": r"Attack Accuracy",
    "acc_rolling": r"Attack Accuracy",
    "final_epsilon": r"Privacy Budget ($\varepsilon$)",
    "one_step_eps": r"One-step $\varepsilon$",
    "num_clients": r"Clients Per Round",
    "canary_design_pool_size": r"Design Pool Size ($m$)",
    "canary_design_sample_size": "Design Sample Size",
    "average_sd": r"Mean Standard Deviation",
    "with_canary_sd": r"With Canary SD",
    "without_canary_sd": r"Without Canary SD",
    "mia_auc": r"Attack AUC",
    "empirical_global_eps": r"$\hat{\varepsilon}}$",
    "epsilon": r"$\varepsilon$",
    "canary_epochs": r"Design Iterations ($t$)",
    "canary_norm_constant": r"Canary Gradient Norm Constant",
    "dataset": "Dataset"
}
def print_full(x):
    pd.set_option('display.max_rows', len(x))
    pd.set_option('display.max_columns', len(x.columns))
    print(x)
    pd.reset_option('display.max_rows')
    
def format_axis(ax):
    xlabel = ax.xaxis.get_label()._text
    ylabel = ax.yaxis.get_label()._text
    
    xlabel = column_map.get(xlabel, xlabel)
    ylabel = column_map.get(ylabel, ylabel)
    
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
def save_plot(name="", fig=None):
    plt.tight_layout()
    if fig:
        fig.savefig(f"{BASE_PATH}/{name}.pdf", bbox_inches='tight', format="pdf")
    else:
        plt.savefig(f"{BASE_PATH}/{name}.pdf", bbox_inches='tight', format="pdf")
    plt.clf()
def extract_epsilon_metrics(df, override_empirical_eps=False, use_max_acc=False):
    extra_cols = defaultdict(list)
    if override_empirical_eps:
        print("Extracting empirical epsilon data...")
        analyser = CanaryAnalyser(None, None, None)
        
        for idx, x in df.iterrows():
            with_dot_prods = ast.literal_eval(x["dot_prods_with_canary"].replace('nan,', ''))
            without_dot_prods = ast.literal_eval(x["dot_prods_without_canary"].replace('nan,', ''))
            results = AttackResults(torch.tensor(with_dot_prods), torch.tensor(without_dot_prods))
            
            max_acc_thresh = results.get_max_accuracy_threshold()[0]
            n_pos, n_neg = len(results.scores_train), len(results.scores_test)            
            max_empirical_eps = 0
            
            _, scores = results._get_scores_and_labels_ordered()
            tpr_fpr = results.get_tpr_fpr()
            
            # delta = 1e-5
            delta = 1/(n_pos + n_neg)
            
            if use_max_acc: # Calculate empirical eps from max acc threshold
                tp = int((results.scores_train >= max_acc_thresh).sum().item())
                fp = int((results.scores_test >= max_acc_thresh).sum().item())
                max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
                max_tpr, max_fpr = max_tp / (max_tp + max_fn), max_fp/(max_fp+max_tn)
                max_empirical_eps = analyser.empirical_eps(max_tpr, max_fpr, delta=delta)
            else: # Maximise empirical eps over TPR/FPR
                for i, t in enumerate(scores):
                    tpr, fpr = tpr_fpr[0][i], tpr_fpr[1][i]
                    empirical_eps = analyser.empirical_eps(tpr, fpr, delta=delta)
                    acc = results.get_accuracy(t)
                    
                    if empirical_eps > max_empirical_eps and (empirical_eps != float("inf") or acc == 1):
                        tp = int((results.scores_train >= t).sum().item())
                        fp = int((results.scores_test >= t).sum().item())
                        max_empirical_eps = empirical_eps
                        max_fp, max_fn, max_tp, max_tn = fp, n_pos-tp, tp, n_neg-fp
                        max_tpr, max_fpr = tpr, fpr
            
            lower_eps = analyser.ci_eps(max_fp, max_fn, n_pos, n_neg, delta=delta)
            upper_eps = analyser.ci_eps(max_fp, max_fn, bound="upper", n_pos=n_pos, n_neg=n_neg, delta=delta)
                        
            extra_cols["fp"].append(max_fp)
            extra_cols["fn"].append(max_fn)
            extra_cols["tp"].append(max_tp)
            extra_cols["tn"].append(max_tn)
            extra_cols["empirical_eps_lower"].append(lower_eps)
            extra_cols["empirical_eps_upper"].append(upper_eps)
            extra_cols["empirical_eps"].append(max_empirical_eps)
        for col in extra_cols.keys():
            df[col] = extra_cols[col]
            
        print("Empirical epsilon data added...")
def extract_global_empirical_eps(df, skip_ci=True):
    df["empirical_global_eps"] = 0
    df["empirical_global_eps_lower"] = 0
    df["empirical_global_eps_upper"] = 0
    df = df.sort_values(by="global_round")
    
    eps_list = df["epsilon"].unique()
    sample_rate = df["sample_rate"].unique()[0]
    print(f"Eps list {eps_list}, sample rate={sample_rate}")
    for eps in eps_list:
        temp_df = df[df["epsilon"] == eps]
        temp_df = temp_df.sort_values(by="global_round")
        df_eps = temp_df["empirical_eps"].values
        steps = temp_df["global_round"].values
        
        theoretical_sigma = temp_df["final_sigma"].mean()
        empirical_global_eps = calculate_global_eps(df_eps, theoretical_sigma=theoretical_sigma, steps=steps, sample_rate=sample_rate)
        df.loc[df["epsilon"] == eps, 'empirical_global_eps'] = empirical_global_eps
        print(f"eps={eps} estimate done...")
        if not skip_ci:
            empirical_global_eps = calculate_global_eps(temp_df["empirical_eps_lower"].clip(lower=0.2).values, theoretical_sigma=theoretical_sigma, steps=steps, sample_rate=sample_rate)
            df.loc[df["epsilon"] == eps, 'empirical_global_eps_lower'] = empirical_global_eps
            print(f"eps={eps} lower done...")
            empirical_global_eps = calculate_global_eps(temp_df["empirical_eps_upper"].values, theoretical_sigma=theoretical_sigma, steps=steps, sample_rate=sample_rate)
            df.loc[df["epsilon"] == eps, 'empirical_global_eps_upper'] = empirical_global_eps
            print(f"eps={eps} upper done...\n")
            
    return df
def compute_one_step_eps(sample_rate, noise, delta=1e-5):
    accountant = RDPAccountant()
    history_step = (noise, sample_rate, 1)
    accountant.history.append(history_step)
    current_budget = accountant.get_privacy_spent(delta=delta)
    return current_budget[0]
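# Illustrative usage (not part of the original file): one full-participation
# round with noise multiplier 1.0 under RDP accounting.
def _demo_one_step_eps():
    eps = compute_one_step_eps(sample_rate=1.0, noise=1.0, delta=1e-5)
    print(f"One-step epsilon at sigma=1.0: {eps:.3f}")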
            
def calculate_global_eps(empirical_per_step_epsilons, theoretical_sigma, sample_rate=0.01, steps=1000, delta=1e-5, n=100, verbose=False):
    if isinstance(steps, int):
        steps = range(1, steps+1)
            
    accountant = RDPAccountant()
    previous_step = 0
    if verbose:
        theoretical_accountant = RDPAccountant()
        one_step_theoretical_eps = compute_one_step_eps(1, theoretical_sigma, delta)
    budgets = []
    for i,step in enumerate(steps):
        # One-step sigma based on current empirical eps
        if empirical_per_step_epsilons[i] == float("inf"): # Resort to theoretical sigma if empirical eps is inf
            estimated_sigma = theoretical_sigma
        else:
            estimated_sigma = get_noise_multiplier(target_epsilon=max(empirical_per_step_epsilons[i], 0.15),
                                                target_delta=1/n,
                                                sample_rate=1,
                                                steps=1)
        
        # Assume noise is constant for step-previous_step rounds (i.e time between last estimate and current)
        history_step = (estimated_sigma, sample_rate, step-previous_step)
        accountant.history.append(history_step)
        previous_step = step
        current_budget = accountant.get_privacy_spent(delta=delta)
        budgets.append(current_budget[0])
        
        if verbose:
            estimated_sigma_theoretical = get_noise_multiplier(target_epsilon=one_step_theoretical_eps,
                                                target_delta=delta,
                                                sample_rate=1,
                                                steps=1)
            history_step = (estimated_sigma_theoretical, sample_rate, step-previous_step)
            theoretical_accountant.history.append(history_step)
            theoretical_eps = theoretical_accountant.get_privacy_spent(delta=delta)[0]
        
            print(f"i={i}, global round={step}")
            print(f"Estimated empirical one-step sigma = {estimated_sigma} vs theoretical = {estimated_sigma_theoretical}")
            print(f"Estimated empirical one-step epsilon = {empirical_per_step_epsilons[i]} vs theoretical = {one_step_theoretical_eps}")
            print(f"Accumulated empirical budget {budgets[-1]} vs theoretical {theoretical_eps}\n")
    
    return budgets
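# Minimal usage sketch (values are hypothetical): pass one empirical per-round
# epsilon estimate per accounting step and read off the accumulated budget.
# per_round_eps = [1.0] * 10
# budgets = calculate_global_eps(per_round_eps, theoretical_sigma=1.0,
#                                sample_rate=0.01, steps=10)
# budgets[-1]  # empirical global epsilon after 10 rounds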
def load_sweep(name, relative_path=False, override_empirical_eps=False):
    if relative_path:
        df = pd.read_csv(name)
    else:
        df = pd.read_csv(BASE_PATH + "/" + name + ".csv")
        
    df["sd_gap"] = np.sqrt(df["without_canary_var"]) - np.sqrt(df["with_canary_var"])
    print(df.columns)
    
    # For compatibility with old sweeps where some metrics were saved as tensors
    if df["mia_acc"].dtype == object:
        for s in ["tensor", "(", ")"]:
            df["mia_acc"] = df["mia_acc"].str.replace(s, "", regex=False)  # regex=False so "(" is treated literally
        df["mia_acc"] = df["mia_acc"].astype("float64")
    
    extract_epsilon_metrics(df, override_empirical_eps=override_empirical_eps)
    return df
def plot(csv_name):
    dataset = "sent140"
    model = "lstm"
    xlim = 8900
    main_df = load_sweep(f"{csv_name}", relative_path=True, override_empirical_eps=False)
    main_df["epsilon"] = main_df["epsilon"].astype("int")
    main_df = main_df[main_df["dataset"] == dataset]
    
    main_df = main_df[main_df["epsilon"].isin([10,30,50])]
    
    # Per-round empirical eps comparison
    for eps in main_df["epsilon"].unique():
        for y in ["one_step_eps"]:
            plot_df = main_df[main_df["epsilon"] == eps].copy()
            plot_df.replace([float("inf")], np.nan, inplace=True)  # NaN (not inf) so seaborn skips those rounds
            ax = sns.lineplot(data=plot_df, x="global_round", y="empirical_eps", markers=False, label=r"$\hat{\varepsilon}_r$")
            plt.fill_between(plot_df["global_round"].values, plot_df["empirical_eps_lower"].values, plot_df["empirical_eps_upper"].values, alpha=.3)
            sns.lineplot(data=plot_df, x="global_round", y=y, markers=False, label=r"$\varepsilon_r$", ax=ax)
            plt.ylim(0)
            plt.xlim(0, xlim)
            plt.tight_layout()
            plt.draw()
            plt.legend(loc='upper right', bbox_to_anchor=(1, 0.95)) 
            format_axis(ax)
            ax.set_ylabel(r"Privacy Budget ($\varepsilon$)")
            save_plot(name=f"{dataset}_{eps}_{model}_per_round_eps")
    
    # Global empirical eps comparison
    plot_df = main_df.copy()
    
    main_palette = sns.color_palette("deep", 3)
    palette_dict = {10: main_palette[0], 30: main_palette[1], 50: main_palette[2]}
    palette = [palette_dict[eps] for eps in plot_df["epsilon"].unique()]
    ax = sns.lineplot(data=plot_df, x="global_round", y="final_epsilon", hue="epsilon", linestyle="--", palette=palette)
    sns.lineplot(data=plot_df, x="global_round", y="empirical_global_eps", hue="epsilon", ax=ax, label='_nolegend_', palette=palette)
    plt.xlim(0, xlim)
    format_axis(ax)
    hand, labl = ax.get_legend_handles_labels()
    handout = []
    lablout = []
    for h, l in zip(hand, labl):
        if l not in lablout:
            lablout.append(l)
            handout.append(h)
    legend1 = plt.legend(handout, lablout, title=r"$\varepsilon$")
    plt.ylim(0, 50)
    linestyles = ["-", "--"]
    dummy_lines = []
    titles = [r"Empirical $\hat{\varepsilon}$", r"Theoretical $\varepsilon$"]
    for b_idx, b in enumerate(titles):
        dummy_lines.append(ax.plot([], [], c="black", ls=linestyles[b_idx])[0])
    plt.legend(dummy_lines, titles, loc="upper left", bbox_to_anchor=(0.4, 0.6))
    ax.add_artist(legend1)
    save_plot(name=f"{dataset}_global_eps")
    
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Plot example canife experiment")
    parser.add_argument("--csv-path", type=str, help= "Path to output .csv for plotting")
    args = parser.parse_args()
    global_eps_path = args.csv_path.split(".csv")[0] + "_extracted.csv"
    
    global_eps_csv_file = Path(global_eps_path)
    csv_file = Path(args.csv_path)
        
    if not csv_file.is_file():
        raise FileNotFoundError(f"Output .csv does not exist at the given file path {args.csv_path}")
        
    if not global_eps_csv_file.is_file():
        df = pd.read_csv(args.csv_path)
        df = extract_global_empirical_eps(df)
        df.to_csv(global_eps_csv_file)
        
    plot(csv_name=global_eps_csv_file) | 
	canife-main | 
	plotting/example_plotter.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import glob
import os
import pandas as pd
import torch
USERNAME = os.getlogin()
print(f"USERNAME: {USERNAME}")
def extract_sweep(root_dir="saved_sweeps", csv_name=""):
    rows = []
    full_path = root_dir 
    tar_path = full_path + "/**/*.tar"
    print("Full path", full_path)
    for file in glob.glob(tar_path, recursive=True):
        exp_checkpoint = torch.load(file)
        row = exp_checkpoint["row"]
        columns = exp_checkpoint["columns"]
        columns.extend(["train_acc", "test_acc"])
        row.extend([-1, -1])  # placeholders, overwritten from accuracy_metrics below
        
        if "accuracy_metrics" in columns:
            metrics = row[columns.index("accuracy_metrics")]
            if len(metrics["train"]) > 0:
                train_acc = metrics["train"][-1]
                row[-2] = train_acc
            if len(metrics["test"]) > 0:
                test_acc = metrics["test"][-1]
                row[-1] = test_acc
            
        rows.append(row)
    if not rows:
        raise FileNotFoundError(f"No .tar checkpoints found under {tar_path}")
    df = pd.DataFrame(rows, columns=columns)
    print(df.info(memory_usage="deep"))
    save_path = f"/checkpoints/{USERNAME}/" + csv_name + ".csv"
    df.to_csv(f"/checkpoints/{USERNAME}/" + csv_name + ".csv")
    print(f"Sweep extracted saved to {save_path}...")
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Extract canife sweep")
    parser.add_argument("--sweep", type=str, help= "Name of saved sweep")
    args = parser.parse_args()
    extract_sweep(csv_name=args.sweep, root_dir=f"/checkpoints/{USERNAME}/canife/{args.sweep}") | 
	canife-main | 
	plotting/extract_aws.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
from setuptools import find_packages, setup
# 3.6.8 is the final Windows binary release for 3.6.x
REQUIRED_MAJOR = 3
REQUIRED_MINOR = 6
REQUIRED_MICRO = 8
version = {}
with open("flsim/version.py") as fp:
    exec(fp.read(), version)
__version__ = version["__version__"]
# Check for python version
if sys.version_info < (REQUIRED_MAJOR, REQUIRED_MINOR, REQUIRED_MICRO):
    error = (
        "Your version of python ({major}.{minor}.{micro}) is too old. You need "
        "python >= {required_major}.{required_minor}.{required_micro}"
    ).format(
        major=sys.version_info.major,
        minor=sys.version_info.minor,
        micro=sys.version_info.micro,
        required_major=REQUIRED_MAJOR,
        required_minor=REQUIRED_MINOR,
        required_micro=REQUIRED_MICRO,
    )
    sys.exit(error)
src_dir = os.path.abspath(os.path.dirname(__file__))
with open("README.md", "r", encoding="utf8") as fh:
    long_description = fh.read()
requirements_txt = os.path.join(src_dir, "requirements.txt")
with open(requirements_txt, encoding="utf8") as f:
    required = f.read().splitlines()
dev_required = []
setup(
    name="flsim",
    version=__version__,
    author="The FLSim Team",
    description="Federated Learning Simulator (FLSim) is a flexible, standalone core library that simulates FL settings with a minimal, easy-to-use API. FLSim is domain-agnostic and accommodates many use cases such as vision and text.",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://flsim.ai",
    project_urls={
        "Documentation": "https://flsim.ai/api",
        "Source": "https://github.com/facebookresearch/flsim",
    },
    license="Apache-2.0",
    install_requires=required,
    extras_require={"dev": dev_required},
    packages=find_packages(),
    keywords=[
        "PyTorch",
        "Federated Learning",
        "FL",
        "On device training",
        "Differential Privacy",
        "Secure Aggregation",
        "Privacy Preserving Machine Learning",
        "PPML",
        "PPAI",
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Intended Audience :: Education",
        "Intended Audience :: Science/Research",
        "License :: OSI Approved :: Apache Software License",
        "Programming Language :: Python :: 3 :: Only",
        "Topic :: Scientific/Engineering",
    ],
    python_requires=f">={REQUIRED_MAJOR}.{REQUIRED_MINOR}.{REQUIRED_MICRO}",
)
 | 
	canife-main | 
	FLSim/setup.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
 | 
	canife-main | 
	FLSim/examples/__init__.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""
"""
import copy
import json
import os
import random
from typing import Any, Iterator, List, Tuple
import flsim.configs  # noqa
import hydra  # @manual
import numpy as np
import torch
import torch.nn as nn
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
    DataLoader,
    DataProvider,
    FLModel,
    LEAFDataLoader,
    LEAFDataProvider,
    MetricsReporter,
    Resnet18,
    SequentialSharder,
    SimpleConvNet,
)
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from PIL import Image
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.datasets import ImageFolder
from torchvision.datasets.cifar import CIFAR10
from canife.utils import TextProcessorSent140, TextProcessorShakes, get_plot_path
IMAGE_SIZE = 32
# Datasets 
class ShakespeareDataset(Dataset):
    SEED = 7
    def __init__(
        self,
        data_root=None,
        num_users=None,
    ):
        self.text_processor = TextProcessorShakes()
        with open(data_root, "r") as f:
            dataset = json.load(f)
        user_ids = dataset["users"]
        random.seed(self.SEED)
        num_users = num_users if num_users is not None else len(user_ids)
        user_ids = random.sample(user_ids, min(len(user_ids), num_users))
        print(f"Creating dataset with {num_users} users")
        # Filter train and test datasets based on user_ids list
        self.dataset = dataset
        self.data = {}
        self.targets = {}
                
        # Populate self.data and self.targets
        for user_id, user_data in self.dataset["user_data"].items():
            if user_id not in user_ids:
                continue
            self.data[user_id] = list(user_data["x"])
            self.targets[user_id] = list(user_data["y"])
    def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
        for user_id in self.data.keys():
            yield self.__getitem__(user_id)
    def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
        if user_id not in self.data or user_id not in self.targets:
            raise IndexError(f"User {user_id} is not in dataset")
        user_utterances = self.process_x(self.data[user_id])
        user_targets = self.process_y(self.targets[user_id])
        return user_utterances, user_targets
    def __len__(self) -> int:
        return len(self.data)
    def get_user_ids(self):
        return self.data.keys()
    def process_x(self, raw_x_batch):
        x_batch = [self.text_processor.word_to_indices(word) for word in raw_x_batch]
        x_batch = torch.LongTensor(x_batch)
        return x_batch
    def process_y(self, raw_y_batch):
        y_batch = [self.text_processor.letter_to_vec(c) for c in raw_y_batch]
        return y_batch
class CelebaDataset(Dataset):
    def __init__(
        self,
        data_root,
        image_root,
        num_users=None,
        transform=None,
        target_transform=None,
    ):
        with open(data_root, "r") as f:
            self.dataset = json.load(f)
        user_ids = self.dataset["users"]
        num_users = num_users if num_users is not None else len(user_ids)
        user_ids = random.sample(user_ids, min(len(user_ids), num_users))
        self.transform = transform
        self.target_transform = target_transform
        self.image_root = image_root
        self.image_folder = ImageFolder(image_root, transform)
        self.data = {}
        self.targets = {}
        # Populate self.data and self.targets
        for user_id, user_data in self.dataset["user_data"].items():
            if user_id in user_ids:
                self.data[user_id] = [
                    int(os.path.splitext(img_path)[0]) for img_path in user_data["x"]
                ]
                self.targets[user_id] = list(user_data["y"])
    def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
        for user_id in self.data.keys():
            yield self.__getitem__(user_id)
    def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
        if user_id not in self.data or user_id not in self.targets:
            raise IndexError(f"User {user_id} is not in dataset")
        user_imgs = []
        for image_index in self.data[user_id]:
            user_imgs.append(self.image_folder[image_index - 1][0])
        user_targets = self.targets[user_id]
        if self.target_transform is not None:
            user_targets = [self.target_transform(target) for target in user_targets]
        return user_imgs, user_targets
    def __len__(self) -> int:
        return len(self.data)
class Sent140Dataset(Dataset):
    def __init__(self, data_root, max_seq_len):
        self.data_root = data_root
        self.max_seq_len = max_seq_len
        self.text_processor = TextProcessorSent140()
        self.vocab_size = self.text_processor.vocab_size
        self.embedding_size = 300
        with open(data_root, "r") as f:
            self.dataset = json.load(f)
        self.data = {}
        self.targets = {}
        self.num_classes = 2
        # Populate self.data and self.targets
        for user_id, user_data in self.dataset["user_data"].items():
            self.data[user_id] = self.process_x(list(user_data["x"]))
            self.targets[user_id] = self.process_y(list(user_data["y"]))
    def __len__(self):
        return len(self.data)
    def __iter__(self):
        for user_id in self.data.keys():
            yield self.__getitem__(user_id)
    def __getitem__(self, user_id: str):
        if user_id not in self.data or user_id not in self.targets:
            raise IndexError(f"User {user_id} is not in dataset")
        return self.data[user_id], self.targets[user_id]
    def process_x(self, raw_x_batch):
        x_batch = [e[4] for e in raw_x_batch]
        x_batch = [self.text_processor.line_to_indices(e, self.max_seq_len) for e in x_batch]
        x_batch = torch.LongTensor(x_batch)
        return x_batch
    def process_y(self, raw_y_batch):
        y_batch = [int(e) for e in raw_y_batch]
        return y_batch
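# Minimal usage sketch (the JSON path is a placeholder): indexing by user id
# yields one user's data as a (LongTensor of token indices, list of labels) pair.
# dataset = Sent140Dataset(data_root="<leaf_sent140>/data/train/<split>.json", max_seq_len=25)
# for user_inputs, user_targets in dataset:
#     ...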
class FemnistDatasetChunked(Dataset):
    IMAGE_SIZE = (28, 28)
    def __init__(
        self,
        data_root,
        num_users=None,
        transform=None,
        target_transform=None,
    ):
        with open(data_root, "r") as f:
            dataset = json.load(f)
        user_ids = []
        for _, chunk_data in dataset:
            user_ids.extend(list(chunk_data["user_data"].keys()))
        num_users = num_users if num_users is not None else len(user_ids)
        user_ids = random.sample(user_ids, min(len(user_ids), num_users))
        print(f"Creating dataset with {num_users} users")
        self.transform = transform
        self.target_transform = target_transform
        self.data = {}
        self.targets = {}
        # Populate self.data and self.targets
        for _, chunk_data in dataset:
            for user_id in user_ids:
                if user_id in set(chunk_data["users"]):
                    self.data[user_id] = [
                        np.array(img) for img in chunk_data["user_data"][user_id]["x"]
                    ]
                    self.targets[user_id] = list(chunk_data["user_data"][user_id]["y"])
    def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
        for user_id in self.data.keys():
            yield self.__getitem__(user_id)
    def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
        if user_id not in self.data or user_id not in self.targets:
            return [], []
        user_imgs, user_targets = self.data[user_id], self.targets[user_id]
        user_imgs = [
            Image.fromarray(img.reshape(self.IMAGE_SIZE)) for img in user_imgs
        ]
        user_imgs = [self.transform(img) for img in user_imgs]
        if self.target_transform is not None:
            user_targets = [self.target_transform(target) for target in user_targets]
        return user_imgs, user_targets
    def __len__(self) -> int:
        return len(self.data)
class FemnistDataset(Dataset):
    IMAGE_SIZE = (28, 28)
    def __init__(
        self,
        data_root,
        num_users=None,
        transform=None,
        target_transform=None,
    ):
        with open(data_root, "r") as f:
            dataset = json.load(f)
        user_ids = dataset["users"]
        num_users = num_users if num_users is not None else len(user_ids)
        user_ids = random.sample(user_ids, min(len(user_ids), num_users))
        print(f"Creating dataset with {num_users} users")
        self.transform = transform
        self.target_transform = target_transform
        self.data = {}
        self.targets = {}
        # Populate self.data and self.targets
        for user_id in user_ids:
            if user_id in set(dataset["users"]):
                self.data[user_id] = [
                    np.array(img) for img in dataset["user_data"][user_id]["x"]
                ]
                self.targets[user_id] = list(dataset["user_data"][user_id]["y"])
    def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
        for user_id in self.data.keys():
            yield self.__getitem__(user_id)
    def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
        if user_id not in self.data or user_id not in self.targets:
            return [], []
        user_imgs, user_targets = self.data[user_id], self.targets[user_id]
        user_imgs = [
            Image.fromarray(img.reshape(FemnistDataset.IMAGE_SIZE)) for img in user_imgs
        ]
        user_imgs = [self.transform(img) for img in user_imgs]
        if self.target_transform is not None:
            user_targets = [self.target_transform(target) for target in user_targets]
        return user_imgs, user_targets
    def __len__(self) -> int:
        return len(self.data)
# NLP Models
class Sent140StackedLSTMModel(nn.Module):
    def __init__(
        self, seq_len, num_classes, emb_size, n_hidden, vocab_size, dropout_rate, **kwargs
    ):
        super(Sent140StackedLSTMModel, self).__init__()
        self.seq_len = seq_len
        self.num_classes = num_classes
        self.n_hidden = n_hidden
        self.vocab_size = vocab_size
        self.emb_size = emb_size
        self.dropout_rate = dropout_rate
        self.embedding = nn.Embedding(self.vocab_size, self.emb_size)
        self.stacked_lstm = nn.LSTM(
            self.emb_size, self.n_hidden, 2, batch_first=True, dropout=self.dropout_rate
        )
        self.fc1 = nn.Linear(self.n_hidden, self.num_classes)
        self.dropout = nn.Dropout(p=self.dropout_rate)
        # self.out = nn.Linear(128, self.num_classes)
    def set_embedding_weights(self, emb_matrix, trainable=False):
        self.embedding.weight = torch.nn.Parameter(emb_matrix)
        if not trainable:
            self.embedding.weight.requires_grad = False
    def forward(self, features, input_embeds=None):
        # seq_lens = torch.sum(features != (self.vocab_size - 1), 1) - 1
        if features is not None:
            x = self.embedding(features)
        else:
            x = input_embeds
        outputs, _ = self.stacked_lstm(x)
        # outputs = outputs[torch.arange(outputs.size(0)), seq_lens]
        pred = self.fc1(self.dropout(outputs[:, -1]))
        return pred
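# Shape sketch (hypothetical sizes): a batch of token-index sequences maps to
# one logit vector per sequence.
# model = Sent140StackedLSTMModel(seq_len=25, num_classes=2, emb_size=300,
#                                 n_hidden=100, vocab_size=9930, dropout_rate=0.1)
# x = torch.randint(0, 9930, (4, 25))  # batch of 4 sequences of length 25
# logits = model(x)  # shape: (4, 2)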
class ShakespeareModel(nn.Module):
    def __init__(self, seq_len, num_classes, n_hidden, dropout_rate=0.0, **kwargs):
        super(ShakespeareModel, self).__init__()
        self.seq_len = seq_len
        self.num_classes = num_classes  # Number of characters supported
        self.n_hidden = n_hidden
        self.dropout_rate = dropout_rate
        self.embedding = nn.Embedding(self.num_classes, 8)
        self.stacked_lstm = nn.LSTM(
            8, self.n_hidden, 2, batch_first=True, dropout=self.dropout_rate
        )
        self.out = nn.Linear(self.n_hidden, self.num_classes)
    def forward(self, features, input_embeds=None):
        if features is not None:
            x = self.embedding(features)
        else:
            x = input_embeds
        outputs, _ = self.stacked_lstm(x)
        pred = self.out(outputs[:, -1])
        return pred
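# Shape sketch (hypothetical sizes): next-character prediction, one logit per
# supported character for each input sequence.
# model = ShakespeareModel(seq_len=80, num_classes=80, n_hidden=256)
# x = torch.randint(0, 80, (4, 80))
# logits = model(x)  # shape: (4, 80)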
# Data providers
def build_data_provider_shakespeare(data_config):
    # Local testing
    # train_split = "/data/train/all_data_0_2_keep_0_train_9.json"
    # test_split = "/data/test/all_data_0_2_keep_0_test_9.json"
    
    # Full splits
    train_split = "/data/train/all_data_0_0_keep_0_train_9.json"
    test_split = "/data/test/all_data_0_0_keep_0_test_9.json"
    
    train_dataset = ShakespeareDataset(data_root=data_config.data_root + train_split)
    test_dataset = ShakespeareDataset(data_root=data_config.data_root + test_split)
    dataloader = LEAFDataLoader(
        train_dataset,
        test_dataset,
        test_dataset,
        batch_size=data_config.local_batch_size,
        drop_last=True,
    )
    data_provider = LEAFDataProvider(dataloader)
    return data_provider
    
def build_data_provider_sent140(
    local_batch_size, vocab_size, num_users, user_dist, max_seq_len, drop_last, data_path
):
    train_dataset = Sent140Dataset(
        data_root=data_path + "/data/train/all_data_0_15_keep_1_train_6.json",
        max_seq_len=max_seq_len,
    )
    eval_dataset = Sent140Dataset(
        data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
        max_seq_len=max_seq_len,
    )
    test_dataset = Sent140Dataset(
        data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
        max_seq_len=max_seq_len,
    )
    dataloader = LEAFDataLoader(
        train_dataset,
        eval_dataset,
        test_dataset,
        batch_size=local_batch_size,
        drop_last=drop_last,
    )
    data_provider = LEAFDataProvider(dataloader)
    return data_provider, train_dataset.vocab_size, train_dataset.embedding_size
def build_data_provider_cifar10(data_root, local_batch_size, examples_per_user, drop_last: bool = False, disable_aug=False):
    
    if disable_aug:
        transform_list = [transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)), transforms.ToTensor()]
    else:
        transform_list = [
            transforms.Resize(IMAGE_SIZE),
            transforms.CenterCrop(IMAGE_SIZE),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ]
    
    transform = transforms.Compose(transform_list)
    train_dataset = CIFAR10(
        root=data_root, train=True, download=False, transform=transform
    )
    val_dataset = CIFAR10(
        root=data_root, train=False, download=False, transform=transform
    )
    test_dataset = CIFAR10(
        root=data_root, train=False, download=False, transform=transform
    )
    
    sharder = SequentialSharder(examples_per_shard=examples_per_user)    
    fl_data_loader = DataLoader(
        train_dataset, val_dataset, test_dataset, sharder, local_batch_size, drop_last
    )
    data_provider = DataProvider(fl_data_loader)
    print(f"Clients in total: {data_provider.num_train_users()}")
    return data_provider
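# Note: SequentialSharder partitions the centralised CIFAR10 train set into
# synthetic "users" of examples_per_user consecutive examples each, e.g.
# 50,000 train examples with examples_per_user=50 would give 1,000 clients.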
def build_data_provider_celeba(data_config, trainer_config, disable_aug=False):
    IMAGE_SIZE: int = 32
    if disable_aug:
        IMAGE_SIZE = 128
        transform_list = [transforms.Resize((IMAGE_SIZE, IMAGE_SIZE)), transforms.ToTensor()]
    else:
        transform_list = [
            transforms.Resize(IMAGE_SIZE),
            transforms.CenterCrop(IMAGE_SIZE),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
        
    transform = transforms.Compose(transform_list)
    # Local testing
    # train_split = "/data/train/all_data_0_01_keep_0_train_9.json" if not "celeba_iid" in trainer_config.args.dataset else "/data/train/all_data_0_01_01_keep_1_train_9.json" 
    # test_split = "/data/test/all_data_0_01_keep_0_test_9.json"  if not "celeba_iid" in trainer_config.args.dataset  else "/data/test/all_data_0_01_01_keep_1_test_9.json" 
    # GPU Debug (Non-IID)
    # train_split = "/data/train/all_data_0_1_keep_1_train_9.json"  
    # test_split = "/data/test/all_data_0_1_keep_1_test_9.json"
    train_split = "/data/train/all_data_0_0_keep_0_train_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/train/all_data_0_0_0_keep_0_train_9_iid.json" 
    test_split = "/data/test/all_data_0_0_keep_0_test_9.json"  if  "celeba_iid" not in trainer_config.args.dataset  else "/data/test/all_data_0_0_0_keep_0_test_9_iid.json" 
    train_dataset = CelebaDataset( # data_root arg should be leaf/celeba 
        data_root=data_config.data_root + train_split,
        image_root=data_config.data_root+"/data/raw/",
        transform=transform,
    )
    test_dataset = CelebaDataset(
        data_root=data_config.data_root + test_split,
        transform=transform,
        image_root=train_dataset.image_root,
    )
    print(
        f"Created datasets with {len(train_dataset)} train users and {len(test_dataset)} test users"
    )
    dataloader = LEAFDataLoader(
        train_dataset,
        test_dataset,
        test_dataset,
        batch_size=data_config.local_batch_size,
        drop_last=data_config.drop_last,
    )
    # data_provider = LEAFDataProvider(dataloader)
    data_provider = DataProvider(dataloader)
    print(f"Training clients in total: {data_provider.num_train_users()}")
    return data_provider
def build_data_provider_femnist(data_config, disable_aug=False):
    if disable_aug:
        transform_list = [transforms.ToTensor()]
    else:
        transform_list = [transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,)),]
        
    transform = transforms.Compose(transform_list)
    
    # Local debugging
    train_split = data_config.data_root + "/data/train/all_data_0_niid_05_keep_0_train_9.json"
    test_split = data_config.data_root + "/data/test/all_data_0_niid_05_keep_0_test_9.json" 
    train_dataset = FemnistDataset(
        data_root=train_split,
        transform=transform,
    )
    test_dataset = FemnistDataset(
        data_root=test_split,
        transform=transform,
    )
    print(
        f"Created datasets with {len(train_dataset)} train users and {len(test_dataset)} test users"
    )
    dataloader = LEAFDataLoader(
        train_dataset,
        test_dataset,
        test_dataset,
        batch_size=data_config.local_batch_size,
    )
    data_provider = LEAFDataProvider(dataloader)
    print(f"Training clients in total: {data_provider.num_train_users()}")
    return data_provider
    
def _get_checkpoint_path(cfg):
    filename =  cfg.args.checkpoint_path
    filename += f"/FLSim_dp={cfg.args.dp_level}_model={cfg.args.model_arch}_dataset={cfg.args.dataset}_num_clients={cfg.args.users_per_round}_test_size={cfg.args.local_batch_size}"
    filename += f"_insert_test_acc={cfg.args.canary_insert_test_acc}_insert_train_acc={cfg.args.canary_insert_train_acc}_client_epochs={cfg.args.client_epochs}"
    if cfg.args.epsilon != -1 or cfg.args.sigma != 0:
        if cfg.args.epsilon != -1:
            filename += f"_private_eps={cfg.args.epsilon}_delta={cfg.args.delta}"
        else:
            filename += f"_private_sigma={cfg.args.sigma}_delta={cfg.args.delta}"
    filename += ".tar"
    return filename
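# Example of a generated path (hypothetical args): checkpoint_path="/ckpts",
# dp_level="user_level", model_arch="lstm", dataset="sent140", users_per_round=100,
# local_batch_size=8, both insert accs -1, client_epochs=1, epsilon=-1, sigma=0:
# "/ckpts/FLSim_dp=user_level_model=lstm_dataset=sent140_num_clients=100_test_size=8_insert_test_acc=-1_insert_train_acc=-1_client_epochs=1.tar"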
def _load_checkpoint(trainer_cfg, model, device="cpu"): 
    checkpoint_path = _get_checkpoint_path(trainer_cfg)
    print(f"\n====== Attempting to load checkpoint {checkpoint_path} ======")
    checkpoint = {}
    try:
        checkpoint = torch.load(checkpoint_path, map_location=torch.device(device))
        model.load_state_dict(checkpoint["state_dict"])
        if "epsilon" not in checkpoint:
            checkpoint["epsilon"] = float("inf")
        if "delta" not in checkpoint:
            checkpoint["delta"] = max(0, trainer_cfg.args.delta)
        if "noise_multiplier" not in checkpoint:
            checkpoint["noise_multiplier"] = max(0, trainer_cfg.args.sigma)
        if "steps" not in checkpoint:
            checkpoint["steps"] = -1 # Let CanarySyncTrainer compute this
        if "train_acc" not in checkpoint:
            checkpoint["train_acc"] = 0
        if "test_acc" not in checkpoint:
            checkpoint["test_acc"] = 0
            
        print(f"Checkpointed FL model loaded successfully epoch={checkpoint['epoch']}, round={checkpoint['round']}")
        print(f"Checkpointed model DP guarantees (eps, delta)=({checkpoint['epsilon']}, {checkpoint['delta']}) sigma={checkpoint['noise_multiplier']}")
        # TODO: Rework this?
        trainer_cfg.args.canary_insert_epoch = 1
        trainer_cfg.args.canary_insert_test_acc = -1
        trainer_cfg.args.canary_insert_train_acc = -1
    except FileNotFoundError:
        print("Checkpoint not found for the specific combination of parameters, resorting to training model from scratch")
    return checkpoint
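# Keys the loading logic above expects in a checkpoint: "state_dict", "epoch"
# and "round" are required; "epsilon", "delta", "noise_multiplier", "steps",
# "train_acc" and "test_acc" are optional and receive defaults when missing.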
def create_model(model_config, data_config, in_channels, vocab_size, emb_size):
    if model_config.model_arch == "resnet":
        model = Resnet18(num_classes=model_config.num_classes, in_channels=in_channels)
    elif model_config.model_arch == "lstm":
        model = Sent140StackedLSTMModel(
            seq_len=data_config.max_seq_len,
            num_classes=model_config.num_classes,
            emb_size=emb_size,
            n_hidden=model_config.n_hidden,
            vocab_size=vocab_size,
            dropout_rate=model_config.dropout,
        )
    elif model_config.model_arch == "shakes_lstm":
        model = ShakespeareModel(
            seq_len=model_config.seq_len,
            n_hidden=model_config.n_hidden,
            num_classes=model_config.num_classes,
            dropout_rate=model_config.dropout,
        )
    else:
        model = SimpleConvNet(num_classes=model_config.num_classes, in_channels=in_channels, dropout_rate=model_config.dropout)
    
    return model 
def create_data_provider(trainer_config, data_config):
    in_channels, vocab_size, emb_size = 0, 0, 0
    if trainer_config.args.dataset == "CIFAR10":
        data_provider = build_data_provider_cifar10(
            data_root=data_config.data_root,
            local_batch_size=data_config.local_batch_size,
            examples_per_user=data_config.examples_per_user,
            drop_last=False,
            disable_aug=trainer_config.args.prettify_samples
        )
        in_channels = 3
    elif "celeba" in trainer_config.args.dataset:
        data_provider = build_data_provider_celeba(data_config, trainer_config, disable_aug=trainer_config.args.prettify_samples)
        in_channels = 3
    elif "femnist" in trainer_config.args.dataset:
        data_provider = build_data_provider_femnist(data_config, disable_aug=trainer_config.args.prettify_samples)
        in_channels = 1
    elif "shakespeare" in trainer_config.args.dataset:
        data_provider = build_data_provider_shakespeare(data_config)
    else:
        data_provider, vocab_size, emb_size  = build_data_provider_sent140(      
            local_batch_size=data_config.local_batch_size,
            vocab_size=data_config.vocab_size,
            num_users=data_config.num_users,
            user_dist=data_config.user_dist,
            max_seq_len=data_config.max_seq_len,
            drop_last=False,
            data_path=data_config.data_root,
        )    
    return data_provider, in_channels, vocab_size, emb_size
# Main
def main_worker(
    trainer_config,
    data_config,
    model_config,
    use_cuda_if_available: bool = True,
    distributed_world_size: int = 1,
) -> None:
    original_trainer_config = copy.deepcopy(trainer_config) # If loading checkpoints, the trainer config is modified to change canary insert epochs to 1 
    emb_size, vocab_size = 0,0 # For sent140
    checkpoint_path = _get_checkpoint_path(trainer_config)
    if (trainer_config.args.fl_load_checkpoint) and not os.path.isfile(checkpoint_path):
        print(f"Checkpoint {checkpoint_path} does not exist, experiment exiting early...")
        return 
    
    if trainer_config.checkpoint_only:
        print(f"Checkpoint only run - will save checkpoint as {checkpoint_path}")
    data_provider, in_channels, vocab_size, emb_size = create_data_provider(trainer_config, data_config)
    
    for exp_num in range(0, data_config.canary_iters):
        torch.cuda.empty_cache()
        trainer_config = copy.deepcopy(original_trainer_config)
        if not data_config.debug_config:
            trainer_config["plot_path"] = get_plot_path(trainer_config.args, exp_num=exp_num, file_suffix="")
            trainer_config["result_path"] = get_plot_path(trainer_config.args, exp_num, ".tar")
        model = create_model(model_config, data_config, in_channels, vocab_size, emb_size)
        print(model)
        
        cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
        device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
        checkpoint = {}
        if trainer_config.load_checkpoint:
           checkpoint = _load_checkpoint(trainer_config, model, device)
                                                          
        global_model = FLModel(model, device)
        if cuda_enabled:
            global_model.fl_cuda()
        trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
        metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
        final_model, eval_score = trainer.train(
            data_provider=data_provider,
            metrics_reporter=metrics_reporter,
            num_total_users=data_provider.num_train_users(),
            distributed_world_size=1,
            checkpoint=checkpoint
        )
        if trainer_config.checkpoint_only and not trainer.insert_acc_achieved:
            trainer.logger.info("Failed to achieve insert accuracy, checkpointing model anyway...")
            trainer._checkpoint_model(trainer_config.epochs, 1, final=True)
        
        if not hasattr(trainer, "canary_analyser") and data_config.canary_iters > 1:
            trainer.logger.info("Experiment ended early - either checkpoint only or model failed to reach insertion epoch/accuracy for canary testing")
            return 
@hydra.main(config_path=None, config_name="celeba_config", version_base="1.1")
def run(cfg: DictConfig) -> None:
    print(OmegaConf.to_yaml(cfg))
    trainer_config = cfg.trainer
    data_config = cfg.data
    model_config = cfg.model
    main_worker(
        trainer_config,
        data_config,
        model_config,
        cfg.use_cuda_if_available,
        cfg.distributed_world_size,
    )
if __name__ == "__main__":
    cfg = maybe_parse_json_config()
    run(cfg)
 | 
	canife-main | 
	FLSim/examples/canary_example.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""In this tutorial, we will train a binary classifier on LEAF's CelebA dataset with FLSim.
Before running this file, you need to download the dataset and partition the data by users.
1. Clone the leaf dataset by running `git clone https://github.com/TalwalkarLab/leaf.git`
2. Change directory to celeba: `cd leaf/data/celeba || exit`
3. Download the data from http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
    - Download or request the metadata files `identity_CelebA.txt` and `list_attr_celeba.txt`,
      and place them inside the data/raw folder.
    - Download the celebrity faces dataset from the same site. Place the images in a folder
       named `img_align_celeba` in the same folder as above.
4. Run the pre-processing script:
    - `./preprocess.sh --sf 0.01 -s niid -t 'user' --tf 0.90 -k 1 --spltseed 1`
Typical usage example:
    python3 celeba_example.py --config-file configs/celeba_config.json
"""
import json
import os
import random
from typing import Any, Iterator, List, Tuple
import flsim.configs  # noqa
import hydra  # @manual
import torch
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
    DataProvider,
    FLModel,
    LEAFDataLoader,
    MetricsReporter,
    Resnet18,
    SimpleConvNet,
)
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset
from torchvision import transforms
from torchvision.datasets import ImageFolder
from canife.utils import get_plot_path
class CelebaDataset(Dataset):
    def __init__(
        self,
        data_root,
        image_root,
        num_users=None,
        transform=None,
        target_transform=None,
    ):
        with open(data_root, "r+") as f:
            self.dataset = json.load(f)
        user_ids = self.dataset["users"]
        num_users = num_users if num_users is not None else len(user_ids)
        user_ids = random.sample(user_ids, min(len(user_ids), num_users))
        self.transform = transform
        self.target_transform = target_transform
        self.image_root = image_root
        self.image_folder = ImageFolder(image_root, transform)
        self.data = {}
        self.targets = {}
        # Populate self.data and self.targets
        for user_id, user_data in self.dataset["user_data"].items():
            if user_id in user_ids:
                self.data[user_id] = [
                    int(os.path.splitext(img_path)[0]) for img_path in user_data["x"]
                ]
                self.targets[user_id] = list(user_data["y"])
    def __iter__(self) -> Iterator[Tuple[List[torch.Tensor], List[Any]]]:
        for user_id in self.data.keys():
            yield self.__getitem__(user_id)
    def __getitem__(self, user_id: str) -> Tuple[List[torch.Tensor], List[Any]]:
        if user_id not in self.data or user_id not in self.targets:
            raise IndexError(f"User {user_id} is not in dataset")
        user_imgs = []
        for image_index in self.data[user_id]:
            user_imgs.append(self.image_folder[image_index - 1][0])
        user_targets = self.targets[user_id]
        if self.target_transform is not None:
            user_targets = [self.target_transform(target) for target in user_targets]
        return user_imgs, user_targets
    def __len__(self) -> int:
        return len(self.data)
def build_data_provider(data_config, trainer_config):
    IMAGE_SIZE: int = 32
    transform = transforms.Compose(
        [
            transforms.Resize(IMAGE_SIZE),
            transforms.CenterCrop(IMAGE_SIZE),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ]
    )
    # Local testing
    # train_split = "/data/train/all_data_0_01_keep_0_train_9.json" if "celeba_iid" not in trainer_config.args.dataset else "/data/train/all_data_0_01_01_keep_0_train_9_iid.json" 
    # test_split = "/data/test/all_data_0_01_keep_0_test_9.json"  if "celeba_iid" not in trainer_config.args.dataset  else "/data/test/all_data_0_01_01_keep_0_test_9_iid.json" 
    train_split = "/data/train/all_data_0_0_keep_0_train_9.json" if  "celeba_iid" not in trainer_config.args.dataset else "/data/train/all_data_0_0_0_keep_0_train_9_iid.json" 
    test_split = "/data/test/all_data_0_0_keep_0_test_9.json"  if  "celeba_iid" not in trainer_config.args.dataset  else "/data/test/all_data_0_0_0_keep_0_test_9_iid.json" 
    train_dataset = CelebaDataset( # data_root arg should be leaf/celeba 
        data_root=data_config.data_root + train_split,
        image_root=data_config.data_root+"/data/raw/",
        transform=transform,
    )
    test_dataset = CelebaDataset(
        data_root=data_config.data_root + test_split,
        transform=transform,
        image_root=train_dataset.image_root,
    )
    print(
        f"Created datasets with {len(train_dataset)} train users and {len(test_dataset)} test users"
    )
    dataloader = LEAFDataLoader(
        train_dataset,
        test_dataset,
        test_dataset,
        batch_size=data_config.local_batch_size,
        drop_last=data_config.drop_last,
    )
    # data_provider = LEAFDataProvider(dataloader)
    data_provider = DataProvider(dataloader)
    print(f"Training clients in total: {data_provider.num_train_users()}")
    return data_provider
def _get_checkpoint_path(cfg):
    filename =  cfg.args.checkpoint_path
    filename += f"/FLSim_dp={cfg.args.dp_level}_model={cfg.args.model_arch}_dataset={cfg.args.dataset}_num_clients={cfg.args.users_per_round}_test_size={cfg.args.local_batch_size}"
    filename += f"_insert_test_acc={cfg.args.canary_insert_test_acc}_insert_train_acc={cfg.args.canary_insert_train_acc}"
    filename += ".tar"
    return filename
def main_worker(
    trainer_config,
    data_config,
    model_config,
    use_cuda_if_available: bool = True,
    distributed_world_size: int = 1,
) -> None:
    checkpoint_path = _get_checkpoint_path(trainer_config)
    if (trainer_config.args.fl_load_checkpoint) and not os.path.isfile(checkpoint_path):
        print(f"Checkpoint {checkpoint_path} does not exist, experiment exiting early...")
        return 
    data_provider = build_data_provider(data_config, trainer_config)
    
    for exp_num in range(0, data_config.canary_iters):
        torch.cuda.empty_cache()
        if not data_config.debug_config:
            trainer_config["plot_path"] = get_plot_path(trainer_config.args, exp_num=exp_num, file_suffix="")
            trainer_config["result_path"] = get_plot_path(trainer_config.args, exp_num, ".tar")
        if model_config.model_arch == "resnet":
            model = Resnet18(num_classes=2)
        else:
            model = SimpleConvNet(num_classes=2, dropout_rate=model_config.dropout)
        cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
        device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
        print(model)
        # pyre-fixme[6]: Expected `Optional[str]` for 2nd param but got `device`.
        global_model = FLModel(model, device)
        if cuda_enabled:
            global_model.fl_cuda()
        trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
        metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
        final_model, eval_score = trainer.train(
            data_provider=data_provider,
            metrics_reporter=metrics_reporter,
            num_total_users=data_provider.num_train_users(),
            distributed_world_size=1,
        )
        test_metrics = trainer.test(
            data_provider=data_provider,
            metrics_reporter=MetricsReporter([Channel.STDOUT]),
        )
        if hasattr(trainer, "canary_analyser") and trainer.canary_analyser:
            trainer.accuracy_metrics["test"].append(test_metrics["Accuracy"])
            trainer.canary_analyser.set_accuracy_metrics(trainer.accuracy_metrics)
            trainer.logger.info(f"Final accuracy metrics {trainer.accuracy_metrics}")
            trainer.logger.info("Analysing canary tests...")
            trainer.canary_analyser.analyse()
        else:
            if data_config.canary_iters > 1:
                trainer.logger.info("Experiment ended early - either checkpoint only or model failed to reach insertion epoch/accuracy for canary testing")
            return 
@hydra.main(config_path=None, config_name="celeba_config", version_base="1.1")
def run(cfg: DictConfig) -> None:
    print(OmegaConf.to_yaml(cfg))
    trainer_config = cfg.trainer
    data_config = cfg.data
    model_config = cfg.model
    main_worker(
        trainer_config,
        data_config,
        model_config,
        cfg.use_cuda_if_available,
        cfg.distributed_world_size,
    )
if __name__ == "__main__":
    cfg = maybe_parse_json_config()
    run(cfg)
 | 
	canife-main | 
	FLSim/examples/old_examples/celeba_example.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""In this tutorial, we will train a binary sentiment classifier on LEAF's Sent140 dataset with FLSim.
Before running this file, you need to download the dataset and partition the data by users. We
provide the script get_data.sh for this purpose.
    Typical usage example:
    FedAvg
    python3 sent140_example.py --config-file configs/sent140_config.json
    FedBuff + SGDM
    python3 sent140_example.py --config-file configs/sent140_fedbuff_config.json
"""
import itertools
import json
import re
import string
import unicodedata
from typing import List
import flsim.configs  # noqa
import hydra  # @manual
import torch
import torch.nn as nn
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
    FLModel,
    LEAFDataLoader,
    LEAFDataProvider,
    MetricsReporter,
)
from hydra.utils import instantiate
from omegaconf import DictConfig, OmegaConf
from torch.utils.data import Dataset
class Sent140StackedLSTMModel(nn.Module):
    def __init__(
        self, seq_len, num_classes, emb_size, n_hidden, vocab_size, dropout
    ):
        super(Sent140StackedLSTMModel, self).__init__()
        self.seq_len = seq_len
        self.num_classes = num_classes
        self.n_hidden = n_hidden
        self.vocab_size = vocab_size
        self.emb_size = emb_size
        self.dropout = dropout
        self.embedding = nn.Embedding(self.vocab_size + 1, self.emb_size)
        self.stacked_lstm = nn.LSTM(
            self.emb_size, self.n_hidden, 2, batch_first=True, dropout=self.dropout
        )
        self.fc1 = nn.Linear(self.n_hidden, self.num_classes)
        self.dropout = nn.Dropout(p=self.dropout)
        # self.out = nn.Linear(128, self.num_classes)  # unused: forward() only uses fc1
    def set_embedding_weights(self, emb_matrix, trainable=False):
        self.embedding.weight = torch.nn.Parameter(emb_matrix)
        if not trainable:
            self.embedding.weight.requires_grad = False
    def forward(self, features):
        seq_lens = torch.sum(features != (self.vocab_size - 1), 1) - 1
        x = self.embedding(features)
        outputs, _ = self.stacked_lstm(x)
        outputs = outputs[torch.arange(outputs.size(0)), seq_lens]
        pred = self.fc1(self.dropout(outputs))
        return pred
class Sent140Dataset(Dataset):
    def __init__(self, data_root, max_seq_len):
        self.data_root = data_root
        self.max_seq_len = max_seq_len
        self.all_letters = {c: i for i, c in enumerate(string.printable)}
        self.num_letters = len(self.all_letters)
        self.UNK: int = self.num_letters
        self.vocab_size = 9930
        self.embedding_size = 300
        with open(data_root, "r+") as f:
            self.dataset = json.load(f)
        self.data = {}
        self.targets = {}
        self.num_classes = 2
        # Populate self.data and self.targets
        for user_id, user_data in self.dataset["user_data"].items():
            self.data[user_id] = self.process_x(list(user_data["x"]))
            self.targets[user_id] = self.process_y(list(user_data["y"]))
    def __len__(self):
        return len(self.data)
    def __iter__(self):
        for user_id in self.data.keys():
            yield self.__getitem__(user_id)
    def __getitem__(self, user_id: str):
        if user_id not in self.data or user_id not in self.targets:
            raise IndexError(f"User {user_id} is not in dataset")
        return self.data[user_id], self.targets[user_id]
    def unicodeToAscii(self, s):
        return "".join(
            c
            for c in unicodedata.normalize("NFD", s)
            if unicodedata.category(c) != "Mn" and c in self.all_letters
        )
    def line_to_indices(self, line: str, max_seq_len: int):
        line_list = self.split_line(line)  # split phrase into words
        chars = self.flatten_list([list(word) for word in line_list])
        # padding
        indices: List[int] = [
            self.all_letters.get(letter, self.UNK)
            for i, letter in enumerate(chars)
            if i < max_seq_len
        ]
        indices = indices + ([self.UNK] * (max_seq_len - len(indices)))
        return indices
    def process_x(self, raw_x_batch):
        x_batch = [e[4] for e in raw_x_batch]
        x_batch = [self.line_to_indices(e, self.max_seq_len) for e in x_batch]
        x_batch = torch.LongTensor(x_batch)
        return x_batch
    def process_y(self, raw_y_batch):
        y_batch = [int(e) for e in raw_y_batch]
        return y_batch
    def split_line(self, line):
        """split given line/phrase into list of words
        Args:
            line: string representing phrase to be split
        Return:
            list of strings, with each string representing a word
        """
        return re.findall(r"[\w']+|[.,!?;]", line)
    def flatten_list(self, nested_list):
        return list(itertools.chain.from_iterable(nested_list))
def build_data_provider_vocab(
    local_batch_size, vocab_size, num_users, user_dist, max_seq_len, drop_last, data_path
):
    train_dataset = Sent140Dataset(
        data_root=data_path + "/data/train/all_data_0_15_keep_1_train_6.json",
        max_seq_len=max_seq_len,
    )
    eval_dataset = Sent140Dataset(
        data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
        max_seq_len=max_seq_len,
    )
    test_dataset = Sent140Dataset(
        data_root=data_path + "/data/test/all_data_0_15_keep_1_test_6.json",
        max_seq_len=max_seq_len,
    )
    dataloader = LEAFDataLoader(
        train_dataset,
        eval_dataset,
        test_dataset,
        batch_size=local_batch_size,
        drop_last=drop_last,
    )
    data_provider = LEAFDataProvider(dataloader)
    return data_provider, train_dataset.vocab_size, train_dataset.embedding_size
def main_worker(
    trainer_config,
    model_config,
    data_config,
    use_cuda_if_available: bool = True,
    distributed_world_size: int = 1,
) -> None:
    data_provider, vocab_size, emb_size  = build_data_provider_vocab(      
            local_batch_size=data_config.local_batch_size,
            vocab_size=data_config.vocab_size,
            num_users=data_config.num_users,
            user_dist=data_config.user_dist,
            max_seq_len=data_config.max_seq_len,
            drop_last=False,
            data_path=data_config.data_root
        )
    model = Sent140StackedLSTMModel(
        seq_len=data_config.max_seq_len,
        num_classes=model_config.num_classes,
        emb_size=emb_size,
        n_hidden=model_config.n_hidden,
        vocab_size=vocab_size,
        dropout=model_config.dropout_rate,  # keyword must match the model's `dropout` parameter
    )
    cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
    device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
    print(model)
    # pyre-fixme[6]: Expected `Optional[str]` for 2nd param but got `device`.
    global_model = FLModel(model, device)
    if cuda_enabled:
        global_model.fl_cuda()
    trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
    metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
    final_model, eval_score = trainer.train(
        data_provider=data_provider,
        metrics_reporter=metrics_reporter,
        num_total_users=data_provider.num_train_users(),
        distributed_world_size=distributed_world_size,
    )
    trainer.test(
        data_provider=data_provider,
        metrics_reporter=MetricsReporter([Channel.STDOUT]),
    )
@hydra.main(config_path=None, config_name="sent140_config", version_base="1.1")
def run(cfg: DictConfig) -> None:
    print(OmegaConf.to_yaml(cfg))
    trainer_config = cfg.trainer
    model_config = cfg.model
    data_config = cfg.data
    main_worker(trainer_config, model_config, data_config)
if __name__ == "__main__":
    cfg = maybe_parse_json_config()
    run(cfg)
 | 
	canife-main | 
	FLSim/examples/old_examples/sent140_example.py | 
| 
	#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
"""In this tutorial, we will train an image classifier with FLSim to simulate a federated learning training environment.
With this tutorial, you will learn the following key components of FLSim:
1. Data loading
2. Model construction
3. Trainer construction
    Typical usage example:
    python3 cifar10_example.py --config-file configs/cifar10_config.json
"""
import flsim.configs  # noqa
import hydra
import torch
from flsim.data.data_sharder import SequentialSharder
from flsim.interfaces.metrics_reporter import Channel
from flsim.utils.config_utils import maybe_parse_json_config
from flsim.utils.example_utils import (
    DataLoader,
    DataProvider,
    FLModel,
    MetricsReporter,
    Resnet18,
    SimpleConvNet,
)
from hydra.utils import instantiate
from omegaconf import DictConfig
from torchvision import transforms
from torchvision.datasets.cifar import CIFAR10
from canife.utils import get_plot_path
IMAGE_SIZE = 32
def build_data_provider(data_root, local_batch_size, examples_per_user, drop_last: bool = False):
    transform = transforms.Compose(
        [
            transforms.Resize(IMAGE_SIZE),
            transforms.CenterCrop(IMAGE_SIZE),
            transforms.ToTensor(),
            transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
        ]
    )
    train_dataset = CIFAR10(
        root=data_root, train=True, download=False, transform=transform
    )
    val_dataset = CIFAR10(
        root=data_root, train=False, download=False, transform=transform
    )
    test_dataset = CIFAR10(
        root=data_root, train=False, download=False, transform=transform
    )
    sharder = SequentialSharder(examples_per_shard=examples_per_user)
    fl_data_loader = DataLoader(
        train_dataset, val_dataset, test_dataset, sharder, local_batch_size, drop_last
    )
    data_provider = DataProvider(fl_data_loader)
    print(f"Clients in total: {data_provider.num_train_users()}")
    return data_provider
def main(
    trainer_config,
    data_config,
    model_config,
    use_cuda_if_available: bool = True,
) -> None:
    data_provider = build_data_provider(
        data_root=data_config.data_root,
        local_batch_size=data_config.local_batch_size,
        examples_per_user=data_config.examples_per_user,
        drop_last=False,
    )
    for exp_num in range(0, data_config.canary_iters):
        if not data_config.debug_config:
            trainer_config["plot_path"] = get_plot_path(trainer_config.args, exp_num=exp_num, file_suffix="")
            trainer_config["result_path"] = get_plot_path(trainer_config.args, exp_num, ".tar")
        cuda_enabled = torch.cuda.is_available() and use_cuda_if_available
        device = torch.device(f"cuda:{0}" if cuda_enabled else "cpu")
        if model_config.model_arch == "resnet":
            model = Resnet18(num_classes=10)
        else:
            model = SimpleConvNet(num_classes=10, dropout_rate=model_config.dropout)
        # pyre-fixme[6]: Expected `Optional[str]` for 2nd param but got `device`.
        global_model = FLModel(model, device)
        if cuda_enabled:
            global_model.fl_cuda()
            
        trainer = instantiate(trainer_config, model=global_model, cuda_enabled=cuda_enabled)
        print(f"Created {trainer_config._target_}")
        metrics_reporter = MetricsReporter([Channel.TENSORBOARD, Channel.STDOUT])
        final_model, eval_score = trainer.train(
            data_provider=data_provider,
            metrics_reporter=metrics_reporter,
            num_total_users=data_provider.num_train_users(),
            distributed_world_size=1,
        )
        test_metrics = trainer.test(
            data_provider=data_provider,
            metrics_reporter=MetricsReporter([Channel.STDOUT]),
        )
        if hasattr(trainer, "canary_analyser") and trainer.canary_analyser:
            trainer.accuracy_metrics["test"].append(test_metrics["Accuracy"])
            trainer.canary_analyser.set_accuracy_metrics(trainer.accuracy_metrics)
            trainer.logger.info(f"Final accuracy metrics {trainer.accuracy_metrics}")
            trainer.logger.info("Analysing canary tests...")
            trainer.canary_analyser.analyse()
        else:
            if data_config.canary_iters > 1:
                trainer.logger.info("Experiment ended early - either checkpoint only or model failed to reach insertion epoch/accuracy for canary testing")
            return 
@hydra.main(config_path=None, config_name="cifar10_tutorial", version_base="1.1")
def run(cfg: DictConfig) -> None:
    # print(OmegaConf.to_yaml(cfg))
    trainer_config = cfg.trainer
    data_config = cfg.data
    model_config = cfg.model 
    main(
        trainer_config,
        data_config,
        model_config
    )
if __name__ == "__main__":
    cfg = maybe_parse_json_config()
    run(cfg)
 | 
	canife-main | 
	FLSim/examples/old_examples/cifar10_example.py | 